[PATCH] Generic BUG for x86-64
[linux-block.git] arch/x86_64/kernel/traps.c

/*
 * linux/arch/x86-64/traps.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/nmi.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>
#include <linux/uaccess.h>
#include <linux/bug.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/kdebug.h>
#include <asm/processor.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/pda.h>
#include <asm/proto.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>

asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
asmlinkage void spurious_interrupt_bug(void);

ATOMIC_NOTIFIER_HEAD(die_chain);
EXPORT_SYMBOL(die_chain);

int register_die_notifier(struct notifier_block *nb)
{
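	/*
	 * vmalloc_sync_all() here most likely exists to pre-sync the
	 * vmalloc-area mappings (module text/data, where a notifier may
	 * live) into all page tables, so that the die chain can be walked
	 * from the page-fault path without itself taking a vmalloc fault
	 * and recursing.
	 */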
	vmalloc_sync_all();
	return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);	/* used by kdb when built as a module */

int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);	/* used by kdb when built as a module */
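/*
 * Re-enable interrupts in a trap handler only if they were enabled in
 * the interrupted context; never force them on when the fault happened
 * with interrupts off.
 */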
static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_disable();
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->eflags & X86_EFLAGS_IF)
		local_irq_disable();
	/* Make sure to not schedule here because we could be running
	   on an exception stack. */
	preempt_enable_no_resched();
}

int kstack_depth_to_print = 12;
#ifdef CONFIG_STACK_UNWIND
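/*
 * call_trace selects the backtrace method (see call_trace_setup() below):
 *   -1: old stack-scanning backtrace only
 *    0: both DWARF2 unwinder and stack scan
 *    1: DWARF2 unwinder, falling back to the stack scan (default)
 *    2: DWARF2 unwinder only
 */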
static int call_trace = 1;
#else
#define call_trace (-1)
#endif

#ifdef CONFIG_KALLSYMS
void printk_address(unsigned long address)
{
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];

	symname = kallsyms_lookup(address, &symsize, &offset,
					&modname, namebuf);
	if (!symname) {
		printk(" [<%016lx>]\n", address);
		return;
	}
	if (!modname)
		modname = delim = "";
	printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
		address, delim, modname, delim, symname, offset, symsize);
}
#else
void printk_address(unsigned long address)
{
	printk(" [<%016lx>]\n", address);
}
#endif

static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					unsigned *usedp, char **idp)
{
	static char ids[][8] = {
		[DEBUG_STACK - 1] = "#DB",
		[NMI_STACK - 1] = "NMI",
		[DOUBLEFAULT_STACK - 1] = "#DF",
		[STACKFAULT_STACK - 1] = "#SS",
		[MCE_STACK - 1] = "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
#endif
	};
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up a second time then
			 * there's something wrong going on - just break
			 * out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}

struct ops_and_data {
	struct stacktrace_ops *ops;
	void *data;
};

static int dump_trace_unwind(struct unwind_frame_info *info, void *context)
{
	struct ops_and_data *oad = (struct ops_and_data *)context;
	int n = 0;
	unsigned long sp = UNW_SP(info);

	if (arch_unw_user_mode(info))
		return -1;
	while (unwind(info) == 0 && UNW_PC(info)) {
		n++;
		oad->ops->address(oad->data, UNW_PC(info));
		if (arch_unw_user_mode(info))
			break;
		if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1))
		    && sp > UNW_SP(info))
			break;
		sp = UNW_SP(info);
	}
	return n;
}

#define MSG(txt) ops->warning(data, txt)

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

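/*
 * A candidate stack slot must lie strictly inside tinfo's THREAD_SIZE
 * region; the '- 3' tail guard (inherited from the i386 version)
 * presumably keeps the subsequent dereference from running off the end
 * of the stack.
 */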
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
	void *t = (void *)tinfo;
	return p > t && p < t + THREAD_SIZE - 3;
}

void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
		unsigned long *stack,
		struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
	unsigned used = 0;
	struct thread_info *tinfo;

	if (!tsk)
		tsk = current;

	if (call_trace >= 0) {
		int unw_ret = 0;
		struct unwind_frame_info info;
		struct ops_and_data oad = { .ops = ops, .data = data };

		if (regs) {
			if (unwind_init_frame_info(&info, tsk, regs) == 0)
				unw_ret = dump_trace_unwind(&info, &oad);
		} else if (tsk == current)
			unw_ret = unwind_init_running(&info, dump_trace_unwind,
						      &oad);
		else {
			if (unwind_init_blocked(&info, tsk) == 0)
				unw_ret = dump_trace_unwind(&info, &oad);
		}
		if (unw_ret > 0) {
			if (call_trace == 1 && !arch_unw_user_mode(&info)) {
				ops->warning_symbol(data,
					"DWARF2 unwinder stuck at %s",
					UNW_PC(&info));
				if ((long)UNW_SP(&info) < 0) {
					MSG("Leftover inexact backtrace:");
					stack = (unsigned long *)UNW_SP(&info);
					if (!stack)
						goto out;
				} else
					MSG("Full inexact backtrace again:");
			} else if (call_trace >= 1)
				goto out;
			else
				MSG("Full inexact backtrace again:");
		} else
			MSG("Inexact backtrace:");
	}
	if (!stack) {
		unsigned long dummy;
		stack = &dummy;
		if (tsk && tsk != current)
			stack = (unsigned long *)tsk->thread.rsp;
	}

	/*
	 * Print function call entries within a stack. 'cond' is the
	 * "end of stackframe" condition, that the 'stack++'
	 * iteration will eventually trigger.
	 */
#define HANDLE_STACK(cond) \
	do while (cond) { \
		unsigned long addr = *stack++; \
		/* Use unlocked access here because except for NMIs \
		   we should be already protected against module unloads */ \
		if (__kernel_text_address(addr)) { \
			/* \
			 * If the address is either in the text segment of the \
			 * kernel, or in the region which contains vmalloc'ed \
			 * memory, it *may* be the address of a calling \
			 * routine; if so, print it so that someone tracing \
			 * down the cause of the crash will be able to figure \
			 * out the call path that was taken. \
			 */ \
			ops->address(data, addr); \
		} \
	} while (0)

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions, follow the stack-linkage pointers from one stack
	 * to the next:
	 */
	for (;;) {
		char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;
			HANDLE_STACK (stack < estack_end);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				HANDLE_STACK (stack < irqstack_end);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) via the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irqstack_end[-1]);
				irqstack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	tinfo = current_thread_info();
	HANDLE_STACK (valid_stack_ptr(tinfo, stack));
#undef HANDLE_STACK
out:
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);

static void
print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	print_symbol(msg, symbol);
	printk("\n");
}

static void print_trace_warning(void *data, char *msg)
{
	printk("%s\n", msg);
}

static int print_trace_stack(void *data, char *name)
{
	printk(" <%s> ", name);
	return 0;
}

static void print_trace_address(void *data, unsigned long addr)
{
	printk_address(addr);
}

static struct stacktrace_ops print_trace_ops = {
	.warning = print_trace_warning,
	.warning_symbol = print_trace_warning_symbol,
	.stack = print_trace_stack,
	.address = print_trace_address,
};

void
show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long *stack)
{
	printk("\nCall Trace:\n");
	dump_trace(tsk, regs, stack, &print_trace_ops, NULL);
	printk("\n");
}

static void
_show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *rsp)
{
	unsigned long *stack;
	int i;
	const int cpu = smp_processor_id();
	unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
	unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);

	// debugging aid: "show_stack(NULL, NULL);" prints the
	// back trace for this cpu.

	if (rsp == NULL) {
		if (tsk)
			rsp = (unsigned long *)tsk->thread.rsp;
		else
			rsp = (unsigned long *)&rsp;
	}

	stack = rsp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irqstack && stack <= irqstack_end) {
			if (stack == irqstack_end) {
				stack = (unsigned long *) (irqstack_end[-1]);
				printk(" <EOI> ");
			}
		} else {
			if (((long) stack & (THREAD_SIZE-1)) == 0)
				break;
		}
		if (i && ((i % 4) == 0))
			printk("\n");
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	show_trace(tsk, regs, rsp);
}

void show_stack(struct task_struct *tsk, unsigned long *rsp)
{
	_show_stack(tsk, NULL, rsp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long dummy;
	show_trace(NULL, NULL, &dummy);
}

EXPORT_SYMBOL(dump_stack);

void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = !user_mode(regs);
	unsigned long rsp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = cpu_pda(cpu)->pcurrent;

	rsp = regs->rsp;

	printk("CPU %d ", cpu);
	__show_regs(regs);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
		printk("Stack: ");
		_show_stack(NULL, regs, (unsigned long *)rsp);

		printk("\nCode: ");
		if (regs->rip < PAGE_OFFSET)
			goto bad;

		for (i = 0; i < 20; i++) {
			unsigned char c;
			if (__get_user(c, &((unsigned char *)regs->rip)[i])) {
bad:
				printk(" Bad RIP value.");
				break;
			}
			printk("%02x ", c);
		}
	}
	printk("\n");
}

int is_valid_bugaddr(unsigned long rip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) rip, sizeof(ud2)))
		return 0;

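	/*
	 * 0x0b0f is the two-byte ud2 opcode (0f 0b) read as a little-endian
	 * 16-bit value; BUG() plants ud2, so a trap on such an instruction
	 * is a candidate BUG/WARN site for report_bug().
	 */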
	return ud2 == 0x0b0f;
}

#ifdef CONFIG_BUG
void out_of_line_bug(void)
{
	BUG();
}
EXPORT_SYMBOL(out_of_line_bug);
#endif

static DEFINE_SPINLOCK(die_lock);
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	local_irq_save(flags);
	if (!spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}

void __kprobes oops_end(unsigned long flags)
{
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (die_nest_count)
		/* We still own the lock */
		local_irq_restore(flags);
	else
		/* Nest count reaches zero, release the lock. */
		spin_unlock_irqrestore(&die_lock, flags);
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();
}

void __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;
	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_registers(regs);
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->rip);
	printk(" RSP <%016lx>\n", regs->rsp);
	if (kexec_should_crash(current))
		crash_kexec(regs);
}

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();

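	/*
	 * For a kernel-mode trap, give the generic BUG machinery a chance
	 * to recognize the faulting instruction as a BUG()/WARN() ud2 site
	 * and report it before the oops output itself.
	 */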
	if (!user_mode(regs))
		report_bug(regs->rip);

	__die(str, regs, err);
	oops_end(flags);
	do_exit(SIGSEGV);
}

void __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
{
	unsigned long flags = oops_begin();

	/*
	 * We are in trouble anyway, let's at least try
	 * to get a message out.
	 */
	printk(str, smp_processor_id());
	show_registers(regs);
	if (kexec_should_crash(current))
		crash_kexec(regs);
	if (do_panic || panic_on_oops)
		panic("Non maskable interrupt");
	oops_end(flags);
	nmi_exit();
	local_irq_enable();
	do_exit(SIGSEGV);
}

static void __kprobes do_trap(int trapnr, int signr, char *str,
			      struct pt_regs *regs, long error_code,
			      siginfo_t *info)
{
	struct task_struct *tsk = current;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (user_mode(regs)) {
		if (exception_trace && unhandled_signal(tsk, signr))
			printk(KERN_INFO
			       "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
				tsk->comm, tsk->pid, str,
				regs->rip, regs->rsp, error_code);

		if (info)
			force_sig_info(signr, info, tsk);
		else
			force_sig(signr, tsk);
		return;
	}

	/* kernel trap */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup)
			regs->rip = fixup->fixup;
		else
			die(str, regs, error_code);
		return;
	}
}

#define DO_ERROR(trapnr, signr, str, name) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
asmlinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = (void __user *)siaddr; \
	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
							== NOTIFY_STOP) \
		return; \
	conditional_sti(regs); \
	do_trap(trapnr, signr, str, regs, error_code, &info); \
}

DO_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->rip)
DO_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR(18, SIGSEGV, "reserved", reserved)

/* Runs on IST stack */
asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
			12, SIGBUS) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

asmlinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

	/* Return value not checked because a double fault cannot be ignored */
	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 8;

	/* This is always a kernel trap and never fixable (and thus must
	   never return). */
	for (;;)
		die(str, regs, error_code);
}

asmlinkage void __kprobes do_general_protection(struct pt_regs *regs,
						long error_code)
{
	struct task_struct *tsk = current;

	conditional_sti(regs);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 13;

	if (user_mode(regs)) {
		if (exception_trace && unhandled_signal(tsk, SIGSEGV))
			printk(KERN_INFO
		       "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
				tsk->comm, tsk->pid,
				regs->rip, regs->rsp, error_code);

		force_sig(SIGSEGV, tsk);
		return;
	}

	/* kernel gp */
	{
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->rip);
		if (fixup) {
			regs->rip = fixup->fixup;
			return;
		}
		if (notify_die(DIE_GPF, "general protection fault", regs,
					error_code, 13, SIGSEGV) == NOTIFY_STOP)
			return;
		die("general protection fault", regs, error_code);
	}
}

static __kprobes void
mem_parity_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");

	/* Clear and disable the memory parity error line. */
	reason = (reason & 0xf) | 4;
	outb(reason, 0x61);
}

static __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	printk("NMI: IOCK error (debug interrupt?)\n");
	show_registers(regs);

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & 0xf) | 8;
	outb(reason, 0x61);
	mdelay(2000);
	reason &= ~8;
	outb(reason, 0x61);
}

static __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x.\n",
		reason);
	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
}

/* Runs on IST stack. This code must keep interrupts off all the time.
   Nested NMIs are prevented by the CPU. */
asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int cpu;

	cpu = smp_processor_id();

	/* Only the BSP gets external NMIs from the system. */
	if (!cpu)
		reason = get_nmi_reason();

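	/*
	 * In the status byte read from port 0x61, bit 7 (0x80) indicates a
	 * memory parity/SERR error and bit 6 (0x40) an I/O channel check;
	 * an NMI with neither bit set is "unknown" (watchdog, performance
	 * counter, IPI, ...) and goes through the notifier/callback paths.
	 */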
	if (!(reason & 0xc0)) {
		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
								== NOTIFY_STOP)
			return;
		/*
		 * Ok, so this is none of the documented NMI sources,
		 * so it must be the NMI watchdog.
		 */
		if (nmi_watchdog_tick(regs, reason))
			return;
		if (!do_nmi_callback(regs, cpu))
			unknown_nmi_error(reason, regs);

		return;
	}
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
		return;

	/* AK: following checks seem to be broken on modern chipsets. FIXME */

	if (reason & 0x80)
		mem_parity_error(reason, regs);
	if (reason & 0x40)
		io_check_error(reason, regs);
}

/* runs on IST stack. */
asmlinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP)
		return;
	preempt_conditional_sti(regs);
	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
}

/* Help handler running on IST stack to switch back to user stack
   for scheduling or signal handling. The actual stack switch is done in
   entry.S */
asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = eregs;
	/* Did already sync */
	if (eregs == (struct pt_regs *)eregs->rsp)
		;
	/* Exception from user space */
	else if (user_mode(eregs))
		regs = task_pt_regs(current);
	/* Exception from kernel and interrupts are enabled. Move to
	   kernel process stack. */
	else if (eregs->eflags & X86_EFLAGS_IF)
		regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
	if (eregs != regs)
		*regs = *eregs;
	return regs;
}

/* runs on IST stack. */
asmlinkage void __kprobes do_debug(struct pt_regs *regs,
				   unsigned long error_code)
{
	unsigned long condition;
	struct task_struct *tsk = current;
	siginfo_t info;

	get_debugreg(condition, 6);

	if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
						SIGTRAP) == NOTIFY_STOP)
		return;

	preempt_conditional_sti(regs);

	/* Mask out spurious debug traps due to lazy DR7 setting */
	if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
		if (!tsk->thread.debugreg7)
			goto clear_dr7;
	}

	tsk->thread.debugreg6 = condition;

	/* Mask out spurious TF errors due to lazy TF clearing */
	if (condition & DR_STEP) {
		/*
		 * The TF error should be masked out only if the current
		 * process is not traced and if the TRAP flag has been set
		 * previously by a tracing process (condition detected by
		 * the PT_DTRACE flag); remember that the i386 TRAP flag
		 * can be modified by the process itself in user mode,
		 * allowing programs to debug themselves without the ptrace()
		 * interface.
		 */
		if (!user_mode(regs))
			goto clear_TF_reenable;
		/*
		 * Was the TF flag set by a debugger? If so, clear it now,
		 * so that register information is correct.
		 */
		if (tsk->ptrace & PT_DTRACE) {
			regs->eflags &= ~TF_MASK;
			tsk->ptrace &= ~PT_DTRACE;
		}
	}

	/* Ok, finally something we can handle */
	tsk->thread.trap_no = 1;
	tsk->thread.error_code = error_code;
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
	force_sig_info(SIGTRAP, &info, tsk);

clear_dr7:
	set_debugreg(0UL, 7);
	preempt_conditional_cli(regs);
	return;

clear_TF_reenable:
	set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
	regs->eflags &= ~TF_MASK;
	preempt_conditional_cli(regs);
}

static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
{
	const struct exception_table_entry *fixup;
	fixup = search_exception_tables(regs->rip);
	if (fixup) {
		regs->rip = fixup->fixup;
		return 1;
	}
	notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
	/* Illegal floating point operation in the kernel */
	current->thread.trap_no = trapnr;
	die(str, regs, 0);
	return 0;
}

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
asmlinkage void do_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct *task;
	siginfo_t info;
	unsigned short cwd, swd;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel x87 math error", 16))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 16;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * (~cwd & swd) will mask out exceptions that are not set to unmasked
	 * status. 0x3f is the exception bits in these regs, 0x200 is the
	 * C1 reg you need in case of a stack fault, 0x040 is the stack
	 * fault bit. We should only be taking one exception at a time,
	 * so if this combination doesn't produce any single exception,
	 * then we have a bad program that isn't synchronizing its FPU usage
	 * and it will suffer the consequences since we won't be able to
	 * fully reproduce the context of the exception
	 */
	cwd = get_fpu_cwd(task);
	swd = get_fpu_swd(task);
	switch (swd & ~cwd & 0x3f) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}

asmlinkage void bad_intr(void)
{
	printk("bad interrupt");
}

asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
{
	void __user *rip = (void __user *)(regs->rip);
	struct task_struct *task;
	siginfo_t info;
	unsigned short mxcsr;

	conditional_sti(regs);
	if (!user_mode(regs) &&
	    kernel_math_error(regs, "kernel simd math error", 19))
		return;

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	task = current;
	save_init_fpu(task);
	task->thread.trap_no = 19;
	task->thread.error_code = 0;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = __SI_FAULT;
	info.si_addr = rip;
	/*
	 * The SIMD FPU exceptions are handled a little differently, as there
	 * is only a single status/control register. Thus, to determine which
	 * unmasked exception was caught we must mask the exception mask bits
	 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
	 */
	mxcsr = get_fpu_mxcsr(task);
	switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
	case 0x000:
	default:
		break;
	case 0x001: /* Invalid Op */
		info.si_code = FPE_FLTINV;
		break;
	case 0x002: /* Denormalize */
	case 0x010: /* Underflow */
		info.si_code = FPE_FLTUND;
		break;
	case 0x004: /* Zero Divide */
		info.si_code = FPE_FLTDIV;
		break;
	case 0x008: /* Overflow */
		info.si_code = FPE_FLTOVF;
		break;
	case 0x020: /* Precision */
		info.si_code = FPE_FLTRES;
		break;
	}
	force_sig_info(SIGFPE, &info, task);
}

asmlinkage void do_spurious_interrupt_bug(struct pt_regs *regs)
{
}

asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}

/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
	struct task_struct *me = current;
	clts();			/* Allow maths ops (or we recurse) */

	if (!used_math())
		init_fpu(me);
	restore_fpu_checking(&me->thread.i387.fxsave);
	task_thread_info(me)->status |= TS_USEDFPU;
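	/* fpu_counter feeds the heuristic that decides whether to preload
	   FPU state eagerly at the next context switch instead of faulting
	   it in again through device_not_available. */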
	me->fpu_counter++;
}

void __init trap_init(void)
{
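	/*
	 * Exceptions that may fire while the kernel stack is unusable get
	 * dedicated IST stacks (debug, NMI, double fault, stack fault,
	 * machine check); int3 and int4 are system gates so that user
	 * space may raise them.
	 */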
	set_intr_gate(0, &divide_error);
	set_intr_gate_ist(1, &debug, DEBUG_STACK);
	set_intr_gate_ist(2, &nmi, NMI_STACK);
	set_system_gate_ist(3, &int3, DEBUG_STACK); /* int3 can be called from all */
	set_system_gate(4, &overflow); /* int4 can be called from all */
	set_intr_gate(5, &bounds);
	set_intr_gate(6, &invalid_op);
	set_intr_gate(7, &device_not_available);
	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
	set_intr_gate(9, &coprocessor_segment_overrun);
	set_intr_gate(10, &invalid_TSS);
	set_intr_gate(11, &segment_not_present);
	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
	set_intr_gate(13, &general_protection);
	set_intr_gate(14, &page_fault);
	set_intr_gate(15, &spurious_interrupt_bug);
	set_intr_gate(16, &coprocessor_error);
	set_intr_gate(17, &alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(18, &machine_check, MCE_STACK);
#endif
	set_intr_gate(19, &simd_coprocessor_error);

#ifdef CONFIG_IA32_EMULATION
	set_system_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
#endif

	/*
	 * Should be a barrier for any external CPU state.
	 */
	cpu_init();
}

static int __init oops_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (!strcmp(s, "panic"))
		panic_on_oops = 1;
	return 0;
}
early_param("oops", oops_setup);

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);

#ifdef CONFIG_STACK_UNWIND
static int __init call_trace_setup(char *s)
{
	if (!s)
		return -EINVAL;
	if (strcmp(s, "old") == 0)
		call_trace = -1;
	else if (strcmp(s, "both") == 0)
		call_trace = 0;
	else if (strcmp(s, "newfallback") == 0)
		call_trace = 1;
	else if (strcmp(s, "new") == 0)
		call_trace = 2;
	return 0;
}
early_param("call_trace", call_trace_setup);
#endif