/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * Handle hardware traps and faults.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/smp.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/fpu/internal.h>
#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/insn-eval.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif
DECLARE_BITMAP(system_vectors, NR_VECTORS);
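/*
 * Exception entry disables interrupts. The helpers below re-enable and
 * re-disable IRQs conditionally, mirroring the IF flag of the interrupted
 * context (regs->flags), so a handler never turns interrupts on in a
 * region that was running with them off.
 */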
static inline void cond_local_irq_enable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}
int is_valid_bugaddr(unsigned long addr)
{
	unsigned short ud;

	if (addr < TASK_SIZE_MAX)
		return 0;

	if (probe_kernel_address((unsigned short *)addr, ud))
		return 0;

	return ud == INSN_UD0 || ud == INSN_UD2;
}
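/*
 * For reference: <asm/bug.h> encodes ud0 as the bytes 0x0f 0xff and ud2 as
 * 0x0f 0x0b, so INSN_UD0 is 0xff0f and INSN_UD2 is 0x0b0f when read as a
 * little-endian unsigned short, which is exactly what the probe above does.
 */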
int fixup_bug(struct pt_regs *regs, int trapnr)
{
	if (trapnr != X86_TRAP_UD)
		return 0;

	switch (report_bug(regs->ip, regs)) {
	case BUG_TRAP_TYPE_NONE:
	case BUG_TRAP_TYPE_BUG:
		break;

	case BUG_TRAP_TYPE_WARN:
		/* Skip the ud2 so a WARN() resumes at the next instruction. */
		regs->ip += LEN_UD2;
		return 1;
	}

	return 0;
}
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
					      error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	}

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also exc_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}
static void show_signal(struct task_struct *tsk, int signr,
			const char *type, const char *desc,
			struct pt_regs *regs, long error_code)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk), type, desc,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}
}
static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, int sicode, void __user *addr)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	show_signal(tsk, signr, "trap ", str, regs, error_code);

	if (!sicode)
		force_sig(signr);
	else
		force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);
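/*
 * NOKPROBE_SYMBOL() blacklists do_trap for kprobes: placing a probe inside
 * the trap delivery path itself would re-enter the int3/debug machinery
 * recursively.
 */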
static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr, int sicode, void __user *addr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	/*
	 * WARN*()s end up here; fix them up before we call the
	 * notifier chain.
	 */
	if (!user_mode(regs) && fixup_bug(regs, trapnr))
		return;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
		cond_local_irq_disable(regs);
	}
}
/*
 * POSIX requires that the address of the faulting instruction be provided
 * for SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 *
 * This address is usually regs->ip, but when an uprobe moved the code out
 * of line then regs->ip points to the XOL code which would confuse
 * anything which analyzes the fault address vs. the unmodified binary. If
 * a trap happened in XOL code then uprobe maps regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
	return (void __user *)uprobe_get_trap_addr(regs);
}
DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}
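/*
 * A note on the DEFINE_IDTENTRY*() macros used throughout this file (see
 * <asm/idtentry.h>): DEFINE_IDTENTRY(exc_foo) emits the noinstr C body
 * "void exc_foo(struct pt_regs *regs)" which the asm stub asm_exc_foo
 * invokes; the _ERRORCODE variants add a second "unsigned long error_code"
 * parameter carrying the hardware-pushed error code.
 */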
#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY(exc_invalid_op)
{
	handle_invalid_op(regs);
}
DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}
DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	if (handle_user_split_lock(regs, error_code))
		return;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);
}
#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(const char *message,
						struct pt_regs *regs,
						unsigned long fault_address)
{
	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
		 (void *)fault_address, current->stack,
		 (char *)current->stack + THREAD_SIZE - 1);
	die(message, regs, 0);

	/* Be absolutely certain we don't return. */
	panic("%s", message);
}
#endif
/*
 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 *
 * On x86_64, this is more or less a normal kernel entry.  Notwithstanding the
 * SDM's warnings about double faults being unrecoverable, returning works as
 * expected.  Presumably what the SDM actually means is that the CPU may get
 * the register state wrong on entry, so returning could be a bad idea.
 *
 * Various CPU engineers have promised that double faults due to an IRET fault
 * while the stack is read-only are, in fact, recoverable.
 *
 * On x86_32, this is entered through a task gate, and regs are synthesized
 * from the TSS.  Returning is, in principle, okay, but changes to regs will
 * be lost.  If, for some reason, we need to return to a context with modified
 * regs, the shim code could be adjusted to synchronize the registers.
 *
 * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
 * to be read before doing anything else.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
	unsigned long address = read_cr2();
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
	 * and then modify our own IRET frame so that, when we return,
	 * we land directly at the #GP(0) vector with the stack already
	 * set up according to its expectations.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for nmi_enter() here because we don't use RCU.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
	    regs->cs == __KERNEL_CS &&
	    regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/*
		 * regs->sp points to the failing IRET frame on the
		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
		 * in gpregs->ss through gpregs->ip.
		 */
		gpregs->ip	= p[0];
		gpregs->cs	= p[1];
		gpregs->flags	= p[2];
		gpregs->sp	= p[3];
		gpregs->ss	= p[4];
		gpregs->orig_ax = 0;	/* Missing (lost) #GP error code */

		/*
		 * Adjust our frame so that we return straight to the #GP
		 * vector with the expected RSP value.  This is safe because
		 * we won't enable interrupts or schedule before we invoke
		 * general_protection, so nothing will clobber the stack
		 * frame we just set up.
		 *
		 * We will enter general_protection with kernel GSBASE,
		 * which is what the stub expects, given that the faulting
		 * RIP will be the IRET instruction.
		 */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif
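	/*
	 * Note on the espfix fixup above: after the IRET frame rewrite,
	 * returning from #DF lands on asm_exc_general_protection with
	 * RSP == &gpregs->orig_ax, i.e. a stack holding the fake error
	 * code, RIP, CS, RFLAGS, RSP and SS, exactly as if the CPU had
	 * delivered #GP(0) at the IRET instruction itself.
	 */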
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack.  Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault.  With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	if ((unsigned long)task_stack_page(tsk) - 1 - address < PAGE_SIZE) {
		handle_stack_overflow("kernel stack overflow (double-fault)",
				      regs, address);
	}
#endif

	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
	die("double fault", regs, error_code);
	panic("Machine halted.");
	instrumentation_end();
}
DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}
enum kernel_gp_hint {
	GP_NO_HINT,
	GP_NON_CANONICAL,
	GP_CANONICAL
};

/*
 * When an uncaught #GP occurs, try to determine the memory address accessed by
 * the instruction and return that address to the caller. Also, try to figure
 * out whether any part of the access to that address was non-canonical.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;

	if (probe_kernel_read(insn_buf, (void *)regs->ip, MAX_INSN_SIZE))
		return GP_NO_HINT;

	kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE);
	insn_get_modrm(&insn);
	insn_get_sib(&insn);

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);

	/*
	 * Check that:
	 *  - the operand is not in the kernel half
	 *  - the last byte of the operand is not in the user canonical half
	 */
	if (*addr < ~__VIRTUAL_MASK &&
	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;

	return GP_CANONICAL;
}
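/*
 * Example for the non-canonical check above (assuming 48-bit virtual
 * addresses, so __VIRTUAL_MASK is (1UL << 47) - 1): user canonical
 * addresses run up to 0x00007fffffffffff and kernel ones start at
 * 0xffff800000000000.  An 8-byte operand at 0x00007ffffffffffc starts
 * canonical, but its last byte crosses 0x00007fffffffffff, so the access
 * is flagged as non-canonical.
 */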
#define GPFSTR "general protection fault"

DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
	enum kernel_gp_hint hint = GP_NO_HINT;
	struct task_struct *tsk;
	unsigned long gp_addr;
	int ret;

	cond_local_irq_enable(regs);

	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			goto exit;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		local_irq_disable();
		return;
	}

	tsk = current;

	if (user_mode(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
		force_sig(SIGSEGV);
		goto exit;
	}

	if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
		goto exit;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	/*
	 * To be potentially processing a kprobe fault and to trust the result
	 * from kprobe_running(), we have to be non-preemptible.
	 */
	if (!preemptible() &&
	    kprobe_running() &&
	    kprobe_fault_handler(regs, X86_TRAP_GP))
		goto exit;

	ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
	if (ret == NOTIFY_STOP)
		goto exit;

	if (error_code)
		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
	else
		hint = get_kernel_gp_address(regs, &gp_addr);

	if (hint != GP_NO_HINT)
		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
						    : "maybe for address",
			 gp_addr);

	/*
	 * KASAN is interested only in the non-canonical case, clear it
	 * otherwise.
	 */
	if (hint != GP_NON_CANONICAL)
		gp_addr = 0;

	die_addr(desc, regs, error_code, gp_addr);

exit:
	cond_local_irq_disable(regs);
}
static bool do_int3(struct pt_regs *regs)
{
	int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return true;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		return true;
#endif
	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

	return res == NOTIFY_STOP;
}
static void do_int3_user(struct pt_regs *regs)
{
	if (do_int3(regs))
		return;

	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
	cond_local_irq_disable(regs);
}
DEFINE_IDTENTRY_RAW(exc_int3)
{
	/*
	 * poke_int3_handler() is completely self contained code; it does (and
	 * must) *NOT* call out to anything, lest it hits upon yet another
	 * INT3.
	 */
	if (poke_int3_handler(regs))
		return;

	/*
	 * idtentry_enter_user() uses static_branch_{,un}likely() and therefore
	 * can trigger INT3, hence poke_int3_handler() must be done
	 * before.  If the entry came from kernel mode, then use nmi_enter()
	 * because the INT3 could have been hit in any context including
	 * NMI.
	 */
	if (user_mode(regs)) {
		idtentry_enter_user(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		idtentry_exit_user(regs);
	} else {
		nmi_enter();
		instrumentation_begin();
		trace_hardirqs_off_prepare();
		if (!do_int3(regs))
			die("int3", regs, 0);
		if (regs->flags & X86_EFLAGS_IF)
			trace_hardirqs_on_prepare();
		instrumentation_end();
		nmi_exit();
	}
}
/*
 * Help handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode. The actual stack switch is done in entry_64.S
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
	if (regs != eregs)
		*regs = *eregs;
	return regs;
}
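/*
 * Note the "- 1" above: cpu_current_top_of_stack points just past the top
 * of the usable thread stack, so stepping back one struct pt_regs yields
 * the slot where a normal user-mode entry stores its register frame; the
 * IST/trampoline frame is copied there before switching stacks.
 */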
struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};
asmlinkage __visible noinstr
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to where it would
	 * be had we entered directly on the entry stack (rather than
	 * just below the IRET frame) and we want to pretend that the
	 * exception came from the IRET target.
	 */
	struct bad_iret_stack tmp, *new_stack =
		(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target to the temporary storage. */
	memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));

	/* Update the entry stack */
	memcpy(new_stack, &tmp, sizeof(tmp));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
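/*
 * Note the 5*8 copy above: a hardware IRET frame is five 8-byte words
 * (RIP, CS, RFLAGS, RSP, SS), which line up with the pt_regs fields
 * starting at regs.ip, hence the copy destination &tmp.regs.ip.
 */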
static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}
static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
{
	/*
	 * Disable breakpoints during exception handling; recursive exceptions
	 * are exceedingly 'fun'.
	 *
	 * Since this function is NOKPROBE, and that also applies to
	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
	 * HW_BREAKPOINT_W on our stack)
	 *
	 * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
	 * includes the entry stack, is excluded for everything.
	 */
	get_debugreg(*dr7, 7);
	set_debugreg(0, 7);

	/*
	 * Ensure the compiler doesn't lower the above statements into
	 * the critical section; disabling breakpoints late would not
	 * be good.
	 */
	barrier();

	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	get_debugreg(*dr6, 6);
	set_debugreg(0, 6);
	/* Filter out all the reserved bits which are preset to 1 */
	*dr6 &= ~DR6_RESERVED;
}
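/*
 * For reference, DR6_RESERVED is 0xFFFF0FF0 (<asm/debugreg.h>); masking it
 * off leaves, roughly, the breakpoint-hit bits B0-B3 (bits 0-3) and the
 * BD/BS/BT status bits (bits 13-15), i.e. the bits that actually describe
 * the trap condition.
 */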
static __always_inline void debug_exit(unsigned long dr7)
{
	/*
	 * Ensure the compiler doesn't raise this statement into
	 * the critical section; enabling breakpoints early would
	 * not be good.
	 */
	barrier();
	set_debugreg(dr7, 7);
}
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
static void noinstr handle_debug(struct pt_regs *regs, unsigned long dr6,
				 bool user_icebp)
{
	struct task_struct *tsk = current;
	int si_code;

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

	instrumentation_begin();
#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs)) {
		instrumentation_end();
		return;
	}
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, 0,
		       SIGTRAP) == NOTIFY_STOP) {
		instrumentation_end();
		return;
	}

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, 0,
				 X86_TRAP_DB);
		goto out;
	}

	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
		/*
		 * Historical junk that used to handle SYSENTER single-stepping.
		 * This should be unreachable now.  If we survive for a while
		 * without anyone hitting this warning, we'll turn this into
		 * an actual bug.
		 */
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}

	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(regs, 0, si_code);

out:
	cond_local_irq_disable(regs);
	debug_stack_usage_dec();
	instrumentation_end();
}
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
					     unsigned long dr6)
{
	nmi_enter();
	instrumentation_begin();
	trace_hardirqs_off_prepare();
	instrumentation_end();

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
	 * watchpoint at the same time then that will still be handled.
	 */
	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
		dr6 &= ~DR_STEP;

	/*
	 * If DR6 is zero, no point in trying to handle it. The kernel is
	 * not using INT1.
	 */
	if (dr6)
		handle_debug(regs, dr6, false);

	instrumentation_begin();
	if (regs->flags & X86_EFLAGS_IF)
		trace_hardirqs_on_prepare();
	instrumentation_end();
	nmi_exit();
}
static __always_inline void exc_debug_user(struct pt_regs *regs,
					   unsigned long dr6)
{
	idtentry_enter_user(regs);
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * If dr6 tells us nothing about the origin of this trap, it was most
	 * likely raised by an icebp/int01 instruction, for which the user
	 * expects a SIGTRAP.
	 */
	handle_debug(regs, dr6, !dr6);
	idtentry_exit_user(regs);
}
#ifdef CONFIG_X86_64
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	unsigned long dr6, dr7;

	debug_enter(&dr6, &dr7);
	exc_debug_kernel(regs, dr6);
	debug_exit(dr7);
}

/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	unsigned long dr6, dr7;

	debug_enter(&dr6, &dr7);
	exc_debug_user(regs, dr6);
	debug_exit(dr7);
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	unsigned long dr6, dr7;

	debug_enter(&dr6, &dr7);
	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
	debug_exit(dr7);
}
#endif
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, 0, 0))
			goto exit;

		task->thread.error_code = 0;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
			       SIGFPE) != NOTIFY_STOP)
			die(str, regs, 0);
		goto exit;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr = trapnr;
	task->thread.error_code = 0;

	si_code = fpu__exception_code(fpu, trapnr);
	/* Retry when we get spurious exceptions: */
	if (!si_code)
		goto exit;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs));
exit:
	cond_local_irq_disable(regs);
}
DEFINE_IDTENTRY(exc_coprocessor_error)
{
	math_error(regs, X86_TRAP_MF);
}

DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
		/* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
		if (!static_cpu_has(X86_FEATURE_XMM)) {
			__exc_general_protection(regs, 0);
			return;
		}
	}
	math_error(regs, X86_TRAP_XF);
}
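/*
 * The X86_FEATURE_XMM check above is what separates the erratum from a real
 * SIMD exception: without SSE there is no legitimate way to raise #XF, so
 * on affected parts the trap must be the INVD erratum and is forwarded to
 * the #GP handler instead.
 */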
DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
	/*
	 * This addresses a Pentium Pro Erratum:
	 *
	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
	 * Virtual Wire mode implemented through the local APIC, an
	 * interrupt vector of 0Fh (Intel reserved encoding) may be
	 * generated by the local APIC (Int 15). This vector may be
	 * generated upon receipt of a spurious interrupt (an interrupt
	 * which is removed before the system receives the INTA sequence)
	 * instead of the programmed 8259 spurious interrupt vector.
	 *
	 * IMPLICATION: The spurious interrupt vector programmed in the
	 * 8259 is normally handled by an operating system's spurious
	 * interrupt handler. However, a vector of 0Fh is unknown to some
	 * operating systems, which would crash if this erratum occurred.
	 *
	 * In theory this could be limited to 32bit, but the handler is not
	 * hurting and who knows which other CPUs suffer from this.
	 */
}
DEFINE_IDTENTRY(exc_device_not_available)
{
	unsigned long cr0 = read_cr0();

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);

		cond_local_irq_disable(regs);
		return;
	}
#endif

	/* This should not happen. */
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, 0);
	}
}
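/*
 * Context for the WARN above: the kernel no longer uses lazy FPU context
 * switching, so CR0.TS should never be set here and any #NM is unexpected;
 * clearing TS is a best-effort recovery before giving up on the task.
 */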
#ifdef CONFIG_X86_32
DEFINE_IDTENTRY_SW(iret_error)
{
	local_irq_enable();
	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
			ILL_BADSTK, (void __user *)NULL);
	}
	local_irq_disable();
}
#endif
void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up */
	setup_cpu_entry_areas();

	idt_setup_traps();

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
		    PAGE_KERNEL_RO);
	idt_descr.address = CPU_ENTRY_AREA_RO_IDT;

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	idt_setup_ist_traps();

	idt_setup_debugidt_traps();
}