1 /* SPDX-License-Identifier: GPL-2.0 */
3 * linux/arch/x86_64/entry.S
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
7 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
9 * entry.S contains the system-call and fault low-level handling routines.
11 * Some of this is documented in Documentation/arch/x86/entry_64.rst
13 * A note on terminology:
14 * - iret frame: Architecture defined interrupt frame from SS to RIP
15 * at the top of the kernel process stack.
18 * - SYM_FUNC_START/END: Define functions in the symbol table.
19 * - idtentry: Define exception entry points.
21 #include <linux/export.h>
22 #include <linux/linkage.h>
23 #include <asm/segment.h>
24 #include <asm/cache.h>
25 #include <asm/errno.h>
26 #include <asm/asm-offsets.h>
28 #include <asm/unistd.h>
29 #include <asm/thread_info.h>
30 #include <asm/hw_irq.h>
31 #include <asm/page_types.h>
32 #include <asm/irqflags.h>
33 #include <asm/paravirt.h>
34 #include <asm/percpu.h>
37 #include <asm/pgtable_types.h>
38 #include <asm/frame.h>
39 #include <asm/trapnr.h>
40 #include <asm/nospec-branch.h>
41 #include <asm/fsgsbase.h>
42 #include <linux/err.h>
47 .section .entry.text, "ax"
50 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
52 * This is the only entry point used for 64-bit system calls. The
53 * hardware interface is reasonably well designed and the register to
54 * argument mapping Linux uses fits well with the registers that are
55 * available when SYSCALL is used.
57 * SYSCALL instructions can be found inlined in libc implementations as
58 * well as some other programs and libraries. There are also a handful
59 * of SYSCALL instructions in the vDSO used, for example, as a
60 * clock_gettimeofday fallback.
62 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
63 * then loads new ss, cs, and rip from previously programmed MSRs.
64 * rflags gets masked by a value from another MSR (so CLD and CLAC
65 * are not needed). SYSCALL does not save anything on the stack
66 * and does not change rsp.
69 * rax system call number
71 * r11 saved rflags (note: r11 is callee-clobbered register in C ABI)
75 * r10 arg3 (needs to be moved to rcx to conform to C ABI)
78 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
80 * Only called from user space.
82 * When the user can change pt_regs->foo, always force IRET. That is because
83 * it deals with non-canonical addresses better. SYSRET has trouble
84 * with them due to bugs in both AMD and Intel CPUs.
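/*
 * For example, a write(fd, buf, count) call from user space arrives here
 * with rax = __NR_write, rdi = fd, rsi = buf, rdx = count, rcx = the user
 * RIP following the SYSCALL instruction and r11 = the user RFLAGS.
 */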
87 SYM_CODE_START(entry_SYSCALL_64)
92 /* tss.sp2 is scratch space. */
93 movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
94 SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
95 movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
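/*
 * %rsp now points at the top of the current task's kernel stack
 * (pcpu_hot.top_of_stack); the user %rsp stashed in tss.sp2 above is
 * pushed into pt_regs->sp below.
 */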
97 SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
100 /* Construct struct pt_regs on stack */
101 pushq $__USER_DS /* pt_regs->ss */
102 pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
103 pushq %r11 /* pt_regs->flags */
104 pushq $__USER_CS /* pt_regs->cs */
105 pushq %rcx /* pt_regs->ip */
106 SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
107 pushq %rax /* pt_regs->orig_ax */
109 PUSH_AND_CLEAR_REGS rax=$-ENOSYS
113 /* Sign extend the lower 32 bits, as syscall numbers are treated as int */
116 /* clobbers %rax, make sure it is after saving the syscall nr */
120 call do_syscall_64 /* returns with IRQs disabled */
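/*
 * In current kernels do_syscall_64() returns, in %al, whether the fast
 * SYSRET path may be used; the ALTERNATIVE below tests that flag unless
 * we must IRET anyway (Xen PV).
 */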
123 * Try to use SYSRET instead of IRET if we're returning to
124 * a completely clean 64-bit userspace context. If we're not,
125 * go to the slow exit path.
126 * In the Xen PV case we must use iret anyway.
129 ALTERNATIVE "testb %al, %al; jz swapgs_restore_regs_and_return_to_usermode", \
130 "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
133 * We win! This label is here just for ease of understanding
134 * perf profiles. Nothing jumps here.
136 syscall_return_via_sysret:
141 * Now all regs are restored except RSP and RDI.
142 * Save old stack pointer and switch to trampoline stack.
145 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
146 UNWIND_HINT_END_OF_STACK
148 pushq RSP-RDI(%rdi) /* RSP */
149 pushq (%rdi) /* RDI */
152 * We are on the trampoline stack. All regs except RDI are live.
153 * We can do future final exit work right here.
155 STACKLEAK_ERASE_NOCLOBBER
157 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
161 SYM_INNER_LABEL(entry_SYSRETQ_unsafe_stack, SYM_L_GLOBAL)
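/*
 * From here on we run on the user stack. SYSRETQ reloads RIP from %rcx
 * and RFLAGS from %r11, mirroring what SYSCALL saved on entry.
 */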
165 SYM_INNER_LABEL(entry_SYSRETQ_end, SYM_L_GLOBAL)
168 SYM_CODE_END(entry_SYSCALL_64)
174 .pushsection .text, "ax"
175 SYM_FUNC_START(__switch_to_asm)
177 * Save callee-saved registers
178 * This must match the order in inactive_task_frame
188 movq %rsp, TASK_threadsp(%rdi)
189 movq TASK_threadsp(%rsi), %rsp
191 #ifdef CONFIG_STACKPROTECTOR
192 movq TASK_stack_canary(%rsi), %rbx
193 movq %rbx, PER_CPU_VAR(fixed_percpu_data) + FIXED_stack_canary
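/*
 * The per-CPU canary must match the incoming task before any C code
 * compiled with stack protector runs on its behalf, so update it here,
 * in the middle of the switch, while we are still in asm.
 */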
197 * When switching from a shallower to a deeper call stack
198 * the RSB may either underflow or use entries populated
199 * with userspace addresses. On CPUs where those concerns
200 * exist, overwrite the RSB with entries which capture
201 * speculative execution to prevent attack.
203 FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
205 /* restore callee-saved registers */
214 SYM_FUNC_END(__switch_to_asm)
218 * A newly forked process directly context switches into this address.
220 * rax: prev task we switched from
221 * rbx: kernel thread func (NULL for user thread)
222 * r12: kernel thread arg
224 .pushsection .text, "ax"
225 SYM_CODE_START(ret_from_fork_asm)
227 * This is the start of the kernel stack; even though there's a
228 * register set at the top, the regset isn't necessarily coherent
229 * (consider kthreads) and one cannot unwind further.
231 * This ensures stack unwinds of kernel threads terminate in a known good state.
234 UNWIND_HINT_END_OF_STACK
235 ANNOTATE_NOENDBR // copy_thread
238 movq %rax, %rdi /* prev */
239 movq %rsp, %rsi /* regs */
240 movq %rbx, %rdx /* fn */
241 movq %r12, %rcx /* fn_arg */
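/*
 * These registers form the arguments of ret_from_fork(prev, regs, fn,
 * fn_arg) on the C side (signature as of current kernels).
 */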
245 * Set the stack state to what is expected for the target function
246 * -- at this point the register set should be a valid user set
247 * and unwind should work normally.
250 jmp swapgs_restore_regs_and_return_to_usermode
251 SYM_CODE_END(ret_from_fork_asm)
254 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
255 #ifdef CONFIG_DEBUG_ENTRY
258 testl $X86_EFLAGS_IF, %eax
266 SYM_CODE_START(xen_error_entry)
269 PUSH_AND_CLEAR_REGS save_ret=1
270 ENCODE_FRAME_POINTER 8
271 UNTRAIN_RET_FROM_CALL
273 SYM_CODE_END(xen_error_entry)
276 * idtentry_body - Macro to emit code calling the C function
277 * @cfunc: C function to be called
278 * @has_error_code: Hardware pushed error code on stack
280 .macro idtentry_body cfunc has_error_code:req
283 * Call error_entry() and switch to the task stack if from userspace.
285 * When in XENPV, it is already in the task stack, and it can't fault
286 * for native_iret() nor native_load_gs_index() since XENPV uses its
287 * own pvops for IRET and load_gs_index(). And it doesn't need to
288 * switch the CR3. So it can skip invoking error_entry().
290 ALTERNATIVE "call error_entry; movq %rax, %rsp", \
291 "call xen_error_entry", X86_FEATURE_XENPV
296 movq %rsp, %rdi /* pt_regs pointer into 1st argument*/
298 .if \has_error_code == 1
299 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
300 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
305 /* For some configurations \cfunc ends up being a noreturn. */
312 * idtentry - Macro to generate entry stubs for simple IDT entries
313 * @vector: Vector number
314 * @asmsym: ASM symbol for the entry point
315 * @cfunc: C function to be called
316 * @has_error_code: Hardware pushed error code on stack
318 * The macro emits code to set up the kernel context for straightforward
319 * and simple IDT entries. No IST stack, no paranoid entry checks.
321 .macro idtentry vector asmsym cfunc has_error_code:req
322 SYM_CODE_START(\asmsym)
324 .if \vector == X86_TRAP_BP
325 /* #BP advances %rip to the next instruction */
326 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8 signal=0
328 UNWIND_HINT_IRET_ENTRY offset=\has_error_code*8
335 .if \has_error_code == 0
336 pushq $-1 /* ORIG_RAX: no syscall to restart */
339 .if \vector == X86_TRAP_BP
341 * If coming from kernel space, create a 6-word gap to allow the
342 * int3 handler to emulate a call instruction.
344 testb $3, CS-ORIG_RAX(%rsp)
345 jnz .Lfrom_usermode_no_gap_\@
349 UNWIND_HINT_IRET_REGS offset=8
350 .Lfrom_usermode_no_gap_\@:
353 idtentry_body \cfunc \has_error_code
355 _ASM_NOKPROBE(\asmsym)
356 SYM_CODE_END(\asmsym)
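/*
 * Example invocation (as generated from asm/idtentry.h) for #DE:
 *
 *	idtentry X86_TRAP_DE asm_exc_divide_error exc_divide_error has_error_code=0
 */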
360 * Interrupt entry/exit.
362 * The interrupt stubs push (vector) onto the stack, which is the error_code
363 * position of idtentry exceptions, and jump to one of the two idtentry points
364 * (common/spurious).
366 * common_interrupt is a hotpath, align it to a cache line
368 .macro idtentry_irq vector cfunc
369 .p2align CONFIG_X86_L1_CACHE_SHIFT
370 idtentry \vector asm_\cfunc \cfunc has_error_code=1
374 * System vectors which invoke their handlers directly and do not
375 * go through the regular common device interrupt handling code.
377 .macro idtentry_sysvec vector cfunc
378 idtentry \vector asm_\cfunc \cfunc has_error_code=0
382 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
383 * @vector: Vector number
384 * @asmsym: ASM symbol for the entry point
385 * @cfunc: C function to be called
387 * The macro emits code to set up the kernel context for #MC and #DB
389 * If the entry comes from user space it uses the normal entry path
390 * including the return to user space work and preemption checks on exit.
393 * If it hits in kernel mode then it needs to go through the paranoid
394 * entry as the exception can hit any random state. No preemption
395 * check on exit to keep the paranoid path simple.
397 .macro idtentry_mce_db vector asmsym cfunc
398 SYM_CODE_START(\asmsym)
399 UNWIND_HINT_IRET_ENTRY
404 pushq $-1 /* ORIG_RAX: no syscall to restart */
407 * If the entry is from userspace, switch stacks and treat it as a normal entry.
410 testb $3, CS-ORIG_RAX(%rsp)
411 jnz .Lfrom_usermode_switch_stack_\@
413 /* paranoid_entry returns GS information for paranoid_exit in EBX. */
418 movq %rsp, %rdi /* pt_regs pointer */
424 /* Switch to the regular task stack and use the noist entry point */
425 .Lfrom_usermode_switch_stack_\@:
426 idtentry_body noist_\cfunc, has_error_code=0
428 _ASM_NOKPROBE(\asmsym)
429 SYM_CODE_END(\asmsym)
432 #ifdef CONFIG_AMD_MEM_ENCRYPT
434 * idtentry_vc - Macro to generate entry stub for #VC
435 * @vector: Vector number
436 * @asmsym: ASM symbol for the entry point
437 * @cfunc: C function to be called
439 * The macro emits code to set up the kernel context for #VC. The #VC handler
440 * runs on an IST stack and needs to be able to cause nested #VC exceptions.
442 * To make this work the #VC entry code tries its best to pretend it doesn't use
443 * an IST stack by switching to the task stack if coming from user-space (which
444 * includes the early SYSCALL entry path) or back to the stack in the IRET frame if
445 * entered from kernel-mode.
447 * If entered from kernel-mode the return stack is validated first, and if it is
448 * not safe to use (e.g. because it points to the entry stack) the #VC handler
449 * will switch to a fall-back stack (VC2) and call a special handler function.
451 * The macro is only used for one vector, but it is planned to be extended in
452 * the future for the #HV exception.
454 .macro idtentry_vc vector asmsym cfunc
455 SYM_CODE_START(\asmsym)
456 UNWIND_HINT_IRET_ENTRY
462 * If the entry is from userspace, switch stacks and treat it as a normal entry.
465 testb $3, CS-ORIG_RAX(%rsp)
466 jnz .Lfrom_usermode_switch_stack_\@
469 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
470 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
477 * Switch off the IST stack to make it free for nested exceptions. The
478 * vc_switch_off_ist() function will switch back to the interrupted
479 * stack if it is safe to do so. If not, it switches to the VC fall-back stack.
482 movq %rsp, %rdi /* pt_regs pointer */
483 call vc_switch_off_ist
484 movq %rax, %rsp /* Switch to new stack */
490 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
491 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
493 movq %rsp, %rdi /* pt_regs pointer */
498 * No need to switch back to the IST stack. The current stack is either
499 * identical to the stack in the IRET frame or the VC fall-back stack,
500 * so it is definitely mapped even with PTI enabled.
504 /* Switch to the regular task stack */
505 .Lfrom_usermode_switch_stack_\@:
506 idtentry_body user_\cfunc, has_error_code=1
508 _ASM_NOKPROBE(\asmsym)
509 SYM_CODE_END(\asmsym)
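/*
 * Example invocation (as generated from asm/idtentry.h) for #VC:
 *
 *	idtentry_vc X86_TRAP_VC asm_exc_vmm_communication exc_vmm_communication
 */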
514 * Double fault entry. Straight paranoid. No checks from which context
515 * this comes because for the espfix induced #DF this would do the wrong thing anyway.
518 .macro idtentry_df vector asmsym cfunc
519 SYM_CODE_START(\asmsym)
520 UNWIND_HINT_IRET_ENTRY offset=8
525 /* paranoid_entry returns GS information for paranoid_exit in EBX. */
529 movq %rsp, %rdi /* pt_regs pointer into first argument */
530 movq ORIG_RAX(%rsp), %rsi /* get error code into 2nd argument*/
531 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
534 /* For some configurations \cfunc ends up being a noreturn. */
539 _ASM_NOKPROBE(\asmsym)
540 SYM_CODE_END(\asmsym)
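/*
 * Example invocation (as generated from asm/idtentry.h) for #DF:
 *
 *	idtentry_df X86_TRAP_DF asm_exc_double_fault exc_double_fault
 */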
544 * Include the defines which emit the idt entries which are
545 * shared between 32 and 64 bit and emit the __irqentry_text_* markers
546 * so the stacktrace boundary checks work.
549 .globl __irqentry_text_start
550 __irqentry_text_start:
552 #include <asm/idtentry.h>
555 .globl __irqentry_text_end
559 SYM_CODE_START_LOCAL(common_interrupt_return)
560 SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
563 ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
565 #ifdef CONFIG_PAGE_TABLE_ISOLATION
566 ALTERNATIVE "", "jmp .Lpti_restore_regs_and_return_to_usermode", X86_FEATURE_PTI
571 add $8, %rsp /* orig_ax */
572 UNWIND_HINT_IRET_REGS
576 /* Assert that the IRET frame indicates user mode. */
581 #ifdef CONFIG_PAGE_TABLE_ISOLATION
582 .Lpti_restore_regs_and_return_to_usermode:
586 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
587 * Save old stack pointer and switch to trampoline stack.
590 movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
591 UNWIND_HINT_END_OF_STACK
593 /* Copy the IRET frame to the trampoline stack. */
594 pushq 6*8(%rdi) /* SS */
595 pushq 5*8(%rdi) /* RSP */
596 pushq 4*8(%rdi) /* EFLAGS */
597 pushq 3*8(%rdi) /* CS */
598 pushq 2*8(%rdi) /* RIP */
600 /* Push user RDI on the trampoline stack. */
604 * We are on the trampoline stack. All regs except RDI are live.
605 * We can do future final exit work right here.
607 STACKLEAK_ERASE_NOCLOBBER
610 SWITCH_TO_USER_CR3 scratch_reg=%rdi scratch_reg2=%rax
615 jmp .Lswapgs_and_iret
618 SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
619 #ifdef CONFIG_DEBUG_ENTRY
620 /* Assert that pt_regs indicates kernel mode. */
627 addq $8, %rsp /* skip regs->orig_ax */
629 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
630 * when returning from IPI handler.
633 SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL)
636 .long .Lnative_iret - (. + 4)
640 UNWIND_HINT_IRET_REGS
642 * Are we returning to a stack segment from the LDT? Note: in
643 * 64-bit mode SS:RSP on the exception stack is always valid.
645 #ifdef CONFIG_X86_ESPFIX64
646 testb $4, (SS-RIP)(%rsp)
647 jnz native_irq_return_ldt
650 SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
651 ANNOTATE_NOENDBR // exc_double_fault
653 * This may fault. Non-paranoid faults on return to userspace are
654 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
655 * Double-faults due to espfix64 are handled in exc_double_fault.
656 * Other faults here are fatal.
660 #ifdef CONFIG_X86_ESPFIX64
661 native_irq_return_ldt:
663 * We are running with user GSBASE. All GPRs contain their user
664 * values. We have a percpu ESPFIX stack that is eight slots
665 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom
666 * of the ESPFIX stack.
668 * We clobber RAX and RDI in this code. We stash RDI on the
669 * normal stack and RAX on the ESPFIX stack.
671 * The ESPFIX stack layout we set up looks like this:
673 * --- top of ESPFIX stack ---
674 * SS
675 * RSP
676 * RFLAGS
677 * CS
678 * RIP <-- RSP points here when we're done
679 * RAX <-- espfix_waddr points here
680 * --- bottom of ESPFIX stack ---
683 pushq %rdi /* Stash user RDI */
684 swapgs /* to kernel GS */
685 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */
687 movq PER_CPU_VAR(espfix_waddr), %rdi
688 movq %rax, (0*8)(%rdi) /* user RAX */
689 movq (1*8)(%rsp), %rax /* user RIP */
690 movq %rax, (1*8)(%rdi)
691 movq (2*8)(%rsp), %rax /* user CS */
692 movq %rax, (2*8)(%rdi)
693 movq (3*8)(%rsp), %rax /* user RFLAGS */
694 movq %rax, (3*8)(%rdi)
695 movq (5*8)(%rsp), %rax /* user SS */
696 movq %rax, (5*8)(%rdi)
697 movq (4*8)(%rsp), %rax /* user RSP */
698 movq %rax, (4*8)(%rdi)
699 /* Now RAX == RSP. */
701 andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */
704 * espfix_stack[31:16] == 0. The page tables are set up such that
705 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
706 * espfix_waddr for any X. That is, there are 65536 RO aliases of
707 * the same page. Set up RSP so that RSP[31:16] contains the
708 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
709 * still points to an RO alias of the ESPFIX stack.
711 orq PER_CPU_VAR(espfix_stack), %rax
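/*
 * For illustration (hypothetical value): with a user RSP of
 * 0x00007ffd1234abcd, the andl above leaves %rax = 0x12340000, and the
 * orq yields espfix_stack | 0x12340000, so RSP[31:16] matches the user
 * RSP[31:16] while still addressing the read-only alias.
 */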
713 SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
714 swapgs /* to user GS */
715 popq %rdi /* Restore user RDI */
718 UNWIND_HINT_IRET_REGS offset=8
721 * At this point, we cannot write to the stack any more, but we can still read it.
724 popq %rax /* Restore user RAX */
727 * RSP now points to an ordinary IRET frame, except that the page
728 * is read-only and RSP[31:16] are preloaded with the userspace
729 * values. We can now IRET back to userspace.
731 jmp native_irq_return_iret
733 SYM_CODE_END(common_interrupt_return)
734 _ASM_NOKPROBE(common_interrupt_return)
737 * Reload gs selector with exception handling
740 * Is in entry.text as it shouldn't be instrumented.
742 SYM_FUNC_START(asm_load_gs_index)
746 ANNOTATE_NOENDBR // error_entry
748 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
753 /* running with kernelgs */
755 swapgs /* switch back to user gs */
757 /* This can't be a string because the preprocessor needs to see it. */
758 movl $__USER_DS, %eax
761 ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
766 _ASM_EXTABLE(.Lgs_change, .Lbad_gs)
768 SYM_FUNC_END(asm_load_gs_index)
769 EXPORT_SYMBOL(asm_load_gs_index)
773 * A note on the "critical region" in our callback handler.
774 * We want to avoid stacking callback handlers due to events occurring
775 * during handling of the last event. To do this, we keep events disabled
776 * until we've done all processing. HOWEVER, we must enable events before
777 * popping the stack frame (can't be done atomically) and so it would still
778 * be possible to get enough handler activations to overflow the stack.
779 * Although unlikely, bugs of that kind are hard to track down, so we'd
780 * like to avoid the possibility.
781 * So, on entry to the handler we detect whether we interrupted an
782 * existing activation in its critical region -- if so, we pop the current
783 * activation and restart the handler using the previous one.
785 * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *)
788 SYM_CODE_START_LOCAL_NOALIGN(exc_xen_hypervisor_callback)
791 * Since we don't modify %rdi, xen_pv_evtchn_do_upcall(struct pt_regs *) will
792 * see the correct pointer to the pt_regs.
795 movq %rdi, %rsp /* we don't return, adjust the stack frame */
798 call xen_pv_evtchn_do_upcall
801 SYM_CODE_END(exc_xen_hypervisor_callback)
804 * Hypervisor uses this for application faults while it executes.
805 * We get here for two reasons:
806 * 1. Fault while reloading DS, ES, FS or GS
807 * 2. Fault while executing IRET
808 * Category 1 we do not need to fix up as Xen has already reloaded all segment
809 * registers that could be reloaded and zeroed the others.
810 * Category 2 we fix up by killing the current process. We cannot use the
811 * normal Linux return path in this case because if we use the IRET hypercall
812 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
813 * We distinguish between categories by comparing each saved segment register
814 * with its current contents: any discrepancy means we are in category 1.
817 SYM_CODE_START_NOALIGN(xen_failsafe_callback)
818 UNWIND_HINT_UNDEFINED
832 /* All segments match their saved values => Category 2 (Bad IRET). */
837 UNWIND_HINT_IRET_REGS offset=8
838 jmp asm_exc_general_protection
839 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
843 UNWIND_HINT_IRET_REGS
844 pushq $-1 /* orig_ax = -1 => not a system call */
848 SYM_CODE_END(xen_failsafe_callback)
849 #endif /* CONFIG_XEN_PV */
852 * Save all registers in pt_regs. Return GSBASE related information
853 * in EBX depending on the availability of the FSGSBASE instructions:
855 * FSGSBASE R/EBX
856 * N 0 -> SWAPGS on exit
857 * 1 -> no SWAPGS on exit
859 * Y GSBASE value at entry, must be restored in paranoid_exit
862 * R15 - old SPEC_CTRL
864 SYM_CODE_START(paranoid_entry)
867 PUSH_AND_CLEAR_REGS save_ret=1
868 ENCODE_FRAME_POINTER 8
871 * Always stash CR3 in %r14. This value will be restored,
872 * verbatim, at exit. Needed if paranoid_entry interrupted
873 * another entry that already switched to the user CR3 value
874 * but has not yet returned to userspace.
876 * This is also why CS (stashed in the "iret frame" by the
877 * hardware at entry) can not be used: this may be a return
878 * to kernel code, but with a user CR3 value.
880 * Switching CR3 does not depend on kernel GSBASE so it can
881 * be done before switching to the kernel GSBASE. This is
882 * required for FSGSBASE because the kernel GSBASE has to
883 * be retrieved from a kernel internal table.
885 SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
888 * Handling GSBASE depends on the availability of FSGSBASE.
890 * Without FSGSBASE the kernel enforces that negative GSBASE
891 * values indicate kernel GSBASE. With FSGSBASE no assumptions
892 * can be made about the GSBASE value when entering from user space.
895 ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE
898 * Read the current GSBASE and store it in %rbx unconditionally,
899 * retrieve and set the current CPU's kernel GSBASE. The stored value
900 * has to be restored in paranoid_exit unconditionally.
902 * The unconditional write to GS base below ensures that no subsequent
903 * loads based on a mispredicted GS base can happen, therefore no LFENCE is needed here.
906 SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
907 jmp .Lparanoid_gsbase_done
909 .Lparanoid_entry_checkgs:
910 /* EBX = 1 -> kernel GSBASE active, no restore required */
914 * The kernel-enforced convention is that a negative GSBASE indicates
915 * a kernel value. No SWAPGS needed on entry and exit.
917 movl $MSR_GS_BASE, %ecx
920 js .Lparanoid_kernel_gsbase
922 /* EBX = 0 -> SWAPGS required on exit */
925 .Lparanoid_kernel_gsbase:
926 FENCE_SWAPGS_KERNEL_ENTRY
927 .Lparanoid_gsbase_done:
930 * Once we have CR3 and %GS set up, save and set SPEC_CTRL. Just like
931 * CR3 above, keep the old value in a callee saved register.
933 IBRS_ENTER save_reg=%r15
934 UNTRAIN_RET_FROM_CALL
937 SYM_CODE_END(paranoid_entry)
940 * "Paranoid" exit path from exception stack. This is invoked
941 * only on return from non-NMI IST interrupts that came from kernel mode.
944 * We may be returning to very strange contexts (e.g. very early
945 * in syscall entry), so checking for preemption here would
946 * be complicated. Fortunately, there's no good reason to try
947 * to handle preemption here.
949 * R/EBX contains the GSBASE related information depending on the
950 * availability of the FSGSBASE instructions:
952 * FSGSBASE R/EBX
953 * N 0 -> SWAPGS on exit
954 * 1 -> no SWAPGS on exit
956 * Y User space GSBASE, must be restored unconditionally
959 * R15 - old SPEC_CTRL
961 SYM_CODE_START_LOCAL(paranoid_exit)
965 * Must restore IBRS state before both CR3 and %GS since we need access
966 * to the per-CPU x86_spec_ctrl_shadow variable.
968 IBRS_EXIT save_reg=%r15
971 * The order of operations is important. RESTORE_CR3 requires kernel GSBASE.
974 * NB to anyone tempted to optimize this code: this code does
975 * not execute at all for exceptions from user mode. Those
976 * exceptions go through error_return instead.
978 RESTORE_CR3 scratch_reg=%rax save_reg=%r14
980 /* Handle the three GSBASE cases */
981 ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE
983 /* With FSGSBASE enabled, unconditionally restore GSBASE */
985 jmp restore_regs_and_return_to_kernel
987 .Lparanoid_exit_checkgs:
988 /* On non-FSGSBASE systems, conditionally do SWAPGS */
990 jnz restore_regs_and_return_to_kernel
992 /* We are returning to a context with user GSBASE */
994 jmp restore_regs_and_return_to_kernel
995 SYM_CODE_END(paranoid_exit)
998 * Switch GS and CR3 if needed.
1000 SYM_CODE_START(error_entry)
1004 PUSH_AND_CLEAR_REGS save_ret=1
1005 ENCODE_FRAME_POINTER 8
1007 testb $3, CS+8(%rsp)
1008 jz .Lerror_kernelspace
1011 * We entered from user mode or we're pretending to have entered
1012 * from user mode due to an IRET fault.
1015 FENCE_SWAPGS_USER_ENTRY
1016 /* We have user CR3. Change to kernel CR3. */
1017 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1019 UNTRAIN_RET_FROM_CALL
1021 leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */
1022 /* Put us onto the real thread stack. */
1026 * There are two places in the kernel that can potentially fault with
1027 * usergs. Handle them here. B stepping K8s sometimes report a
1028 * truncated RIP for IRET exceptions returning to compat mode. Check
1029 * for these here too.
1031 .Lerror_kernelspace:
1032 leaq native_irq_return_iret(%rip), %rcx
1033 cmpq %rcx, RIP+8(%rsp)
1035 movl %ecx, %eax /* zero extend */
1036 cmpq %rax, RIP+8(%rsp)
1038 cmpq $.Lgs_change, RIP+8(%rsp)
1039 jne .Lerror_entry_done_lfence
1042 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
1043 * gsbase and proceed. We'll fix up the exception and land in
1044 * .Lgs_change's error handler with kernel gsbase.
1049 * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
1050 * kernel or user gsbase.
1052 .Lerror_entry_done_lfence:
1053 FENCE_SWAPGS_KERNEL_ENTRY
1055 leaq 8(%rsp), %rax /* return pt_regs pointer */
1060 /* Fix truncated RIP */
1061 movq %rcx, RIP+8(%rsp)
1066 * We came from an IRET to user mode, so we have user
1067 * gsbase and CR3. Switch to kernel gsbase and CR3:
1070 FENCE_SWAPGS_USER_ENTRY
1071 SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
1073 UNTRAIN_RET_FROM_CALL
1076 * Pretend that the exception came from user mode: set up pt_regs
1077 * as if we faulted immediately after IRET.
1079 leaq 8(%rsp), %rdi /* arg0 = pt_regs pointer */
1083 SYM_CODE_END(error_entry)
1085 SYM_CODE_START_LOCAL(error_return)
1087 DEBUG_ENTRY_ASSERT_IRQS_OFF
1089 jz restore_regs_and_return_to_kernel
1090 jmp swapgs_restore_regs_and_return_to_usermode
1091 SYM_CODE_END(error_return)
1094 * Runs on exception stack. Xen PV does not go through this path at all,
1095 * so we can use real assembly here.
1098 * %r14: Used to save/restore the CR3 of the interrupted context
1099 * when PAGE_TABLE_ISOLATION is in use. Do not clobber.
1101 SYM_CODE_START(asm_exc_nmi)
1102 UNWIND_HINT_IRET_ENTRY
1106 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1107 * the iretq it performs will take us out of NMI context.
1108 * This means that we can have nested NMIs where the next
1109 * NMI is using the top of the stack of the previous NMI. We
1110 * can't let it execute because the nested NMI will corrupt the
1111 * stack of the previous NMI. NMI handlers are not re-entrant anyway.
1114 * To handle this case we do the following:
1115 * Check a special location on the stack that contains a
1116 * variable that is set when NMIs are executing.
1117 * The interrupted task's stack is also checked to see if it is an NMI stack.
1119 * If the variable is not set and the stack is not the NMI stack, then:
1121 * o Set the special variable on the stack
1122 * o Copy the interrupt frame into an "outermost" location on the stack
1124 * o Copy the interrupt frame into an "iret" location on the stack
1125 * o Continue processing the NMI
1126 * If the variable is set or the previous stack is the NMI stack:
1127 * o Modify the "iret" location to jump to the repeat_nmi
1128 * o return back to the first NMI
1130 * Now on exit of the first NMI, we first clear the stack variable.
1131 * The NMI stack will tell any nested NMIs at that point that it is
1132 * nested. Then we pop the stack normally with iret, and if there was
1133 * a nested NMI that updated the copy interrupt stack frame, a
1134 * jump will be made to the repeat_nmi code that will handle the second half of the NMI.
1137 * However, espfix prevents us from directly returning to userspace
1138 * with a single IRET instruction. Similarly, IRET to user mode
1139 * can fault. We therefore handle NMIs from user space like
1140 * other IST entries.
1146 /* Use %rdx as our temp variable throughout */
1149 testb $3, CS-RIP+8(%rsp)
1150 jz .Lnmi_from_kernel
1153 * NMI from user mode. We need to run on the thread stack, but we
1154 * can't go through the normal entry paths: NMIs are masked, and
1155 * we don't want to enable interrupts, because then we'll end
1156 * up in an awkward situation in which IRQs are on but NMIs are off.
1159 * We also must not push anything to the stack before switching
1160 * stacks lest we corrupt the "NMI executing" variable.
1164 FENCE_SWAPGS_USER_ENTRY
1165 SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
1167 movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rsp
1168 UNWIND_HINT_IRET_REGS base=%rdx offset=8
1169 pushq 5*8(%rdx) /* pt_regs->ss */
1170 pushq 4*8(%rdx) /* pt_regs->rsp */
1171 pushq 3*8(%rdx) /* pt_regs->flags */
1172 pushq 2*8(%rdx) /* pt_regs->cs */
1173 pushq 1*8(%rdx) /* pt_regs->rip */
1174 UNWIND_HINT_IRET_REGS
1175 pushq $-1 /* pt_regs->orig_ax */
1176 PUSH_AND_CLEAR_REGS rdx=(%rdx)
1177 ENCODE_FRAME_POINTER
1183 * At this point we no longer need to worry about stack damage
1184 * due to nesting -- we're on the normal thread stack and we're
1185 * done with the NMI stack.
1192 * Return back to user mode. We must *not* do the normal exit
1193 * work, because we don't want to enable interrupts.
1195 jmp swapgs_restore_regs_and_return_to_usermode
1199 * Here's what our stack frame will look like:
1200 * +---------------------------------------------------------+
1201 * | original SS |
1202 * | original Return RSP |
1203 * | original RFLAGS |
1204 * | original CS |
1205 * | original RIP |
1206 * +---------------------------------------------------------+
1207 * | temp storage for rdx |
1208 * +---------------------------------------------------------+
1209 * | "NMI executing" variable |
1210 * +---------------------------------------------------------+
1211 * | iret SS } Copied from "outermost" frame |
1212 * | iret Return RSP } on each loop iteration; overwritten |
1213 * | iret RFLAGS } by a nested NMI to force another |
1214 * | iret CS } iteration if needed. |
1215 * | iret RIP } |
1216 * +---------------------------------------------------------+
1217 * | outermost SS } initialized in first_nmi; |
1218 * | outermost Return RSP } will not be changed before |
1219 * | outermost RFLAGS } NMI processing is done. |
1220 * | outermost CS } Copied to "iret" frame on each |
1221 * | outermost RIP } iteration. |
1222 * +---------------------------------------------------------+
1223 * | pt_regs |
1224 * +---------------------------------------------------------+
1226 * The "original" frame is used by hardware. Before re-enabling
1227 * NMIs, we need to be done with it, and we need to leave enough
1228 * space for the asm code here.
1230 * We return by executing IRET while RSP points to the "iret" frame.
1231 * That will either return for real or it will loop back into NMI processing.
1234 * The "outermost" frame is copied to the "iret" frame on each
1235 * iteration of the loop, so each iteration starts with the "iret"
1236 * frame pointing to the final return target.
1240 * Determine whether we're a nested NMI.
1242 * If we interrupted kernel code between repeat_nmi and
1243 * end_repeat_nmi, then we are a nested NMI. We must not
1244 * modify the "iret" frame because it's being written by
1245 * the outer NMI. That's okay; the outer NMI handler is
1246 * about to call exc_nmi() anyway, so we can just resume the outer NMI.
1250 movq $repeat_nmi, %rdx
1253 movq $end_repeat_nmi, %rdx
1259 * Now check "NMI executing". If it's set, then we're nested.
1260 * This will not detect if we interrupted an outer NMI just before IRET.
1267 * Now test if the previous stack was an NMI stack. This covers
1268 * the case where we interrupt an outer NMI after it clears
1269 * "NMI executing" but before IRET. We need to be careful, though:
1270 * there is one case in which RSP could point to the NMI stack
1271 * despite there being no NMI active: naughty userspace controls
1272 * RSP at the very beginning of the SYSCALL targets. We can
1273 * pull a fast one on naughty userspace, though: we program
1274 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1275 * if it controls the kernel's RSP. We set DF before we clear "NMI executing".
1279 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1280 cmpq %rdx, 4*8(%rsp)
1281 /* If the stack pointer is above the NMI stack, this is a normal NMI */
1284 subq $EXCEPTION_STKSZ, %rdx
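/* %rdx now holds the bottom of the NMI stack (its top minus EXCEPTION_STKSZ). */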
1285 cmpq %rdx, 4*8(%rsp)
1286 /* If it is below the NMI stack, it is a normal NMI */
1289 /* Ah, it is within the NMI stack. */
1291 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1292 jz first_nmi /* RSP was user controlled. */
1294 /* This is a nested NMI. */
1298 * Modify the "iret" frame to point to repeat_nmi, forcing another
1299 * iteration of NMI handling.
1302 leaq -10*8(%rsp), %rdx
1309 /* Put stack back */
1315 /* We are returning to kernel mode, so this cannot result in a fault. */
1322 /* Make room for "NMI executing". */
1325 /* Leave room for the "iret" frame */
1328 /* Copy the "original" frame to the "outermost" frame */
1332 UNWIND_HINT_IRET_REGS
1334 /* Everything up to here is safe from nested NMIs */
1336 #ifdef CONFIG_DEBUG_ENTRY
1338 * For ease of testing, unmask NMIs right away. Disabled by
1339 * default because IRET is very expensive.
1342 pushq %rsp /* RSP (minus 8 because of the previous push) */
1343 addq $8, (%rsp) /* Fix up RSP */
1345 pushq $__KERNEL_CS /* CS */
1347 iretq /* continues at repeat_nmi below */
1348 UNWIND_HINT_IRET_REGS
1353 ANNOTATE_NOENDBR // this code
1355 * If there was a nested NMI, the first NMI's iret will return
1356 * here. But NMIs are still enabled and we can take another
1357 * nested NMI. The nested NMI checks the interrupted RIP to see
1358 * if it is between repeat_nmi and end_repeat_nmi, and if so
1359 * it will just return, as we are about to repeat an NMI anyway.
1360 * This makes it safe to copy to the stack frame that a nested NMI will update.
1363 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
1364 * we're repeating an NMI, gsbase has the same value that it had on
1365 * the first iteration. paranoid_entry will load the kernel
1366 * gsbase if needed before we call exc_nmi(). "NMI executing"
1369 movq $1, 10*8(%rsp) /* Set "NMI executing". */
1372 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
1373 * here must not modify the "iret" frame while we're writing to
1374 * it or it will end up containing garbage.
1382 ANNOTATE_NOENDBR // this code
1385 * Everything below this point can be preempted by a nested NMI.
1386 * If this happens, then the inner NMI will change the "iret"
1387 * frame to point back to repeat_nmi.
1389 pushq $-1 /* ORIG_RAX: no syscall to restart */
1392 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
1393 * as we should not be calling schedule in NMI context.
1394 * Even with normal interrupts enabled. An NMI should not be
1395 * setting NEED_RESCHED or anything that normal interrupts and
1396 * exceptions might do.
1404 /* Always restore stashed SPEC_CTRL value (see paranoid_entry) */
1405 IBRS_EXIT save_reg=%r15
1407 /* Always restore stashed CR3 value (see paranoid_entry) */
1408 RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
1411 * The above invocation of paranoid_entry stored the GSBASE
1412 * related information in R/EBX depending on the availability of FSGSBASE.
1415 * If FSGSBASE is enabled, restore the saved GSBASE value
1416 * unconditionally, otherwise take the conditional SWAPGS path.
1418 ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE
1424 /* EBX == 0 -> invoke SWAPGS */
1435 * Skip orig_ax and the "outermost" frame to point RSP at the "iret"
1436 * frame.
1441 * Clear "NMI executing". Set DF first so that we can easily
1442 * distinguish the remaining code between here and IRET from
1443 * the SYSCALL entry and exit paths.
1445 * We arguably should just inspect RIP instead, but I (Andy) wrote
1446 * this code when I had the misapprehension that Xen PV supported
1447 * NMIs, and Xen PV would break that approach.
1450 movq $0, 5*8(%rsp) /* clear "NMI executing" */
1453 * iretq reads the "iret" frame and exits the NMI stack in a
1454 * single instruction. We are returning to kernel mode, so this
1455 * cannot result in a fault. Similarly, we don't need to worry
1456 * about espfix64 on the way back to kernel mode.
1459 SYM_CODE_END(asm_exc_nmi)
1462 * This handles SYSCALL from 32-bit code. There is no way to program
1463 * MSRs to fully disable 32-bit SYSCALL.
1465 SYM_CODE_START(entry_SYSCALL32_ignore)
1466 UNWIND_HINT_END_OF_STACK
1470 SYM_CODE_END(entry_SYSCALL32_ignore)
1472 .pushsection .text, "ax"
1474 SYM_CODE_START_NOALIGN(rewind_stack_and_make_dead)
1476 /* Prevent any naive code from trying to unwind to our caller. */
1479 movq PER_CPU_VAR(pcpu_hot + X86_top_of_stack), %rax
1480 leaq -PTREGS_SIZE(%rax), %rsp
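/*
 * %rsp now points at a pt_regs-sized area just below the top of the
 * task stack, so the stack looks like a fresh kernel entry for the
 * final, never-returning call below.
 */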
1484 SYM_CODE_END(rewind_stack_and_make_dead)