3 * Copyright (C) 1991, 1992 Linus Torvalds
7 * entry.S contains the system-call and fault low-level handling routines.
8 * This also contains the timer-interrupt handler, as well as all interrupts
9 * and faults that can result in a task-switch.
11 * NOTE: This code handles signal-recognition, which happens every time
12 * after a timer-interrupt and after each system call.
14 * I changed all the .align's to 4 (16 byte alignment), as that's faster
17 * Stack layout in 'syscall_exit':
18 * ptrace needs to have all regs on the stack.
19 * if the order here is changed, it needs to be
20 * updated in fork.c:copy_process, signal.c:do_signal,
21 * ptrace.c and ptrace.h
33 * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
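 *
 * (A sketch of the rest of the frame, which mirrors struct pt_regs;
 * the authoritative offsets are the PT_* asm-offsets. Below %gs, from
 * 0(%esp) upward, sit %ebx, %ecx, %edx, %esi, %edi, %ebp, %eax, %ds,
 * %es and %fs; above it sit orig_eax, %eip, %cs, %eflags, %oldesp
 * and %oldss.)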
41 * "current" is in register %ebx during any slow entries.
44 #include <linux/linkage.h>
45 #include <linux/err.h>
46 #include <asm/thread_info.h>
47 #include <asm/irqflags.h>
48 #include <asm/errno.h>
49 #include <asm/segment.h>
51 #include <asm/page_types.h>
52 #include <asm/percpu.h>
53 #include <asm/processor-flags.h>
54 #include <asm/ftrace.h>
55 #include <asm/irq_vectors.h>
56 #include <asm/cpufeature.h>
57 #include <asm/alternative-asm.h>
61 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
62 #include <linux/elf-em.h>
63 #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
64 #define __AUDIT_ARCH_LE 0x40000000
66 #ifndef CONFIG_AUDITSYSCALL
67 #define sysenter_audit syscall_trace_entry
68 #define sysexit_audit syscall_exit_work
71 .section .entry.text, "ax"
74 * We use macros for low-level operations which need to be overridden
75 * for paravirtualization. The following will never clobber any registers:
76 * INTERRUPT_RETURN (aka. "iret")
77 * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
78 * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
80 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
81 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
82 * Allowing a register to be clobbered can shrink the paravirt replacement
83 * enough to patch inline, increasing performance.
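 *
 * A sketch of the native (!CONFIG_PARAVIRT) definitions, roughly as in
 * <asm/irqflags.h> (the clobber argument is simply ignored there):
 *
 *	#define DISABLE_INTERRUPTS(x)		cli
 *	#define ENABLE_INTERRUPTS(x)		sti
 *	#define INTERRUPT_RETURN		iret
 *	#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit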
87 #define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
89 #define preempt_stop(clobbers)
90 #define resume_kernel restore_all
93 .macro TRACE_IRQS_IRET
94 #ifdef CONFIG_TRACE_IRQFLAGS
95 testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off?
103 * User gs save/restore
 * %gs is used for userland TLS and the kernel only uses it for the
 * stack canary, which gcc requires to be at %gs:20. Read the comment
107 * at the top of stackprotector.h for more info.
109 * Local labels 98 and 99 are used.
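 *
 * For context, a sketch of what gcc emits with -fstack-protector on
 * 32-bit (illustrative frame slot and label; only the %gs:20 accesses
 * are fixed):
 *
 *	movl %gs:20, %eax	# prologue: load the canary
 *	movl %eax, -4(%ebp)	# stash it in this frame
 *	...
 *	movl -4(%ebp), %eax
 *	xorl %gs:20, %eax	# epilogue: recheck it
 *	jne .Lfail		# hypothetical label: calls __stack_chk_fail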
111 #ifdef CONFIG_X86_32_LAZY_GS
/* unfortunately push/pop can't be no-ops */
118 addl $(4 + \pop), %esp
/* all the rest are no-ops */
130 .macro REG_TO_PTGS reg
132 .macro SET_KERNEL_GS reg
135 #else /* CONFIG_X86_32_LAZY_GS */
148 .pushsection .fixup, "ax"
152 _ASM_EXTABLE(98b,99b)
156 98: mov PT_GS(%esp), %gs
159 .pushsection .fixup, "ax"
160 99: movl $0, PT_GS(%esp)
163 _ASM_EXTABLE(98b,99b)
169 .macro REG_TO_PTGS reg
170 movl \reg, PT_GS(%esp)
172 .macro SET_KERNEL_GS reg
173 movl $(__KERNEL_STACK_CANARY), \reg
177 #endif /* CONFIG_X86_32_LAZY_GS */
192 movl $(__USER_DS), %edx
195 movl $(__KERNEL_PERCPU), %edx
200 .macro RESTORE_INT_REGS
210 .macro RESTORE_REGS pop=0
216 .pushsection .fixup, "ax"
233 GET_THREAD_INFO(%ebp)
235 pushl $0x0202 # Reset kernel eflags
240 ENTRY(ret_from_kernel_thread)
243 GET_THREAD_INFO(%ebp)
245 pushl $0x0202 # Reset kernel eflags
247 movl PT_EBP(%esp),%eax
251 ENDPROC(ret_from_kernel_thread)
254 * Return to user mode is not as complex as all this looks,
255 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
260 # userspace resumption stub bypassing syscall exit tracing
263 preempt_stop(CLBR_ANY)
265 GET_THREAD_INFO(%ebp)
267 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
268 movb PT_CS(%esp), %al
269 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
 * We can be coming here from a child spawned by kernel_thread().
274 movl PT_CS(%esp), %eax
275 andl $SEGMENT_RPL_MASK, %eax
278 jb resume_kernel # not returning to v8086 or userspace
280 ENTRY(resume_userspace)
282 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
283 # setting need_resched or sigpending
284 # between sampling and the iret
286 movl TI_flags(%ebp), %ecx
287 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
288 # int/exception return?
291 END(ret_from_exception)
293 #ifdef CONFIG_PREEMPT
295 DISABLE_INTERRUPTS(CLBR_ANY)
297 cmpl $0,PER_CPU_VAR(__preempt_count)
299 testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
301 call preempt_schedule_irq
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
309 # sysenter call handler stub
310 ENTRY(ia32_sysenter_target)
311 movl TSS_sysenter_sp0(%esp),%esp
 * Interrupts are disabled here, but we can't trace that until
 * enough kernel state has been set up for TRACE_IRQS_OFF to be
 * called - and we immediately enable interrupts at that point anyway.
321 orl $X86_EFLAGS_IF, (%esp)
324 * Push current_thread_info()->sysenter_return to the stack.
325 * A tiny bit of offset fixup is necessary: TI_sysenter_return
326 * is relative to thread_info, which is at the bottom of the
327 * kernel stack page. 4*4 means the 4 words pushed above;
328 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
329 * and THREAD_SIZE takes us to the bottom.
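 *
 * A worked example (assuming THREAD_SIZE = 8192 and
 * TOP_OF_KERNEL_STACK_PADDING = 0): the 4 pushes above leave %esp
 * 16 bytes below the top of the stack, so
 *
 *	%esp + 4*4			-> top of the kernel stack
 *	     - THREAD_SIZE		-> bottom, i.e. thread_info
 *	     + TI_sysenter_return	-> the slot the pushl below reads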
331 pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
335 ENABLE_INTERRUPTS(CLBR_NONE)
 * Load the potential sixth argument from the user stack.
339 * Careful about security.
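 *
 * (Note on the check below: %ebp holds the user stack pointer, and
 * the 4-byte load at local label 1 must lie entirely below
 * __PAGE_OFFSET; comparing against $__PAGE_OFFSET-3 rejects any %ebp
 * whose load would reach into kernel addresses, while a fault on the
 * load itself is handled via the _ASM_EXTABLE entry.)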
341 cmpl $__PAGE_OFFSET-3,%ebp
346 movl %ebp,PT_EBP(%esp)
347 _ASM_EXTABLE(1b,syscall_fault)
349 GET_THREAD_INFO(%ebp)
351 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
354 cmpl $(NR_syscalls), %eax
356 call *sys_call_table(,%eax,4)
358 movl %eax,PT_EAX(%esp)
360 DISABLE_INTERRUPTS(CLBR_ANY)
362 movl TI_flags(%ebp), %ecx
363 testl $_TIF_ALLWORK_MASK, %ecx
366 /* if something modifies registers it must also disable sysexit */
367 movl PT_EIP(%esp), %edx
368 movl PT_OLDESP(%esp), %ecx
371 1: mov PT_FS(%esp), %fs
373 ENABLE_INTERRUPTS_SYSEXIT
375 #ifdef CONFIG_AUDITSYSCALL
377 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
378 jnz syscall_trace_entry
379 /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
380 movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */
/* movl PT_ECX(%esp), %ecx already set, a1: 3rd arg to audit */
382 pushl PT_ESI(%esp) /* a3: 5th arg */
383 pushl PT_EDX+4(%esp) /* a2: 4th arg */
384 call __audit_syscall_entry
385 popl %ecx /* get that remapped edx off the stack */
386 popl %ecx /* get that remapped esi off the stack */
387 movl PT_EAX(%esp),%eax /* reload syscall number */
391 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
392 jnz syscall_exit_work
394 ENABLE_INTERRUPTS(CLBR_ANY)
395 movl %eax,%edx /* second arg, syscall return value */
396 cmpl $-MAX_ERRNO,%eax /* is it an error ? */
397 setbe %al /* 1 if so, 0 if not */
398 movzbl %al,%eax /* zero-extend that */
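	/*
	 * A note on the idiom above: error returns occupy the top
	 * MAX_ERRNO values of the unsigned range (-MAX_ERRNO..-1), so a
	 * single unsigned compare classifies the return value, and
	 * setbe/movzbl widen the result into the int flag that
	 * __audit_syscall_exit takes as its first argument.
	 */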
399 call __audit_syscall_exit
400 DISABLE_INTERRUPTS(CLBR_ANY)
402 movl TI_flags(%ebp), %ecx
403 testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
404 jnz syscall_exit_work
405 movl PT_EAX(%esp),%eax /* reload syscall return value */
409 .pushsection .fixup,"ax"
410 2: movl $0,PT_FS(%esp)
415 ENDPROC(ia32_sysenter_target)
417 # system call handler stub
420 pushl %eax # save orig_eax
422 GET_THREAD_INFO(%ebp)
423 # system call tracing in operation / emulation
424 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
425 jnz syscall_trace_entry
426 cmpl $(NR_syscalls), %eax
429 call *sys_call_table(,%eax,4)
431 movl %eax,PT_EAX(%esp) # store the return value
434 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
435 # setting need_resched or sigpending
436 # between sampling and the iret
438 movl TI_flags(%ebp), %ecx
439 testl $_TIF_ALLWORK_MASK, %ecx # current->work
440 jnz syscall_exit_work
445 #ifdef CONFIG_X86_ESPFIX32
446 movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
447 # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
448 # are returning to the kernel.
449 # See comments in process.c:copy_thread() for details.
450 movb PT_OLDSS(%esp), %ah
451 movb PT_CS(%esp), %al
452 andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
453 cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
454 je ldt_ss # returning to user-space with LDT SS
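	/*
	 * (How the check works: the moves above pack %al = CS and
	 * %ah = OLDSS while the upper bits keep EFLAGS, so after the
	 * andl one cmpl tests "LDT stack segment, user RPL, not
	 * v8086" in a single comparison.)
	 */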
457 RESTORE_REGS 4 # skip orig_eax/error_code
462 pushl $0 # no error code
466 _ASM_EXTABLE(irq_return,iret_exc)
468 #ifdef CONFIG_X86_ESPFIX32
470 #ifdef CONFIG_PARAVIRT
 * The kernel can't run on a non-flat stack if paravirt mode
 * is active. Rather than try to fix up the high bits of
 * ESP, bypass this code entirely. This may break DOSemu
 * and/or Wine support in a paravirt VM, although the option
 * is still available to implement the setting of the high
 * 16 bits in the INTERRUPT_RETURN paravirt-op.
479 cmpl $0, pv_info+PARAVIRT_enabled
 * Set up and switch to the ESPFIX stack
 *
 * We're returning to userspace with a 16-bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that makes up for the difference.
494 #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
495 mov %esp, %edx /* load kernel esp */
496 mov PT_OLDESP(%esp), %eax /* load userspace esp */
497 mov %dx, %ax /* eax: new kernel esp */
498 sub %eax, %edx /* offset (low word is 0) */
500 mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
501 mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
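	/*
	 * (A worked example with made-up values: kernel %esp =
	 * 0xc1234f00, user %esp = 0x12345678. Then %eax = 0x12344f00
	 * -- user high word, kernel low word -- and %edx = 0xaef00000,
	 * whose upper half just became the ESPFIX segment base. After
	 * the lss below, base + %esp = 0xc1234f00 again, the old kernel
	 * stack, while the high word of %esp now matches userspace's.)
	 */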
503 pushl %eax /* new kernel esp */
/* Disable interrupts, but do not irqtrace this section: we
 * will soon execute iret and the tracer was already set to
 * the irq state that will hold after the iret */
507 DISABLE_INTERRUPTS(CLBR_EAX)
508 lss (%esp), %esp /* switch to espfix segment */
513 # perform work that needs to be done immediately before resumption
516 testb $_TIF_NEED_RESCHED, %cl
521 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
522 # setting need_resched or sigpending
523 # between sampling and the iret
525 movl TI_flags(%ebp), %ecx
526 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
527 # than syscall tracing?
529 testb $_TIF_NEED_RESCHED, %cl
532 work_notifysig: # deal with pending signals and
533 # notify-resume requests
535 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
537 jnz work_notifysig_v86 # returning to kernel-space or
544 ENABLE_INTERRUPTS(CLBR_NONE)
545 movb PT_CS(%esp), %bl
546 andb $SEGMENT_RPL_MASK, %bl
550 call do_notify_resume
556 pushl %ecx # save ti_flags for do_notify_resume
557 call save_v86_state # %eax contains pt_regs pointer
# perform syscall entry tracing
567 movl $-ENOSYS,PT_EAX(%esp)
569 call syscall_trace_enter
570 /* What it returned is what we'll actually use. */
571 cmpl $(NR_syscalls), %eax
574 END(syscall_trace_entry)
576 # perform syscall exit tracing
579 testl $_TIF_WORK_SYSCALL_EXIT, %ecx
582 ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
585 call syscall_trace_leave
587 END(syscall_exit_work)
591 GET_THREAD_INFO(%ebp)
592 movl $-EFAULT,PT_EAX(%esp)
598 jmp syscall_after_call
603 jmp sysenter_after_call
606 .macro FIXUP_ESPFIX_STACK
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack, adjusting ESP by the matching offset.
614 #ifdef CONFIG_X86_ESPFIX32
615 /* fixup the stack */
616 mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
617 mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
619 addl %esp, %eax /* the adjusted stack pointer */
622 lss (%esp), %esp /* switch to the normal stack segment */
625 .macro UNWIND_ESPFIX_STACK
626 #ifdef CONFIG_X86_ESPFIX32
628 /* see if on espfix stack */
629 cmpw $__ESPFIX_SS, %ax
631 movl $__KERNEL_DS, %eax
634 /* switch to normal stack */
641 * Build the entry stubs with some assembler magic.
642 * We pack 1 stub into every 8-byte block.
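 *
 * A sketch of each generated stub (the push uses the 2-byte imm8 form
 * precisely because 0x7f - vector always fits in a signed byte):
 *
 *	pushl $(~vector + 0x80)		# == 0x7f - vector
 *	jmp   common_interrupt
 *
 * common_interrupt then adds -0x80 to the pushed word, recreating
 * ~vector, a value in the [-256,-1] range.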
645 ENTRY(irq_entries_start)
646 vector=FIRST_EXTERNAL_VECTOR
647 .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
648 pushl $(~vector+0x80) /* Note: always in signed byte range */
653 END(irq_entries_start)
656 * the CPU automatically disables interrupts when executing an IRQ vector,
657 * so IRQ-flags tracing has to follow that:
659 .p2align CONFIG_X86_L1_CACHE_SHIFT
662 addl $-0x80,(%esp) /* Adjust vector into the [-256,-1] range */
668 ENDPROC(common_interrupt)
670 #define BUILD_INTERRUPT3(name, nr, fn) \
682 #ifdef CONFIG_TRACING
683 #define TRACE_BUILD_INTERRUPT(name, nr) \
684 BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
686 #define TRACE_BUILD_INTERRUPT(name, nr)
689 #define BUILD_INTERRUPT(name, nr) \
690 BUILD_INTERRUPT3(name, nr, smp_##name); \
691 TRACE_BUILD_INTERRUPT(name, nr)
693 /* The include is where all of the SMP etc. interrupts come from */
694 #include <asm/entry_arch.h>
696 ENTRY(coprocessor_error)
699 pushl $do_coprocessor_error
701 END(coprocessor_error)
703 ENTRY(simd_coprocessor_error)
706 #ifdef CONFIG_X86_INVD_BUG
707 /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
708 ALTERNATIVE "pushl $do_general_protection", \
709 "pushl $do_simd_coprocessor_error", \
712 pushl $do_simd_coprocessor_error
715 END(simd_coprocessor_error)
717 ENTRY(device_not_available)
719 pushl $-1 # mark this as an int
720 pushl $do_device_not_available
722 END(device_not_available)
724 #ifdef CONFIG_PARAVIRT
727 _ASM_EXTABLE(native_iret, iret_exc)
730 ENTRY(native_irq_enable_sysexit)
733 END(native_irq_enable_sysexit)
757 ENTRY(coprocessor_segment_overrun)
760 pushl $do_coprocessor_segment_overrun
762 END(coprocessor_segment_overrun)
766 pushl $do_invalid_TSS
770 ENTRY(segment_not_present)
772 pushl $do_segment_not_present
774 END(segment_not_present)
778 pushl $do_stack_segment
782 ENTRY(alignment_check)
784 pushl $do_alignment_check
790 pushl $0 # no error code
791 pushl $do_divide_error
795 #ifdef CONFIG_X86_MCE
799 pushl machine_check_vector
804 ENTRY(spurious_interrupt_bug)
807 pushl $do_spurious_interrupt_bug
809 END(spurious_interrupt_bug)
812 /* Xen doesn't set %esp to be precisely what the normal sysenter
813 entrypoint expects, so fix it up before using the normal path. */
814 ENTRY(xen_sysenter_target)
815 addl $5*4, %esp /* remove xen-provided frame */
816 jmp sysenter_past_esp
818 ENTRY(xen_hypervisor_callback)
819 pushl $-1 /* orig_ax = -1 => not a system call */
823 /* Check to see if we got the event in the critical
824 region in xen_iret_direct, after we've reenabled
825 events and checked for pending events. This simulates
the iret instruction's behaviour, where it delivers a
827 pending interrupt when enabling interrupts. */
828 movl PT_EIP(%esp),%eax
829 cmpl $xen_iret_start_crit,%eax
831 cmpl $xen_iret_end_crit,%eax
834 jmp xen_iret_crit_fixup
838 call xen_evtchn_do_upcall
839 #ifndef CONFIG_PREEMPT
840 call xen_maybe_preempt_hcall
843 ENDPROC(xen_hypervisor_callback)
845 # Hypervisor uses this for application faults while it executes.
846 # We get here for two reasons:
847 # 1. Fault while reloading DS, ES, FS or GS
848 # 2. Fault while executing IRET
849 # Category 1 we fix up by reattempting the load, and zeroing the segment
850 # register if the load fails.
851 # Category 2 we fix up by jumping to do_iret_error. We cannot use the
852 # normal Linux return path in this case because if we use the IRET hypercall
853 # to pop the stack frame we end up in an infinite loop of failsafe callbacks.
854 # We distinguish between categories by maintaining a status value in EAX.
855 ENTRY(xen_failsafe_callback)
862 /* EAX == 0 => Category 1 (Bad segment)
863 EAX != 0 => Category 2 (Bad IRET) */
869 5: pushl $-1 /* orig_ax = -1 => not a system call */
871 jmp ret_from_exception
891 ENDPROC(xen_failsafe_callback)
893 BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
894 xen_evtchn_do_upcall)
896 #endif /* CONFIG_XEN */
898 #if IS_ENABLED(CONFIG_HYPERV)
900 BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
901 hyperv_vector_handler)
903 #endif /* CONFIG_HYPERV */
905 #ifdef CONFIG_FUNCTION_TRACER
906 #ifdef CONFIG_DYNAMIC_FTRACE
916 pushl $0 /* Pass NULL as regs pointer */
919 movl function_trace_op, %ecx
920 subl $MCOUNT_INSN_SIZE, %eax
926 addl $4,%esp /* skip NULL pointer */
931 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
932 .globl ftrace_graph_call
942 ENTRY(ftrace_regs_caller)
943 pushf /* push flags before compare (in cs location) */
946 * i386 does not save SS and ESP when coming from kernel.
 * Instead, to get sp, &regs->sp is used (see ptrace.h).
948 * Unfortunately, that means eflags must be at the same location
949 * as the current return ip is. We move the return ip into the
950 * ip location, and move flags into the return ip location.
952 pushl 4(%esp) /* save return ip into ip slot */
954 pushl $0 /* Load 0 into orig_ax */
967 movl 13*4(%esp), %eax /* Get the saved flags */
968 movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
969 /* clobbering return ip */
970 movl $__KERNEL_CS,13*4(%esp)
972 movl 12*4(%esp), %eax /* Load ip (1st parameter) */
973 subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
974 movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
movl function_trace_op, %ecx /* Save ftrace_ops in 3rd parameter */
976 pushl %esp /* Save pt_regs as 4th parameter */
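	/*
	 * (Calling-convention note: 32-bit kernel code is built with
	 * -mregparm=3, so %eax/%edx/%ecx already carry the first three
	 * arguments of the callback, roughly
	 *
	 *	void callback(unsigned long ip, unsigned long parent_ip,
	 *		      struct ftrace_ops *op, struct pt_regs *regs);
	 *
	 * which is why only the pt_regs pointer needs a push.)
	 */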
978 GLOBAL(ftrace_regs_call)
981 addl $4, %esp /* Skip pt_regs */
982 movl 14*4(%esp), %eax /* Move flags back into cs */
983 movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
984 movl 12*4(%esp), %eax /* Get return ip from regs->ip */
985 movl %eax, 14*4(%esp) /* Put return ip back for ret */
998 addl $8, %esp /* Skip orig_ax and ip */
999 popf /* Pop flags at end (no addl to corrupt flags) */
1004 #else /* ! CONFIG_DYNAMIC_FTRACE */
1007 cmpl $__PAGE_OFFSET, %esp
1008 jb ftrace_stub /* Paging not enabled yet? */
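	/*
	 * (Before paging is enabled the kernel still runs at physical
	 * addresses below __PAGE_OFFSET, so a stack pointer under that
	 * boundary means it is too early to call into the tracer.)
	 */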
1010 cmpl $ftrace_stub, ftrace_trace_function
1012 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1013 cmpl $ftrace_stub, ftrace_graph_return
1014 jnz ftrace_graph_caller
1016 cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
1017 jnz ftrace_graph_caller
1023 /* taken from glibc */
1028 movl 0xc(%esp), %eax
1029 movl 0x4(%ebp), %edx
1030 subl $MCOUNT_INSN_SIZE, %eax
1032 call *ftrace_trace_function
1039 #endif /* CONFIG_DYNAMIC_FTRACE */
1040 #endif /* CONFIG_FUNCTION_TRACER */
1042 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1043 ENTRY(ftrace_graph_caller)
1047 movl 0xc(%esp), %eax
1050 subl $MCOUNT_INSN_SIZE, %eax
1051 call prepare_ftrace_return
1056 END(ftrace_graph_caller)
1058 .globl return_to_handler
1063 call ftrace_return_to_handler
1070 #ifdef CONFIG_TRACING
1071 ENTRY(trace_page_fault)
1073 pushl $trace_do_page_fault
1075 END(trace_page_fault)
1080 pushl $do_page_fault
1083 /* the function address is in %gs's slot on the stack */
1095 movl $(__KERNEL_PERCPU), %ecx
1099 movl PT_GS(%esp), %edi # get the function address
1100 movl PT_ORIG_EAX(%esp), %edx # get the error code
1101 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
1104 movl $(__USER_DS), %ecx
1108 movl %esp,%eax # pt_regs pointer
1110 jmp ret_from_exception
1114 * Debug traps and NMI can happen at the one SYSENTER instruction
1115 * that sets up the real kernel stack. Check here, since we can't
1116 * allow the wrong stack to be used.
1118 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
1119 * already pushed 3 words if it hits on the sysenter instruction:
1120 * eflags, cs and eip.
1122 * We just load the right stack, and push the three (known) values
1123 * by hand onto the new stack - while updating the return eip past
1124 * the instruction that would have done it for sysenter.
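 *
 * That is why the users below pass offset 12 (one eflags/cs/eip frame,
 * when the trap hit on the sysenter instruction itself) or 24 (two
 * such frames, for an NMI that hit while the debug handler was still
 * on the tiny sysenter stack).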
1126 .macro FIX_STACK offset ok label
1127 cmpw $__KERNEL_CS, 4(%esp)
1130 movl TSS_sysenter_sp0 + \offset(%esp), %esp
1133 pushl $sysenter_past_esp
1138 cmpl $ia32_sysenter_target,(%esp)
1139 jne debug_stack_correct
1140 FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
1141 debug_stack_correct:
1142 pushl $-1 # mark this as an int
1145 xorl %edx,%edx # error code 0
1146 movl %esp,%eax # pt_regs pointer
1148 jmp ret_from_exception
1152 * NMI is doubly nasty. It can happen _while_ we're handling
1153 * a debug fault, and the debug fault hasn't yet been able to
1154 * clear up the stack. So we first check whether we got an
1155 * NMI on the sysenter entry path, but after that we need to
1156 * check whether we got an NMI on the debug path where the debug
1157 * fault happened on the sysenter path.
1161 #ifdef CONFIG_X86_ESPFIX32
1164 cmpw $__ESPFIX_SS, %ax
1168 cmpl $ia32_sysenter_target,(%esp)
/* Do not access memory above the end of our stack page -
 * it might not exist.
1175 andl $(THREAD_SIZE-1),%eax
1176 cmpl $(THREAD_SIZE-20),%eax
1178 jae nmi_stack_correct
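	/*
	 * (The THREAD_SIZE-20 headroom check above guarantees that the
	 * probes of 12(%esp) and 16(%esp) below stay inside our stack
	 * page.)
	 */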
1179 cmpl $ia32_sysenter_target,12(%esp)
1180 je nmi_debug_stack_check
1184 xorl %edx,%edx # zero error code
1185 movl %esp,%eax # pt_regs pointer
1187 jmp restore_all_notrace
1190 FIX_STACK 12, nmi_stack_correct, 1
1191 jmp nmi_stack_correct
1193 nmi_debug_stack_check:
1194 cmpw $__KERNEL_CS,16(%esp)
1195 jne nmi_stack_correct
1197 jb nmi_stack_correct
1198 cmpl $debug_esp_fix_insn,(%esp)
1199 ja nmi_stack_correct
1200 FIX_STACK 24, nmi_stack_correct, 1
1201 jmp nmi_stack_correct
1203 #ifdef CONFIG_X86_ESPFIX32
 * create the ss:esp pointer for lss to switch back
1211 /* copy the iret frame of 12 bytes */
1217 FIXUP_ESPFIX_STACK # %eax == %esp
1218 xorl %edx,%edx # zero error code
1221 lss 12+4(%esp), %esp # back to espfix stack
1228 pushl $-1 # mark this as an int
1231 xorl %edx,%edx # zero error code
1232 movl %esp,%eax # pt_regs pointer
1234 jmp ret_from_exception
1237 ENTRY(general_protection)
1238 pushl $do_general_protection
1240 END(general_protection)
1242 #ifdef CONFIG_KVM_GUEST
1243 ENTRY(async_page_fault)
1245 pushl $do_async_page_fault
1247 END(async_page_fault)