x86/asm/entry: Untangle 'ia32_sysenter_target' into two entry points: entry_SYSENTER_...
arch/x86/entry/entry_32.S
/*
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *      ptrace needs to have all regs on the stack.
 *      if the order here is changed, it needs to be
 *      updated in fork.c:copy_process, signal.c:do_signal,
 *      ptrace.c and ptrace.h
 *
 *       0(%esp) - %ebx
 *       4(%esp) - %ecx
 *       8(%esp) - %edx
 *       C(%esp) - %esi
 *      10(%esp) - %edi
 *      14(%esp) - %ebp
 *      18(%esp) - %eax
 *      1C(%esp) - %ds
 *      20(%esp) - %es
 *      24(%esp) - %fs
 *      28(%esp) - %gs          saved iff !CONFIG_X86_32_LAZY_GS
 *      2C(%esp) - orig_eax
 *      30(%esp) - %eip
 *      34(%esp) - %cs
 *      38(%esp) - %eflags
 *      3C(%esp) - %oldesp
 *      40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386         (EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE         0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit          syscall_trace_entry
#define sysexit_audit           syscall_exit_work
#endif

        .section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)  DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel           restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
        testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)   # interrupts off?
        jz 1f
        TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
        pushl $0
.endm
.macro POP_GS pop=0
        addl $(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else   /* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
        pushl %gs
.endm

.macro POP_GS pop=0
98:     popl %gs
        .if \pop <> 0
        add $\pop, %esp
        .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:     movl $0, (%esp)
        jmp 98b
.popsection
        _ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:     mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:     movl $0, PT_GS(%esp)
        jmp 98b
.popsection
        _ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
        movl %gs, \reg
.endm
.macro REG_TO_PTGS reg
        movl \reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
        movl $(__KERNEL_STACK_CANARY), \reg
        movl \reg, %gs
.endm

#endif  /* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL
        cld
        PUSH_GS
        pushl %fs
        pushl %es
        pushl %ds
        pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx
        movl $(__USER_DS), %edx
        movl %edx, %ds
        movl %edx, %es
        movl $(__KERNEL_PERCPU), %edx
        movl %edx, %fs
        SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
.endm

.macro RESTORE_REGS pop=0
        RESTORE_INT_REGS
1:      popl %ds
2:      popl %es
3:      popl %fs
        POP_GS \pop
.pushsection .fixup, "ax"
4:      movl $0, (%esp)
        jmp 1b
5:      movl $0, (%esp)
        jmp 2b
6:      movl $0, (%esp)
        jmp 3b
.popsection
        _ASM_EXTABLE(1b, 4b)
        _ASM_EXTABLE(2b, 5b)
        _ASM_EXTABLE(3b, 6b)
        POP_GS_EX
.endm

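/*
 * Note: SAVE_ALL builds the 'struct pt_regs' layout documented at the top
 * of this file, and RESTORE_REGS unwinds it.  The numbered labels plus the
 * _ASM_EXTABLE entries send a faulting segment-register pop to the .fixup
 * code, which substitutes a zero selector and retries instead of letting a
 * bad user segment kill the return path.
 */
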
ENTRY(ret_from_fork)
        pushl %eax
        call schedule_tail
        GET_THREAD_INFO(%ebp)
        popl %eax
        pushl $0x0202                   # Reset kernel eflags
        popfl
        jmp syscall_exit
END(ret_from_fork)

ENTRY(ret_from_kernel_thread)
        pushl %eax
        call schedule_tail
        GET_THREAD_INFO(%ebp)
        popl %eax
        pushl $0x0202                   # Reset kernel eflags
        popfl
        movl PT_EBP(%esp), %eax
        call *PT_EBX(%esp)
        movl $0, PT_EAX(%esp)
        jmp syscall_exit
ENDPROC(ret_from_kernel_thread)

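/*
 * Note: both paths above end with a jump to syscall_exit, so a freshly
 * forked task (or kernel thread) leaves the kernel through the same
 * work-flag checks as an ordinary system-call return.
 */
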
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

        # userspace resumption stub bypassing syscall exit tracing
        ALIGN
ret_from_exception:
        preempt_stop(CLBR_ANY)
ret_from_intr:
        GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
        movl PT_EFLAGS(%esp), %eax      # mix EFLAGS and CS
        movb PT_CS(%esp), %al
        andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
        /*
         * We can be coming here from a child spawned by kernel_thread().
         */
        movl PT_CS(%esp), %eax
        andl $SEGMENT_RPL_MASK, %eax
#endif
        cmpl $USER_RPL, %eax
        jb resume_kernel                # not returning to v8086 or userspace

ENTRY(resume_userspace)
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
                                        # int/exception return?
        jne work_pending
        jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
        DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
        cmpl $0, PER_CPU_VAR(__preempt_count)
        jnz restore_all
        testl $X86_EFLAGS_IF, PT_EFLAGS(%esp)   # interrupts off (exception path) ?
        jz restore_all
        call preempt_schedule_irq
        jmp need_resched
END(resume_kernel)
#endif

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */

        # sysenter call handler stub
ENTRY(entry_SYSENTER_32)
        movl TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
        /*
         * Interrupts are disabled here, but we can't trace it until
         * enough kernel state to call TRACE_IRQS_OFF can be called - but
         * we immediately enable interrupts at that point anyway.
         */
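        /*
         * Note: SYSENTER itself pushes nothing, so the five words an
         * interrupt would have pushed (ss, esp, eflags, cs, eip) are built
         * by hand below.  The 32-bit vDSO saves the user stack pointer in
         * %ebp before executing SYSENTER, which is why %ebp is pushed as
         * the saved user ESP here - and why the sixth system-call argument
         * (normally %ebp) has to be fetched from the user stack later.
         */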
        pushl $__USER_DS
        pushl %ebp
        pushfl
        orl $X86_EFLAGS_IF, (%esp)
        pushl $__USER_CS
        /*
         * Push current_thread_info()->sysenter_return to the stack.
         * A tiny bit of offset fixup is necessary: TI_sysenter_return
         * is relative to thread_info, which is at the bottom of the
         * kernel stack page.  4*4 means the 4 words pushed above;
         * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
         * and THREAD_SIZE takes us to the bottom.
         */
        pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)

        pushl %eax
        SAVE_ALL
        ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
        cmpl $__PAGE_OFFSET-3, %ebp
        jae syscall_fault
        ASM_STAC
1:      movl (%ebp), %ebp
        ASM_CLAC
        movl %ebp, PT_EBP(%esp)
        _ASM_EXTABLE(1b, syscall_fault)

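        /*
         * Note on the bound check above: the 4-byte load at (%ebp) must lie
         * entirely below __PAGE_OFFSET, hence the comparison against
         * __PAGE_OFFSET-3 (the last byte read is at %ebp+3).  A fault on
         * the load itself is routed through the exception-table entry to
         * syscall_fault.
         */
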
        GET_THREAD_INFO(%ebp)

        testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
        jnz sysenter_audit
sysenter_do_call:
        cmpl $(NR_syscalls), %eax
        jae sysenter_badsys
        call *sys_call_table(,%eax,4)
sysenter_after_call:
        movl %eax, PT_EAX(%esp)
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        testl $_TIF_ALLWORK_MASK, %ecx
        jnz sysexit_audit
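/*
 * Note: SYSEXIT returns to user mode with the new EIP taken from %edx and
 * the new ESP taken from %ecx, which is why PT_EIP and PT_OLDESP are
 * loaded into exactly those registers below.
 */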
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
        movl PT_EIP(%esp), %edx
        movl PT_OLDESP(%esp), %ecx
        xorl %ebp, %ebp
        TRACE_IRQS_ON
1:      mov PT_FS(%esp), %fs
        PTGS_TO_GS
        ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
        jnz syscall_trace_entry
        /* movl PT_EAX(%esp), %eax      already set, syscall number: 1st arg to audit */
        movl PT_EBX(%esp), %edx         /* ebx/a0: 2nd arg to audit */
        /* movl PT_ECX(%esp), %ecx      already set, a1: 3rd arg to audit */
        pushl PT_ESI(%esp)              /* a3: 5th arg */
        pushl PT_EDX+4(%esp)            /* a2: 4th arg */
        call __audit_syscall_entry
        popl %ecx                       /* get that remapped edx off the stack */
        popl %ecx                       /* get that remapped esi off the stack */
        movl PT_EAX(%esp), %eax         /* reload syscall number */
        jmp sysenter_do_call

sysexit_audit:
        testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
        jnz syscall_exit_work
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)
        movl %eax, %edx                 /* second arg, syscall return value */
        cmpl $-MAX_ERRNO, %eax          /* is it an error? */
        setbe %al                       /* 1 if so, 0 if not */
        movzbl %al, %eax                /* zero-extend that */
        call __audit_syscall_exit
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
        jnz syscall_exit_work
        movl PT_EAX(%esp), %eax         /* reload syscall return value */
        jmp sysenter_exit
#endif

.pushsection .fixup, "ax"
2:      movl $0, PT_FS(%esp)
        jmp 1b
.popsection
        _ASM_EXTABLE(1b, 2b)
        PTGS_TO_GS_EX
ENDPROC(entry_SYSENTER_32)

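        # system_call below is the legacy "int $0x80" entry point: the
        # syscall number arrives in %eax (saved as orig_eax) and the
        # arguments in %ebx, %ecx, %edx, %esi, %edi and %ebp.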
        # system call handler stub
ENTRY(system_call)
        ASM_CLAC
        pushl %eax                      # save orig_eax
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
                                        # system call tracing in operation / emulation
        testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(NR_syscalls), %eax
        jae syscall_badsys
syscall_call:
        call *sys_call_table(,%eax,4)
syscall_after_call:
        movl %eax, PT_EAX(%esp)         # store the return value
syscall_exit:
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        testl $_TIF_ALLWORK_MASK, %ecx  # current->work
        jnz syscall_exit_work

restore_all:
        TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
        movl PT_EFLAGS(%esp), %eax      # mix EFLAGS, SS and CS
        # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
        # are returning to the kernel.
        # See comments in process.c:copy_thread() for details.
        movb PT_OLDSS(%esp), %ah
        movb PT_CS(%esp), %al
        andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
        cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
        je ldt_ss                       # returning to user-space with LDT SS
#endif
restore_nocheck:
        RESTORE_REGS 4                  # skip orig_eax/error_code
irq_return:
        INTERRUPT_RETURN
.section .fixup, "ax"
ENTRY(iret_exc)
        pushl $0                        # no error code
        pushl $do_iret_error
        jmp error_code
.previous
        _ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
#ifdef CONFIG_PARAVIRT
        /*
         * The kernel can't run on a non-flat stack if paravirt mode
         * is active.  Rather than try to fixup the high bits of
         * ESP, bypass this code entirely.  This may break DOSemu
         * and/or Wine support in a paravirt VM, although the option
         * is still available to implement the setting of the high
         * 16-bits in the INTERRUPT_RETURN paravirt-op.
         */
        cmpl $0, pv_info+PARAVIRT_enabled
        jne restore_nocheck
#endif

/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
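/*
 * Note: bytes 4 and 7 of a GDT descriptor hold bits 16..23 and 24..31 of
 * the segment base, so the two byte stores below patch only the high half
 * of the __ESPFIX_SS base and leave the rest of the descriptor untouched.
 */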
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
        mov %esp, %edx                  /* load kernel esp */
        mov PT_OLDESP(%esp), %eax       /* load userspace esp */
        mov %dx, %ax                    /* eax: new kernel esp */
        sub %eax, %edx                  /* offset (low word is 0) */
        shr $16, %edx
        mov %dl, GDT_ESPFIX_SS + 4      /* bits 16..23 */
        mov %dh, GDT_ESPFIX_SS + 7      /* bits 24..31 */
        pushl $__ESPFIX_SS
        pushl %eax                      /* new kernel esp */
        /*
         * Disable interrupts, but do not irqtrace this section: we
         * will soon execute iret and the tracer was already set to
         * the irqstate after the iret.
         */
        DISABLE_INTERRUPTS(CLBR_EAX)
        lss (%esp), %esp                /* switch to espfix segment */
        jmp restore_nocheck
#endif
ENDPROC(system_call)

        # perform work that needs to be done immediately before resumption
        ALIGN
work_pending:
        testb $_TIF_NEED_RESCHED, %cl
        jz work_notifysig
work_resched:
        call schedule
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
                                        # than syscall tracing?
        jz restore_all
        testb $_TIF_NEED_RESCHED, %cl
        jnz work_resched

work_notifysig:                         # deal with pending signals and
                                        # notify-resume requests
#ifdef CONFIG_VM86
        testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
        movl %esp, %eax
        jnz work_notifysig_v86          # returning to kernel-space or
                                        # vm86-space
1:
#else
        movl %esp, %eax
#endif
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        movb PT_CS(%esp), %bl
        andb $SEGMENT_RPL_MASK, %bl
        cmpb $USER_RPL, %bl
        jb resume_kernel
        xorl %edx, %edx
        call do_notify_resume
        jmp resume_userspace

#ifdef CONFIG_VM86
        ALIGN
work_notifysig_v86:
        pushl %ecx                      # save ti_flags for do_notify_resume
        call save_v86_state             # %eax contains pt_regs pointer
        popl %ecx
        movl %eax, %esp
        jmp 1b
#endif
END(work_pending)

        # perform syscall entry tracing
        ALIGN
syscall_trace_entry:
        movl $-ENOSYS, PT_EAX(%esp)
        movl %esp, %eax
        call syscall_trace_enter
        /* What it returned is what we'll actually use.  */
        cmpl $(NR_syscalls), %eax
        jnae syscall_call
        jmp syscall_exit
END(syscall_trace_entry)

        # perform syscall exit tracing
        ALIGN
syscall_exit_work:
        testl $_TIF_WORK_SYSCALL_EXIT, %ecx
        jz work_pending
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)     # could let syscall_trace_leave() call
                                        # schedule() instead
        movl %esp, %eax
        call syscall_trace_leave
        jmp resume_userspace
END(syscall_exit_work)

syscall_fault:
        ASM_CLAC
        GET_THREAD_INFO(%ebp)
        movl $-EFAULT, PT_EAX(%esp)
        jmp resume_userspace
END(syscall_fault)

syscall_badsys:
        movl $-ENOSYS, %eax
        jmp syscall_after_call
END(syscall_badsys)

sysenter_badsys:
        movl $-ENOSYS, %eax
        jmp sysenter_after_call
END(sysenter_badsys)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
        /* fixup the stack */
        mov GDT_ESPFIX_SS + 4, %al      /* bits 16..23 */
        mov GDT_ESPFIX_SS + 7, %ah      /* bits 24..31 */
        shl $16, %eax
        addl %esp, %eax                 /* the adjusted stack pointer */
        pushl $__KERNEL_DS
        pushl %eax
        lss (%esp), %esp                /* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
        movl %ss, %eax
        /* see if on espfix stack */
        cmpw $__ESPFIX_SS, %ax
        jne 27f
        movl $__KERNEL_DS, %eax
        movl %eax, %ds
        movl %eax, %es
        /* switch to normal stack */
        FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
        .align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
        pushl $(~vector+0x80)           /* Note: always in signed byte range */
    vector=vector+1
        jmp common_interrupt
        .align 8
    .endr
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
        ASM_CLAC
        addl $-0x80, (%esp)             /* Adjust vector into the [-256, -1] range */
        SAVE_ALL
        TRACE_IRQS_OFF
        movl %esp, %eax
        call do_IRQ
        jmp ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)  \
ENTRY(name)                             \
        ASM_CLAC;                       \
        pushl $~(nr);                   \
        SAVE_ALL;                       \
        TRACE_IRQS_OFF                  \
        movl %esp, %eax;                \
        call fn;                        \
        jmp ret_from_intr;              \
ENDPROC(name)


#ifdef CONFIG_TRACING
#define TRACE_BUILD_INTERRUPT(name, nr)         \
        BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
#define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)               \
        BUILD_INTERRUPT3(name, nr, smp_##name); \
        TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
        ASM_CLAC
        pushl $0
        pushl $do_coprocessor_error
        jmp error_code
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        ASM_CLAC
        pushl $0
#ifdef CONFIG_X86_INVD_BUG
        /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
        ALTERNATIVE "pushl $do_general_protection",     \
                    "pushl $do_simd_coprocessor_error", \
                    X86_FEATURE_XMM
#else
        pushl $do_simd_coprocessor_error
#endif
        jmp error_code
END(simd_coprocessor_error)

ENTRY(device_not_available)
        ASM_CLAC
        pushl $-1                       # mark this as an int
        pushl $do_device_not_available
        jmp error_code
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
        iret
        _ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)

ENTRY(native_irq_enable_sysexit)
        sti
        sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
        ASM_CLAC
        pushl $0
        pushl $do_overflow
        jmp error_code
END(overflow)

ENTRY(bounds)
        ASM_CLAC
        pushl $0
        pushl $do_bounds
        jmp error_code
END(bounds)

ENTRY(invalid_op)
        ASM_CLAC
        pushl $0
        pushl $do_invalid_op
        jmp error_code
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
        ASM_CLAC
        pushl $0
        pushl $do_coprocessor_segment_overrun
        jmp error_code
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
        ASM_CLAC
        pushl $do_invalid_TSS
        jmp error_code
END(invalid_TSS)

ENTRY(segment_not_present)
        ASM_CLAC
        pushl $do_segment_not_present
        jmp error_code
END(segment_not_present)

ENTRY(stack_segment)
        ASM_CLAC
        pushl $do_stack_segment
        jmp error_code
END(stack_segment)

ENTRY(alignment_check)
        ASM_CLAC
        pushl $do_alignment_check
        jmp error_code
END(alignment_check)

ENTRY(divide_error)
        ASM_CLAC
        pushl $0                        # no error code
        pushl $do_divide_error
        jmp error_code
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
        ASM_CLAC
        pushl $0
        pushl machine_check_vector
        jmp error_code
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
        ASM_CLAC
        pushl $0
        pushl $do_spurious_interrupt_bug
        jmp error_code
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal sysenter
 * entrypoint expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
        addl $5*4, %esp                 /* remove xen-provided frame */
        jmp sysenter_past_esp

ENTRY(xen_hypervisor_callback)
        pushl $-1                       /* orig_ax = -1 => not a system call */
        SAVE_ALL
        TRACE_IRQS_OFF

        /*
         * Check to see if we got the event in the critical
         * region in xen_iret_direct, after we've reenabled
         * events and checked for pending events.  This simulates
         * iret instruction's behaviour where it delivers a
         * pending interrupt when enabling interrupts.
         */
        movl PT_EIP(%esp), %eax
        cmpl $xen_iret_start_crit, %eax
        jb 1f
        cmpl $xen_iret_end_crit, %eax
        jae 1f

        jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:      mov %esp, %eax
        call xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
        call xen_maybe_preempt_hcall
#endif
        jmp ret_from_intr
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
        pushl %eax
        movl $1, %eax
1:      mov 4(%esp), %ds
2:      mov 8(%esp), %es
3:      mov 12(%esp), %fs
4:      mov 16(%esp), %gs
        /*
         * EAX == 0 => Category 1 (Bad segment)
         * EAX != 0 => Category 2 (Bad IRET)
         */
        testl %eax, %eax
        popl %eax
        lea 16(%esp), %esp
        jz 5f
        jmp iret_exc
5:      pushl $-1                       /* orig_ax = -1 => not a system call */
        SAVE_ALL
        jmp ret_from_exception

.section .fixup, "ax"
6:      xorl %eax, %eax
        movl %eax, 4(%esp)
        jmp 1b
7:      xorl %eax, %eax
        movl %eax, 8(%esp)
        jmp 2b
8:      xorl %eax, %eax
        movl %eax, 12(%esp)
        jmp 3b
9:      xorl %eax, %eax
        movl %eax, 16(%esp)
        jmp 4b
.previous
        _ASM_EXTABLE(1b, 6b)
        _ASM_EXTABLE(2b, 7b)
        _ASM_EXTABLE(3b, 8b)
        _ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
                 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
                 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
        ret
END(mcount)

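/*
 * Note: with -mregparm=3 (how the 32-bit kernel is compiled) the first
 * three C arguments travel in %eax/%edx/%ecx, so ftrace_caller below
 * passes the traced function's address (adjusted by MCOUNT_INSN_SIZE) in
 * %eax, its caller's return address (0x4(%ebp)) in %edx and
 * function_trace_op in %ecx; the pushed zero is the NULL pt_regs pointer
 * picked up from the stack as the fourth argument.
 */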
ENTRY(ftrace_caller)
        pushl %eax
        pushl %ecx
        pushl %edx
        pushl $0                        /* Pass NULL as regs pointer */
        movl 4*4(%esp), %eax
        movl 0x4(%ebp), %edx
        movl function_trace_op, %ecx
        subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
        call ftrace_stub

        addl $4, %esp                   /* skip NULL pointer */
        popl %edx
        popl %ecx
        popl %eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
        jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
        ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
        pushf                           /* push flags before compare (in cs location) */

        /*
         * i386 does not save SS and ESP when coming from kernel.
         * Instead, to get sp, &regs->sp is used (see ptrace.h).
         * Unfortunately, that means eflags must be at the same location
         * as the current return ip is. We move the return ip into the
         * ip location, and move flags into the return ip location.
         */
        pushl 4(%esp)                   /* save return ip into ip slot */

        pushl $0                        /* Load 0 into orig_ax */
        pushl %gs
        pushl %fs
        pushl %es
        pushl %ds
        pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx

        movl 13*4(%esp), %eax           /* Get the saved flags */
        movl %eax, 14*4(%esp)           /* Move saved flags into regs->flags location */
                                        /* clobbering return ip */
        movl $__KERNEL_CS, 13*4(%esp)

        movl 12*4(%esp), %eax           /* Load ip (1st parameter) */
        subl $MCOUNT_INSN_SIZE, %eax    /* Adjust ip */
        movl 0x4(%ebp), %edx            /* Load parent ip (2nd parameter) */
        movl function_trace_op, %ecx    /* Save ftrace_pos in 3rd parameter */
        pushl %esp                      /* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
        call ftrace_stub

        addl $4, %esp                   /* Skip pt_regs */
        movl 14*4(%esp), %eax           /* Move flags back into cs */
        movl %eax, 13*4(%esp)           /* Needed to keep addl from modifying flags */
        movl 12*4(%esp), %eax           /* Get return ip from regs->ip */
        movl %eax, 14*4(%esp)           /* Put return ip back for ret */

        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        popl %ds
        popl %es
        popl %fs
        popl %gs
        addl $8, %esp                   /* Skip orig_ax and ip */
        popf                            /* Pop flags at end (no addl to corrupt flags) */
        jmp ftrace_ret

        popf
        jmp ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
        cmpl $__PAGE_OFFSET, %esp
        jb ftrace_stub                  /* Paging not enabled yet? */

        cmpl $ftrace_stub, ftrace_trace_function
        jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        cmpl $ftrace_stub, ftrace_graph_return
        jnz ftrace_graph_caller

        cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
        jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
        ret

        /* taken from glibc */
trace:
        pushl %eax
        pushl %ecx
        pushl %edx
        movl 0xc(%esp), %eax
        movl 0x4(%ebp), %edx
        subl $MCOUNT_INSN_SIZE, %eax

        call *ftrace_trace_function

        popl %edx
        popl %ecx
        popl %eax
        jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
        pushl %eax
        pushl %ecx
        pushl %edx
        movl 0xc(%esp), %eax
        lea 0x4(%ebp), %edx
        movl (%ebp), %ecx
        subl $MCOUNT_INSN_SIZE, %eax
        call prepare_ftrace_return
        popl %edx
        popl %ecx
        popl %eax
        ret
END(ftrace_graph_caller)

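/*
 * Note: return_to_handler below is what the graph tracer plants in place
 * of a traced function's real return address.  %eax and %edx are saved
 * around the call because they may carry a (possibly 64-bit) return
 * value; ftrace_return_to_handler() hands back the original return
 * address, which is then jumped to through %ecx.
 */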
.globl return_to_handler
return_to_handler:
        pushl %eax
        pushl %edx
        movl %ebp, %eax
        call ftrace_return_to_handler
        movl %eax, %ecx
        popl %edx
        popl %eax
        jmp *%ecx
#endif

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
        ASM_CLAC
        pushl $trace_do_page_fault
        jmp error_code
END(trace_page_fault)
#endif

ENTRY(page_fault)
        ASM_CLAC
        pushl $do_page_fault
        ALIGN
error_code:
        /* the function address is in %gs's slot on the stack */
        pushl %fs
        pushl %es
        pushl %ds
        pushl %eax
        pushl %ebp
        pushl %edi
        pushl %esi
        pushl %edx
        pushl %ecx
        pushl %ebx
        cld
        movl $(__KERNEL_PERCPU), %ecx
        movl %ecx, %fs
        UNWIND_ESPFIX_STACK
        GS_TO_REG %ecx
        movl PT_GS(%esp), %edi          # get the function address
        movl PT_ORIG_EAX(%esp), %edx    # get the error code
        movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
        REG_TO_PTGS %ecx
        SET_KERNEL_GS %ecx
        movl $(__USER_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
        TRACE_IRQS_OFF
        movl %esp, %eax                 # pt_regs pointer
        call *%edi
        jmp ret_from_exception
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
        cmpw $__KERNEL_CS, 4(%esp)
        jne \ok
\label:
        movl TSS_sysenter_sp0 + \offset(%esp), %esp
        pushfl
        pushl $__KERNEL_CS
        pushl $sysenter_past_esp
.endm

ENTRY(debug)
        ASM_CLAC
        cmpl $entry_SYSENTER_32, (%esp)
        jne debug_stack_correct
        FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
        pushl $-1                       # mark this as an int
        SAVE_ALL
        TRACE_IRQS_OFF
        xorl %edx, %edx                 # error code 0
        movl %esp, %eax                 # pt_regs pointer
        call do_debug
        jmp ret_from_exception
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
        ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
        pushl %eax
        movl %ss, %eax
        cmpw $__ESPFIX_SS, %ax
        popl %eax
        je nmi_espfix_stack
#endif
        cmpl $entry_SYSENTER_32, (%esp)
        je nmi_stack_fixup
        pushl %eax
        movl %esp, %eax
        /*
         * Do not access memory above the end of our stack page,
         * it might not exist.
         */
        andl $(THREAD_SIZE-1), %eax
        cmpl $(THREAD_SIZE-20), %eax
        popl %eax
        jae nmi_stack_correct
        cmpl $entry_SYSENTER_32, 12(%esp)
        je nmi_debug_stack_check
nmi_stack_correct:
        pushl %eax
        SAVE_ALL
        xorl %edx, %edx                 # zero error code
        movl %esp, %eax                 # pt_regs pointer
        call do_nmi
        jmp restore_all_notrace

nmi_stack_fixup:
        FIX_STACK 12, nmi_stack_correct, 1
        jmp nmi_stack_correct

nmi_debug_stack_check:
        cmpw $__KERNEL_CS, 16(%esp)
        jne nmi_stack_correct
        cmpl $debug, (%esp)
        jb nmi_stack_correct
        cmpl $debug_esp_fix_insn, (%esp)
        ja nmi_stack_correct
        FIX_STACK 24, nmi_stack_correct, 1
        jmp nmi_stack_correct

#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
        /*
         * create the pointer to lss back
         */
        pushl %ss
        pushl %esp
        addl $4, (%esp)
        /* copy the iret frame of 12 bytes */
        .rept 3
        pushl 16(%esp)
        .endr
        pushl %eax
        SAVE_ALL
        FIXUP_ESPFIX_STACK              # %eax == %esp
        xorl %edx, %edx                 # zero error code
        call do_nmi
        RESTORE_REGS
        lss 12+4(%esp), %esp            # back to espfix stack
        jmp irq_return
#endif
END(nmi)

ENTRY(int3)
        ASM_CLAC
        pushl $-1                       # mark this as an int
        SAVE_ALL
        TRACE_IRQS_OFF
        xorl %edx, %edx                 # zero error code
        movl %esp, %eax                 # pt_regs pointer
        call do_int3
        jmp ret_from_exception
END(int3)

ENTRY(general_protection)
        pushl $do_general_protection
        jmp error_code
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
        ASM_CLAC
        pushl $do_async_page_fault
        jmp error_code
END(async_page_fault)
#endif