/*
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
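
/*
 * These offsets mirror struct pt_regs; the PT_* constants used
 * throughout this file are generated from that layout by the
 * asm-offsets machinery, so the two must stay in sync.
 */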
43
1da177e4 44#include <linux/linkage.h>
d7e7528b 45#include <linux/err.h>
1da177e4 46#include <asm/thread_info.h>
55f327fa 47#include <asm/irqflags.h>
1da177e4
LT
48#include <asm/errno.h>
49#include <asm/segment.h>
50#include <asm/smp.h>
0341c14d 51#include <asm/page_types.h>
be44d2aa 52#include <asm/percpu.h>
fe7cacc1 53#include <asm/dwarf2.h>
ab68ed98 54#include <asm/processor-flags.h>
395a59d0 55#include <asm/ftrace.h>
9b7dc567 56#include <asm/irq_vectors.h>
40d2e763 57#include <asm/cpufeature.h>
b4ca46e4 58#include <asm/alternative-asm.h>
6837a54d 59#include <asm/asm.h>
e59d1b0a 60#include <asm/smap.h>
1da177e4 61
af0575bb
RM
62/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
63#include <linux/elf-em.h>
64#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
65#define __AUDIT_ARCH_LE 0x40000000
66
67#ifndef CONFIG_AUDITSYSCALL
68#define sysenter_audit syscall_trace_entry
69#define sysexit_audit syscall_exit_work
70#endif
71
ea714547
JO
72 .section .entry.text, "ax"
73
139ec7c4
RR
74/*
75 * We use macros for low-level operations which need to be overridden
76 * for paravirtualization. The following will never clobber any registers:
77 * INTERRUPT_RETURN (aka. "iret")
78 * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
d75cd22f 79 * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
139ec7c4
RR
80 *
81 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
82 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
83 * Allowing a register to be clobbered can shrink the paravirt replacement
84 * enough to patch inline, increasing performance.
85 */
86
1da177e4 87#ifdef CONFIG_PREEMPT
139ec7c4 88#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
1da177e4 89#else
139ec7c4 90#define preempt_stop(clobbers)
2e04bc76 91#define resume_kernel restore_all
1da177e4
LT
92#endif
93
55f327fa
IM
94.macro TRACE_IRQS_IRET
95#ifdef CONFIG_TRACE_IRQFLAGS
ab68ed98 96 testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off?
55f327fa
IM
97 jz 1f
98 TRACE_IRQS_ON
991:
100#endif
101.endm
102
ccbeed3a
TH
103/*
104 * User gs save/restore
105 *
106 * %gs is used for userland TLS and kernel only uses it for stack
107 * canary which is required to be at %gs:20 by gcc. Read the comment
108 * at the top of stackprotector.h for more info.
109 *
110 * Local labels 98 and 99 are used.
111 */
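/*
 * "Lazy" here means the kernel itself never reloads %gs on the
 * entry/exit path: when CONFIG_X86_32_LAZY_GS is set the macros
 * below only reserve the pt_regs slot, and the user's %gs is
 * dealt with at context-switch time instead.
 */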
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl_cfi $0
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl_cfi %gs
	/*CFI_REL_OFFSET gs, 0*/
.endm

.macro POP_GS pop=0
98:	popl_cfi %gs
	/*CFI_RESTORE gs*/
	.if \pop <> 0
	add $\pop, %esp
	CFI_ADJUST_CFA_OFFSET -\pop
	.endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b,99b)
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b,99b)
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
	/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL
	cld
	PUSH_GS
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0;*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0;*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0;*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %ecx
	CFI_RESTORE ecx
	popl_cfi %edx
	CFI_RESTORE edx
	popl_cfi %esi
	CFI_RESTORE esi
	popl_cfi %edi
	CFI_RESTORE edi
	popl_cfi %ebp
	CFI_RESTORE ebp
	popl_cfi %eax
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl_cfi %ds
	/*CFI_RESTORE ds;*/
2:	popl_cfi %es
	/*CFI_RESTORE es;*/
3:	popl_cfi %fs
	/*CFI_RESTORE fs;*/
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.popsection
	_ASM_EXTABLE(1b,4b)
	_ASM_EXTABLE(2b,5b)
	_ASM_EXTABLE(3b,6b)
	POP_GS_EX
.endm

.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl_cfi %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl_cfi %eax
	pushl_cfi $0x0202		# Reset kernel eflags
	popfl_cfi
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)

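/*
 * Kernel threads land here instead.  copy_thread() stashes the
 * thread function in pt_regs->bx and its argument in pt_regs->bp,
 * so the indirect call below amounts to fn(arg); if fn returns
 * (after a successful kernel_execve()), the thread continues to
 * user mode through syscall_exit with a forced return value of 0.
 */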
ENTRY(ret_from_kernel_thread)
	CFI_STARTPROC
	pushl_cfi %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl_cfi %eax
	pushl_cfi $0x0202		# Reset kernel eflags
	popfl_cfi
	movl PT_EBP(%esp),%eax
	call *PT_EBX(%esp)
	movl $0,PT_EAX(%esp)
	jmp syscall_exit
	CFI_ENDPROC
ENDPROC(ret_from_kernel_thread)

/*
 * Interrupt exit functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl PT_CS(%esp), %eax
	andl $SEGMENT_RPL_MASK, %eax
#endif
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
	cmpl $0,PER_CPU_VAR(__preempt_count)
	jnz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace it until
	 * enough kernel state to call TRACE_IRQS_OFF can be called - but
	 * we immediately enable interrupts at that point anyway.
	 */
	pushl_cfi $__USER_DS
	/*CFI_REL_OFFSET ss, 0*/
	pushl_cfi %ebp
	CFI_REL_OFFSET esp, 0
	pushfl_cfi
	orl $X86_EFLAGS_IF, (%esp)
	pushl_cfi $__USER_CS
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
	CFI_REL_OFFSET eip, 0
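	/*
	 * The arithmetic above: %esp is the top of the thread stack
	 * minus 8 (copy_thread's esp0 slack) minus the 4*4 bytes
	 * just pushed, so adding 8+4*4-THREAD_SIZE lands on the
	 * thread_info at the bottom of the stack, at offset
	 * TI_sysenter_return within it.
	 */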

	pushl_cfi %eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
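/*
 * (%ebp holds the user stack pointer here.  The limit is
 * __PAGE_OFFSET-3 so the 4-byte load at 1: below cannot straddle
 * into kernel space: __PAGE_OFFSET-4 is the last address whose
 * dword still fits entirely below __PAGE_OFFSET.)
 */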
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
	ASM_STAC
1:	movl (%ebp),%ebp
	ASM_CLAC
	movl %ebp,PT_EBP(%esp)
	_ASM_EXTABLE(1b,syscall_fault)

	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(NR_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
	jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call __audit_syscall_entry
	pushl_cfi %ebx
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $-MAX_ERRNO,%eax	/* is it an error ? */
	setbe %al		/* 1 if so, 0 if not */
	movzbl %al,%eax		/* zero-extend that */
	call __audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax	/* reload syscall return value */
	jmp sysenter_exit
#endif

	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.popsection
	_ASM_EXTABLE(1b,2b)
	PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)

/*
 * syscall stub including irq exit should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	ASM_CLAC
	pushl_cfi %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(NR_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jne syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
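	/*
	 * The mask/compare above accept only a non-VM86 return to
	 * user mode (EFLAGS.VM clear, CS RPL == 3 in %al) with a
	 * stack segment whose TI bit in %ah selects the LDT.
	 */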
restore_nocheck:
	RESTORE_REGS 4			# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
	_ASM_EXTABLE(irq_return,iret_exc)

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
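/*
 * Worked example with made-up values: with a kernel %esp of
 * 0xc0123fd0 and a user %esp of 0x00ab1234, the code below builds
 * a new %esp of 0x00ab3fd0 (user high word, kernel low word) and
 * programs the ESPFIX segment base to the difference, 0xbf670000,
 * so base + %esp still addresses the kernel stack while the
 * 16-bit iret can only clobber the low word.
 */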
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	shr $16, %edx
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
	pushl_cfi $__ESPFIX_SS
	pushl_cfi %eax			/* new kernel esp */
	/* Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
1:
#else
	movl %esp, %eax
#endif
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movb PT_CS(%esp), %bl
	andb $SEGMENT_RPL_MASK, %bl
	cmpb $USER_RPL, %bl
	jb resume_kernel
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace

#ifdef CONFIG_VM86
	ALIGN
work_notifysig_v86:
	pushl_cfi %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl_cfi %ecx
	movl %eax, %esp
	jmp 1b
#endif
END(work_pending)

	# perform syscall exit tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl $(NR_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	ASM_CLAC
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
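/*
 * (This is the inverse of the ldt_ss setup above: the segment base
 * written into the GDT there is read back here and added to the
 * truncated %esp to recover the linear kernel stack pointer.)
 */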
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl_cfi $__KERNEL_DS
	pushl_cfi %eax
	lss (%esp), %esp		/* switch to the normal stack segment */
	CFI_ADJUST_CFA_OFFSET -8
.endm
.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
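/*
 * (Rough size budget: each stub is a 2-byte push of a sign-extended
 * byte immediate plus a 2-byte short jump, and the 7th stub falls
 * through to the shared 5-byte jmp, so a chunk is at most
 * 7*2 + 6*2 + 5 = 31 bytes.)
 */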
.section .init.rodata,"a"
ENTRY(interrupt)
.section .entry.text, "ax"
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl_cfi $(~vector+0x80)	/* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .section .entry.text, "ax"
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
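	/*
	 * (The stubs pushed ~vector+0x80, so after subtracting 0x80
	 * orig_eax holds ~vector, i.e. -(vector+1): vector 0x20 is
	 * pushed as 0x5f and becomes 0xffffffdf here.)
	 */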
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

/*
 * Irq entries should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	ASM_CLAC;			\
	pushl_cfi $~(nr);		\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)


#ifdef CONFIG_TRACING
#define TRACE_BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
#define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)
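
/*
 * For example, <asm/entry_arch.h> instantiates
 * BUILD_INTERRUPT(reschedule_interrupt, RESCHEDULE_VECTOR), which
 * expands to a stub that pushes its vector and calls
 * smp_reschedule_interrupt() through the common path above.
 */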

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_coprocessor_error
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661:	pushl_cfi $do_general_protection
662:
.section .altinstructions,"a"
	altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
.previous
.section .altinstr_replacement,"ax"
663:	pushl $do_simd_coprocessor_error
664:
.previous
#else
	pushl_cfi $do_simd_coprocessor_error
#endif
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $-1			# mark this as an int
	pushl_cfi $do_device_not_available
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_overflow
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_bounds
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_invalid_op
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_coprocessor_segment_overrun
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $do_invalid_TSS
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $do_segment_not_present
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $do_stack_segment
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $do_alignment_check
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0			# no error code
	pushl_cfi $do_divide_error
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi machine_check_vector
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_spurious_interrupt_bug
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)
/*
 * End of kprobes section
 */
	.popsection

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entrypoint expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp		/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl_cfi $-1			/* orig_ax = -1 => not a system call */
	SAVE_ALL
	TRACE_IRQS_OFF

	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've reenabled
	   events and checked for pending events.  This simulates
	   iret instruction's behaviour where it delivers a
	   pending interrupt when enabling interrupts. */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb   1f
	cmpl $xen_iret_end_crit,%eax
	jae  1f

	jmp  xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp  ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl_cfi %eax
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl %eax,%eax
	popl_cfi %eax
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	jmp iret_exc
5:	pushl_cfi $-1 /* orig_ax = -1 => not a system call */
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
	_ASM_EXTABLE(1b,6b)
	_ASM_EXTABLE(2b,7b)
	_ASM_EXTABLE(3b,8b)
	_ASM_EXTABLE(4b,9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		xen_evtchn_do_upcall)

#endif	/* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
	hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne  ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	pushl $0	/* Pass NULL as regs pointer */
	movl 4*4(%esp), %eax
	movl 0x4(%ebp), %edx
	movl function_trace_op, %ecx
	subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	addl $4,%esp	/* skip NULL pointer */
	popl %edx
	popl %ecx
	popl %eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf	/* push flags before compare (in cs location) */
	cmpl $0, function_trace_stop
	jne ftrace_restore_flags

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is. We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
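	/*
	 * (After all the pushes below, the frame matches struct
	 * pt_regs: 12*4(%esp) is the ip slot holding the copied
	 * return address, 13*4(%esp) is the cs slot initially
	 * holding the saved flags, and 14*4(%esp) is the flags
	 * slot initially holding the original return address,
	 * which gets clobbered.)
	 */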
	pushl 4(%esp)	/* save return ip into ip slot */

	pushl $0	/* Load 0 into orig_ax */
	pushl %gs
	pushl %fs
	pushl %es
	pushl %ds
	pushl %eax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	pushl %ecx
	pushl %ebx

	movl 13*4(%esp), %eax	/* Get the saved flags */
	movl %eax, 14*4(%esp)	/* Move saved flags into regs->flags location */
				/* clobbering return ip */
	movl $__KERNEL_CS,13*4(%esp)

	movl 12*4(%esp), %eax	/* Load ip (1st parameter) */
	subl $MCOUNT_INSN_SIZE, %eax	/* Adjust ip */
	movl 0x4(%ebp), %edx	/* Load parent ip (2nd parameter) */
	movl function_trace_op, %ecx	/* Save ftrace_pos in 3rd parameter */
	pushl %esp		/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call ftrace_stub

	addl $4, %esp		/* Skip pt_regs */
	movl 14*4(%esp), %eax	/* Move flags back into cs */
	movl %eax, 13*4(%esp)	/* Needed to keep addl from modifying flags */
	movl 12*4(%esp), %eax	/* Get return ip from regs->ip */
	movl %eax, 14*4(%esp)	/* Put return ip back for ret */

	popl %ebx
	popl %ecx
	popl %edx
	popl %esi
	popl %edi
	popl %ebp
	popl %eax
	popl %ds
	popl %es
	popl %fs
	popl %gs
	addl $8, %esp		/* Skip orig_ax and ip */
	popf			/* Pop flags at end (no addl to corrupt flags) */
	jmp ftrace_ret

ftrace_restore_flags:
	popf
	jmp  ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $__PAGE_OFFSET, %esp
	jb ftrace_stub		/* Paging not enabled yet? */

	cmpl $0, function_trace_stop
	jne  ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	movl (%ebp), %ecx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
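	/*
	 * Reached via the fake return address that
	 * prepare_ftrace_return() installed on the stack;
	 * ftrace_return_to_handler() hands back the original
	 * return address, which we jump to once the scratch
	 * registers are restored.
	 */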
	pushl %eax
	pushl %edx
	movl %ebp, %eax
	call ftrace_return_to_handler
	movl %eax, %ecx
	popl %edx
	popl %eax
	jmp *%ecx
#endif

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $trace_do_page_fault
	jmp error_code
	CFI_ENDPROC
END(trace_page_fault)
#endif

ENTRY(page_fault)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl_cfi
	pushl_cfi $__KERNEL_CS
	pushl_cfi $sysenter_past_esp
	CFI_REL_OFFSET eip, 0
.endm

ENTRY(debug)
	RING0_INT_FRAME
	ASM_CLAC
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl_cfi %eax
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl_cfi %eax
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl_cfi %eax
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl_cfi %eax
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_all_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * create the pointer to lss back
	 */
	pushl_cfi %ss
	pushl_cfi %esp
	addl $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl_cfi 16(%esp)
	.endr
	pushl_cfi %eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)

ENTRY(int3)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl_cfi $do_general_protection
	jmp error_code
	CFI_ENDPROC
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $do_async_page_fault
	jmp error_code
	CFI_ENDPROC
END(async_page_fault)
#endif

/*
 * End of kprobes section
 */
	.popsection