[PATCH] i386: Fix double #includes in arch/i386
arch/i386/kernel/entry.S
/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 * 	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %gs
 *	28(%esp) - orig_eax
 *	2C(%esp) - %eip
 *	30(%esp) - %cs
 *	34(%esp) - %eflags
 *	38(%esp) - %oldesp
 *	3C(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
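
/*
 * For orientation only: the layout above is struct pt_regs. A rough
 * C-side sketch (field names assumed from include/asm-i386/ptrace.h
 * at this point in the tree, with %gs added by the PDA changes):
 *
 *	struct pt_regs {
 *		long ebx, ecx, edx, esi, edi, ebp, eax;
 *		int  xds, xes, xgs;
 *		long orig_eax, eip;
 *		int  xcs;
 *		long eflags, esp;
 *		int  xss;
 *	};
 *
 * The PT_* offsets used below (PT_EBX, PT_EFLAGS, ...) are generated
 * from that struct by asm-offsets.c.
 */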

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"

#define nr_syscalls ((syscall_table_size)/4)

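/*
 * syscall_table_size is the byte size of sys_call_table, set where the
 * table is assembled (later in this file); each entry is a 4-byte
 * function pointer, so dividing by 4 gives the number of entries.
 */
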
CF_MASK		= 0x00000001
TF_MASK		= 0x00000100
IF_MASK		= 0x00000200
DF_MASK		= 0x00000400
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000

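/*
 * Single-bit EFLAGS masks: e.g. IF is bit 9, so IF_MASK = 1 << 9 =
 * 0x200, and "testl $IF_MASK, PT_EFLAGS(%esp)" below checks whether
 * the interrupted context had interrupts enabled.
 */
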
/* These are replacements for paravirtualization */
#define DISABLE_INTERRUPTS		cli
#define ENABLE_INTERRUPTS		sti
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define INTERRUPT_RETURN		iret
#define GET_CR0_INTO_EAX		movl %cr0, %eax

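/*
 * Illustrative only: the point of routing everything through these
 * macros is that a paravirtualized kernel can redefine them as
 * hypercalls instead of raw cli/sti/iret, leaving the entry paths
 * below untouched. Roughly (hypothetical hook name, not this tree's
 * final paravirt interface):
 *
 *	#define DISABLE_INTERRUPTS	call *paravirt_irq_disable
 */
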
#ifdef CONFIG_PREEMPT
#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
#else
#define preempt_stop
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $IF_MASK,PT_EFLAGS(%esp)     # interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

#define SAVE_ALL \
	cld; \
	pushl %gs; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET gs, 0;*/\
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es; \
	movl $(__KERNEL_PDA), %edx; \
	movl %edx, %gs

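/*
 * After SAVE_ALL, %esp points at a complete pt_regs frame: the CPU
 * pushed ss/esp/eflags/cs/eip on entry, the stub pushed orig_eax, and
 * SAVE_ALL adds gs/es/ds plus the seven general registers. Note that
 * %gs is reloaded with __KERNEL_PDA, so per-CPU data is reachable via
 * %gs-relative addressing while in the kernel.
 */
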
#define RESTORE_INT_REGS \
	popl %ebx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebx;\
	popl %ecx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ecx;\
	popl %edx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edx;\
	popl %esi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE esi;\
	popl %edi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edi;\
	popl %ebp;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebp;\
	popl %eax;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE eax

#define RESTORE_REGS	\
	RESTORE_INT_REGS; \
1:	popl %ds;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE ds;*/\
2:	popl %es;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE es;*/\
3:	popl %gs;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE gs;*/\
.pushsection .fixup,"ax";	\
4:	movl $0,(%esp);	\
	jmp 1b;		\
5:	movl $0,(%esp);	\
	jmp 2b;		\
6:	movl $0,(%esp);	\
	jmp 3b;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,4b;	\
	.long 2b,5b;	\
	.long 3b,6b;	\
.popsection

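/*
 * How the fixup above works: each ".long fault,fixup" pair in
 * __ex_table tells the fault handler that a fault at the first address
 * should resume at the second. So if "popl %ds" at 1: faults on a
 * bogus saved selector, control resumes at 4:, which overwrites the
 * saved value with 0 (the null selector) and retries the pop.
 */
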
#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
	CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
	CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
	CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
	CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
	CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC

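/*
 * $0x0202 above is IF (bit 9, 0x200) plus the always-set reserved
 * bit 1 (0x002): a clean EFLAGS value with interrupts enabled for the
 * freshly forked task.
 */
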
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

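/*
 * The mix above leaves the saved CS's RPL bits in %al while bit 17 of
 * %eax still carries VM from EFLAGS. Kernel returns have RPL 0 and VM
 * clear, so any value below USER_RPL (3) means "returning to the
 * kernel"; user and v8086 returns fall through.
 */
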
ENTRY(resume_userspace)
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
#endif
	CFI_ENDPROC

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
	/*
	 * No need to follow this irqs on/off section: the syscall
	 * disabled irqs and here we enable it straight after entry:
	 */
	ENABLE_INTERRUPTS
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
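	/*
	 * The arithmetic, spelled out: after the four pushes above,
	 * %esp is esp0-16. copy_thread sets esp0 eight bytes below the
	 * true top of the stack, and thread_info sits THREAD_SIZE below
	 * that top, so %esp + 4*4 + 8 - THREAD_SIZE + TI_sysenter_return
	 * is exactly &current_thread_info()->sysenter_return.
	 */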

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous

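/*
 * Why __PAGE_OFFSET-3: the 4-byte load at (%ebp) touches bytes
 * %ebp..%ebp+3, so the highest safe %ebp is __PAGE_OFFSET-4; "jae" on
 * __PAGE_OFFSET-3 rejects anything that could overlap kernel space,
 * and a faulting (unmapped) user pointer is caught by the __ex_table
 * entry above.
 */
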
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_GS(%esp), %gs
	ENABLE_INTERRUPTS_SYSEXIT
	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_GS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection

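/*
 * sysexit register convention, as relied on above: the CPU reloads
 * user %eip from %edx and user %esp from %ecx, which is why PT_EIP and
 * PT_OLDESP are staged in those registers before
 * ENABLE_INTERRUPTS_SYSEXIT fires.
 */
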
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	testl $TF_MASK,PT_EFLAGS(%esp)
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

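/*
 * Dispatch note for syscall_call above: sys_call_table is an array of
 * 4-byte function pointers, so "call *sys_call_table(,%eax,4)" indexes
 * it by the syscall number in %eax with scale 4; e.g. %eax = 4 calls
 * through the fifth slot, sys_write on this era's i386 table.
 */
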
restore_all:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
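	/*
	 * After the mix: %al holds the saved CS RPL bits, %ah holds the
	 * saved SS TI bit (LDT vs. GDT), and bit 17 carries VM from
	 * EFLAGS. The compare matches exactly "user RPL, SS in the LDT,
	 * not v8086" - the one case needing the espfix path below.
	 */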
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
1:	INTERRUPT_RETURN
.section .fixup,"ax"
iret_exc:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
	movl PT_OLDESP(%esp), %eax
	movl %esp, %edx
	call patch_espfix_desc
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
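/*
 * Background, roughly: an iret to a 16-bit stack segment restores only
 * %sp, leaving the upper 16 bits of %esp as stale kernel-stack bits
 * that userspace can observe. patch_espfix_desc adjusts the
 * __ESPFIX_SS descriptor base so base + 16-bit sp still resolves to
 * the user's real stack.
 */
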

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $VM_MASK, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl PT_ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
	CFI_ENDPROC

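/*
 * In both calls to do_syscall_trace, %eax is the pt_regs pointer and
 * %edx flags the direction: syscall_trace_entry above zeroes %edx
 * (syscall entry), while the $1 here marks syscall exit.
 */
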
	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
	CFI_ENDPROC

#define FIXUP_ESPFIX_STACK \
	/* since we are on the wrong stack, we can't make this C code :( */ \
	movl %gs:PDA_cpu, %ebx; \
	PER_CPU(cpu_gdt_descr, %ebx); \
	movl GDS_address(%ebx), %ebx; \
	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
	addl %esp, %eax; \
	pushl $__KERNEL_DS; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	lss (%esp), %esp; \
	CFI_ADJUST_CFA_OFFSET -8;
#define UNWIND_ESPFIX_STACK \
	movl %ss, %eax; \
	/* see if on espfix stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	jne 27f; \
	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to normal stack */ \
	FIXUP_ESPFIX_STACK; \
27:;

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
	RING0_INT_FRAME
.rept NR_IRQS
	ALIGN
 .if vector
	CFI_ADJUST_CFA_OFFSET -4
 .endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
.data
	.long 1b
.text
vector=vector+1
.endr

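/*
 * What one .rept iteration expands to, for illustration (vector 0):
 *
 *	1:	pushl $~(0)		# i.e. $0xffffffff
 *		jmp common_interrupt
 *	.data
 *		.long 1b		# interrupt[0] -> this stub
 *	.text
 *
 * do_IRQ recovers the vector number by complementing the pushed value
 * again.
 */
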
/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	ALIGN
common_interrupt:
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
	CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

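/*
 * Typical use, from the mach-default entry_arch.h (for example):
 *
 *	BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 *
 * which emits a stub that saves registers and calls
 * smp_reschedule_interrupt - the empty comment in the macro splices
 * "smp_" onto the handler name.
 */
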
KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %gs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET gs, 0*/
	movl $(__KERNEL_PDA), %ecx
	movl %ecx, %gs
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	mov  %ecx, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(page_fault)

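/*
 * Trick worth noting: each fault stub pushes its C handler's address
 * (e.g. $do_page_fault) into the slot SAVE_ALL would use for %gs.
 * error_code then pushes the remaining registers by hand, fishes the
 * handler address back out of PT_GS(%esp), stores the real saved %gs
 * there instead, and calls the handler with %eax = pt_regs pointer and
 * %edx = error code.
 */
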
ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_CR0_INTO_EAX
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
	CFI_ENDPROC

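/*
 * $0x4 above is CR0.EM (bit 2): set means there is no FPU and math
 * instructions must be emulated; clear means the #NM trap only signals
 * that the FPU state belongs to another task, so math_state_restore
 * can simply reload it.
 */
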
701/*
702 * Debug traps and NMI can happen at the one SYSENTER instruction
703 * that sets up the real kernel stack. Check here, since we can't
704 * allow the wrong stack to be used.
705 *
706 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
707 * already pushed 3 words if it hits on the sysenter instruction:
708 * eflags, cs and eip.
709 *
710 * We just load the right stack, and push the three (known) values
711 * by hand onto the new stack - while updating the return eip past
712 * the instruction that would have done it for sysenter.
713 */
714#define FIX_STACK(offset, ok, label) \
715 cmpw $__KERNEL_CS,4(%esp); \
716 jne ok; \
717label: \
718 movl TSS_sysenter_esp0+offset(%esp),%esp; \
a549b86d
CE
719 CFI_DEF_CFA esp, 0; \
720 CFI_UNDEFINED eip; \
1da177e4 721 pushfl; \
a549b86d 722 CFI_ADJUST_CFA_OFFSET 4; \
1da177e4 723 pushl $__KERNEL_CS; \
a549b86d
CE
724 CFI_ADJUST_CFA_OFFSET 4; \
725 pushl $sysenter_past_esp; \
726 CFI_ADJUST_CFA_OFFSET 4; \
727 CFI_REL_OFFSET eip, 0
1da177e4 728
KPROBE_ENTRY(debug)
	RING0_INT_FRAME
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(debug)

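/*
 * The check above: a debug trap pushes eflags, cs and eip, so (%esp)
 * holds the interrupted eip. If that equals sysenter_entry, the trap
 * hit before the real kernel stack was loaded, and FIX_STACK's offset
 * of 12 (the three words just pushed) locates esp0 relative to the
 * current, wrong, stack pointer.
 */
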
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */