[PATCH] x86: all cpu backtrace
arch/i386/kernel/entry.S
/*
 * linux/arch/i386/entry.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - orig_eax
 *	28(%esp) - %eip
 *	2C(%esp) - %cs
 *	30(%esp) - %eflags
 *	34(%esp) - %oldesp
 *	38(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"

#define nr_syscalls ((syscall_table_size)/4)
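/*
 * Note: syscall_table_size is computed at the end of sys_call_table
 * (see syscall_table.S); each entry is a 4-byte pointer, hence the
 * division by 4.
 */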

EBX		= 0x00
ECX		= 0x04
EDX		= 0x08
ESI		= 0x0C
EDI		= 0x10
EBP		= 0x14
EAX		= 0x18
DS		= 0x1C
ES		= 0x20
ORIG_EAX	= 0x24
EIP		= 0x28
CS		= 0x2C
EFLAGS		= 0x30
OLDESP		= 0x34
OLDSS		= 0x38
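/*
 * These offsets mirror struct pt_regs (include/asm-i386/ptrace.h).
 * OLDESP and OLDSS are only present when the CPU entered the kernel
 * from a lower privilege level (user or vm86 mode).
 */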

CF_MASK		= 0x00000001
TF_MASK		= 0x00000100
IF_MASK		= 0x00000200
DF_MASK		= 0x00000400
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000
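/*
 * EFLAGS bit masks: CF = carry, TF = trap (single-step), IF = interrupt
 * enable, DF = direction, NT = nested task, VM = virtual-8086 mode.
 */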

/* These are replacements for paravirtualization */
#define DISABLE_INTERRUPTS		cli
#define ENABLE_INTERRUPTS		sti
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define INTERRUPT_RETURN		iret
#define GET_CR0_INTO_EAX		movl %cr0, %eax
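/*
 * A paravirtualized kernel can redefine these macros to call into the
 * hypervisor instead of using the raw cli/sti/iret/sysexit instructions.
 */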

#ifdef CONFIG_PREEMPT
#define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
#else
#define preempt_stop
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

#define SAVE_ALL \
	cld; \
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es;
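/*
 * The push order above is what produces the pt_regs layout documented
 * at the top of this file: %ebx ends up at 0(%esp), %es at 0x20(%esp).
 */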

#define RESTORE_INT_REGS \
	popl %ebx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebx;\
	popl %ecx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ecx;\
	popl %edx;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edx;\
	popl %esi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE esi;\
	popl %edi;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edi;\
	popl %ebp;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebp;\
	popl %eax;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE eax

#define RESTORE_REGS	\
	RESTORE_INT_REGS; \
1:	popl %ds;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE ds;*/\
2:	popl %es;	\
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE es;*/\
.section .fixup,"ax";	\
3:	movl $0,(%esp);	\
	jmp 1b;		\
4:	movl $0,(%esp);	\
	jmp 2b;		\
.previous;		\
.section __ex_table,"a";\
	.align 4;	\
	.long 1b,3b;	\
	.long 2b,4b;	\
.previous
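/*
 * If restoring a stale user %ds/%es faults, the fixup above overwrites
 * the saved selector with 0 and retries the pop; the null selector can
 * always be loaded, so the kernel never oopses on a bad segment.
 */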

#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4
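/*
 * 3*4 vs 4*4: a ring-0 interrupt frame holds eip/cs/eflags (three
 * words); an exception that pushes an error code adds a fourth.
 */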

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, OLDESP-EBX;\
	/*CFI_OFFSET cs, CS-OLDESP;*/\
	CFI_OFFSET eip, EIP-OLDESP;\
	/*CFI_OFFSET es, ES-OLDESP;*/\
	/*CFI_OFFSET ds, DS-OLDESP;*/\
	CFI_OFFSET eax, EAX-OLDESP;\
	CFI_OFFSET ebp, EBP-OLDESP;\
	CFI_OFFSET edi, EDI-OLDESP;\
	CFI_OFFSET esi, ESI-OLDESP;\
	CFI_OFFSET edx, EDX-OLDESP;\
	CFI_OFFSET ecx, ECX-OLDESP;\
	CFI_OFFSET ebx, EBX-OLDESP

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb CS(%esp), %al
	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace
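	# (VM set or CS RPL == 3 both compare >= USER_RPL; a pure ring-0
	#  return leaves %eax < USER_RPL and takes the kernel path)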
ENTRY(resume_userspace)
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
#endif
	CFI_ENDPROC

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
	/*
	 * No need to follow this irqs on/off section: the syscall
	 * disabled irqs and here we enable them straight after entry:
	 */
	ENABLE_INTERRUPTS
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
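	/*
	 * The pushes above fabricate the ss/esp/eflags/cs/eip frame an
	 * interrupt would have left, so the common exit paths (iret or
	 * sysexit) can return to SYSENTER_RETURN in the vsyscall page.
	 */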

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
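	/*
	 * The bound is __PAGE_OFFSET-3 because the 4-byte load at (%ebp)
	 * touches %ebp..%ebp+3: only %ebp <= __PAGE_OFFSET-4 keeps the
	 * whole access in user space.  If the load itself faults, the
	 * exception-table entry above reroutes it to syscall_fault.
	 */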

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
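/* sysexit resumes userspace with %edx as the new EIP and %ecx as ESP */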
	movl EIP(%esp), %edx
	movl OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS_SYSEXIT
	CFI_ENDPROC


	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
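	/*
	 * If the debugger single-stepped over "int $0x80", TF is set in
	 * the saved EFLAGS; flag the thread with _TIF_SINGLESTEP so the
	 * trap is reported when this syscall returns.
	 */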
	testl $TF_MASK,EFLAGS(%esp)
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
syscall_exit:
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	# Warning: OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb OLDSS(%esp), %ah
	movb CS(%esp), %al
	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
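	# (matches only a user-mode (RPL 3) return, not vm86, whose stack
	#  selector has the TI bit set, i.e. an SS in the LDT - the
	#  16-bit espfix case)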
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
1:	INTERRUPT_RETURN
.section .fixup,"ax"
iret_exc:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
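	/* (the bug: iret to a 16-bit stack segment restores only the low
	 * word of ESP, leaving the high word holding stale kernel-stack
	 * bits - both a correctness problem and a small info leak) */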
	subl $8, %esp			# reserve space for switch16 pointer
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	movl %esp, %eax
	/* Set up the 16bit stack frame with switch32 pointer on top,
	 * and a switch16 pointer on top of the current frame. */
	call setup_x86_bogus_stack
	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
	TRACE_IRQS_IRET
	RESTORE_REGS
	lss 20+4(%esp), %esp		# switch to 16bit stack
1:	INTERRUPT_RETURN
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
	CFI_ENDPROC

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $VM_MASK, EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
#ifdef CONFIG_VM86
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
#endif

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
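	# (do_syscall_trace's second argument is passed in %edx: 0 above
	#  selects entry tracing, 1 in syscall_exit_work below selects
	#  exit tracing)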

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,EAX(%esp)
	jmp resume_userspace

syscall_badsys:
	movl $-ENOSYS,EAX(%esp)
	jmp resume_userspace
	CFI_ENDPROC

#define FIXUP_ESPFIX_STACK \
	movl %esp, %eax; \
	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
	/* copy data from 16bit stack to 32bit stack */ \
	call fixup_x86_bogus_stack; \
	/* put ESP to the proper location */ \
	movl %eax, %esp;
#define UNWIND_ESPFIX_STACK \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	movl %ss, %eax; \
	/* see if on 16bit stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	je 28f; \
27:	popl %eax; \
	CFI_ADJUST_CFA_OFFSET -4; \
.section .fixup,"ax"; \
28:	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to 32bit stack */ \
	FIXUP_ESPFIX_STACK; \
	jmp 27b; \
.previous

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
	RING0_INT_FRAME
.rept NR_IRQS
	ALIGN
 .if vector
	CFI_ADJUST_CFA_OFFSET -4
 .endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
.data
	.long 1b
.text
vector=vector+1
.endr
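/*
 * Each stub pushes the bitwise NOT of its vector number.  The value is
 * always negative, which lets do_IRQ tell a hardware-interrupt frame
 * from a syscall frame (orig_eax >= 0); another NOT recovers the vector.
 */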

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	ALIGN
common_interrupt:
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
	CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC
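# The empty comment in "smp_/**/name" is the old cpp token-paste idiom:
# e.g. BUILD_INTERRUPT(reschedule_interrupt, ...) ends up calling
# smp_reschedule_interrupt.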

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
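	/*
	 * The stub pushed its C handler's address just below the error
	 * code.  After the register pushes below and the pop of %es into
	 * %ecx, those two words land exactly in the ES and ORIG_EAX slots
	 * of pt_regs; both are read out and overwritten (with the saved
	 * %es and -1) before the handler is called.
	 */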
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	xorl %eax, %eax
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	decl %eax			# eax = -1
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl ES(%esp), %edi		# get the function address
	movl ORIG_EAX(%esp), %edx	# get the error code
	movl %eax, ORIG_EAX(%esp)
	movl %ecx, ES(%esp)
	/*CFI_REL_OFFSET es, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
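	# (neither FPU exception pushes a hardware error code, hence the
	#  explicit "pushl $0" placeholder before the handler address)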

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

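/*
 * Device-not-available: if CR0.EM is set the kernel is emulating the
 * FPU and the trap goes to math_emulate; otherwise this is the
 * lazy-FPU-switch case and math_state_restore reloads this task's
 * FPU state.
 */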
ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_CR0_INTO_EAX
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
	CFI_ENDPROC

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
	CFI_DEF_CFA esp, 0;			\
	CFI_UNDEFINED eip;			\
	pushfl;					\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $__KERNEL_CS;			\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $sysenter_past_esp;		\
	CFI_ADJUST_CFA_OFFSET 4;		\
	CFI_REL_OFFSET eip, 0

KPROBE_ENTRY(debug)
	RING0_INT_FRAME
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(debug)
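/*
 * The "cmpl $sysenter_entry,(%esp)" check works because the trap's
 * saved eip sits on top of the stack: it matches only if the debug
 * trap hit the very first instruction of sysenter_entry, before the
 * real kernel stack had been loaded.
 */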

/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */