/*
 *  Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
# define sysenter_audit		syscall_trace_entry
# define sysexit_audit		syscall_exit_work
#endif

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

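/*
 * SAVE_ALL builds the register portion of the pt_regs frame laid out
 * in the comment at the top of this file: %gs (or its placeholder)
 * down through %ebx, after which the kernel's data and per-cpu
 * segment selectors are loaded into %ds/%es/%fs.
 */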
.macro SAVE_ALL
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
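	/*
	 * If one of the segment pops above faults on a bad saved
	 * selector, the fixups below replace the saved value with 0
	 * and retry, so the pop loads the null selector instead.
	 */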
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

ENTRY(ret_from_fork)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl
	jmp	syscall_exit
END(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	pushl	%eax
	call	schedule_tail
	GET_THREAD_INFO(%ebp)
	popl	%eax
	pushl	$0x0202				# Reset kernel eflags
	popfl
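	/*
	 * copy_thread() stashed the thread function in PT_EBX and its
	 * argument in PT_EBP; call it, then leave through the syscall
	 * exit path with a return value of 0.
	 */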
	movl	PT_EBP(%esp), %eax
	call	*PT_EBX(%esp)
	movl	$0, PT_EAX(%esp)
	jmp	syscall_exit
ENDPROC(ret_from_kernel_thread)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
						# setting need_resched or sigpending
						# between sampling and the iret
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	andl	$_TIF_WORK_MASK, %ecx		# is there any work to be done on
						# int/exception return?
	jne	work_pending
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
need_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
	jmp	need_resched
END(resume_kernel)
#endif

/*
 * SYSENTER_RETURN points to after the SYSENTER instruction
 * in the vsyscall page.  See vsyscall-sysenter.S, which defines
 * the symbol.
 */

	# SYSENTER call handler stub
ENTRY(entry_SYSENTER_32)
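	/*
	 * MSR_IA32_SYSENTER_ESP is set up by enable_sep_cpu() so that,
	 * at this point, TSS_sysenter_sp0(%esp) reaches the task's real
	 * kernel stack pointer (tss.sp0).
	 */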
	movl	TSS_sysenter_sp0(%esp), %esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * there is enough kernel state for TRACE_IRQS_OFF to be called -
	 * and we immediately enable interrupts at that point anyway.
	 */
	pushl	$__USER_DS
	pushl	%ebp
	pushfl
	orl	$X86_EFLAGS_IF, (%esp)
	pushl	$__USER_CS
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary: TI_sysenter_return
	 * is relative to thread_info, which is at the bottom of the
	 * kernel stack page.  4*4 means the 4 words pushed above;
	 * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
	 * and THREAD_SIZE takes us to the bottom.
	 */
	pushl	((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)

	pushl	%eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

	/*
	 * Load the potential sixth argument from user stack.
	 * Careful about security.
	 */
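	/*
	 * The '-3' below keeps the 4-byte load at (%ebp) entirely below
	 * __PAGE_OFFSET; anything higher would let userspace point the
	 * read into kernel memory.
	 */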
	cmpl	$__PAGE_OFFSET-3, %ebp
	jae	syscall_fault
	ASM_STAC
1:	movl	(%ebp), %ebp
	ASM_CLAC
	movl	%ebp, PT_EBP(%esp)
	_ASM_EXTABLE(1b, syscall_fault)

	GET_THREAD_INFO(%ebp)

	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz	sysenter_audit
sysenter_do_call:
	cmpl	$(NR_syscalls), %eax
	jae	sysenter_badsys
	call	*sys_call_table(, %eax, 4)
sysenter_after_call:
	movl	%eax, PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	testl	$_TIF_ALLWORK_MASK, %ecx
	jnz	sysexit_audit
sysenter_exit:
	/* if something modifies registers it must also disable sysexit */
	movl	PT_EIP(%esp), %edx
	movl	PT_OLDESP(%esp), %ecx
	xorl	%ebp, %ebp
	TRACE_IRQS_ON
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
	jnz	syscall_trace_entry
	/* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
	movl	PT_EBX(%esp), %edx		/* ebx/a0: 2nd arg to audit */
	/* movl PT_ECX(%esp), %ecx already set, a1: 3rd arg to audit */
	pushl	PT_ESI(%esp)			/* a3: 5th arg */
	pushl	PT_EDX+4(%esp)			/* a2: 4th arg */
	call	__audit_syscall_entry
	popl	%ecx				/* get that remapped edx off the stack */
	popl	%ecx				/* get that remapped esi off the stack */
	movl	PT_EAX(%esp), %eax		/* reload syscall number */
	jmp	sysenter_do_call

sysexit_audit:
	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jnz	syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl	%eax, %edx			/* second arg, syscall return value */
	cmpl	$-MAX_ERRNO, %eax		/* is it an error ? */
	setbe	%al				/* 1 if so, 0 if not */
	movzbl	%al, %eax			/* zero-extend that */
	call	__audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jnz	syscall_exit_work
	movl	PT_EAX(%esp), %eax		/* reload syscall return value */
	jmp	sysenter_exit
#endif

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX
ENDPROC(entry_SYSENTER_32)

	# system call handler stub
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax				# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
						# system call tracing in operation / emulation
	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
	jnz	syscall_trace_entry
	cmpl	$(NR_syscalls), %eax
	jae	syscall_badsys
syscall_call:
	call	*sys_call_table(, %eax, 4)
syscall_after_call:
	movl	%eax, PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
						# setting need_resched or sigpending
						# between sampling and the iret
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	testl	$_TIF_ALLWORK_MASK, %ecx	# current->work
	jnz	syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	ldt_ss				# returning to user-space with LDT SS
#endif
restore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	error_code
.previous
	_ASM_EXTABLE(irq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
ldt_ss:
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl	$0, pv_info+PARAVIRT_enabled
	jne	restore_nocheck
#endif

	/*
	 * Setup and switch to ESPFIX stack
	 *
	 * We're returning to userspace with a 16 bit stack. The CPU will not
	 * restore the high word of ESP for us on executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * compensating for the offset by changing to the ESPFIX segment with
	 * a base address that matches for the difference.
	 */
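	/*
	 * A GDT descriptor keeps base bits 16..23 in byte 4 and base
	 * bits 24..31 in byte 7; the two movs below patch the high word
	 * of the kernel %esp into exactly those slots.
	 */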
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	restore_nocheck
#endif
ENDPROC(entry_INT80_32)

	# perform work that needs to be done immediately before resumption
	ALIGN
work_pending:
	testb	$_TIF_NEED_RESCHED, %cl
	jz	work_notifysig
work_resched:
	call	schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
						# setting need_resched or sigpending
						# between sampling and the iret
	TRACE_IRQS_OFF
	movl	TI_flags(%ebp), %ecx
	andl	$_TIF_WORK_MASK, %ecx		# is there any work to be done other
						# than syscall tracing?
	jz	restore_all
	testb	$_TIF_NEED_RESCHED, %cl
	jnz	work_resched

work_notifysig:					# deal with pending signals and
						# notify-resume requests
#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl	%esp, %eax
	jnz	work_notifysig_v86		# returning to kernel-space or
						# vm86-space
1:
#else
	movl	%esp, %eax
#endif
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movb	PT_CS(%esp), %bl
	andb	$SEGMENT_RPL_MASK, %bl
	cmpb	$USER_RPL, %bl
	jb	resume_kernel
	xorl	%edx, %edx
	call	do_notify_resume
	jmp	resume_userspace

#ifdef CONFIG_VM86
	ALIGN
work_notifysig_v86:
	pushl	%ecx				# save ti_flags for do_notify_resume
	call	save_v86_state			# %eax contains pt_regs pointer
	popl	%ecx
	movl	%eax, %esp
	jmp	1b
#endif
END(work_pending)

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl	$-ENOSYS, PT_EAX(%esp)
	movl	%esp, %eax
	call	syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl	$(NR_syscalls), %eax
	jnae	syscall_call
	jmp	syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl	$_TIF_WORK_SYSCALL_EXIT, %ecx
	jz	work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)		# could let syscall_trace_leave() call
						# schedule() instead
	movl	%esp, %eax
	call	syscall_trace_leave
	jmp	resume_userspace
END(syscall_exit_work)

syscall_fault:
	ASM_CLAC
	GET_THREAD_INFO(%ebp)
	movl	$-EFAULT, PT_EAX(%esp)
	jmp	resume_userspace
END(syscall_fault)

syscall_badsys:
	movl	$-ENOSYS, %eax
	jmp	syscall_after_call
END(syscall_badsys)

sysenter_badsys:
	movl	$-ENOSYS, %eax
	jmp	sysenter_after_call
END(sysenter_badsys)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zerobased stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
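	/*
	 * Each stub pushed (~vector + 0x80), i.e. 0x7F - vector, which
	 * fits in a signed byte so the stub stays within its 8-byte
	 * slot.  Adding -0x80 leaves ~vector == -(vector + 1) in
	 * orig_eax, from which the C handler recovers the vector number.
	 */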
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
ENDPROC(name)


#ifdef CONFIG_TRACING
# define TRACE_BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
# define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	error_code
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	error_code
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	error_code
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	error_code
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	error_code
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	error_code
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	error_code
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	error_code
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	error_code
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	error_code
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	error_code
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	error_code
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	error_code
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	error_code
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	sysenter_past_esp

ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f

	jmp	xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
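	/*
	 * With dynamic ftrace, mcount itself is a bare return: every
	 * _mcount call site is patched to a nop at boot, and redirected
	 * to ftrace_caller only while tracing is enabled.
	 */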
	ret
END(mcount)

ENTRY(ftrace_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	pushl	$0				/* Pass NULL as regs pointer */
	movl	4*4(%esp), %eax
	movl	0x4(%ebp), %edx
	movl	function_trace_op, %ecx
	subl	$MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call	ftrace_stub

	addl	$4, %esp			/* skip NULL pointer */
	popl	%edx
	popl	%ecx
	popl	%eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp	ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf					/* push flags before compare (in cs location) */

	/*
	 * i386 does not save SS and ESP when coming from kernel.
	 * Instead, to get sp, &regs->sp is used (see ptrace.h).
	 * Unfortunately, that means eflags must be at the same location
	 * as the current return ip is.  We move the return ip into the
	 * ip location, and move flags into the return ip location.
	 */
	pushl	4(%esp)				/* save return ip into ip slot */

	pushl	$0				/* Load 0 into orig_ax */
	pushl	%gs
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx

	movl	13*4(%esp), %eax		/* Get the saved flags */
	movl	%eax, 14*4(%esp)		/* Move saved flags into regs->flags location */
						/* clobbering return ip */
	movl	$__KERNEL_CS, 13*4(%esp)

	movl	12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl	$MCOUNT_INSN_SIZE, %eax		/* Adjust ip */
	movl	0x4(%ebp), %edx			/* Load parent ip (2nd parameter) */
	movl	function_trace_op, %ecx		/* Save ftrace_pos in 3rd parameter */
	pushl	%esp				/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call	ftrace_stub

	addl	$4, %esp			/* Skip pt_regs */
	movl	14*4(%esp), %eax		/* Move flags back into cs */
	movl	%eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl	12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl	%eax, 14*4(%esp)		/* Put return ip back for ret */

	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
	popl	%ds
	popl	%es
	popl	%fs
	popl	%gs
	addl	$8, %esp			/* Skip orig_ax and ip */
	popf					/* Pop flags at end (no addl to corrupt flags) */
	jmp	ftrace_ret

	popf
	jmp	ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl	$__PAGE_OFFSET, %esp
	jb	ftrace_stub			/* Paging not enabled yet? */

	cmpl	$ftrace_stub, ftrace_trace_function
	jnz	trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl	$ftrace_stub, ftrace_graph_return
	jnz	ftrace_graph_caller

	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
	jnz	ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	movl	0x4(%ebp), %edx
	subl	$MCOUNT_INSN_SIZE, %eax

	call	*ftrace_trace_function

	popl	%edx
	popl	%ecx
	popl	%eax
	jmp	ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	movl	0xc(%esp), %eax
	lea	0x4(%ebp), %edx
	movl	(%ebp), %ecx
	subl	$MCOUNT_INSN_SIZE, %eax
	call	prepare_ftrace_return
	popl	%edx
	popl	%ecx
	popl	%eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
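	/*
	 * The traced function's real return address was replaced on
	 * entry; ftrace_return_to_handler() hands it back in %eax and
	 * we jump there through %ecx.
	 */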
	pushl	%eax
	pushl	%edx
	movl	%ebp, %eax
	call	ftrace_return_to_handler
	movl	%eax, %ecx
	popl	%edx
	popl	%eax
	jmp	*%ecx
#endif

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	ASM_CLAC
	pushl	$trace_do_page_fault
	jmp	error_code
END(trace_page_fault)
#endif

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl	$(__USER_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	call	*%edi
	jmp	ret_from_exception
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
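/*
 * \offset below is the number of bytes already pushed onto the wrong
 * stack when the macro runs: 12 for a lone debug/NMI frame, 24 when
 * an NMI interrupted the debug fixup and both frames are present.
 */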
.macro FIX_STACK offset ok label
	cmpw	$__KERNEL_CS, 4(%esp)
	jne	\ok
\label:
	movl	TSS_sysenter_sp0 + \offset(%esp), %esp
	pushfl
	pushl	$__KERNEL_CS
	pushl	$sysenter_past_esp
.endm

ENTRY(debug)
	ASM_CLAC
	cmpl	$entry_SYSENTER_32, (%esp)
	jne	debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer
	call	do_debug
	jmp	ret_from_exception
END(debug)

/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	nmi_espfix_stack
#endif
	cmpl	$entry_SYSENTER_32, (%esp)
	je	nmi_stack_fixup
	pushl	%eax
	movl	%esp, %eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl	$(THREAD_SIZE-1), %eax
	cmpl	$(THREAD_SIZE-20), %eax
	popl	%eax
	jae	nmi_stack_correct
	cmpl	$entry_SYSENTER_32, 12(%esp)
	je	nmi_debug_stack_check
nmi_stack_correct:
	pushl	%eax
	SAVE_ALL
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_nmi
	jmp	restore_all_notrace

nmi_stack_fixup:
	FIX_STACK 12, nmi_stack_correct, 1
	jmp	nmi_stack_correct

nmi_debug_stack_check:
	cmpw	$__KERNEL_CS, 16(%esp)
	jne	nmi_stack_correct
	cmpl	$debug, (%esp)
	jb	nmi_stack_correct
	cmpl	$debug_esp_fix_insn, (%esp)
	ja	nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp	nmi_stack_correct

#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
	/*
	 * create the ss:esp far pointer used by the final lss to get
	 * back to the espfix stack
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl	16(%esp)
	.endr
	pushl	%eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	irq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	error_code
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	error_code
END(async_page_fault)
#endif