Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 | 2 | /* |
a49976d1 | 3 | * Copyright (C) 1991,1992 Linus Torvalds |
1da177e4 | 4 | * |
a49976d1 | 5 | * entry_32.S contains the system-call and low-level fault and trap handling routines. |
1da177e4 | 6 | * |
39e8701f | 7 | * Stack layout while running C code: |
a49976d1 IM |
8 | * ptrace needs to have all registers on the stack. |
9 | * If the order here is changed, it needs to be | |
10 | * updated in fork.c:copy_process(), signal.c:do_signal(), | |
1da177e4 LT |
11 | * ptrace.c and ptrace.h |
12 | * | |
13 | * 0(%esp) - %ebx | |
14 | * 4(%esp) - %ecx | |
15 | * 8(%esp) - %edx | |
9b47feb7 | 16 | * C(%esp) - %esi |
1da177e4 LT |
17 | * 10(%esp) - %edi |
18 | * 14(%esp) - %ebp | |
19 | * 18(%esp) - %eax | |
20 | * 1C(%esp) - %ds | |
21 | * 20(%esp) - %es | |
464d1a78 | 22 | * 24(%esp) - %fs |
ccbeed3a TH |
23 | * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS |
24 | * 2C(%esp) - orig_eax | |
25 | * 30(%esp) - %eip | |
26 | * 34(%esp) - %cs | |
27 | * 38(%esp) - %eflags | |
28 | * 3C(%esp) - %oldesp | |
29 | * 40(%esp) - %oldss | |
1da177e4 LT |
30 | */ |
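For reference, the offsets above correspond one-to-one to the 32-bit `struct pt_regs`. A minimal C sketch of that layout (the authoritative definition lives in arch/x86/include/asm/ptrace.h; the field names follow it, the struct name here is ours):

```c
/* sketch of the 32-bit pt_regs layout documented above; every slot is
 * 4 bytes wide, so the offsets match the 0(%esp)..40(%esp) table
 * (segment registers only use the low 16 bits of their slot) */
struct pt_regs_32 {
	unsigned long bx;	/* 0x00 */
	unsigned long cx;	/* 0x04 */
	unsigned long dx;	/* 0x08 */
	unsigned long si;	/* 0x0c */
	unsigned long di;	/* 0x10 */
	unsigned long bp;	/* 0x14 */
	unsigned long ax;	/* 0x18 */
	unsigned long ds;	/* 0x1c */
	unsigned long es;	/* 0x20 */
	unsigned long fs;	/* 0x24 */
	unsigned long gs;	/* 0x28 */
	unsigned long orig_ax;	/* 0x2c */
	unsigned long ip;	/* 0x30 */
	unsigned long cs;	/* 0x34 */
	unsigned long flags;	/* 0x38 */
	unsigned long sp;	/* 0x3c (oldesp) */
	unsigned long ss;	/* 0x40 (oldss)  */
};
```

The PT_* offsets used throughout this file (PT_EFLAGS, PT_OLDESP, PT_GS, ...) are generated from that structure by the asm-offsets machinery.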
31 | ||
1da177e4 | 32 | #include <linux/linkage.h> |
d7e7528b | 33 | #include <linux/err.h> |
1da177e4 | 34 | #include <asm/thread_info.h> |
55f327fa | 35 | #include <asm/irqflags.h> |
1da177e4 LT |
36 | #include <asm/errno.h> |
37 | #include <asm/segment.h> | |
38 | #include <asm/smp.h> | |
be44d2aa | 39 | #include <asm/percpu.h> |
ab68ed98 | 40 | #include <asm/processor-flags.h> |
9b7dc567 | 41 | #include <asm/irq_vectors.h> |
cd4d09ec | 42 | #include <asm/cpufeatures.h> |
b4ca46e4 | 43 | #include <asm/alternative-asm.h> |
6837a54d | 44 | #include <asm/asm.h> |
e59d1b0a | 45 | #include <asm/smap.h> |
4d516f41 | 46 | #include <asm/frame.h> |
2641f08b | 47 | #include <asm/nospec-branch.h> |
1da177e4 | 48 | |
ea714547 JO |
49 | .section .entry.text, "ax" |
50 | ||
139ec7c4 RR |
51 | /* |
52 | * We use macros for low-level operations which need to be overridden | |
53 | * for paravirtualization. The following will never clobber any registers: | |
54 | * INTERRUPT_RETURN (aka. "iret") | |
55 | * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax") | |
d75cd22f | 56 | * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit"). |
139ec7c4 RR |
57 | * |
58 | * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must | |
59 | * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY). | |
60 | * Allowing a register to be clobbered can shrink the paravirt replacement | |
61 | * enough to patch inline, increasing performance. | |
62 | */ | |
63 | ||
1da177e4 | 64 | #ifdef CONFIG_PREEMPT |
a49976d1 | 65 | # define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF |
1da177e4 | 66 | #else |
a49976d1 IM |
67 | # define preempt_stop(clobbers) |
68 | # define resume_kernel restore_all | |
1da177e4 LT |
69 | #endif |
70 | ||
55f327fa IM |
71 | .macro TRACE_IRQS_IRET |
72 | #ifdef CONFIG_TRACE_IRQFLAGS | |
a49976d1 IM |
73 | testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off? |
74 | jz 1f | |
55f327fa IM |
75 | TRACE_IRQS_ON |
76 | 1: | |
77 | #endif | |
78 | .endm | |
79 | ||
ccbeed3a TH |
80 | /* |
81 | * User gs save/restore | |
82 | * | |
83 | * %gs is used for userland TLS and the kernel only uses it for the stack
84 | * canary, which is required to be at %gs:20 by gcc. Read the comment
85 | * at the top of stackprotector.h for more info. | |
86 | * | |
87 | * Local labels 98 and 99 are used. | |
88 | */ | |
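The %gs:20 constraint mentioned above comes from the layout of the per-cpu object %gs points at; a sketch, matching the definition in asm/stackprotector.h:

```c
/* gcc's i386 -fstack-protector hard-codes the canary read as %gs:20,
 * so the canary member must sit at offset 20 of this per-cpu object */
struct stack_canary {
	char __pad[20];
	unsigned long canary;	/* the value the epilogue compares against */
};
```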
89 | #ifdef CONFIG_X86_32_LAZY_GS | |
90 | ||
91 | /* unfortunately push/pop can't be no-ops */
92 | .macro PUSH_GS | |
a49976d1 | 93 | pushl $0 |
ccbeed3a TH |
94 | .endm |
95 | .macro POP_GS pop=0 | |
a49976d1 | 96 | addl $(4 + \pop), %esp |
ccbeed3a TH |
97 | .endm |
98 | .macro POP_GS_EX | |
99 | .endm | |
100 | ||
101 | /* all the rest are no-ops */
102 | .macro PTGS_TO_GS | |
103 | .endm | |
104 | .macro PTGS_TO_GS_EX | |
105 | .endm | |
106 | .macro GS_TO_REG reg | |
107 | .endm | |
108 | .macro REG_TO_PTGS reg | |
109 | .endm | |
110 | .macro SET_KERNEL_GS reg | |
111 | .endm | |
112 | ||
113 | #else /* CONFIG_X86_32_LAZY_GS */ | |
114 | ||
115 | .macro PUSH_GS | |
a49976d1 | 116 | pushl %gs |
ccbeed3a TH |
117 | .endm |
118 | ||
119 | .macro POP_GS pop=0 | |
a49976d1 | 120 | 98: popl %gs |
ccbeed3a | 121 | .if \pop <> 0 |
9b47feb7 | 122 | add $\pop, %esp |
ccbeed3a TH |
123 | .endif |
124 | .endm | |
125 | .macro POP_GS_EX | |
126 | .pushsection .fixup, "ax" | |
a49976d1 IM |
127 | 99: movl $0, (%esp) |
128 | jmp 98b | |
ccbeed3a | 129 | .popsection |
a49976d1 | 130 | _ASM_EXTABLE(98b, 99b) |
ccbeed3a TH |
131 | .endm |
132 | ||
133 | .macro PTGS_TO_GS | |
a49976d1 | 134 | 98: mov PT_GS(%esp), %gs |
ccbeed3a TH |
135 | .endm |
136 | .macro PTGS_TO_GS_EX | |
137 | .pushsection .fixup, "ax" | |
a49976d1 IM |
138 | 99: movl $0, PT_GS(%esp) |
139 | jmp 98b | |
ccbeed3a | 140 | .popsection |
a49976d1 | 141 | _ASM_EXTABLE(98b, 99b) |
ccbeed3a TH |
142 | .endm |
143 | ||
144 | .macro GS_TO_REG reg | |
a49976d1 | 145 | movl %gs, \reg |
ccbeed3a TH |
146 | .endm |
147 | .macro REG_TO_PTGS reg | |
a49976d1 | 148 | movl \reg, PT_GS(%esp) |
ccbeed3a TH |
149 | .endm |
150 | .macro SET_KERNEL_GS reg | |
a49976d1 IM |
151 | movl $(__KERNEL_STACK_CANARY), \reg |
152 | movl \reg, %gs | |
ccbeed3a TH |
153 | .endm |
154 | ||
a49976d1 | 155 | #endif /* CONFIG_X86_32_LAZY_GS */ |
ccbeed3a | 156 | |
150ac78d | 157 | .macro SAVE_ALL pt_regs_ax=%eax |
f0d96110 | 158 | cld |
ccbeed3a | 159 | PUSH_GS |
a49976d1 IM |
160 | pushl %fs |
161 | pushl %es | |
162 | pushl %ds | |
150ac78d | 163 | pushl \pt_regs_ax |
a49976d1 IM |
164 | pushl %ebp |
165 | pushl %edi | |
166 | pushl %esi | |
167 | pushl %edx | |
168 | pushl %ecx | |
169 | pushl %ebx | |
170 | movl $(__USER_DS), %edx | |
171 | movl %edx, %ds | |
172 | movl %edx, %es | |
173 | movl $(__KERNEL_PERCPU), %edx | |
174 | movl %edx, %fs | |
ccbeed3a | 175 | SET_KERNEL_GS %edx |
f0d96110 | 176 | .endm |
1da177e4 | 177 | |
946c1911 JP |
178 | /* |
179 | * This is a sneaky trick to help the unwinder find pt_regs on the stack. The | |
180 | * frame pointer is replaced with an encoded pointer to pt_regs. The encoding | |
5c99b692 | 181 | * is just clearing the MSB, which makes it an invalid stack address and is also |
946c1911 JP |
182 | * a signal to the unwinder that it's a pt_regs pointer in disguise. |
183 | * | |
184 | * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the | |
185 | * original ebp. |
186 | */ | |
187 | .macro ENCODE_FRAME_POINTER | |
188 | #ifdef CONFIG_FRAME_POINTER | |
189 | mov %esp, %ebp | |
5c99b692 | 190 | andl $0x7fffffff, %ebp |
946c1911 JP |
191 | #endif |
192 | .endm | |
193 | ||
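A sketch of that encoding in C terms (helper names are ours; the real decode lives in the frame-pointer unwinder, arch/x86/kernel/unwind_frame.c). Kernel stack addresses on 32-bit have bit 31 set, so a value with bit 31 clear cannot be a real frame pointer:

```c
#include <stdint.h>

/* what the macro does to %ebp: clear bit 31 */
static inline uint32_t encode_frame_pointer(uint32_t regs_addr)
{
	return regs_addr & 0x7fffffffu;
}

/* unwinder side: bit 31 clear means "encoded pt_regs pointer",
 * so restore the bit to get the real address back */
static inline uint32_t decode_frame_pointer(uint32_t ebp)
{
	return ebp | 0x80000000u;
}
```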
f0d96110 | 194 | .macro RESTORE_INT_REGS |
a49976d1 IM |
195 | popl %ebx |
196 | popl %ecx | |
197 | popl %edx | |
198 | popl %esi | |
199 | popl %edi | |
200 | popl %ebp | |
201 | popl %eax | |
f0d96110 | 202 | .endm |
1da177e4 | 203 | |
ccbeed3a | 204 | .macro RESTORE_REGS pop=0 |
f0d96110 | 205 | RESTORE_INT_REGS |
a49976d1 IM |
206 | 1: popl %ds |
207 | 2: popl %es | |
208 | 3: popl %fs | |
ccbeed3a | 209 | POP_GS \pop |
f0d96110 | 210 | .pushsection .fixup, "ax" |
a49976d1 IM |
211 | 4: movl $0, (%esp) |
212 | jmp 1b | |
213 | 5: movl $0, (%esp) | |
214 | jmp 2b | |
215 | 6: movl $0, (%esp) | |
216 | jmp 3b | |
f95d47ca | 217 | .popsection |
a49976d1 IM |
218 | _ASM_EXTABLE(1b, 4b) |
219 | _ASM_EXTABLE(2b, 5b) | |
220 | _ASM_EXTABLE(3b, 6b) | |
ccbeed3a | 221 | POP_GS_EX |
f0d96110 | 222 | .endm |
1da177e4 | 223 | |
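Each `_ASM_EXTABLE(from, to)` pair above emits one entry into the kernel's __ex_table section; a sketch of the entry format of this era (see asm/extable.h), with fields stored as section-relative offsets:

```c
/* if the instruction at 'insn' faults, the fault handler redirects
 * execution to 'fixup' (here: the movl $0 + jmp recovery stubs) */
struct exception_table_entry {
	int insn;	/* relative offset of the faulting instruction */
	int fixup;	/* relative offset of the recovery code        */
	int handler;	/* relative offset of the fixup-class handler  */
};
```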
0100301b BG |
224 | /* |
225 | * %eax: prev task | |
226 | * %edx: next task | |
227 | */ | |
228 | ENTRY(__switch_to_asm) | |
229 | /* | |
230 | * Save callee-saved registers | |
231 | * This must match the order in struct inactive_task_frame | |
232 | */ | |
233 | pushl %ebp | |
234 | pushl %ebx | |
235 | pushl %edi | |
236 | pushl %esi | |
237 | ||
238 | /* switch stack */ | |
239 | movl %esp, TASK_threadsp(%eax) | |
240 | movl TASK_threadsp(%edx), %esp | |
241 | ||
050e9baa | 242 | #ifdef CONFIG_STACKPROTECTOR |
0100301b BG |
243 | movl TASK_stack_canary(%edx), %ebx |
244 | movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset | |
245 | #endif | |
246 | ||
c995efd5 DW |
247 | #ifdef CONFIG_RETPOLINE |
248 | /* | |
249 | * When switching from a shallower to a deeper call stack | |
250 | * the RSB may either underflow or use entries populated | |
251 | * with userspace addresses. On CPUs where those concerns | |
252 | * exist, overwrite the RSB with entries which capture | |
253 | * speculative execution to prevent attack. | |
254 | */ | |
d1c99108 | 255 | FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW |
c995efd5 DW |
256 | #endif |
257 | ||
0100301b BG |
258 | /* restore callee-saved registers */ |
259 | popl %esi | |
260 | popl %edi | |
261 | popl %ebx | |
262 | popl %ebp | |
263 | ||
264 | jmp __switch_to | |
265 | END(__switch_to_asm) | |
266 | ||
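For reference, the four pushes above produce the following in-memory frame (lowest address first); the authoritative definition is struct inactive_task_frame in asm/switch_to.h, and the 32-bit-only name below is ours:

```c
/* %esi is pushed last, so it sits at the lowest address; ret_addr was
 * pushed by the "call __switch_to_asm" in the C context-switch code */
struct inactive_task_frame_32 {
	unsigned long si;
	unsigned long di;
	unsigned long bx;
	unsigned long bp;
	unsigned long ret_addr;
};
```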
ebd57499 JP |
267 | /* |
268 | * The unwinder expects the last frame on the stack to always be at the same | |
269 | * offset from the end of the page, which allows it to validate the stack. | |
270 | * Calling schedule_tail() directly would break that convention because it's an |
271 | * asmlinkage function so its argument has to be pushed on the stack. This | |
272 | * wrapper creates a proper "end of stack" frame header before the call. | |
273 | */ | |
274 | ENTRY(schedule_tail_wrapper) | |
275 | FRAME_BEGIN | |
276 | ||
277 | pushl %eax | |
278 | call schedule_tail | |
279 | popl %eax | |
280 | ||
281 | FRAME_END | |
282 | ret | |
283 | ENDPROC(schedule_tail_wrapper) | |
0100301b BG |
284 | /* |
285 | * A newly forked process directly context switches into this address. | |
286 | * | |
287 | * eax: prev task we switched from | |
616d2483 BG |
288 | * ebx: kernel thread func (NULL for user thread) |
289 | * edi: kernel thread arg | |
0100301b | 290 | */ |
1da177e4 | 291 | ENTRY(ret_from_fork) |
ebd57499 | 292 | call schedule_tail_wrapper |
39e8701f | 293 | |
616d2483 BG |
294 | testl %ebx, %ebx |
295 | jnz 1f /* kernel threads are uncommon */ | |
296 | ||
297 | 2: | |
39e8701f | 298 | /* When we fork, we trace the syscall return in the child, too. */ |
ebd57499 | 299 | movl %esp, %eax |
39e8701f AL |
300 | call syscall_return_slowpath |
301 | jmp restore_all | |
39e8701f | 302 | |
616d2483 BG |
303 | /* kernel thread */ |
304 | 1: movl %edi, %eax | |
2641f08b | 305 | CALL_NOSPEC %ebx |
39e8701f | 306 | /* |
616d2483 BG |
307 | * A kernel thread is allowed to return here after successfully |
308 | * calling do_execve(). Exit to userspace to complete the execve() | |
309 | * syscall. | |
39e8701f | 310 | */ |
616d2483 BG |
311 | movl $0, PT_EAX(%esp) |
312 | jmp 2b | |
313 | END(ret_from_fork) | |
6783eaa2 | 314 | |
1da177e4 LT |
315 | /* |
316 | * Return to user mode is not as complex as all this looks, | |
317 | * but we want the default path for a system call return to | |
318 | * go as quickly as possible, which is why some of this is |
319 | * less clear than it otherwise should be. | |
320 | */ | |
321 | ||
322 | # userspace resumption stub bypassing syscall exit tracing | |
323 | ALIGN | |
324 | ret_from_exception: | |
139ec7c4 | 325 | preempt_stop(CLBR_ANY) |
1da177e4 | 326 | ret_from_intr: |
29a2e283 | 327 | #ifdef CONFIG_VM86 |
a49976d1 IM |
328 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS |
329 | movb PT_CS(%esp), %al | |
330 | andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax | |
29a2e283 DA |
331 | #else |
332 | /* | |
6783eaa2 | 333 | * We can be coming here from a child spawned by kernel_thread(). |
29a2e283 | 334 | */ |
a49976d1 IM |
335 | movl PT_CS(%esp), %eax |
336 | andl $SEGMENT_RPL_MASK, %eax | |
29a2e283 | 337 | #endif |
a49976d1 IM |
338 | cmpl $USER_RPL, %eax |
339 | jb resume_kernel # not returning to v8086 or userspace | |
f95d47ca | 340 | |
1da177e4 | 341 | ENTRY(resume_userspace) |
5d73fc70 | 342 | DISABLE_INTERRUPTS(CLBR_ANY) |
e32e58a9 | 343 | TRACE_IRQS_OFF |
5d73fc70 AL |
344 | movl %esp, %eax |
345 | call prepare_exit_to_usermode | |
a49976d1 | 346 | jmp restore_all |
47a55cd7 | 347 | END(ret_from_exception) |
1da177e4 LT |
348 | |
349 | #ifdef CONFIG_PREEMPT | |
350 | ENTRY(resume_kernel) | |
139ec7c4 | 351 | DISABLE_INTERRUPTS(CLBR_ANY) |
1b00255f | 352 | .Lneed_resched: |
a49976d1 IM |
353 | cmpl $0, PER_CPU_VAR(__preempt_count) |
354 | jnz restore_all | |
355 | testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? | |
356 | jz restore_all | |
357 | call preempt_schedule_irq | |
1b00255f | 358 | jmp .Lneed_resched |
47a55cd7 | 359 | END(resume_kernel) |
1da177e4 LT |
360 | #endif |
361 | ||
f2b37575 AL |
362 | GLOBAL(__begin_SYSENTER_singlestep_region) |
363 | /* | |
364 | * All code from here through __end_SYSENTER_singlestep_region is subject | |
365 | * to being single-stepped if a user program sets TF and executes SYSENTER. | |
366 | * There is absolutely nothing that we can do to prevent this from happening | |
367 | * (thanks Intel!). To keep our handling of this situation as simple as | |
368 | * possible, we handle TF just like AC and NT, except that our #DB handler | |
369 | * will ignore all of the single-step traps generated in this range. | |
370 | */ | |
371 | ||
372 | #ifdef CONFIG_XEN | |
373 | /* | |
374 | * Xen doesn't set %esp to be precisely what the normal SYSENTER | |
375 | * entry point expects, so fix it up before using the normal path. | |
376 | */ | |
377 | ENTRY(xen_sysenter_target) | |
378 | addl $5*4, %esp /* remove xen-provided frame */ | |
1b00255f | 379 | jmp .Lsysenter_past_esp |
f2b37575 AL |
380 | #endif |
381 | ||
fda57b22 AL |
382 | /* |
383 | * 32-bit SYSENTER entry. | |
384 | * | |
385 | * 32-bit system calls through the vDSO's __kernel_vsyscall enter here | |
386 | * if X86_FEATURE_SEP is available. This is the preferred system call | |
387 | * entry on 32-bit systems. | |
388 | * | |
389 | * The SYSENTER instruction, in principle, should *only* occur in the | |
390 | * vDSO. In practice, a small number of Android devices were shipped | |
391 | * with a copy of Bionic that inlined a SYSENTER instruction. This | |
392 | * never happened in any of Google's Bionic versions -- it only happened | |
393 | * in a narrow range of Intel-provided versions. | |
394 | * | |
395 | * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs. | |
396 | * IF and VM in EFLAGS are cleared (IOW: interrupts are off). |
397 | * SYSENTER does not save anything on the stack, | |
398 | * and does not save old EIP (!!!), ESP, or EFLAGS. | |
399 | * | |
400 | * To avoid losing track of EFLAGS.VM (and thus potentially corrupting | |
401 | * user and/or vm86 state), we explicitly disable the SYSENTER | |
402 | * instruction in vm86 mode by reprogramming the MSRs. | |
403 | * | |
404 | * Arguments: | |
405 | * eax system call number | |
406 | * ebx arg1 | |
407 | * ecx arg2 | |
408 | * edx arg3 | |
409 | * esi arg4 | |
410 | * edi arg5 | |
411 | * ebp user stack | |
412 | * 0(%ebp) arg6 | |
413 | */ | |
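The "previously programmed MSRs" are the architectural SYSENTER trio; SS is not programmable and is derived as CS + 8. The constants, as named in asm/msr-index.h:

```c
/* SYSENTER reads its target state from these MSRs (Intel SDM) */
#define MSR_IA32_SYSENTER_CS	0x00000174	/* new CS; SS becomes CS + 8 */
#define MSR_IA32_SYSENTER_ESP	0x00000175	/* new ESP                   */
#define MSR_IA32_SYSENTER_EIP	0x00000176	/* new EIP                   */
```

Zeroing MSR_IA32_SYSENTER_CS is what "disable the SYSENTER instruction in vm86 mode" amounts to: SYSENTER faults with #GP when the CS MSR holds a null selector.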
4c8cd0c5 | 414 | ENTRY(entry_SYSENTER_32) |
a49976d1 | 415 | movl TSS_sysenter_sp0(%esp), %esp |
1b00255f | 416 | .Lsysenter_past_esp: |
5f310f73 | 417 | pushl $__USER_DS /* pt_regs->ss */ |
30bfa7b3 | 418 | pushl %ebp /* pt_regs->sp (stashed in bp) */ |
5f310f73 AL |
419 | pushfl /* pt_regs->flags (except IF = 0) */ |
420 | orl $X86_EFLAGS_IF, (%esp) /* Fix IF */ | |
421 | pushl $__USER_CS /* pt_regs->cs */ | |
422 | pushl $0 /* pt_regs->ip = 0 (placeholder) */ | |
423 | pushl %eax /* pt_regs->orig_ax */ | |
424 | SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */ | |
425 | ||
67f590e8 | 426 | /* |
f2b37575 AL |
427 | * SYSENTER doesn't filter flags, so we need to clear NT, AC |
428 | * and TF ourselves. To save a few cycles, we can check whether | |
67f590e8 AL |
429 | * any of them was set instead of doing an unconditional popfl. |
430 | * This needs to happen before enabling interrupts so that | |
431 | * we don't get preempted with NT set. | |
432 | * | |
f2b37575 AL |
433 | * If TF is set, we will single-step all the way to here -- do_debug |
434 | * will ignore all the traps. (Yes, this is slow, but so is | |
435 | * single-stepping in general. This allows us to avoid having |
436 | * more complicated code to handle the case where a user program |
437 | * forces us to single-step through the SYSENTER entry code.) | |
438 | * | |
67f590e8 AL |
439 | * NB.: .Lsysenter_fix_flags is a label with the code under it moved |
440 | * out-of-line as an optimization: NT is unlikely to be set in the | |
441 | * majority of the cases and instead of polluting the I$ unnecessarily, | |
442 | * we're keeping that code behind a branch which will predict as | |
443 | * not-taken and therefore its instructions won't be fetched. | |
444 | */ | |
f2b37575 | 445 | testl $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp) |
67f590e8 AL |
446 | jnz .Lsysenter_fix_flags |
447 | .Lsysenter_flags_fixed: | |
448 | ||
55f327fa | 449 | /* |
5f310f73 AL |
450 | * User mode is traced as though IRQs are on, and SYSENTER |
451 | * turned them off. | |
e6e5494c | 452 | */ |
55f327fa | 453 | TRACE_IRQS_OFF |
5f310f73 AL |
454 | |
455 | movl %esp, %eax | |
456 | call do_fast_syscall_32 | |
91e2eea9 BO |
457 | /* XEN PV guests always use IRET path */ |
458 | ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \ | |
459 | "jmp .Lsyscall_32_done", X86_FEATURE_XENPV | |
5f310f73 AL |
460 | |
461 | /* Opportunistic SYSEXIT */ | |
462 | TRACE_IRQS_ON /* User mode traces as IRQs on. */ | |
463 | movl PT_EIP(%esp), %edx /* pt_regs->ip */ | |
464 | movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */ | |
3bd29515 AL |
465 | 1: mov PT_FS(%esp), %fs |
466 | PTGS_TO_GS | |
5f310f73 AL |
467 | popl %ebx /* pt_regs->bx */ |
468 | addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */ | |
469 | popl %esi /* pt_regs->si */ | |
470 | popl %edi /* pt_regs->di */ | |
471 | popl %ebp /* pt_regs->bp */ | |
472 | popl %eax /* pt_regs->ax */ | |
5f310f73 | 473 | |
c2c9b52f AL |
474 | /* |
475 | * Restore all flags except IF. (We restore IF separately because | |
476 | * STI gives a one-instruction window in which we won't be interrupted, | |
477 | * whereas POPF does not.) | |
478 | */ | |
479 | addl $PT_EFLAGS-PT_DS, %esp /* point esp at pt_regs->flags */ | |
480 | btr $X86_EFLAGS_IF_BIT, (%esp) | |
481 | popfl | |
482 | ||
5f310f73 AL |
483 | /* |
484 | * Return back to the vDSO, which will pop ecx and edx. | |
485 | * Don't bother with DS and ES (they already contain __USER_DS). | |
486 | */ | |
88c15ec9 BO |
487 | sti |
488 | sysexit | |
af0575bb | 489 | |
a49976d1 IM |
490 | .pushsection .fixup, "ax" |
491 | 2: movl $0, PT_FS(%esp) | |
492 | jmp 1b | |
f95d47ca | 493 | .popsection |
a49976d1 | 494 | _ASM_EXTABLE(1b, 2b) |
ccbeed3a | 495 | PTGS_TO_GS_EX |
67f590e8 AL |
496 | |
497 | .Lsysenter_fix_flags: | |
498 | pushl $X86_EFLAGS_FIXED | |
499 | popfl | |
500 | jmp .Lsysenter_flags_fixed | |
f2b37575 | 501 | GLOBAL(__end_SYSENTER_singlestep_region) |
4c8cd0c5 | 502 | ENDPROC(entry_SYSENTER_32) |
1da177e4 | 503 | |
fda57b22 AL |
504 | /* |
505 | * 32-bit legacy system call entry. | |
506 | * | |
507 | * 32-bit x86 Linux system calls traditionally used the INT $0x80 | |
508 | * instruction. INT $0x80 lands here. | |
509 | * | |
510 | * This entry point can be used by any 32-bit program to perform system calls. |
511 | * Instances of INT $0x80 can be found inline in various programs and | |
512 | * libraries. It is also used by the vDSO's __kernel_vsyscall | |
513 | * fallback for hardware that doesn't support a faster entry method. | |
514 | * Restarted 32-bit system calls also fall back to INT $0x80 | |
515 | * regardless of what instruction was originally used to do the system | |
516 | * call. (64-bit programs can use INT $0x80 as well, but they can | |
517 | * only run on 64-bit kernels and therefore land in | |
518 | * entry_INT80_compat.) | |
519 | * | |
520 | * This is considered a slow path. It is not used by most libc | |
521 | * implementations on modern hardware except during process startup. | |
522 | * | |
523 | * Arguments: | |
524 | * eax system call number | |
525 | * ebx arg1 | |
526 | * ecx arg2 | |
527 | * edx arg3 | |
528 | * esi arg4 | |
529 | * edi arg5 | |
530 | * ebp arg6 | |
531 | */ | |
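For illustration, a minimal userspace sketch of this convention (compile with -m32; `int80_write` is our name, and 4 is `__NR_write` in the 32-bit syscall table):

```c
/* legacy INT $0x80 system call: number in %eax, args in %ebx..%ebp,
 * return value (or -errno in [-4095, -1]) comes back in %eax */
static long int80_write(int fd, const void *buf, unsigned long len)
{
	long ret;
	asm volatile ("int $0x80"
		      : "=a" (ret)
		      : "a" (4), "b" (fd), "c" (buf), "d" (len)
		      : "memory");
	return ret;
}
```

Note the entry code below stores -ENOSYS into pt_regs->ax up front (SAVE_ALL pt_regs_ax=$-ENOSYS), so a bogus syscall number comes back as -ENOSYS through this same path.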
b2502b41 | 532 | ENTRY(entry_INT80_32) |
e59d1b0a | 533 | ASM_CLAC |
150ac78d | 534 | pushl %eax /* pt_regs->orig_ax */ |
5f310f73 | 535 | SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */ |
150ac78d AL |
536 | |
537 | /* | |
a798f091 AL |
538 | * User mode is traced as though IRQs are on, and the interrupt gate |
539 | * turned them off. | |
150ac78d | 540 | */ |
a798f091 | 541 | TRACE_IRQS_OFF |
150ac78d AL |
542 | |
543 | movl %esp, %eax | |
a798f091 | 544 | call do_int80_syscall_32 |
5f310f73 | 545 | .Lsyscall_32_done: |
1da177e4 LT |
546 | |
547 | restore_all: | |
2e04bc76 | 548 | TRACE_IRQS_IRET |
1b00255f | 549 | .Lrestore_all_notrace: |
34273f41 | 550 | #ifdef CONFIG_X86_ESPFIX32 |
1b00255f | 551 | ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX |
58a5aac5 | 552 | |
a49976d1 IM |
553 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS |
554 | /* | |
555 | * Warning: PT_OLDSS(%esp) contains the wrong/random values if we | |
556 | * are returning to the kernel. | |
557 | * See comments in process.c:copy_thread() for details. | |
558 | */ | |
559 | movb PT_OLDSS(%esp), %ah | |
560 | movb PT_CS(%esp), %al | |
561 | andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax | |
562 | cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax | |
1b00255f | 563 | je .Lldt_ss # returning to user-space with LDT SS |
34273f41 | 564 | #endif |
1b00255f | 565 | .Lrestore_nocheck: |
a49976d1 | 566 | RESTORE_REGS 4 # skip orig_eax/error_code |
1b00255f | 567 | .Lirq_return: |
10bcc80e MD |
568 | /* |
569 | * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization |
570 | * when returning from an IPI handler and when returning from the |
571 | * scheduler to user-space. | |
572 | */ | |
3701d863 | 573 | INTERRUPT_RETURN |
1b00255f | 574 | |
a49976d1 IM |
575 | .section .fixup, "ax" |
576 | ENTRY(iret_exc ) | |
577 | pushl $0 # no error code | |
578 | pushl $do_iret_error | |
7252c4c3 | 579 | jmp common_exception |
1da177e4 | 580 | .previous |
1b00255f | 581 | _ASM_EXTABLE(.Lirq_return, iret_exc) |
1da177e4 | 582 | |
34273f41 | 583 | #ifdef CONFIG_X86_ESPFIX32 |
1b00255f | 584 | .Lldt_ss: |
dc4c2a0a AH |
585 | /* |
586 | * Setup and switch to ESPFIX stack | |
587 | * | |
588 | * We're returning to userspace with a 16-bit stack. The CPU will not |
589 | * restore the high word of ESP for us on executing iret... This is an | |
590 | * "official" bug of all the x86-compatible CPUs, which we can work | |
591 | * around to make dosemu and wine happy. We do this by preloading the | |
592 | * high word of ESP with the high word of the userspace ESP while | |
593 | * compensating for the offset by changing to the ESPFIX segment with | |
594 | * a base address that accounts for the difference. |
595 | */ | |
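In C terms, the instructions that follow compute (a sketch of the arithmetic only, with our own names):

```c
/* build an ESP whose high word is already the user's, and a segment
 * base that compensates, so that ss.base + new_esp == kernel_esp */
struct espfix_fix { unsigned int new_esp, seg_base; };

static struct espfix_fix espfix_compute(unsigned int kernel_esp,
					unsigned int user_esp)
{
	struct espfix_fix f;
	f.new_esp  = (user_esp & 0xffff0000u) | (kernel_esp & 0xffffu);
	f.seg_base = kernel_esp - f.new_esp;	/* low word is always 0 */
	return f;
}
```

Only the high word of seg_base is nonzero, which is why the code below stores just %dl and %dh into base bits 16..23 and 24..31 of the GDT entry.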
72c511dd | 596 | #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) |
a49976d1 IM |
597 | mov %esp, %edx /* load kernel esp */ |
598 | mov PT_OLDESP(%esp), %eax /* load userspace esp */ | |
599 | mov %dx, %ax /* eax: new kernel esp */ | |
9b47feb7 DV |
600 | sub %eax, %edx /* offset (low word is 0) */ |
601 | shr $16, %edx | |
a49976d1 IM |
602 | mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ |
603 | mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ | |
604 | pushl $__ESPFIX_SS | |
605 | pushl %eax /* new kernel esp */ | |
606 | /* | |
607 | * Disable interrupts, but do not irqtrace this section: we | |
2e04bc76 | 608 | * will soon execute iret and the tracer was already set to |
a49976d1 IM |
609 | * the irqstate after the IRET: |
610 | */ | |
fdbd518a | 611 | DISABLE_INTERRUPTS(CLBR_ANY) |
a49976d1 | 612 | lss (%esp), %esp /* switch to espfix segment */ |
1b00255f | 613 | jmp .Lrestore_nocheck |
34273f41 | 614 | #endif |
b2502b41 | 615 | ENDPROC(entry_INT80_32) |
1da177e4 | 616 | |
f0d96110 | 617 | .macro FIXUP_ESPFIX_STACK |
dc4c2a0a AH |
618 | /* |
619 | * Switch back from the ESPFIX stack to the normal zero-based stack |
620 | * | |
621 | * We can't call C functions using the ESPFIX stack. This code reads | |
622 | * the high word of the segment base from the GDT and switches to the |
623 | * normal stack and adjusts ESP with the matching offset. | |
624 | */ | |
34273f41 | 625 | #ifdef CONFIG_X86_ESPFIX32 |
dc4c2a0a | 626 | /* fixup the stack */ |
a49976d1 IM |
627 | mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ |
628 | mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ | |
9b47feb7 | 629 | shl $16, %eax |
a49976d1 IM |
630 | addl %esp, %eax /* the adjusted stack pointer */ |
631 | pushl $__KERNEL_DS | |
632 | pushl %eax | |
633 | lss (%esp), %esp /* switch to the normal stack segment */ | |
34273f41 | 634 | #endif |
f0d96110 TH |
635 | .endm |
636 | .macro UNWIND_ESPFIX_STACK | |
34273f41 | 637 | #ifdef CONFIG_X86_ESPFIX32 |
a49976d1 | 638 | movl %ss, %eax |
f0d96110 | 639 | /* see if on espfix stack */ |
a49976d1 IM |
640 | cmpw $__ESPFIX_SS, %ax |
641 | jne 27f | |
642 | movl $__KERNEL_DS, %eax | |
643 | movl %eax, %ds | |
644 | movl %eax, %es | |
f0d96110 TH |
645 | /* switch to normal stack */ |
646 | FIXUP_ESPFIX_STACK | |
647 | 27: | |
34273f41 | 648 | #endif |
f0d96110 | 649 | .endm |
1da177e4 LT |
650 | |
651 | /* | |
3304c9c3 DV |
652 | * Build the entry stubs with some assembler magic. |
653 | * We pack 1 stub into every 8-byte block. | |
1da177e4 | 654 | */ |
3304c9c3 | 655 | .align 8 |
1da177e4 | 656 | ENTRY(irq_entries_start) |
3304c9c3 DV |
657 | vector=FIRST_EXTERNAL_VECTOR |
658 | .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) | |
a49976d1 | 659 | pushl $(~vector+0x80) /* Note: always in signed byte range */ |
3304c9c3 DV |
660 | vector=vector+1 |
661 | jmp common_interrupt | |
3304c9c3 DV |
662 | .align 8 |
663 | .endr | |
47a55cd7 JB |
664 | END(irq_entries_start) |
665 | ||
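The `$(~vector + 0x80)` immediate above stays in signed-byte range, so each `pushl` encodes in two bytes and every stub fits its 8-byte slot. A sketch of the round trip (the checker function is ours):

```c
#include <assert.h>

static void check_vector_roundtrip(int vector)	/* 0x20 <= vector <= 0xff */
{
	int pushed  = ~vector + 0x80;	/* what the stub pushes          */
	int orig_ax = pushed - 0x80;	/* after common_interrupt's addl */

	assert(pushed >= -128 && pushed <= 127);	/* imm8-encodable */
	assert(~orig_ax == vector);	/* do_IRQ recovers the vector    */
}
```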
55f327fa IM |
666 | /* |
667 | * the CPU automatically disables interrupts when executing an IRQ vector, | |
668 | * so IRQ-flags tracing has to follow that: | |
669 | */ | |
b7c6244f | 670 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
1da177e4 | 671 | common_interrupt: |
e59d1b0a | 672 | ASM_CLAC |
a49976d1 | 673 | addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ |
1da177e4 | 674 | SAVE_ALL |
946c1911 | 675 | ENCODE_FRAME_POINTER |
55f327fa | 676 | TRACE_IRQS_OFF |
a49976d1 IM |
677 | movl %esp, %eax |
678 | call do_IRQ | |
679 | jmp ret_from_intr | |
47a55cd7 | 680 | ENDPROC(common_interrupt) |
1da177e4 | 681 | |
02cf94c3 | 682 | #define BUILD_INTERRUPT3(name, nr, fn) \ |
1da177e4 | 683 | ENTRY(name) \ |
e59d1b0a | 684 | ASM_CLAC; \ |
a49976d1 | 685 | pushl $~(nr); \ |
fe7cacc1 | 686 | SAVE_ALL; \ |
946c1911 | 687 | ENCODE_FRAME_POINTER; \ |
55f327fa | 688 | TRACE_IRQS_OFF \ |
a49976d1 IM |
689 | movl %esp, %eax; \ |
690 | call fn; \ | |
691 | jmp ret_from_intr; \ | |
47a55cd7 | 692 | ENDPROC(name) |
1da177e4 | 693 | |
a49976d1 IM |
694 | #define BUILD_INTERRUPT(name, nr) \ |
695 | BUILD_INTERRUPT3(name, nr, smp_##name); \ | |
02cf94c3 | 696 | |
1da177e4 | 697 | /* The include is where all of the SMP etc. interrupts come from */ |
1164dd00 | 698 | #include <asm/entry_arch.h> |
1da177e4 | 699 | |
1da177e4 | 700 | ENTRY(coprocessor_error) |
e59d1b0a | 701 | ASM_CLAC |
a49976d1 IM |
702 | pushl $0 |
703 | pushl $do_coprocessor_error | |
7252c4c3 | 704 | jmp common_exception |
47a55cd7 | 705 | END(coprocessor_error) |
1da177e4 LT |
706 | |
707 | ENTRY(simd_coprocessor_error) | |
e59d1b0a | 708 | ASM_CLAC |
a49976d1 | 709 | pushl $0 |
40d2e763 BG |
710 | #ifdef CONFIG_X86_INVD_BUG |
711 | /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ | |
a49976d1 IM |
712 | ALTERNATIVE "pushl $do_general_protection", \ |
713 | "pushl $do_simd_coprocessor_error", \ | |
8e65f6e0 | 714 | X86_FEATURE_XMM |
40d2e763 | 715 | #else |
a49976d1 | 716 | pushl $do_simd_coprocessor_error |
40d2e763 | 717 | #endif |
7252c4c3 | 718 | jmp common_exception |
47a55cd7 | 719 | END(simd_coprocessor_error) |
1da177e4 LT |
720 | |
721 | ENTRY(device_not_available) | |
e59d1b0a | 722 | ASM_CLAC |
a49976d1 IM |
723 | pushl $-1 # mark this as an int |
724 | pushl $do_device_not_available | |
7252c4c3 | 725 | jmp common_exception |
47a55cd7 | 726 | END(device_not_available) |
1da177e4 | 727 | |
d3561b7f RR |
728 | #ifdef CONFIG_PARAVIRT |
729 | ENTRY(native_iret) | |
3701d863 | 730 | iret |
6837a54d | 731 | _ASM_EXTABLE(native_iret, iret_exc) |
47a55cd7 | 732 | END(native_iret) |
d3561b7f RR |
733 | #endif |
734 | ||
1da177e4 | 735 | ENTRY(overflow) |
e59d1b0a | 736 | ASM_CLAC |
a49976d1 IM |
737 | pushl $0 |
738 | pushl $do_overflow | |
7252c4c3 | 739 | jmp common_exception |
47a55cd7 | 740 | END(overflow) |
1da177e4 LT |
741 | |
742 | ENTRY(bounds) | |
e59d1b0a | 743 | ASM_CLAC |
a49976d1 IM |
744 | pushl $0 |
745 | pushl $do_bounds | |
7252c4c3 | 746 | jmp common_exception |
47a55cd7 | 747 | END(bounds) |
1da177e4 LT |
748 | |
749 | ENTRY(invalid_op) | |
e59d1b0a | 750 | ASM_CLAC |
a49976d1 IM |
751 | pushl $0 |
752 | pushl $do_invalid_op | |
7252c4c3 | 753 | jmp common_exception |
47a55cd7 | 754 | END(invalid_op) |
1da177e4 LT |
755 | |
756 | ENTRY(coprocessor_segment_overrun) | |
e59d1b0a | 757 | ASM_CLAC |
a49976d1 IM |
758 | pushl $0 |
759 | pushl $do_coprocessor_segment_overrun | |
7252c4c3 | 760 | jmp common_exception |
47a55cd7 | 761 | END(coprocessor_segment_overrun) |
1da177e4 LT |
762 | |
763 | ENTRY(invalid_TSS) | |
e59d1b0a | 764 | ASM_CLAC |
a49976d1 | 765 | pushl $do_invalid_TSS |
7252c4c3 | 766 | jmp common_exception |
47a55cd7 | 767 | END(invalid_TSS) |
1da177e4 LT |
768 | |
769 | ENTRY(segment_not_present) | |
e59d1b0a | 770 | ASM_CLAC |
a49976d1 | 771 | pushl $do_segment_not_present |
7252c4c3 | 772 | jmp common_exception |
47a55cd7 | 773 | END(segment_not_present) |
1da177e4 LT |
774 | |
775 | ENTRY(stack_segment) | |
e59d1b0a | 776 | ASM_CLAC |
a49976d1 | 777 | pushl $do_stack_segment |
7252c4c3 | 778 | jmp common_exception |
47a55cd7 | 779 | END(stack_segment) |
1da177e4 | 780 | |
1da177e4 | 781 | ENTRY(alignment_check) |
e59d1b0a | 782 | ASM_CLAC |
a49976d1 | 783 | pushl $do_alignment_check |
7252c4c3 | 784 | jmp common_exception |
47a55cd7 | 785 | END(alignment_check) |
1da177e4 | 786 | |
d28c4393 | 787 | ENTRY(divide_error) |
e59d1b0a | 788 | ASM_CLAC |
a49976d1 IM |
789 | pushl $0 # no error code |
790 | pushl $do_divide_error | |
7252c4c3 | 791 | jmp common_exception |
47a55cd7 | 792 | END(divide_error) |
1da177e4 LT |
793 | |
794 | #ifdef CONFIG_X86_MCE | |
795 | ENTRY(machine_check) | |
e59d1b0a | 796 | ASM_CLAC |
a49976d1 IM |
797 | pushl $0 |
798 | pushl machine_check_vector | |
7252c4c3 | 799 | jmp common_exception |
47a55cd7 | 800 | END(machine_check) |
1da177e4 LT |
801 | #endif |
802 | ||
803 | ENTRY(spurious_interrupt_bug) | |
e59d1b0a | 804 | ASM_CLAC |
a49976d1 IM |
805 | pushl $0 |
806 | pushl $do_spurious_interrupt_bug | |
7252c4c3 | 807 | jmp common_exception |
47a55cd7 | 808 | END(spurious_interrupt_bug) |
1da177e4 | 809 | |
5ead97c8 JF |
810 | #ifdef CONFIG_XEN |
811 | ENTRY(xen_hypervisor_callback) | |
a49976d1 | 812 | pushl $-1 /* orig_ax = -1 => not a system call */ |
5ead97c8 | 813 | SAVE_ALL |
946c1911 | 814 | ENCODE_FRAME_POINTER |
5ead97c8 | 815 | TRACE_IRQS_OFF |
9ec2b804 | 816 | |
a49976d1 IM |
817 | /* |
818 | * Check to see if we got the event in the critical | |
819 | * region in xen_iret_direct, after we've reenabled | |
820 | * events and checked for pending events. This simulates the |
821 | * iret instruction's behaviour where it delivers a |
822 | * pending interrupt when enabling interrupts: | |
823 | */ | |
824 | movl PT_EIP(%esp), %eax | |
825 | cmpl $xen_iret_start_crit, %eax | |
826 | jb 1f | |
827 | cmpl $xen_iret_end_crit, %eax | |
828 | jae 1f | |
9ec2b804 | 829 | |
a49976d1 | 830 | jmp xen_iret_crit_fixup |
e2a81baf | 831 | |
e2a81baf | 832 | ENTRY(xen_do_upcall) |
a49976d1 IM |
833 | 1: mov %esp, %eax |
834 | call xen_evtchn_do_upcall | |
fdfd811d | 835 | #ifndef CONFIG_PREEMPT |
a49976d1 | 836 | call xen_maybe_preempt_hcall |
fdfd811d | 837 | #endif |
a49976d1 | 838 | jmp ret_from_intr |
5ead97c8 JF |
839 | ENDPROC(xen_hypervisor_callback) |
840 | ||
a49976d1 IM |
841 | /* |
842 | * Hypervisor uses this for application faults while it executes. | |
843 | * We get here for two reasons: | |
844 | * 1. Fault while reloading DS, ES, FS or GS | |
845 | * 2. Fault while executing IRET | |
846 | * Category 1 we fix up by reattempting the load, and zeroing the segment | |
847 | * register if the load fails. | |
848 | * Category 2 we fix up by jumping to do_iret_error. We cannot use the | |
849 | * normal Linux return path in this case because if we use the IRET hypercall | |
850 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. | |
851 | * We distinguish between categories by maintaining a status value in EAX. | |
852 | */ | |
5ead97c8 | 853 | ENTRY(xen_failsafe_callback) |
a49976d1 IM |
854 | pushl %eax |
855 | movl $1, %eax | |
856 | 1: mov 4(%esp), %ds | |
857 | 2: mov 8(%esp), %es | |
858 | 3: mov 12(%esp), %fs | |
859 | 4: mov 16(%esp), %gs | |
a349e23d DV |
860 | /* EAX == 0 => Category 1 (Bad segment) |
861 | EAX != 0 => Category 2 (Bad IRET) */ | |
a49976d1 IM |
862 | testl %eax, %eax |
863 | popl %eax | |
864 | lea 16(%esp), %esp | |
865 | jz 5f | |
866 | jmp iret_exc | |
867 | 5: pushl $-1 /* orig_ax = -1 => not a system call */ | |
5ead97c8 | 868 | SAVE_ALL |
946c1911 | 869 | ENCODE_FRAME_POINTER |
a49976d1 IM |
870 | jmp ret_from_exception |
871 | ||
872 | .section .fixup, "ax" | |
873 | 6: xorl %eax, %eax | |
874 | movl %eax, 4(%esp) | |
875 | jmp 1b | |
876 | 7: xorl %eax, %eax | |
877 | movl %eax, 8(%esp) | |
878 | jmp 2b | |
879 | 8: xorl %eax, %eax | |
880 | movl %eax, 12(%esp) | |
881 | jmp 3b | |
882 | 9: xorl %eax, %eax | |
883 | movl %eax, 16(%esp) | |
884 | jmp 4b | |
5ead97c8 | 885 | .previous |
a49976d1 IM |
886 | _ASM_EXTABLE(1b, 6b) |
887 | _ASM_EXTABLE(2b, 7b) | |
888 | _ASM_EXTABLE(3b, 8b) | |
889 | _ASM_EXTABLE(4b, 9b) | |
5ead97c8 JF |
890 | ENDPROC(xen_failsafe_callback) |
891 | ||
bc2b0331 | 892 | BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, |
4b9a8dca | 893 | xen_evtchn_do_upcall) |
38e20b07 | 894 | |
a49976d1 | 895 | #endif /* CONFIG_XEN */ |
bc2b0331 S |
896 | |
897 | #if IS_ENABLED(CONFIG_HYPERV) | |
898 | ||
899 | BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, | |
4b9a8dca | 900 | hyperv_vector_handler) |
bc2b0331 | 901 | |
93286261 VK |
902 | BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR, |
903 | hyperv_reenlightenment_intr) | |
904 | ||
248e742a MK |
905 | BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR, |
906 | hv_stimer0_vector_handler) | |
907 | ||
bc2b0331 | 908 | #endif /* CONFIG_HYPERV */ |
5ead97c8 | 909 | |
d211af05 | 910 | ENTRY(page_fault) |
e59d1b0a | 911 | ASM_CLAC |
a49976d1 | 912 | pushl $do_page_fault |
d211af05 | 913 | ALIGN |
7252c4c3 JP |
914 | jmp common_exception |
915 | END(page_fault) | |
916 | ||
917 | common_exception: | |
ccbeed3a | 918 | /* the function address is in %gs's slot on the stack */ |
a49976d1 IM |
919 | pushl %fs |
920 | pushl %es | |
921 | pushl %ds | |
922 | pushl %eax | |
923 | pushl %ebp | |
924 | pushl %edi | |
925 | pushl %esi | |
926 | pushl %edx | |
927 | pushl %ecx | |
928 | pushl %ebx | |
946c1911 | 929 | ENCODE_FRAME_POINTER |
d211af05 | 930 | cld |
a49976d1 IM |
931 | movl $(__KERNEL_PERCPU), %ecx |
932 | movl %ecx, %fs | |
d211af05 | 933 | UNWIND_ESPFIX_STACK |
ccbeed3a | 934 | GS_TO_REG %ecx |
a49976d1 IM |
935 | movl PT_GS(%esp), %edi # get the function address |
936 | movl PT_ORIG_EAX(%esp), %edx # get the error code | |
937 | movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart | |
ccbeed3a TH |
938 | REG_TO_PTGS %ecx |
939 | SET_KERNEL_GS %ecx | |
a49976d1 IM |
940 | movl $(__USER_DS), %ecx |
941 | movl %ecx, %ds | |
942 | movl %ecx, %es | |
d211af05 | 943 | TRACE_IRQS_OFF |
a49976d1 | 944 | movl %esp, %eax # pt_regs pointer |
2641f08b | 945 | CALL_NOSPEC %edi |
a49976d1 | 946 | jmp ret_from_exception |
7252c4c3 | 947 | END(common_exception) |
d211af05 | 948 | |
d211af05 | 949 | ENTRY(debug) |
7536656f AL |
950 | /* |
951 | * #DB can happen at the first instruction of | |
952 | * entry_SYSENTER_32 or in Xen's SYSENTER prologue. If this | |
953 | * happens, then we will be running on a very small stack. We | |
954 | * need to detect this condition and switch to the thread | |
955 | * stack before calling any C code at all. | |
956 | * | |
957 | * If you edit this code, keep in mind that NMIs can happen in here. | |
958 | */ | |
e59d1b0a | 959 | ASM_CLAC |
a49976d1 | 960 | pushl $-1 # mark this as an int |
d211af05 | 961 | SAVE_ALL |
946c1911 | 962 | ENCODE_FRAME_POINTER |
a49976d1 IM |
963 | xorl %edx, %edx # error code 0 |
964 | movl %esp, %eax # pt_regs pointer | |
7536656f AL |
965 | |
966 | /* Are we currently on the SYSENTER stack? */ | |
72f5e08d | 967 | movl PER_CPU_VAR(cpu_entry_area), %ecx |
4fe2d8b1 DH |
968 | addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx |
969 | subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ | |
970 | cmpl $SIZEOF_entry_stack, %ecx | |
7536656f AL |
971 | jb .Ldebug_from_sysenter_stack |
972 | ||
973 | TRACE_IRQS_OFF | |
974 | call do_debug | |
975 | jmp ret_from_exception | |
976 | ||
977 | .Ldebug_from_sysenter_stack: | |
978 | /* We're on the SYSENTER stack. Switch off. */ | |
946c1911 | 979 | movl %esp, %ebx |
7536656f AL |
980 | movl PER_CPU_VAR(cpu_current_top_of_stack), %esp |
981 | TRACE_IRQS_OFF | |
a49976d1 | 982 | call do_debug |
946c1911 | 983 | movl %ebx, %esp |
a49976d1 | 984 | jmp ret_from_exception |
d211af05 AH |
985 | END(debug) |
986 | ||
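The #DB path above (and the NMI path below) decide "are we on the entry stack?" with a single unsigned comparison; in C the test reads (a sketch, with our own names):

```c
#include <stdbool.h>

/* ecx = stack_end - esp; the unsigned "jb" covers both bounds at once:
 * esp is on the entry stack iff it lies in (stack_end - size, stack_end] */
static bool on_entry_stack(unsigned long esp, unsigned long stack_end,
			   unsigned long size)
{
	return (stack_end - esp) < size;
}
```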
987 | /* | |
7536656f AL |
988 | * NMI is doubly nasty. It can happen on the first instruction of |
989 | * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning | |
990 | * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32 | |
991 | * switched stacks. We handle both conditions by simply checking whether we | |
992 | * interrupted kernel code running on the SYSENTER stack. | |
d211af05 AH |
993 | */ |
994 | ENTRY(nmi) | |
e59d1b0a | 995 | ASM_CLAC |
34273f41 | 996 | #ifdef CONFIG_X86_ESPFIX32 |
a49976d1 IM |
997 | pushl %eax |
998 | movl %ss, %eax | |
999 | cmpw $__ESPFIX_SS, %ax | |
1000 | popl %eax | |
1b00255f | 1001 | je .Lnmi_espfix_stack |
34273f41 | 1002 | #endif |
7536656f AL |
1003 | |
1004 | pushl %eax # pt_regs->orig_ax | |
d211af05 | 1005 | SAVE_ALL |
946c1911 | 1006 | ENCODE_FRAME_POINTER |
a49976d1 IM |
1007 | xorl %edx, %edx # zero error code |
1008 | movl %esp, %eax # pt_regs pointer | |
7536656f AL |
1009 | |
1010 | /* Are we currently on the SYSENTER stack? */ | |
72f5e08d | 1011 | movl PER_CPU_VAR(cpu_entry_area), %ecx |
4fe2d8b1 DH |
1012 | addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx |
1013 | subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ | |
1014 | cmpl $SIZEOF_entry_stack, %ecx | |
7536656f AL |
1015 | jb .Lnmi_from_sysenter_stack |
1016 | ||
1017 | /* Not on SYSENTER stack. */ | |
a49976d1 | 1018 | call do_nmi |
1b00255f | 1019 | jmp .Lrestore_all_notrace |
d211af05 | 1020 | |
7536656f AL |
1021 | .Lnmi_from_sysenter_stack: |
1022 | /* | |
1023 | * We're on the SYSENTER stack. Switch off. No one (not even debug) | |
1024 | * is using the thread stack right now, so it's safe for us to use it. | |
1025 | */ | |
946c1911 | 1026 | movl %esp, %ebx |
7536656f AL |
1027 | movl PER_CPU_VAR(cpu_current_top_of_stack), %esp |
1028 | call do_nmi | |
946c1911 | 1029 | movl %ebx, %esp |
1b00255f | 1030 | jmp .Lrestore_all_notrace |
d211af05 | 1031 | |
34273f41 | 1032 | #ifdef CONFIG_X86_ESPFIX32 |
1b00255f | 1033 | .Lnmi_espfix_stack: |
131484c8 | 1034 | /* |
d211af05 AH |
1035 | * create the pointer for "lss" to switch back to the original stack |
1036 | */ | |
a49976d1 IM |
1037 | pushl %ss |
1038 | pushl %esp | |
1039 | addl $4, (%esp) | |
d211af05 AH |
1040 | /* copy the iret frame of 12 bytes */ |
1041 | .rept 3 | |
a49976d1 | 1042 | pushl 16(%esp) |
d211af05 | 1043 | .endr |
a49976d1 | 1044 | pushl %eax |
d211af05 | 1045 | SAVE_ALL |
946c1911 | 1046 | ENCODE_FRAME_POINTER |
a49976d1 IM |
1047 | FIXUP_ESPFIX_STACK # %eax == %esp |
1048 | xorl %edx, %edx # zero error code | |
1049 | call do_nmi | |
d211af05 | 1050 | RESTORE_REGS |
a49976d1 | 1051 | lss 12+4(%esp), %esp # back to espfix stack |
1b00255f | 1052 | jmp .Lirq_return |
34273f41 | 1053 | #endif |
d211af05 AH |
1054 | END(nmi) |
1055 | ||
1056 | ENTRY(int3) | |
e59d1b0a | 1057 | ASM_CLAC |
a49976d1 | 1058 | pushl $-1 # mark this as an int |
d211af05 | 1059 | SAVE_ALL |
946c1911 | 1060 | ENCODE_FRAME_POINTER |
d211af05 | 1061 | TRACE_IRQS_OFF |
a49976d1 IM |
1062 | xorl %edx, %edx # zero error code |
1063 | movl %esp, %eax # pt_regs pointer | |
1064 | call do_int3 | |
1065 | jmp ret_from_exception | |
d211af05 AH |
1066 | END(int3) |
1067 | ||
1068 | ENTRY(general_protection) | |
a49976d1 | 1069 | pushl $do_general_protection |
7252c4c3 | 1070 | jmp common_exception |
d211af05 AH |
1071 | END(general_protection) |
1072 | ||
631bc487 GN |
1073 | #ifdef CONFIG_KVM_GUEST |
1074 | ENTRY(async_page_fault) | |
e59d1b0a | 1075 | ASM_CLAC |
a49976d1 | 1076 | pushl $do_async_page_fault |
7252c4c3 | 1077 | jmp common_exception |
2ae9d293 | 1078 | END(async_page_fault) |
631bc487 | 1079 | #endif |
2deb4be2 AL |
1080 | |
1081 | ENTRY(rewind_stack_do_exit) | |
1082 | /* Prevent any naive code from trying to unwind to our caller. */ | |
1083 | xorl %ebp, %ebp | |
1084 | ||
1085 | movl PER_CPU_VAR(cpu_current_top_of_stack), %esi | |
1086 | leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp | |
1087 | ||
1088 | call do_exit | |
1089 | 1: jmp 1b | |
1090 | END(rewind_stack_do_exit) |