/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/jump_label.h>
#include <asm/unwind_hints.h>
#include <asm/cpufeatures.h>
#include <asm/page_types.h>
 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*] In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.
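/*
 * A minimal illustration of the above, with hypothetical type and function
 * names (not part of this header): a 16-byte aggregate comes back in
 * rax:rdx, while anything larger is written through a hidden pointer the
 * caller passes in rdi.
 *
 *	struct pair   { long a, b;    };	// 16 bytes: returned in rax:rdx
 *	struct triple { long a, b, c; };	// 24 bytes: returned via memory
 *
 *	struct pair   make_pair(long x);	// x in rdi, result in rax:rdx
 *	struct triple make_triple(long x);	// hidden &result in rdi, x in rsi
 */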
 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*] In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.
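/*
 * A minimal illustration of -mregparm=3, using a hypothetical helper
 * (building with -mregparm=3 has the same effect as marking every function
 * regparm(3)):
 *
 *	int __attribute__((regparm(3))) add3(int a, int b, int c);
 *
 * a arrives in eax, b in edx, c in ecx; a fourth argument would go on the
 * stack, and the result comes back in eax (eax:edx for 64-bit values, per
 * -freg-struct-return above).
 */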
 * 64-bit system call stack frame layout defines and helpers,

/* The layout forms the "struct pt_regs" on the stack: */

 * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
 * unless syscall needs a complete, fully filled "struct pt_regs".

/* These regs are callee-clobbered. Always saved on kernel entry. */

 * On syscall entry, this is syscall#. On CPU exception, this is error code.
 * On hw interrupt, it's IRQ number:

/* Return frame for iretq */

#define SIZEOF_PTREGS	21*8
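/*
 * The 21 slots are the 15 general-purpose registers, orig_ax and the 5-word
 * hardware iret frame (RIP, CS, RFLAGS, RSP, SS), i.e. 21*8 = 168 bytes.
 * A C-side sanity check could look like this sketch (assuming
 * <linux/build_bug.h> and <asm/ptrace.h> are available):
 *
 *	BUILD_BUG_ON(21*8 != sizeof(struct pt_regs));
 */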
.macro ALLOC_PT_GPREGS_ON_STACK

.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
	movq %r11, 6*8+\offset(%rsp)
	movq %r10, 7*8+\offset(%rsp)
	movq %r9,  8*8+\offset(%rsp)
	movq %r8,  9*8+\offset(%rsp)
	movq %rax, 10*8+\offset(%rsp)
	movq %rcx, 11*8+\offset(%rsp)
	movq %rdx, 12*8+\offset(%rsp)
	movq %rsi, 13*8+\offset(%rsp)
	movq %rdi, 14*8+\offset(%rsp)
	UNWIND_HINT_REGS offset=\offset extra=0

.macro SAVE_C_REGS offset=0
	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1

.macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
	SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1

.macro SAVE_C_REGS_EXCEPT_R891011
	SAVE_C_REGS_HELPER 0, 1, 1, 0, 0

.macro SAVE_C_REGS_EXCEPT_RCX_R891011
	SAVE_C_REGS_HELPER 0, 1, 0, 0, 0

.macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
	SAVE_C_REGS_HELPER 0, 0, 0, 1, 0

.macro SAVE_EXTRA_REGS offset=0
	movq %r15, 0*8+\offset(%rsp)
	movq %r14, 1*8+\offset(%rsp)
	movq %r13, 2*8+\offset(%rsp)
	movq %r12, 3*8+\offset(%rsp)
	movq %rbp, 4*8+\offset(%rsp)
	movq %rbx, 5*8+\offset(%rsp)
	UNWIND_HINT_REGS offset=\offset

.macro POP_EXTRA_REGS
 * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
 * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
 * is just setting the LSB, which makes it an invalid stack address and is also
 * a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
 * the original rbp.
.macro ENCODE_FRAME_POINTER ptregs_offset=0
#ifdef CONFIG_FRAME_POINTER
	leaq \ptregs_offset(%rsp), %rbp
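/*
 * For illustration, the consuming side of this encoding could look like the
 * following C sketch (hypothetical helper name; the real unwinder logic
 * lives elsewhere):
 *
 *	static struct pt_regs *decode_frame_pointer(unsigned long bp)
 *	{
 *		if (!(bp & 0x1))
 *			return NULL;			// ordinary frame pointer
 *		return (struct pt_regs *)(bp & ~0x1UL);	// strip the LSB signal
 *	}
 */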
#ifdef CONFIG_PAGE_TABLE_ISOLATION

/* PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two halves: */
#define PTI_SWITCH_MASK (1<<PAGE_SHIFT)
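/*
 * A worked example of the bit-12 flip (addresses are made up): if the 8k PGD
 * allocation starts at physical 0x12344000, the kernel half sits at
 * 0x12344000 (bit 12 clear) and the user half at 0x12345000 (bit 12 set).
 * In C terms:
 *
 *	kernel_cr3 = cr3 & ~PTI_SWITCH_MASK;	// what ADJUST_KERNEL_CR3 does
 *	user_cr3   = cr3 |  PTI_SWITCH_MASK;	// what ADJUST_USER_CR3 does
 */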
.macro ADJUST_KERNEL_CR3 reg:req
	/* Clear "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
	andq $(~PTI_SWITCH_MASK), \reg

.macro ADJUST_USER_CR3 reg:req
	/* Move CR3 up a page to the user page tables: */
	orq $(PTI_SWITCH_MASK), \reg
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	mov %cr3, \scratch_reg
	ADJUST_KERNEL_CR3 \scratch_reg
	mov \scratch_reg, %cr3

.macro SWITCH_TO_USER_CR3 scratch_reg:req
	mov %cr3, \scratch_reg
	ADJUST_USER_CR3 \scratch_reg
	mov \scratch_reg, %cr3

.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
	movq %cr3, \scratch_reg
	movq \scratch_reg, \save_reg
	 * Is the switch bit zero? If so, \scratch_reg already holds a kernel
	 * CR3 value and no page-table switch is needed:
	testq $(PTI_SWITCH_MASK), \scratch_reg

	ADJUST_KERNEL_CR3 \scratch_reg
	movq \scratch_reg, %cr3

.macro RESTORE_CR3 save_reg:req
	 * The CR3 write could be avoided when not changing its value,
	 * but would require a CR3 read *and* a scratch register.
#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */

.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
.macro SWITCH_TO_USER_CR3 scratch_reg:req
.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
.macro RESTORE_CR3 save_reg:req

#endif /* CONFIG_X86_64 */
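/*
 * A usage sketch for the CR3 save/restore pair above (register choices are
 * illustrative only): an entry path that must work regardless of which CR3
 * it interrupted would save and switch on entry, then restore on exit:
 *
 *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 *	...				// handle the event on kernel page tables
 *	RESTORE_CR3 save_reg=%r14
 */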
 * This does 'call enter_from_user_mode' unless we can avoid it based on
 * kernel config or using the static jump infrastructure.

.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef HAVE_JUMP_LABEL
	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0

	call enter_from_user_mode
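/*
 * For comparison, the C-side shape of this pattern is roughly the following
 * sketch (assuming a static key named context_tracking_enabled, as used
 * above): the call is skipped entirely while the key is false.
 *
 *	if (static_branch_unlikely(&context_tracking_enabled))
 *		enter_from_user_mode();
 */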