/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           | callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                   | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11            | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
   functions when it sees tail-call optimization possibilities) rflags is
   clobbered. Leftover arguments are passed over the stack frame.)

 [*] In the frame-pointers case rbp is fixed to the stack frame.

 [**] for struct return values wider than 64 bits the return convention is a
      bit more complex: up to 128 bits width we return small structures
      straight in rax, rdx. For structures larger than that (3 words or
      larger) the caller puts a pointer to an on-stack return struct
      [allocated in the caller's stack frame] into the first argument - i.e.
      into rdi. All other arguments shift up by one in this case.
      Fortunately this case is rare in the kernel.

 For 32-bit we have the following conventions - kernel is built with
 -mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed over the stack frame. )

 [*] In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means similar
      semantics as on 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits) - after that
      it gets more complex and more expensive: 3-word or larger struct returns
      get done in the caller's frame and the pointer to the return struct goes
      into regparm0, i.e. eax - the other arguments shift up and the
      function's register parameters degenerate to regparm=2 in essence.

 */


/*
 * 64-bit system call stack frame layout defines and helpers,
 * for assembly code:
 */

0c2bd5a5 IM |
55 | #define R15 0 |
56 | #define R14 8 | |
57 | #define R13 16 | |
58 | #define R12 24 | |
59 | #define RBP 32 | |
60 | #define RBX 40 | |
1da177e4 | 61 | |
063f8913 | 62 | /* arguments: interrupts/non tracing syscalls only save up to here: */ |
0c2bd5a5 IM |
63 | #define R11 48 |
64 | #define R10 56 | |
65 | #define R9 64 | |
66 | #define R8 72 | |
67 | #define RAX 80 | |
68 | #define RCX 88 | |
69 | #define RDX 96 | |
70 | #define RSI 104 | |
71 | #define RDI 112 | |
72 | #define ORIG_RAX 120 /* + error_code */ | |
73 | /* end of arguments */ | |
74 | ||
063f8913 | 75 | /* cpu exception frame or undefined in case of fast syscall: */ |
0c2bd5a5 IM |
76 | #define RIP 128 |
77 | #define CS 136 | |
78 | #define EFLAGS 144 | |
79 | #define RSP 152 | |
80 | #define SS 160 | |
81 | ||
82 | #define ARGOFFSET R11 | |
83 | #define SWFRAME ORIG_RAX | |
1da177e4 | 84 | |
0c2bd5a5 IM |
85 | .macro SAVE_ARGS addskip=0, norcx=0, nor891011=0 |
86 | subq $9*8+\addskip, %rsp | |
1da177e4 | 87 | CFI_ADJUST_CFA_OFFSET 9*8+\addskip |
0c2bd5a5 IM |
88 | movq %rdi, 8*8(%rsp) |
89 | CFI_REL_OFFSET rdi, 8*8 | |
90 | movq %rsi, 7*8(%rsp) | |
91 | CFI_REL_OFFSET rsi, 7*8 | |
92 | movq %rdx, 6*8(%rsp) | |
93 | CFI_REL_OFFSET rdx, 6*8 | |
1da177e4 LT |
94 | .if \norcx |
95 | .else | |
0c2bd5a5 IM |
96 | movq %rcx, 5*8(%rsp) |
97 | CFI_REL_OFFSET rcx, 5*8 | |
1da177e4 | 98 | .endif |
0c2bd5a5 IM |
99 | movq %rax, 4*8(%rsp) |
100 | CFI_REL_OFFSET rax, 4*8 | |
1da177e4 LT |
101 | .if \nor891011 |
102 | .else | |
0c2bd5a5 IM |
103 | movq %r8, 3*8(%rsp) |
104 | CFI_REL_OFFSET r8, 3*8 | |
105 | movq %r9, 2*8(%rsp) | |
106 | CFI_REL_OFFSET r9, 2*8 | |
107 | movq %r10, 1*8(%rsp) | |
108 | CFI_REL_OFFSET r10, 1*8 | |
109 | movq %r11, (%rsp) | |
110 | CFI_REL_OFFSET r11, 0*8 | |
1da177e4 LT |
111 | .endif |
112 | .endm | |
113 | ||
0c2bd5a5 IM |
114 | #define ARG_SKIP 9*8 |
115 | ||
116 | .macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \ | |
117 | skipr8910=0, skiprdx=0 | |
1da177e4 LT |
118 | .if \skipr11 |
119 | .else | |
0c2bd5a5 | 120 | movq (%rsp), %r11 |
7effaa88 | 121 | CFI_RESTORE r11 |
1da177e4 LT |
122 | .endif |
123 | .if \skipr8910 | |
124 | .else | |
0c2bd5a5 | 125 | movq 1*8(%rsp), %r10 |
7effaa88 | 126 | CFI_RESTORE r10 |
0c2bd5a5 | 127 | movq 2*8(%rsp), %r9 |
7effaa88 | 128 | CFI_RESTORE r9 |
0c2bd5a5 | 129 | movq 3*8(%rsp), %r8 |
7effaa88 | 130 | CFI_RESTORE r8 |
1da177e4 LT |
131 | .endif |
132 | .if \skiprax | |
133 | .else | |
0c2bd5a5 | 134 | movq 4*8(%rsp), %rax |
7effaa88 | 135 | CFI_RESTORE rax |
1da177e4 LT |
136 | .endif |
137 | .if \skiprcx | |
138 | .else | |
0c2bd5a5 | 139 | movq 5*8(%rsp), %rcx |
7effaa88 | 140 | CFI_RESTORE rcx |
1da177e4 LT |
141 | .endif |
142 | .if \skiprdx | |
143 | .else | |
0c2bd5a5 | 144 | movq 6*8(%rsp), %rdx |
7effaa88 | 145 | CFI_RESTORE rdx |
1da177e4 | 146 | .endif |
0c2bd5a5 | 147 | movq 7*8(%rsp), %rsi |
7effaa88 | 148 | CFI_RESTORE rsi |
0c2bd5a5 | 149 | movq 8*8(%rsp), %rdi |
7effaa88 | 150 | CFI_RESTORE rdi |
1da177e4 | 151 | .if ARG_SKIP+\addskip > 0 |
0c2bd5a5 | 152 | addq $ARG_SKIP+\addskip, %rsp |
1da177e4 LT |
153 | CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip) |
154 | .endif | |
0c2bd5a5 | 155 | .endm |
1da177e4 | 156 | |
d4d67150 | 157 | .macro LOAD_ARGS offset, skiprax=0 |
0c2bd5a5 IM |
158 | movq \offset(%rsp), %r11 |
159 | movq \offset+8(%rsp), %r10 | |
160 | movq \offset+16(%rsp), %r9 | |
161 | movq \offset+24(%rsp), %r8 | |
162 | movq \offset+40(%rsp), %rcx | |
163 | movq \offset+48(%rsp), %rdx | |
164 | movq \offset+56(%rsp), %rsi | |
165 | movq \offset+64(%rsp), %rdi | |
d4d67150 RM |
166 | .if \skiprax |
167 | .else | |
0c2bd5a5 | 168 | movq \offset+72(%rsp), %rax |
d4d67150 | 169 | .endif |
1da177e4 | 170 | .endm |
0c2bd5a5 IM |
171 | |
172 | #define REST_SKIP 6*8 | |
173 | ||
1da177e4 | 174 | .macro SAVE_REST |
0c2bd5a5 | 175 | subq $REST_SKIP, %rsp |
1da177e4 | 176 | CFI_ADJUST_CFA_OFFSET REST_SKIP |
0c2bd5a5 IM |
177 | movq %rbx, 5*8(%rsp) |
178 | CFI_REL_OFFSET rbx, 5*8 | |
179 | movq %rbp, 4*8(%rsp) | |
180 | CFI_REL_OFFSET rbp, 4*8 | |
181 | movq %r12, 3*8(%rsp) | |
182 | CFI_REL_OFFSET r12, 3*8 | |
183 | movq %r13, 2*8(%rsp) | |
184 | CFI_REL_OFFSET r13, 2*8 | |
185 | movq %r14, 1*8(%rsp) | |
186 | CFI_REL_OFFSET r14, 1*8 | |
187 | movq %r15, (%rsp) | |
188 | CFI_REL_OFFSET r15, 0*8 | |
189 | .endm | |
1da177e4 LT |
190 | |
191 | .macro RESTORE_REST | |
0c2bd5a5 | 192 | movq (%rsp), %r15 |
7effaa88 | 193 | CFI_RESTORE r15 |
0c2bd5a5 | 194 | movq 1*8(%rsp), %r14 |
7effaa88 | 195 | CFI_RESTORE r14 |
0c2bd5a5 | 196 | movq 2*8(%rsp), %r13 |
7effaa88 | 197 | CFI_RESTORE r13 |
0c2bd5a5 | 198 | movq 3*8(%rsp), %r12 |
7effaa88 | 199 | CFI_RESTORE r12 |
0c2bd5a5 | 200 | movq 4*8(%rsp), %rbp |
7effaa88 | 201 | CFI_RESTORE rbp |
0c2bd5a5 | 202 | movq 5*8(%rsp), %rbx |
7effaa88 | 203 | CFI_RESTORE rbx |
0c2bd5a5 | 204 | addq $REST_SKIP, %rsp |
1da177e4 LT |
205 | CFI_ADJUST_CFA_OFFSET -(REST_SKIP) |
206 | .endm | |
0c2bd5a5 | 207 | |
1da177e4 LT |
208 | .macro SAVE_ALL |
209 | SAVE_ARGS | |
210 | SAVE_REST | |
211 | .endm | |
0c2bd5a5 | 212 | |
1da177e4 LT |
213 | .macro RESTORE_ALL addskip=0 |
214 | RESTORE_REST | |
0c2bd5a5 | 215 | RESTORE_ARGS 0, \addskip |
1da177e4 LT |
216 | .endm |
217 | ||
218 | .macro icebp | |
219 | .byte 0xf1 | |
220 | .endm |