Commit | Line | Data |
---|---|---|
b886d83c | 1 | // SPDX-License-Identifier: GPL-2.0-only |
a2c7a983 | 2 | /* |
58ffa1b4 | 3 | * BPF JIT compiler |
0a14842f | 4 | * |
3b58908a | 5 | * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) |
58ffa1b4 | 6 | * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
0a14842f | 7 | */ |
0a14842f ED |
8 | #include <linux/netdevice.h> |
9 | #include <linux/filter.h> | |
855ddb56 | 10 | #include <linux/if_vlan.h> |
71d22d58 | 11 | #include <linux/bpf.h> |
5964b200 | 12 | #include <linux/memory.h> |
75ccbef6 | 13 | #include <linux/sort.h> |
3dec541b | 14 | #include <asm/extable.h> |
ee3e2469 | 15 | #include <asm/ftrace.h> |
d1163651 | 16 | #include <asm/set_memory.h> |
a493a87f | 17 | #include <asm/nospec-branch.h> |
5964b200 | 18 | #include <asm/text-patching.h> |
fd5d27b7 | 19 | #include <asm/unwind.h> |
4f9087f1 | 20 | #include <asm/cfi.h> |
0a14842f | 21 | |
f18b03fa KKD |
22 | static bool all_callee_regs_used[4] = {true, true, true, true}; |
23 | ||
5cccc702 | 24 | static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) |
0a14842f ED |
25 | { |
26 | if (len == 1) | |
27 | *ptr = bytes; | |
28 | else if (len == 2) | |
29 | *(u16 *)ptr = bytes; | |
30 | else { | |
31 | *(u32 *)ptr = bytes; | |
32 | barrier(); | |
33 | } | |
34 | return ptr + len; | |
35 | } | |
36 | ||
b52f00e6 | 37 | #define EMIT(bytes, len) \ |
ced50fc4 | 38 | do { prog = emit_code(prog, bytes, len); } while (0) |
0a14842f ED |
39 | |
40 | #define EMIT1(b1) EMIT(b1, 1) | |
41 | #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) | |
42 | #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) | |
43 | #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) | |
9f725eec DS |
44 | #define EMIT5(b1, b2, b3, b4, b5) \ |
45 | do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0) | |
a2c7a983 | 46 | |
62258278 | 47 | #define EMIT1_off32(b1, off) \ |
a2c7a983 | 48 | do { EMIT1(b1); EMIT(off, 4); } while (0) |
62258278 | 49 | #define EMIT2_off32(b1, b2, off) \ |
a2c7a983 | 50 | do { EMIT2(b1, b2); EMIT(off, 4); } while (0) |
62258278 | 51 | #define EMIT3_off32(b1, b2, b3, off) \ |
a2c7a983 | 52 | do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) |
62258278 | 53 | #define EMIT4_off32(b1, b2, b3, b4, off) \ |
a2c7a983 | 54 | do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) |
0a14842f | 55 | |
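The EMIT*() helpers pack up to four opcode bytes into a single little-endian u32 and let emit_code() copy them into the instruction stream, so `EMIT3(0x48, 0x89, 0xE5)` lands in memory as `48 89 e5` (`mov rbp, rsp`, used later in the prologue). A minimal standalone sketch of the same packing (hypothetical demo code, not part of the kernel):

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Portable stand-in for emit_code(): the JIT uses direct u16/u32 stores,
 * which on x86 (little-endian, unaligned stores allowed) is equivalent. */
static uint8_t *emit_code(uint8_t *ptr, uint32_t bytes, unsigned int len)
{
	memcpy(ptr, &bytes, len);
	return ptr + len;
}

int main(void)
{
	uint8_t buf[8];

	/* EMIT3(0x48, 0x89, 0xE5): bytes are packed low-to-high. */
	emit_code(buf, 0x48 + (0x89 << 8) + (0xE5 << 16), 3);
	printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);	/* 48 89 e5 */
	return 0;
}
```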
58912710 | 56 | #ifdef CONFIG_X86_KERNEL_IBT |
4f9087f1 PZ |
57 | #define EMIT_ENDBR() EMIT(gen_endbr(), 4) |
58 | #define EMIT_ENDBR_POISON() EMIT(gen_endbr_poison(), 4) | |
58912710 PZ |
59 | #else |
60 | #define EMIT_ENDBR() | |
4f9087f1 | 61 | #define EMIT_ENDBR_POISON() |
58912710 PZ |
62 | #endif |
63 | ||
5cccc702 | 64 | static bool is_imm8(int value) |
0a14842f ED |
65 | { |
66 | return value <= 127 && value >= -128; | |
67 | } | |
68 | ||
c8831bdb YS |
69 | /* |
70 | * Let us limit the positive offset to be <= 123. | |
71 | * This is to ensure eventual jit convergence for the following patterns: | |
72 | * ... | |
73 | * pass4, final_proglen=4391: | |
74 | * ... | |
75 | * 20e: 48 85 ff test rdi,rdi | |
76 | * 211: 74 7d je 0x290 | |
77 | * 213: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0] | |
78 | * ... | |
79 | * 289: 48 85 ff test rdi,rdi | |
80 | * 28c: 74 17 je 0x2a5 | |
81 | * 28e: e9 7f ff ff ff jmp 0x212 | |
82 | * 293: bf 03 00 00 00 mov edi,0x3 | |
83 | * Note that insn at 0x211 is 2-byte cond jump insn for offset 0x7d (125) | |
84 | * and insn at 0x28e is 5-byte jmp insn with offset -129. | |
85 | * | |
86 | * pass5, final_proglen=4392: | |
87 | * ... | |
88 | * 20e: 48 85 ff test rdi,rdi | |
89 | * 211: 0f 84 80 00 00 00 je 0x297 | |
90 | * 217: 48 8b 77 00 mov rsi,QWORD PTR [rdi+0x0] | |
91 | * ... | |
92 | * 28d: 48 85 ff test rdi,rdi | |
93 | * 290: 74 1a je 0x2ac | |
94 | * 292: eb 84 jmp 0x218 | |
95 | * 294: bf 03 00 00 00 mov edi,0x3 | |
96 | * Note that insn at 0x211 is 6-byte cond jump insn now since its offset | |
97 | * becomes 0x80 based on previous round (0x293 - 0x213 = 0x80). | |
98 | * At the same time, insn at 0x292 is a 2-byte insn since its offset is | |
99 | * -124. | |
100 | * | |
101 | * pass6 will repeat the same code as in pass4 and this will prevent | |
102 | * eventual convergence. | |
103 | * | |
104 | * To fix this issue, we need to break the je (2->6 bytes) <-> jmp (5->2 bytes) | |
105 | * cycle above. In the above example, a je offset <= 0x7c should work. | |
106 | * | |
107 | * For other cases, je <-> je needs offset <= 0x7b to avoid a non-convergence | |
108 | * issue. For jmp <-> je and jmp <-> jmp cases, jmp offset <= 0x7c should | |
109 | * avoid the non-convergence issue. | |
110 | * | |
111 | * Overall, let us limit the positive offset for an 8-bit cond/uncond jmp insn | |
112 | * to a maximum of 123 (0x7b). This way, the jit pass can eventually converge. | |
113 | */ | |
114 | static bool is_imm8_jmp_offset(int value) | |
115 | { | |
116 | return value <= 123 && value >= -128; | |
117 | } | |
118 | ||
5cccc702 | 119 | static bool is_simm32(s64 value) |
0a14842f | 120 | { |
6fe8b9c1 DB |
121 | return value == (s64)(s32)value; |
122 | } | |
123 | ||
124 | static bool is_uimm32(u64 value) | |
125 | { | |
126 | return value == (u64)(u32)value; | |
0a14842f ED |
127 | } |
128 | ||
e430f34e | 129 | /* mov dst, src */ |
a2c7a983 IM |
130 | #define EMIT_mov(DST, SRC) \ |
131 | do { \ | |
132 | if (DST != SRC) \ | |
133 | EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ | |
62258278 AS |
134 | } while (0) |
135 | ||
136 | static int bpf_size_to_x86_bytes(int bpf_size) | |
137 | { | |
138 | if (bpf_size == BPF_W) | |
139 | return 4; | |
140 | else if (bpf_size == BPF_H) | |
141 | return 2; | |
142 | else if (bpf_size == BPF_B) | |
143 | return 1; | |
144 | else if (bpf_size == BPF_DW) | |
145 | return 4; /* imm32 */ | |
146 | else | |
147 | return 0; | |
148 | } | |
0a14842f | 149 | |
a2c7a983 IM |
150 | /* |
151 | * List of x86 cond jumps opcodes (. + s8) | |
0a14842f ED |
152 | * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32) |
153 | */ | |
154 | #define X86_JB 0x72 | |
155 | #define X86_JAE 0x73 | |
156 | #define X86_JE 0x74 | |
157 | #define X86_JNE 0x75 | |
158 | #define X86_JBE 0x76 | |
159 | #define X86_JA 0x77 | |
52afc51e | 160 | #define X86_JL 0x7C |
62258278 | 161 | #define X86_JGE 0x7D |
52afc51e | 162 | #define X86_JLE 0x7E |
62258278 | 163 | #define X86_JG 0x7F |
0a14842f | 164 | |
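As the comment above notes, the rel32 ("far") form of each conditional jump is obtained by prefixing 0x0F and adding 0x10 to the opcode. A sketch of the two encodings of the same branch:

```c
/*
 * Short form:  74 10                je rel8   (X86_JE, +0x10 past the insn)
 * Far form:    0f 84 10 00 00 00    je rel32  (0x0f escape, X86_JE + 0x10)
 *
 * The JIT uses the short form only when the displacement passes
 * is_imm8_jmp_offset() above; otherwise it falls back to the far form.
 */
```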
a2c7a983 | 165 | /* Pick a register outside of BPF range for JIT internal work */ |
959a7579 | 166 | #define AUX_REG (MAX_BPF_JIT_REG + 1) |
fec56f58 | 167 | #define X86_REG_R9 (MAX_BPF_JIT_REG + 2) |
2fe99eb0 | 168 | #define X86_REG_R12 (MAX_BPF_JIT_REG + 3) |
62258278 | 169 | |
a2c7a983 IM |
170 | /* |
171 | * The following table maps BPF registers to x86-64 registers. | |
959a7579 | 172 | * |
a2c7a983 | 173 | * x86-64 register R12 is not mapped to a BPF register, since if used as a |
959a7579 DB |
174 | * base address register in load/store instructions it always needs an extra |
175 | * byte of encoding and is callee saved; the JIT reserves it (X86_REG_R12). |
176 | * | |
fec56f58 AS |
177 | * x86-64 register R9 is not used by BPF programs, but can be used by BPF |
178 | * trampoline. x86-64 register R10 is used for blinding (if enabled). | |
62258278 AS |
179 | */ |
180 | static const int reg2hex[] = { | |
a2c7a983 IM |
181 | [BPF_REG_0] = 0, /* RAX */ |
182 | [BPF_REG_1] = 7, /* RDI */ | |
183 | [BPF_REG_2] = 6, /* RSI */ | |
184 | [BPF_REG_3] = 2, /* RDX */ | |
185 | [BPF_REG_4] = 1, /* RCX */ | |
186 | [BPF_REG_5] = 0, /* R8 */ | |
187 | [BPF_REG_6] = 3, /* RBX callee saved */ | |
188 | [BPF_REG_7] = 5, /* R13 callee saved */ | |
189 | [BPF_REG_8] = 6, /* R14 callee saved */ | |
190 | [BPF_REG_9] = 7, /* R15 callee saved */ | |
191 | [BPF_REG_FP] = 5, /* RBP readonly */ | |
192 | [BPF_REG_AX] = 2, /* R10 temp register */ | |
193 | [AUX_REG] = 3, /* R11 temp register */ | |
fec56f58 | 194 | [X86_REG_R9] = 1, /* R9 register, 6th function argument */ |
2fe99eb0 | 195 | [X86_REG_R12] = 4, /* R12 callee saved */ |
62258278 AS |
196 | }; |
197 | ||
3dec541b AS |
198 | static const int reg2pt_regs[] = { |
199 | [BPF_REG_0] = offsetof(struct pt_regs, ax), | |
200 | [BPF_REG_1] = offsetof(struct pt_regs, di), | |
201 | [BPF_REG_2] = offsetof(struct pt_regs, si), | |
202 | [BPF_REG_3] = offsetof(struct pt_regs, dx), | |
203 | [BPF_REG_4] = offsetof(struct pt_regs, cx), | |
204 | [BPF_REG_5] = offsetof(struct pt_regs, r8), | |
205 | [BPF_REG_6] = offsetof(struct pt_regs, bx), | |
206 | [BPF_REG_7] = offsetof(struct pt_regs, r13), | |
207 | [BPF_REG_8] = offsetof(struct pt_regs, r14), | |
208 | [BPF_REG_9] = offsetof(struct pt_regs, r15), | |
209 | }; | |
210 | ||
a2c7a983 IM |
211 | /* |
212 | * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15 | |
62258278 AS |
213 | * which need an extra byte of encoding. |
214 | * rax,rcx,...,rbp have a simpler encoding |
215 | */ | |
5cccc702 | 216 | static bool is_ereg(u32 reg) |
62258278 | 217 | { |
d148134b JP |
218 | return (1 << reg) & (BIT(BPF_REG_5) | |
219 | BIT(AUX_REG) | | |
220 | BIT(BPF_REG_7) | | |
221 | BIT(BPF_REG_8) | | |
959a7579 | 222 | BIT(BPF_REG_9) | |
fec56f58 | 223 | BIT(X86_REG_R9) | |
2fe99eb0 | 224 | BIT(X86_REG_R12) | |
959a7579 | 225 | BIT(BPF_REG_AX)); |
62258278 AS |
226 | } |
227 | ||
aee194b1 LN |
228 | /* |
229 | * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64 | |
230 | * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte | |
231 | * of encoding. al,cl,dl,bl have simpler encoding. | |
232 | */ | |
233 | static bool is_ereg_8l(u32 reg) | |
234 | { | |
235 | return is_ereg(reg) || | |
236 | (1 << reg) & (BIT(BPF_REG_1) | | |
237 | BIT(BPF_REG_2) | | |
238 | BIT(BPF_REG_FP)); | |
239 | } | |
240 | ||
de0a444d DB |
241 | static bool is_axreg(u32 reg) |
242 | { | |
243 | return reg == BPF_REG_0; | |
244 | } | |
245 | ||
a2c7a983 | 246 | /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */ |
5cccc702 | 247 | static u8 add_1mod(u8 byte, u32 reg) |
62258278 AS |
248 | { |
249 | if (is_ereg(reg)) | |
250 | byte |= 1; | |
251 | return byte; | |
252 | } | |
253 | ||
5cccc702 | 254 | static u8 add_2mod(u8 byte, u32 r1, u32 r2) |
62258278 AS |
255 | { |
256 | if (is_ereg(r1)) | |
257 | byte |= 1; | |
258 | if (is_ereg(r2)) | |
259 | byte |= 4; | |
260 | return byte; | |
261 | } | |
262 | ||
2fe99eb0 AS |
263 | static u8 add_3mod(u8 byte, u32 r1, u32 r2, u32 index) |
264 | { | |
265 | if (is_ereg(r1)) | |
266 | byte |= 1; | |
267 | if (is_ereg(index)) | |
268 | byte |= 2; | |
269 | if (is_ereg(r2)) | |
270 | byte |= 4; | |
271 | return byte; | |
272 | } | |
273 | ||
a2c7a983 | 274 | /* Encode 'dst_reg' register into x86-64 opcode 'byte' */ |
5cccc702 | 275 | static u8 add_1reg(u8 byte, u32 dst_reg) |
62258278 | 276 | { |
e430f34e | 277 | return byte + reg2hex[dst_reg]; |
62258278 AS |
278 | } |
279 | ||
a2c7a983 | 280 | /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */ |
5cccc702 | 281 | static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) |
62258278 | 282 | { |
e430f34e | 283 | return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); |
62258278 AS |
284 | } |
285 | ||
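Putting reg2hex, add_2mod() and add_2reg() together, here is a worked example (a sketch, using only values defined above) of what EMIT_mov(AUX_REG, BPF_REG_1) produces, i.e. `mov r11, rdi`:

```c
#include <stdio.h>

/* Hypothetical standalone reconstruction of the arithmetic above:
 * AUX_REG maps to r11 (reg2hex = 3, an "ereg"), BPF_REG_1 maps to rdi
 * (reg2hex = 7, not an "ereg"). */
int main(void)
{
	unsigned char rex   = 0x48 | 1;			/* add_2mod(0x48, AUX_REG, BPF_REG_1) */
	unsigned char modrm = 0xC0 + 3 + (7 << 3);	/* add_2reg(0xC0, AUX_REG, BPF_REG_1) */

	/* EMIT_mov(AUX_REG, BPF_REG_1) emits: 49 89 fb   mov r11, rdi */
	printf("%02x 89 %02x\n", rex, modrm);
	return 0;
}
```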
e5f02cac BJ |
286 | /* Some 1-byte opcodes for binary ALU operations */ |
287 | static u8 simple_alu_opcodes[] = { | |
288 | [BPF_ADD] = 0x01, | |
289 | [BPF_SUB] = 0x29, | |
290 | [BPF_AND] = 0x21, | |
291 | [BPF_OR] = 0x09, | |
292 | [BPF_XOR] = 0x31, | |
293 | [BPF_LSH] = 0xE0, | |
294 | [BPF_RSH] = 0xE8, | |
295 | [BPF_ARSH] = 0xF8, | |
296 | }; | |
297 | ||
738cbe72 DB |
298 | static void jit_fill_hole(void *area, unsigned int size) |
299 | { | |
a2c7a983 | 300 | /* Fill whole space with INT3 instructions */ |
738cbe72 DB |
301 | memset(area, 0xcc, size); |
302 | } | |
303 | ||
fe736565 SL |
304 | int bpf_arch_text_invalidate(void *dst, size_t len) |
305 | { | |
306 | return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len)); | |
307 | } | |
308 | ||
f3c2af7b | 309 | struct jit_context { |
a2c7a983 | 310 | int cleanup_addr; /* Epilogue code offset */ |
dceba081 PZ |
311 | |
312 | /* | |
313 | * Program specific offsets of labels in the code; these rely on the | |
314 | * JIT doing at least 2 passes, recording the position on the first | |
315 | * pass, only to generate the correct offset on the second pass. | |
316 | */ | |
317 | int tail_call_direct_label; | |
318 | int tail_call_indirect_label; | |
f3c2af7b AS |
319 | }; |
320 | ||
a2c7a983 | 321 | /* Maximum number of bytes emitted while JITing one eBPF insn */ |
e0ee9c12 AS |
322 | #define BPF_MAX_INSN_SIZE 128 |
323 | #define BPF_INSN_SAFETY 64 | |
4b3da77b DB |
324 | |
325 | /* Number of bytes emit_patch() needs to generate instructions */ | |
326 | #define X86_PATCH_SIZE 5 | |
ebf7d1f5 | 327 | /* Number of bytes that will be skipped on tailcall */ |
116e04ba | 328 | #define X86_TAIL_CALL_OFFSET (12 + ENDBR_INSN_SIZE) |
e0ee9c12 | 329 | |
7d1cd70d YS |
330 | static void push_r9(u8 **pprog) |
331 | { | |
332 | u8 *prog = *pprog; | |
333 | ||
334 | EMIT2(0x41, 0x51); /* push r9 */ | |
335 | *pprog = prog; | |
336 | } | |
337 | ||
338 | static void pop_r9(u8 **pprog) | |
339 | { | |
340 | u8 *prog = *pprog; | |
341 | ||
342 | EMIT2(0x41, 0x59); /* pop r9 */ | |
343 | *pprog = prog; | |
344 | } | |
345 | ||
f18b03fa KKD |
346 | static void push_r12(u8 **pprog) |
347 | { | |
348 | u8 *prog = *pprog; | |
349 | ||
350 | EMIT2(0x41, 0x54); /* push r12 */ | |
351 | *pprog = prog; | |
352 | } | |
353 | ||
ebf7d1f5 MF |
354 | static void push_callee_regs(u8 **pprog, bool *callee_regs_used) |
355 | { | |
356 | u8 *prog = *pprog; | |
ebf7d1f5 MF |
357 | |
358 | if (callee_regs_used[0]) | |
359 | EMIT1(0x53); /* push rbx */ | |
360 | if (callee_regs_used[1]) | |
361 | EMIT2(0x41, 0x55); /* push r13 */ | |
362 | if (callee_regs_used[2]) | |
363 | EMIT2(0x41, 0x56); /* push r14 */ | |
364 | if (callee_regs_used[3]) | |
365 | EMIT2(0x41, 0x57); /* push r15 */ | |
366 | *pprog = prog; | |
367 | } | |
368 | ||
f18b03fa KKD |
369 | static void pop_r12(u8 **pprog) |
370 | { | |
371 | u8 *prog = *pprog; | |
372 | ||
373 | EMIT2(0x41, 0x5C); /* pop r12 */ | |
374 | *pprog = prog; | |
375 | } | |
376 | ||
ebf7d1f5 MF |
377 | static void pop_callee_regs(u8 **pprog, bool *callee_regs_used) |
378 | { | |
379 | u8 *prog = *pprog; | |
ebf7d1f5 MF |
380 | |
381 | if (callee_regs_used[3]) | |
382 | EMIT2(0x41, 0x5F); /* pop r15 */ | |
383 | if (callee_regs_used[2]) | |
384 | EMIT2(0x41, 0x5E); /* pop r14 */ | |
385 | if (callee_regs_used[1]) | |
386 | EMIT2(0x41, 0x5D); /* pop r13 */ | |
387 | if (callee_regs_used[0]) | |
388 | EMIT1(0x5B); /* pop rbx */ | |
389 | *pprog = prog; | |
390 | } | |
b52f00e6 | 391 | |
00bc8988 LH |
392 | static void emit_nops(u8 **pprog, int len) |
393 | { | |
394 | u8 *prog = *pprog; | |
395 | int i, noplen; | |
396 | ||
397 | while (len > 0) { | |
398 | noplen = len; | |
399 | ||
400 | if (noplen > ASM_NOP_MAX) | |
401 | noplen = ASM_NOP_MAX; | |
402 | ||
403 | for (i = 0; i < noplen; i++) | |
404 | EMIT1(x86_nops[noplen][i]); | |
405 | len -= noplen; | |
406 | } | |
407 | ||
408 | *pprog = prog; | |
409 | } | |
410 | ||
4f9087f1 PZ |
411 | /* |
412 | * Emit the various CFI preambles, see asm/cfi.h and the comments about FineIBT | |
413 | * in arch/x86/kernel/alternative.c | |
414 | */ | |
0c92385d | 415 | static int emit_call(u8 **prog, void *func, void *ip); |
4f9087f1 | 416 | |
0c92385d | 417 | static void emit_fineibt(u8 **pprog, u8 *ip, u32 hash, int arity) |
4f9087f1 PZ |
418 | { |
419 | u8 *prog = *pprog; | |
420 | ||
421 | EMIT_ENDBR(); | |
e72d88d1 | 422 | EMIT3_off32(0x41, 0x81, 0xea, hash); /* subl $hash, %r10d */ |
0c92385d PZ |
423 | if (cfi_bhi) { |
424 | emit_call(&prog, __bhi_args[arity], ip + 11); | |
425 | } else { | |
426 | EMIT2(0x75, 0xf9); /* jne.d8 .-7 */ | |
427 | EMIT3(0x0f, 0x1f, 0x00); /* nop3 */ | |
428 | } | |
4f9087f1 PZ |
429 | EMIT_ENDBR_POISON(); |
430 | ||
431 | *pprog = prog; | |
432 | } | |
433 | ||
2cd3e377 | 434 | static void emit_kcfi(u8 **pprog, u32 hash) |
4f9087f1 PZ |
435 | { |
436 | u8 *prog = *pprog; | |
437 | ||
e72d88d1 | 438 | EMIT1_off32(0xb8, hash); /* movl $hash, %eax */ |
4f9087f1 PZ |
439 | #ifdef CONFIG_CALL_PADDING |
440 | EMIT1(0x90); | |
441 | EMIT1(0x90); | |
442 | EMIT1(0x90); | |
443 | EMIT1(0x90); | |
444 | EMIT1(0x90); | |
445 | EMIT1(0x90); | |
446 | EMIT1(0x90); | |
447 | EMIT1(0x90); | |
448 | EMIT1(0x90); | |
449 | EMIT1(0x90); | |
450 | EMIT1(0x90); | |
451 | #endif | |
452 | EMIT_ENDBR(); | |
453 | ||
454 | *pprog = prog; | |
455 | } | |
456 | ||
0c92385d | 457 | static void emit_cfi(u8 **pprog, u8 *ip, u32 hash, int arity) |
4f9087f1 PZ |
458 | { |
459 | u8 *prog = *pprog; | |
460 | ||
461 | switch (cfi_mode) { | |
462 | case CFI_FINEIBT: | |
0c92385d | 463 | emit_fineibt(&prog, ip, hash, arity); |
4f9087f1 PZ |
464 | break; |
465 | ||
466 | case CFI_KCFI: | |
2cd3e377 | 467 | emit_kcfi(&prog, hash); |
4f9087f1 PZ |
468 | break; |
469 | ||
470 | default: | |
471 | EMIT_ENDBR(); | |
472 | break; | |
473 | } | |
474 | ||
475 | *pprog = prog; | |
476 | } | |
477 | ||
116e04ba LH |
478 | static void emit_prologue_tail_call(u8 **pprog, bool is_subprog) |
479 | { | |
480 | u8 *prog = *pprog; | |
481 | ||
482 | if (!is_subprog) { | |
483 | /* cmp rax, MAX_TAIL_CALL_CNT */ | |
484 | EMIT4(0x48, 0x83, 0xF8, MAX_TAIL_CALL_CNT); | |
485 | EMIT2(X86_JA, 6); /* ja 6 */ | |
486 | /* rax is tail_call_cnt if <= MAX_TAIL_CALL_CNT. | |
487 | * case1: entry of main prog. | |
488 | * case2: tail callee of main prog. | |
489 | */ | |
490 | EMIT1(0x50); /* push rax */ | |
491 | /* Make rax as tail_call_cnt_ptr. */ | |
492 | EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */ | |
493 | EMIT2(0xEB, 1); /* jmp 1 */ | |
494 | /* rax is tail_call_cnt_ptr if > MAX_TAIL_CALL_CNT. | |
495 | * case: tail callee of subprog. | |
496 | */ | |
497 | EMIT1(0x50); /* push rax */ | |
498 | /* push tail_call_cnt_ptr */ | |
499 | EMIT1(0x50); /* push rax */ | |
500 | } else { /* is_subprog */ | |
501 | /* rax is tail_call_cnt_ptr. */ | |
502 | EMIT1(0x50); /* push rax */ | |
503 | EMIT1(0x50); /* push rax */ | |
504 | } | |
505 | ||
506 | *pprog = prog; | |
507 | } | |
508 | ||
a2c7a983 | 509 | /* |
ebf7d1f5 MF |
510 | * Emit x86-64 prologue code for BPF program. |
511 | * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes | |
512 | * while jumping to another program | |
b52f00e6 | 513 | */ |
0c92385d | 514 | static void emit_prologue(u8 **pprog, u8 *ip, u32 stack_depth, bool ebpf_from_cbpf, |
f18b03fa KKD |
515 | bool tail_call_reachable, bool is_subprog, |
516 | bool is_exception_cb) | |
0a14842f | 517 | { |
b52f00e6 | 518 | u8 *prog = *pprog; |
0a14842f | 519 | |
0c92385d PZ |
520 | if (is_subprog) { |
521 | emit_cfi(&prog, ip, cfi_bpf_subprog_hash, 5); | |
522 | } else { | |
523 | emit_cfi(&prog, ip, cfi_bpf_hash, 1); | |
524 | } | |
9fd4a39d AS |
525 | /* BPF trampoline can be made to work without these nops, |
526 | * but let's waste 5 bytes for now and optimize later | |
527 | */ | |
00bc8988 | 528 | emit_nops(&prog, X86_PATCH_SIZE); |
ebf7d1f5 MF |
529 | if (!ebpf_from_cbpf) { |
530 | if (tail_call_reachable && !is_subprog) | |
2bee9770 LH |
531 | /* When it's the entry of the whole tailcall context, |
532 | * zeroing rax means initialising tail_call_cnt. | |
533 | */ | |
116e04ba | 534 | EMIT3(0x48, 0x31, 0xC0); /* xor rax, rax */ |
ebf7d1f5 | 535 | else |
2bee9770 | 536 | /* Keep the same instruction layout. */ |
116e04ba | 537 | emit_nops(&prog, 3); /* nop3 */ |
ebf7d1f5 | 538 | } |
f18b03fa KKD |
539 | /* Exception callback receives FP as third parameter */ |
540 | if (is_exception_cb) { | |
541 | EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */ | |
542 | EMIT3(0x48, 0x89, 0xD5); /* mov rbp, rdx */ | |
543 | /* The main frame must have exception_boundary as true, so we | |
544 | * first restore those callee-saved regs from stack, before | |
545 | * reusing the stack frame. | |
546 | */ | |
547 | pop_callee_regs(&prog, all_callee_regs_used); | |
548 | pop_r12(&prog); | |
549 | /* Reset the stack frame. */ | |
550 | EMIT3(0x48, 0x89, 0xEC); /* mov rsp, rbp */ | |
551 | } else { | |
552 | EMIT1(0x55); /* push rbp */ | |
553 | EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ | |
554 | } | |
58912710 PZ |
555 | |
556 | /* X86_TAIL_CALL_OFFSET is here */ | |
557 | EMIT_ENDBR(); | |
558 | ||
fe8d9571 | 559 | /* sub rsp, rounded_stack_depth */ |
4d0b8c0b MF |
560 | if (stack_depth) |
561 | EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8)); | |
ebf7d1f5 | 562 | if (tail_call_reachable) |
116e04ba | 563 | emit_prologue_tail_call(&prog, is_subprog); |
b52f00e6 AS |
564 | *pprog = prog; |
565 | } | |
566 | ||
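For orientation, here is a sketch of the emitted prologue for a main (non-subprog, non-exception-cb) program with stack_depth = 0x20 that can reach a tail call. Exact bytes depend on the CFI mode, IBT and other config options, so treat this as illustrative only:

```c
/*
 *   <CFI preamble / ENDBR>          emit_cfi()
 *   nop5                            patchable hook for BPF trampolines
 *   xor   rax, rax                  tail_call_cnt = 0 (main prog only)
 *   push  rbp
 *   mov   rbp, rsp
 *   endbr64                         tail calls land here (X86_TAIL_CALL_OFFSET)
 *   sub   rsp, 0x20                 rounded stack_depth
 *   cmp   rax, MAX_TAIL_CALL_CNT    emit_prologue_tail_call()
 *   ja    1f
 *   push  rax                       [rbp-0x28] = tail_call_cnt
 *   mov   rax, rsp                  rax = tail_call_cnt_ptr
 *   jmp   2f
 * 1:push  rax                       keep the same stack layout for subprog callees
 * 2:push  rax                       [rbp-0x30] = tail_call_cnt_ptr
 *   push  rbx/r13/r14/r15           used callee-saved regs (emitted by do_jit())
 */
```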
428d5df1 DB |
567 | static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode) |
568 | { | |
569 | u8 *prog = *pprog; | |
428d5df1 DB |
570 | s64 offset; |
571 | ||
572 | offset = func - (ip + X86_PATCH_SIZE); | |
573 | if (!is_simm32(offset)) { | |
574 | pr_err("Target call %p is out of range\n", func); | |
575 | return -ERANGE; | |
576 | } | |
577 | EMIT1_off32(opcode, offset); | |
578 | *pprog = prog; | |
579 | return 0; | |
580 | } | |
581 | ||
582 | static int emit_call(u8 **pprog, void *func, void *ip) | |
583 | { | |
584 | return emit_patch(pprog, func, ip, 0xE8); | |
585 | } | |
586 | ||
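emit_patch() encodes the target as a rel32 displacement measured from the end of the 5-byte call/jmp, which is why targets farther than ±2 GiB fail the is_simm32() check. A quick worked example with made-up addresses:

```c
/*
 * ip   = 0xffffffffc0001000   (where the call will be placed)
 * func = 0xffffffffc0003000   (target)
 * offset = func - (ip + X86_PATCH_SIZE) = 0x2000 - 5 = 0x1ffb
 * emit_call() therefore writes: e8 fb 1f 00 00
 */
```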
b2e9dfe5 TG |
587 | static int emit_rsb_call(u8 **pprog, void *func, void *ip) |
588 | { | |
589 | OPTIMIZER_HIDE_VAR(func); | |
6a537453 | 590 | ip += x86_call_depth_emit_accounting(pprog, func, ip); |
b2e9dfe5 TG |
591 | return emit_patch(pprog, func, ip, 0xE8); |
592 | } | |
593 | ||
428d5df1 DB |
594 | static int emit_jump(u8 **pprog, void *func, void *ip) |
595 | { | |
596 | return emit_patch(pprog, func, ip, 0xE9); | |
597 | } | |
598 | ||
599 | static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, | |
1022a549 | 600 | void *old_addr, void *new_addr) |
428d5df1 | 601 | { |
a89dfde3 | 602 | const u8 *nop_insn = x86_nops[5]; |
b553a6ec DB |
603 | u8 old_insn[X86_PATCH_SIZE]; |
604 | u8 new_insn[X86_PATCH_SIZE]; | |
428d5df1 DB |
605 | u8 *prog; |
606 | int ret; | |
607 | ||
b553a6ec DB |
608 | memcpy(old_insn, nop_insn, X86_PATCH_SIZE); |
609 | if (old_addr) { | |
610 | prog = old_insn; | |
611 | ret = t == BPF_MOD_CALL ? | |
612 | emit_call(&prog, old_addr, ip) : | |
613 | emit_jump(&prog, old_addr, ip); | |
614 | if (ret) | |
615 | return ret; | |
428d5df1 DB |
616 | } |
617 | ||
b553a6ec DB |
618 | memcpy(new_insn, nop_insn, X86_PATCH_SIZE); |
619 | if (new_addr) { | |
620 | prog = new_insn; | |
621 | ret = t == BPF_MOD_CALL ? | |
622 | emit_call(&prog, new_addr, ip) : | |
623 | emit_jump(&prog, new_addr, ip); | |
624 | if (ret) | |
625 | return ret; | |
428d5df1 DB |
626 | } |
627 | ||
628 | ret = -EBUSY; | |
629 | mutex_lock(&text_mutex); | |
630 | if (memcmp(ip, old_insn, X86_PATCH_SIZE)) | |
631 | goto out; | |
ebf7d1f5 | 632 | ret = 1; |
b553a6ec | 633 | if (memcmp(ip, new_insn, X86_PATCH_SIZE)) { |
9586ae48 | 634 | smp_text_poke_single(ip, new_insn, X86_PATCH_SIZE, NULL); |
ebf7d1f5 | 635 | ret = 0; |
b553a6ec | 636 | } |
428d5df1 DB |
637 | out: |
638 | mutex_unlock(&text_mutex); | |
639 | return ret; | |
640 | } | |
641 | ||
642 | int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, | |
643 | void *old_addr, void *new_addr) | |
644 | { | |
645 | if (!is_kernel_text((long)ip) && | |
646 | !is_bpf_text_address((long)ip)) | |
647 | /* BPF poking in modules is not supported */ | |
648 | return -EINVAL; | |
649 | ||
58912710 PZ |
650 | /* |
651 | * See emit_prologue(); for IBT builds the trampoline hook is preceded |
652 | * with an ENDBR instruction. | |
653 | */ | |
72e213a7 | 654 | if (is_endbr(ip)) |
58912710 PZ |
655 | ip += ENDBR_INSN_SIZE; |
656 | ||
1022a549 | 657 | return __bpf_arch_text_poke(ip, t, old_addr, new_addr); |
428d5df1 DB |
658 | } |
659 | ||
87c87ecd PZ |
660 | #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8) |
661 | ||
662 | static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) | |
663 | { | |
664 | u8 *prog = *pprog; | |
665 | ||
8754e67a PG |
666 | if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) { |
667 | OPTIMIZER_HIDE_VAR(reg); | |
e52c1dc7 | 668 | emit_jump(&prog, its_static_thunk(reg), ip); |
8754e67a | 669 | } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { |
87c87ecd PZ |
670 | EMIT_LFENCE(); |
671 | EMIT2(0xFF, 0xE0 + reg); | |
672 | } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { | |
be8a0965 | 673 | OPTIMIZER_HIDE_VAR(reg); |
3b6c1747 PZ |
674 | if (cpu_feature_enabled(X86_FEATURE_CALL_DEPTH)) |
675 | emit_jump(&prog, &__x86_indirect_jump_thunk_array[reg], ip); | |
676 | else | |
677 | emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip); | |
369ae6ff | 678 | } else { |
8c03af3e | 679 | EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */ |
7b75782f | 680 | if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || IS_ENABLED(CONFIG_MITIGATION_SLS)) |
8c03af3e | 681 | EMIT1(0xCC); /* int3 */ |
369ae6ff | 682 | } |
87c87ecd PZ |
683 | |
684 | *pprog = prog; | |
685 | } | |
686 | ||
d77cfe59 PZ |
687 | static void emit_return(u8 **pprog, u8 *ip) |
688 | { | |
689 | u8 *prog = *pprog; | |
690 | ||
a75bf27f | 691 | if (cpu_wants_rethunk()) { |
770ae1b7 | 692 | emit_jump(&prog, x86_return_thunk, ip); |
d77cfe59 PZ |
693 | } else { |
694 | EMIT1(0xC3); /* ret */ | |
7b75782f | 695 | if (IS_ENABLED(CONFIG_MITIGATION_SLS)) |
d77cfe59 PZ |
696 | EMIT1(0xCC); /* int3 */ |
697 | } | |
87c87ecd PZ |
698 | |
699 | *pprog = prog; | |
700 | } | |
701 | ||
116e04ba LH |
702 | #define BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack) (-16 - round_up(stack, 8)) |
703 | ||
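This offset points at the tail_call_cnt_ptr slot pushed by emit_prologue_tail_call() (see the prologue sketch earlier): two 8-byte pushes sit just below the rounded stack area. A quick check of the arithmetic with illustrative stack depths:

```c
/*
 * stack_depth = 0      ->  -16 - 0    = -0x10   (tcc_ptr at [rbp - 0x10])
 * stack_depth = 0x20   ->  -16 - 0x20 = -0x30   (tcc_ptr at [rbp - 0x30])
 */
```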
a2c7a983 IM |
704 | /* |
705 | * Generate the following code: | |
706 | * | |
b52f00e6 AS |
707 | * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ... |
708 | * if (index >= array->map.max_entries) | |
709 | * goto out; | |
116e04ba | 710 | * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) |
b52f00e6 | 711 | * goto out; |
2a36f0b9 | 712 | * prog = array->ptrs[index]; |
b52f00e6 AS |
713 | * if (prog == NULL) |
714 | * goto out; | |
715 | * goto *(prog->bpf_func + prologue_size); | |
716 | * out: | |
717 | */ | |
f18b03fa KKD |
718 | static void emit_bpf_tail_call_indirect(struct bpf_prog *bpf_prog, |
719 | u8 **pprog, bool *callee_regs_used, | |
dceba081 PZ |
720 | u32 stack_depth, u8 *ip, |
721 | struct jit_context *ctx) | |
b52f00e6 | 722 | { |
116e04ba | 723 | int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth); |
dceba081 PZ |
724 | u8 *prog = *pprog, *start = *pprog; |
725 | int offset; | |
4d0b8c0b | 726 | |
a2c7a983 IM |
727 | /* |
728 | * rdi - pointer to ctx | |
b52f00e6 AS |
729 | * rsi - pointer to bpf_array |
730 | * rdx - index in bpf_array | |
731 | */ | |
732 | ||
a2c7a983 IM |
733 | /* |
734 | * if (index >= array->map.max_entries) | |
735 | * goto out; | |
b52f00e6 | 736 | */ |
90caccdd AS |
737 | EMIT2(0x89, 0xD2); /* mov edx, edx */ |
738 | EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ | |
b52f00e6 | 739 | offsetof(struct bpf_array, map.max_entries)); |
dceba081 PZ |
740 | |
741 | offset = ctx->tail_call_indirect_label - (prog + 2 - start); | |
742 | EMIT2(X86_JBE, offset); /* jbe out */ | |
b52f00e6 | 743 | |
a2c7a983 | 744 | /* |
116e04ba | 745 | * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) |
a2c7a983 | 746 | * goto out; |
b52f00e6 | 747 | */ |
116e04ba LH |
748 | EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */ |
749 | EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */ | |
dceba081 PZ |
750 | |
751 | offset = ctx->tail_call_indirect_label - (prog + 2 - start); | |
ebf7f6f0 | 752 | EMIT2(X86_JAE, offset); /* jae out */ |
b52f00e6 | 753 | |
2a36f0b9 | 754 | /* prog = array->ptrs[index]; */ |
0d4ddce3 | 755 | EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */ |
2a36f0b9 | 756 | offsetof(struct bpf_array, ptrs)); |
b52f00e6 | 757 | |
a2c7a983 IM |
758 | /* |
759 | * if (prog == NULL) | |
760 | * goto out; | |
b52f00e6 | 761 | */ |
ebf7d1f5 | 762 | EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */ |
b52f00e6 | 763 | |
dceba081 PZ |
764 | offset = ctx->tail_call_indirect_label - (prog + 2 - start); |
765 | EMIT2(X86_JE, offset); /* je out */ | |
766 | ||
116e04ba LH |
767 | /* Inc tail_call_cnt if the slot is populated. */ |
768 | EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */ | |
769 | ||
f18b03fa KKD |
770 | if (bpf_prog->aux->exception_boundary) { |
771 | pop_callee_regs(&prog, all_callee_regs_used); | |
772 | pop_r12(&prog); | |
773 | } else { | |
774 | pop_callee_regs(&prog, callee_regs_used); | |
2fe99eb0 AS |
775 | if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena)) |
776 | pop_r12(&prog); | |
f18b03fa | 777 | } |
ebf7d1f5 | 778 | |
116e04ba LH |
779 | /* Pop tail_call_cnt_ptr. */ |
780 | EMIT1(0x58); /* pop rax */ | |
781 | /* Pop tail_call_cnt, if it's main prog. | |
782 | * Pop tail_call_cnt_ptr, if it's subprog. | |
783 | */ | |
ebf7d1f5 | 784 | EMIT1(0x58); /* pop rax */ |
4d0b8c0b MF |
785 | if (stack_depth) |
786 | EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */ | |
787 | round_up(stack_depth, 8)); | |
ebf7d1f5 MF |
788 | |
789 | /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */ | |
0d4ddce3 | 790 | EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */ |
b52f00e6 | 791 | offsetof(struct bpf_prog, bpf_func)); |
ebf7d1f5 MF |
792 | EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */ |
793 | X86_TAIL_CALL_OFFSET); | |
a2c7a983 | 794 | /* |
0d4ddce3 | 795 | * Now we're ready to jump into next BPF program |
b52f00e6 | 796 | * rdi == ctx (1st arg) |
ebf7d1f5 | 797 | * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET |
b52f00e6 | 798 | */ |
87c87ecd | 799 | emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start)); |
b52f00e6 AS |
800 | |
801 | /* out: */ | |
dceba081 | 802 | ctx->tail_call_indirect_label = prog - start; |
b52f00e6 AS |
803 | *pprog = prog; |
804 | } | |
805 | ||
f18b03fa KKD |
806 | static void emit_bpf_tail_call_direct(struct bpf_prog *bpf_prog, |
807 | struct bpf_jit_poke_descriptor *poke, | |
dceba081 PZ |
808 | u8 **pprog, u8 *ip, |
809 | bool *callee_regs_used, u32 stack_depth, | |
810 | struct jit_context *ctx) | |
428d5df1 | 811 | { |
116e04ba | 812 | int tcc_ptr_off = BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack_depth); |
dceba081 PZ |
813 | u8 *prog = *pprog, *start = *pprog; |
814 | int offset; | |
ebf7d1f5 | 815 | |
428d5df1 | 816 | /* |
116e04ba | 817 | * if ((*tcc_ptr)++ >= MAX_TAIL_CALL_CNT) |
428d5df1 DB |
818 | * goto out; |
819 | */ | |
116e04ba LH |
820 | EMIT3_off32(0x48, 0x8B, 0x85, tcc_ptr_off); /* mov rax, qword ptr [rbp - tcc_ptr_off] */ |
821 | EMIT4(0x48, 0x83, 0x38, MAX_TAIL_CALL_CNT); /* cmp qword ptr [rax], MAX_TAIL_CALL_CNT */ | |
dceba081 PZ |
822 | |
823 | offset = ctx->tail_call_direct_label - (prog + 2 - start); | |
ebf7f6f0 | 824 | EMIT2(X86_JAE, offset); /* jae out */ |
428d5df1 | 825 | |
dceba081 | 826 | poke->tailcall_bypass = ip + (prog - start); |
ebf7d1f5 | 827 | poke->adj_off = X86_TAIL_CALL_OFFSET; |
dceba081 | 828 | poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE; |
ebf7d1f5 MF |
829 | poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE; |
830 | ||
831 | emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE, | |
832 | poke->tailcall_bypass); | |
833 | ||
116e04ba LH |
834 | /* Inc tail_call_cnt if the slot is populated. */ |
835 | EMIT4(0x48, 0x83, 0x00, 0x01); /* add qword ptr [rax], 1 */ | |
836 | ||
f18b03fa KKD |
837 | if (bpf_prog->aux->exception_boundary) { |
838 | pop_callee_regs(&prog, all_callee_regs_used); | |
839 | pop_r12(&prog); | |
840 | } else { | |
841 | pop_callee_regs(&prog, callee_regs_used); | |
2fe99eb0 AS |
842 | if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena)) |
843 | pop_r12(&prog); | |
f18b03fa KKD |
844 | } |
845 | ||
116e04ba LH |
846 | /* Pop tail_call_cnt_ptr. */ |
847 | EMIT1(0x58); /* pop rax */ | |
848 | /* Pop tail_call_cnt, if it's main prog. | |
849 | * Pop tail_call_cnt_ptr, if it's subprog. | |
850 | */ | |
ebf7d1f5 | 851 | EMIT1(0x58); /* pop rax */ |
4d0b8c0b MF |
852 | if (stack_depth) |
853 | EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8)); | |
428d5df1 | 854 | |
00bc8988 | 855 | emit_nops(&prog, X86_PATCH_SIZE); |
dceba081 | 856 | |
428d5df1 | 857 | /* out: */ |
dceba081 | 858 | ctx->tail_call_direct_label = prog - start; |
428d5df1 DB |
859 | |
860 | *pprog = prog; | |
861 | } | |
862 | ||
863 | static void bpf_tail_call_direct_fixup(struct bpf_prog *prog) | |
864 | { | |
428d5df1 DB |
865 | struct bpf_jit_poke_descriptor *poke; |
866 | struct bpf_array *array; | |
867 | struct bpf_prog *target; | |
868 | int i, ret; | |
869 | ||
870 | for (i = 0; i < prog->aux->size_poke_tab; i++) { | |
871 | poke = &prog->aux->poke_tab[i]; | |
f263a814 JF |
872 | if (poke->aux && poke->aux != prog->aux) |
873 | continue; | |
874 | ||
cf71b174 | 875 | WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable)); |
428d5df1 DB |
876 | |
877 | if (poke->reason != BPF_POKE_REASON_TAIL_CALL) | |
878 | continue; | |
879 | ||
880 | array = container_of(poke->tail_call.map, struct bpf_array, map); | |
881 | mutex_lock(&array->aux->poke_mutex); | |
882 | target = array->ptrs[poke->tail_call.key]; | |
883 | if (target) { | |
cf71b174 MF |
884 | ret = __bpf_arch_text_poke(poke->tailcall_target, |
885 | BPF_MOD_JUMP, NULL, | |
428d5df1 | 886 | (u8 *)target->bpf_func + |
1022a549 | 887 | poke->adj_off); |
428d5df1 | 888 | BUG_ON(ret < 0); |
ebf7d1f5 MF |
889 | ret = __bpf_arch_text_poke(poke->tailcall_bypass, |
890 | BPF_MOD_JUMP, | |
891 | (u8 *)poke->tailcall_target + | |
1022a549 | 892 | X86_PATCH_SIZE, NULL); |
ebf7d1f5 | 893 | BUG_ON(ret < 0); |
428d5df1 | 894 | } |
cf71b174 | 895 | WRITE_ONCE(poke->tailcall_target_stable, true); |
428d5df1 DB |
896 | mutex_unlock(&array->aux->poke_mutex); |
897 | } | |
898 | } | |
899 | ||
6fe8b9c1 DB |
900 | static void emit_mov_imm32(u8 **pprog, bool sign_propagate, |
901 | u32 dst_reg, const u32 imm32) | |
902 | { | |
903 | u8 *prog = *pprog; | |
904 | u8 b1, b2, b3; | |
6fe8b9c1 | 905 | |
a2c7a983 IM |
906 | /* |
907 | * Optimization: if imm32 is positive, use 'mov %eax, imm32' | |
6fe8b9c1 DB |
908 | * (which zero-extends imm32) to save 2 bytes. |
909 | */ | |
910 | if (sign_propagate && (s32)imm32 < 0) { | |
911 | /* 'mov %rax, imm32' sign extends imm32 */ | |
912 | b1 = add_1mod(0x48, dst_reg); | |
913 | b2 = 0xC7; | |
914 | b3 = 0xC0; | |
915 | EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32); | |
916 | goto done; | |
917 | } | |
918 | ||
a2c7a983 IM |
919 | /* |
920 | * Optimization: if imm32 is zero, use 'xor %eax, %eax' | |
6fe8b9c1 DB |
921 | * to save 3 bytes. |
922 | */ | |
923 | if (imm32 == 0) { | |
924 | if (is_ereg(dst_reg)) | |
925 | EMIT1(add_2mod(0x40, dst_reg, dst_reg)); | |
926 | b2 = 0x31; /* xor */ | |
927 | b3 = 0xC0; | |
928 | EMIT2(b2, add_2reg(b3, dst_reg, dst_reg)); | |
929 | goto done; | |
930 | } | |
931 | ||
932 | /* mov %eax, imm32 */ | |
933 | if (is_ereg(dst_reg)) | |
934 | EMIT1(add_1mod(0x40, dst_reg)); | |
935 | EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); | |
936 | done: | |
937 | *pprog = prog; | |
938 | } | |
939 | ||
940 | static void emit_mov_imm64(u8 **pprog, u32 dst_reg, | |
941 | const u32 imm32_hi, const u32 imm32_lo) | |
942 | { | |
af682b76 | 943 | u64 imm64 = ((u64)imm32_hi << 32) | (u32)imm32_lo; |
6fe8b9c1 | 944 | u8 *prog = *pprog; |
6fe8b9c1 | 945 | |
af682b76 | 946 | if (is_uimm32(imm64)) { |
a2c7a983 IM |
947 | /* |
948 | * For emitting plain u32, where sign bit must not be | |
6fe8b9c1 DB |
949 | * propagated, LLVM tends to load imm64 over mov32 |
950 | * directly, so save a couple of bytes by just doing |
951 | * 'mov %eax, imm32' instead. | |
952 | */ | |
953 | emit_mov_imm32(&prog, false, dst_reg, imm32_lo); | |
af682b76 AS |
954 | } else if (is_simm32(imm64)) { |
955 | emit_mov_imm32(&prog, true, dst_reg, imm32_lo); | |
6fe8b9c1 | 956 | } else { |
4d854f4f | 957 | /* movabsq rax, imm64 */ |
6fe8b9c1 DB |
958 | EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg)); |
959 | EMIT(imm32_lo, 4); | |
960 | EMIT(imm32_hi, 4); | |
961 | } | |
962 | ||
963 | *pprog = prog; | |
964 | } | |
965 | ||
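The effect of the cases above, assuming dst_reg = BPF_REG_0 (rax); the byte sequences are illustrative of what emit_mov_imm32()/emit_mov_imm64() choose:

```c
/*
 * imm = 0            ->  31 c0                   xor    eax, eax
 * imm = 0x12345678   ->  b8 78 56 34 12          mov    eax, 0x12345678
 * imm = -1           ->  48 c7 c0 ff ff ff ff    mov    rax, -1  (sign-extended)
 * full 64-bit const  ->  48 b8 <imm64>           movabs rax, imm64
 */
```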
4c38e2f3 DB |
966 | static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg) |
967 | { | |
968 | u8 *prog = *pprog; | |
4c38e2f3 DB |
969 | |
970 | if (is64) { | |
971 | /* mov dst, src */ | |
972 | EMIT_mov(dst_reg, src_reg); | |
973 | } else { | |
974 | /* mov32 dst, src */ | |
975 | if (is_ereg(dst_reg) || is_ereg(src_reg)) | |
976 | EMIT1(add_2mod(0x40, dst_reg, src_reg)); | |
977 | EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); | |
978 | } | |
979 | ||
980 | *pprog = prog; | |
981 | } | |
982 | ||
8100928c YS |
983 | static void emit_movsx_reg(u8 **pprog, int num_bits, bool is64, u32 dst_reg, |
984 | u32 src_reg) | |
985 | { | |
986 | u8 *prog = *pprog; | |
987 | ||
988 | if (is64) { | |
989 | /* movs[b,w,l]q dst, src */ | |
990 | if (num_bits == 8) | |
991 | EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbe, | |
992 | add_2reg(0xC0, src_reg, dst_reg)); | |
993 | else if (num_bits == 16) | |
994 | EMIT4(add_2mod(0x48, src_reg, dst_reg), 0x0f, 0xbf, | |
995 | add_2reg(0xC0, src_reg, dst_reg)); | |
996 | else if (num_bits == 32) | |
997 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x63, | |
998 | add_2reg(0xC0, src_reg, dst_reg)); | |
999 | } else { | |
1000 | /* movs[b,w]l dst, src */ | |
1001 | if (num_bits == 8) { | |
1002 | EMIT4(add_2mod(0x40, src_reg, dst_reg), 0x0f, 0xbe, | |
1003 | add_2reg(0xC0, src_reg, dst_reg)); | |
1004 | } else if (num_bits == 16) { | |
1005 | if (is_ereg(dst_reg) || is_ereg(src_reg)) | |
1006 | EMIT1(add_2mod(0x40, src_reg, dst_reg)); | |
1007 | EMIT3(add_2mod(0x0f, src_reg, dst_reg), 0xbf, | |
1008 | add_2reg(0xC0, src_reg, dst_reg)); | |
1009 | } | |
1010 | } | |
1011 | ||
1012 | *pprog = prog; | |
1013 | } | |
1014 | ||
11c11d07 BJ |
1015 | /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */ |
1016 | static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off) | |
1017 | { | |
1018 | u8 *prog = *pprog; | |
11c11d07 BJ |
1019 | |
1020 | if (is_imm8(off)) { | |
1021 | /* 1-byte signed displacement. | |
1022 | * | |
1023 | * If off == 0 we could skip this and save one extra byte, but the |
1024 | * special case of x86 R13, which always needs an offset, is not |
1025 | * worth the hassle. |
1026 | */ | |
1027 | EMIT2(add_2reg(0x40, ptr_reg, val_reg), off); | |
1028 | } else { | |
1029 | /* 4-byte signed displacement */ | |
1030 | EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off); | |
1031 | } | |
1032 | *pprog = prog; | |
1033 | } | |
1034 | ||
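A sketch of the two displacement forms, showing only the suffix bytes (the REX prefix and opcode in front come from callers such as emit_ldx()/emit_stx() below), for ptr_reg = BPF_REG_6 (rbx) and val_reg = BPF_REG_7 (r13); add_2reg(0x40, rbx, r13) = 0x40 + 3 + (5 << 3) = 0x6b:

```c
/*
 * off = 8       ->  6b 08              ModR/M 0x40 form + disp8
 * off = 0x1000  ->  ab 00 10 00 00     ModR/M 0x80 form + disp32
 */
```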
2fe99eb0 AS |
1035 | static void emit_insn_suffix_SIB(u8 **pprog, u32 ptr_reg, u32 val_reg, u32 index_reg, int off) |
1036 | { | |
1037 | u8 *prog = *pprog; | |
1038 | ||
1039 | if (is_imm8(off)) { | |
1040 | EMIT3(add_2reg(0x44, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off); | |
1041 | } else { | |
1042 | EMIT2_off32(add_2reg(0x84, BPF_REG_0, val_reg), add_2reg(0, ptr_reg, index_reg) /* SIB */, off); | |
1043 | } | |
1044 | *pprog = prog; | |
1045 | } | |
1046 | ||
74007cfc BJ |
1047 | /* |
1048 | * Emit a REX byte if it will be necessary to address these registers | |
1049 | */ | |
1050 | static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64) | |
1051 | { | |
1052 | u8 *prog = *pprog; | |
74007cfc BJ |
1053 | |
1054 | if (is64) | |
1055 | EMIT1(add_2mod(0x48, dst_reg, src_reg)); | |
1056 | else if (is_ereg(dst_reg) || is_ereg(src_reg)) | |
1057 | EMIT1(add_2mod(0x40, dst_reg, src_reg)); | |
1058 | *pprog = prog; | |
1059 | } | |
1060 | ||
6364d7d7 JM |
1061 | /* |
1062 | * Similar version of maybe_emit_mod() for a single register | |
1063 | */ | |
1064 | static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64) | |
1065 | { | |
1066 | u8 *prog = *pprog; | |
1067 | ||
1068 | if (is64) | |
1069 | EMIT1(add_1mod(0x48, reg)); | |
1070 | else if (is_ereg(reg)) | |
1071 | EMIT1(add_1mod(0x40, reg)); | |
1072 | *pprog = prog; | |
1073 | } | |
1074 | ||
3b2744e6 AS |
1075 | /* LDX: dst_reg = *(u8*)(src_reg + off) */ |
1076 | static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) | |
1077 | { | |
1078 | u8 *prog = *pprog; | |
3b2744e6 AS |
1079 | |
1080 | switch (size) { | |
1081 | case BPF_B: | |
1082 | /* Emit 'movzx rax, byte ptr [rax + off]' */ | |
1083 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); | |
1084 | break; | |
1085 | case BPF_H: | |
1086 | /* Emit 'movzx rax, word ptr [rax + off]' */ | |
1087 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); | |
1088 | break; | |
1089 | case BPF_W: | |
1090 | /* Emit 'mov eax, dword ptr [rax+0x14]' */ | |
1091 | if (is_ereg(dst_reg) || is_ereg(src_reg)) | |
1092 | EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); | |
1093 | else | |
1094 | EMIT1(0x8B); | |
1095 | break; | |
1096 | case BPF_DW: | |
1097 | /* Emit 'mov rax, qword ptr [rax+0x14]' */ | |
1098 | EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); | |
1099 | break; | |
1100 | } | |
11c11d07 | 1101 | emit_insn_suffix(&prog, src_reg, dst_reg, off); |
3b2744e6 AS |
1102 | *pprog = prog; |
1103 | } | |
1104 | ||
1f9a1ea8 YS |
1105 | /* LDSX: dst_reg = *(s8*)(src_reg + off) */ |
1106 | static void emit_ldsx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) | |
1107 | { | |
1108 | u8 *prog = *pprog; | |
1109 | ||
1110 | switch (size) { | |
1111 | case BPF_B: | |
1112 | /* Emit 'movsx rax, byte ptr [rax + off]' */ | |
1113 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBE); | |
1114 | break; | |
1115 | case BPF_H: | |
1116 | /* Emit 'movsx rax, word ptr [rax + off]' */ | |
1117 | EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xBF); | |
1118 | break; | |
1119 | case BPF_W: | |
1120 | /* Emit 'movsx rax, dword ptr [rax+0x14]' */ | |
1121 | EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x63); | |
1122 | break; | |
1123 | } | |
1124 | emit_insn_suffix(&prog, src_reg, dst_reg, off); | |
1125 | *pprog = prog; | |
1126 | } | |
1127 | ||
2fe99eb0 AS |
1128 | static void emit_ldx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off) |
1129 | { | |
1130 | u8 *prog = *pprog; | |
1131 | ||
1132 | switch (size) { | |
1133 | case BPF_B: | |
1134 | /* movzx rax, byte ptr [rax + r12 + off] */ | |
1135 | EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB6); | |
1136 | break; | |
1137 | case BPF_H: | |
1138 | /* movzx rax, word ptr [rax + r12 + off] */ | |
1139 | EMIT3(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x0F, 0xB7); | |
1140 | break; | |
1141 | case BPF_W: | |
1142 | /* mov eax, dword ptr [rax + r12 + off] */ | |
1143 | EMIT2(add_3mod(0x40, src_reg, dst_reg, index_reg), 0x8B); | |
1144 | break; | |
1145 | case BPF_DW: | |
1146 | /* mov rax, qword ptr [rax + r12 + off] */ | |
1147 | EMIT2(add_3mod(0x48, src_reg, dst_reg, index_reg), 0x8B); | |
1148 | break; | |
1149 | } | |
1150 | emit_insn_suffix_SIB(&prog, src_reg, dst_reg, index_reg, off); | |
1151 | *pprog = prog; | |
1152 | } | |
1153 | ||
1154 | static void emit_ldx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) | |
1155 | { | |
1156 | emit_ldx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off); | |
1157 | } | |
1158 | ||
3b2744e6 AS |
1159 | /* STX: *(u8*)(dst_reg + off) = src_reg */ |
1160 | static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) | |
1161 | { | |
1162 | u8 *prog = *pprog; | |
3b2744e6 AS |
1163 | |
1164 | switch (size) { | |
1165 | case BPF_B: | |
1166 | /* Emit 'mov byte ptr [rax + off], al' */ | |
aee194b1 LN |
1167 | if (is_ereg(dst_reg) || is_ereg_8l(src_reg)) |
1168 | /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */ | |
3b2744e6 AS |
1169 | EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); |
1170 | else | |
1171 | EMIT1(0x88); | |
1172 | break; | |
1173 | case BPF_H: | |
1174 | if (is_ereg(dst_reg) || is_ereg(src_reg)) | |
1175 | EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); | |
1176 | else | |
1177 | EMIT2(0x66, 0x89); | |
1178 | break; | |
1179 | case BPF_W: | |
1180 | if (is_ereg(dst_reg) || is_ereg(src_reg)) | |
1181 | EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); | |
1182 | else | |
1183 | EMIT1(0x89); | |
1184 | break; | |
1185 | case BPF_DW: | |
1186 | EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); | |
1187 | break; | |
1188 | } | |
11c11d07 | 1189 | emit_insn_suffix(&prog, dst_reg, src_reg, off); |
3b2744e6 AS |
1190 | *pprog = prog; |
1191 | } | |
1192 | ||
2fe99eb0 AS |
1193 | /* STX: *(u8*)(dst_reg + index_reg + off) = src_reg */ |
1194 | static void emit_stx_index(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, u32 index_reg, int off) | |
1195 | { | |
1196 | u8 *prog = *pprog; | |
1197 | ||
1198 | switch (size) { | |
1199 | case BPF_B: | |
1200 | /* mov byte ptr [rax + r12 + off], al */ | |
1201 | EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x88); | |
1202 | break; | |
1203 | case BPF_H: | |
1204 | /* mov word ptr [rax + r12 + off], ax */ | |
1205 | EMIT3(0x66, add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89); | |
1206 | break; | |
1207 | case BPF_W: | |
1208 | /* mov dword ptr [rax + r12 + 1], eax */ | |
1209 | EMIT2(add_3mod(0x40, dst_reg, src_reg, index_reg), 0x89); | |
1210 | break; | |
1211 | case BPF_DW: | |
1212 | /* mov qword ptr [rax + r12 + 1], rax */ | |
1213 | EMIT2(add_3mod(0x48, dst_reg, src_reg, index_reg), 0x89); | |
1214 | break; | |
1215 | } | |
1216 | emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off); | |
1217 | *pprog = prog; | |
1218 | } | |
1219 | ||
1220 | static void emit_stx_r12(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off) | |
1221 | { | |
1222 | emit_stx_index(pprog, size, dst_reg, src_reg, X86_REG_R12, off); | |
1223 | } | |
1224 | ||
1225 | /* ST: *(u8*)(dst_reg + index_reg + off) = imm32 */ | |
1226 | static void emit_st_index(u8 **pprog, u32 size, u32 dst_reg, u32 index_reg, int off, int imm) | |
1227 | { | |
1228 | u8 *prog = *pprog; | |
1229 | ||
1230 | switch (size) { | |
1231 | case BPF_B: | |
1232 | /* mov byte ptr [rax + r12 + off], imm8 */ | |
1233 | EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC6); | |
1234 | break; | |
1235 | case BPF_H: | |
1236 | /* mov word ptr [rax + r12 + off], imm16 */ | |
1237 | EMIT3(0x66, add_3mod(0x40, dst_reg, 0, index_reg), 0xC7); | |
1238 | break; | |
1239 | case BPF_W: | |
1240 | /* mov dword ptr [rax + r12 + 1], imm32 */ | |
1241 | EMIT2(add_3mod(0x40, dst_reg, 0, index_reg), 0xC7); | |
1242 | break; | |
1243 | case BPF_DW: | |
1244 | /* mov qword ptr [rax + r12 + 1], imm32 */ | |
1245 | EMIT2(add_3mod(0x48, dst_reg, 0, index_reg), 0xC7); | |
1246 | break; | |
1247 | } | |
1248 | emit_insn_suffix_SIB(&prog, dst_reg, 0, index_reg, off); | |
1249 | EMIT(imm, bpf_size_to_x86_bytes(size)); | |
1250 | *pprog = prog; | |
1251 | } | |
1252 | ||
1253 | static void emit_st_r12(u8 **pprog, u32 size, u32 dst_reg, int off, int imm) | |
1254 | { | |
1255 | emit_st_index(pprog, size, dst_reg, X86_REG_R12, off, imm); | |
1256 | } | |
1257 | ||
5341c9a4 PY |
1258 | static int emit_atomic_rmw(u8 **pprog, u32 atomic_op, |
1259 | u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size) | |
91c960b0 BJ |
1260 | { |
1261 | u8 *prog = *pprog; | |
91c960b0 BJ |
1262 | |
1263 | EMIT1(0xF0); /* lock prefix */ | |
1264 | ||
1265 | maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW); | |
1266 | ||
1267 | /* emit opcode */ | |
1268 | switch (atomic_op) { | |
1269 | case BPF_ADD: | |
981f94c3 BJ |
1270 | case BPF_AND: |
1271 | case BPF_OR: | |
1272 | case BPF_XOR: | |
91c960b0 BJ |
1273 | /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */ |
1274 | EMIT1(simple_alu_opcodes[atomic_op]); | |
1275 | break; | |
5ca419f2 BJ |
1276 | case BPF_ADD | BPF_FETCH: |
1277 | /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */ | |
1278 | EMIT2(0x0F, 0xC1); | |
1279 | break; | |
5ffa2550 BJ |
1280 | case BPF_XCHG: |
1281 | /* src_reg = atomic_xchg(dst_reg + off, src_reg); */ | |
1282 | EMIT1(0x87); | |
1283 | break; | |
1284 | case BPF_CMPXCHG: | |
1285 | /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */ | |
1286 | EMIT2(0x0F, 0xB1); | |
1287 | break; | |
91c960b0 BJ |
1288 | default: |
1289 | pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); | |
1290 | return -EFAULT; | |
1291 | } | |
1292 | ||
1293 | emit_insn_suffix(&prog, dst_reg, src_reg, off); | |
1294 | ||
1295 | *pprog = prog; | |
1296 | return 0; | |
1297 | } | |
1298 | ||
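One concrete case, as a sketch: emit_atomic_rmw() for BPF_ADD | BPF_FETCH on a u64 with dst_reg = BPF_REG_1 (rdi), src_reg = BPF_REG_2 (rsi) and off = 8 produces:

```c
/*
 *   f0 48 0f c1 77 08        lock xadd QWORD PTR [rdi+0x8], rsi
 *
 * 0xf0 is the lock prefix, 0x48 the REX.W from maybe_emit_mod(),
 * 0f c1 the xadd opcode, and 77 08 the ModR/M + disp8 from
 * emit_insn_suffix() (0x40 + 7 + (6 << 3) = 0x77).
 */
```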
5341c9a4 PY |
1299 | static int emit_atomic_rmw_index(u8 **pprog, u32 atomic_op, u32 size, |
1300 | u32 dst_reg, u32 src_reg, u32 index_reg, | |
1301 | int off) | |
d503a04f AS |
1302 | { |
1303 | u8 *prog = *pprog; | |
1304 | ||
1305 | EMIT1(0xF0); /* lock prefix */ | |
1306 | switch (size) { | |
1307 | case BPF_W: | |
1308 | EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg)); | |
1309 | break; | |
1310 | case BPF_DW: | |
1311 | EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg)); | |
1312 | break; | |
1313 | default: | |
5341c9a4 | 1314 | pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n"); |
d503a04f AS |
1315 | return -EFAULT; |
1316 | } | |
1317 | ||
1318 | /* emit opcode */ | |
1319 | switch (atomic_op) { | |
1320 | case BPF_ADD: | |
1321 | case BPF_AND: | |
1322 | case BPF_OR: | |
1323 | case BPF_XOR: | |
1324 | /* lock *(u32/u64*)(dst_reg + idx_reg + off) <op>= src_reg */ | |
1325 | EMIT1(simple_alu_opcodes[atomic_op]); | |
1326 | break; | |
1327 | case BPF_ADD | BPF_FETCH: | |
1328 | /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */ | |
1329 | EMIT2(0x0F, 0xC1); | |
1330 | break; | |
1331 | case BPF_XCHG: | |
1332 | /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */ | |
1333 | EMIT1(0x87); | |
1334 | break; | |
1335 | case BPF_CMPXCHG: | |
1336 | /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */ | |
1337 | EMIT2(0x0F, 0xB1); | |
1338 | break; | |
1339 | default: | |
1340 | pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); | |
1341 | return -EFAULT; | |
1342 | } | |
1343 | emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off); | |
1344 | *pprog = prog; | |
1345 | return 0; | |
1346 | } | |
1347 | ||
5341c9a4 PY |
1348 | static int emit_atomic_ld_st(u8 **pprog, u32 atomic_op, u32 dst_reg, |
1349 | u32 src_reg, s16 off, u8 bpf_size) | |
1350 | { | |
1351 | switch (atomic_op) { | |
1352 | case BPF_LOAD_ACQ: | |
1353 | /* dst_reg = smp_load_acquire(src_reg + off16) */ | |
1354 | emit_ldx(pprog, bpf_size, dst_reg, src_reg, off); | |
1355 | break; | |
1356 | case BPF_STORE_REL: | |
1357 | /* smp_store_release(dst_reg + off16, src_reg) */ | |
1358 | emit_stx(pprog, bpf_size, dst_reg, src_reg, off); | |
1359 | break; | |
1360 | default: | |
1361 | pr_err("bpf_jit: unknown atomic load/store opcode %02x\n", | |
1362 | atomic_op); | |
1363 | return -EFAULT; | |
1364 | } | |
1365 | ||
1366 | return 0; | |
1367 | } | |
1368 | ||
1369 | static int emit_atomic_ld_st_index(u8 **pprog, u32 atomic_op, u32 size, | |
1370 | u32 dst_reg, u32 src_reg, u32 index_reg, | |
1371 | int off) | |
1372 | { | |
1373 | switch (atomic_op) { | |
1374 | case BPF_LOAD_ACQ: | |
1375 | /* dst_reg = smp_load_acquire(src_reg + idx_reg + off16) */ | |
1376 | emit_ldx_index(pprog, size, dst_reg, src_reg, index_reg, off); | |
1377 | break; | |
1378 | case BPF_STORE_REL: | |
1379 | /* smp_store_release(dst_reg + idx_reg + off16, src_reg) */ | |
1380 | emit_stx_index(pprog, size, dst_reg, src_reg, index_reg, off); | |
1381 | break; | |
1382 | default: | |
1383 | pr_err("bpf_jit: unknown atomic load/store opcode %02x\n", | |
1384 | atomic_op); | |
1385 | return -EFAULT; | |
1386 | } | |
1387 | ||
1388 | return 0; | |
1389 | } | |
1390 | ||
2fe99eb0 AS |
1391 | #define DONT_CLEAR 1 |
1392 | ||
46d28947 | 1393 | bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) |
3dec541b AS |
1394 | { |
1395 | u32 reg = x->fixup >> 8; | |
1396 | ||
1397 | /* jump over faulting load and clear dest register */ | |
2fe99eb0 AS |
1398 | if (reg != DONT_CLEAR) |
1399 | *(unsigned long *)((void *)regs + reg) = 0; | |
3dec541b AS |
1400 | regs->ip += x->fixup & 0xff; |
1401 | return true; | |
1402 | } | |
1403 | ||
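The fixup word is a small packed value: the low byte says how far to advance regs->ip to skip the faulting x86 insn, and the upper bits hold the pt_regs offset of the destination register (or DONT_CLEAR). A decoding sketch with hypothetical values:

```c
/*
 * x->fixup = (offsetof(struct pt_regs, r14) << 8) | 5
 *   -> a faulting load into BPF_REG_8 whose x86 encoding is 5 bytes long:
 *      zero regs->r14 and advance regs->ip by 5.
 * x->fixup = (DONT_CLEAR << 8) | insn_len
 *   -> skip the faulting insn without clearing any register.
 */
```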
ebf7d1f5 | 1404 | static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, |
f663a03c | 1405 | bool *regs_used) |
ebf7d1f5 MF |
1406 | { |
1407 | int i; | |
1408 | ||
1409 | for (i = 1; i <= insn_cnt; i++, insn++) { | |
ebf7d1f5 MF |
1410 | if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) |
1411 | regs_used[0] = true; | |
1412 | if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) | |
1413 | regs_used[1] = true; | |
1414 | if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8) | |
1415 | regs_used[2] = true; | |
1416 | if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9) | |
1417 | regs_used[3] = true; | |
1418 | } | |
1419 | } | |
1420 | ||
77d8f5d4 JM |
1421 | /* emit the 3-byte VEX prefix |
1422 | * | |
1423 | * r: same as rex.r, extra bit for ModRM reg field | |
1424 | * x: same as rex.x, extra bit for SIB index field | |
1425 | * b: same as rex.b, extra bit for ModRM r/m, or SIB base | |
1426 | * m: opcode map select, encoding escape bytes e.g. 0x0f38 | |
1427 | * w: same as rex.w (32 bit or 64 bit) or opcode specific | |
1428 | * src_reg2: additional source reg (encoded as BPF reg) | |
1429 | * l: vector length (128 bit or 256 bit) or reserved | |
1430 | * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3) | |
1431 | */ | |
1432 | static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m, | |
1433 | bool w, u8 src_reg2, bool l, u8 pp) | |
1434 | { | |
1435 | u8 *prog = *pprog; | |
1436 | const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */ | |
1437 | u8 b1, b2; | |
1438 | u8 vvvv = reg2hex[src_reg2]; | |
1439 | ||
1440 | /* reg2hex gives only the lower 3 bit of vvvv */ | |
1441 | if (is_ereg(src_reg2)) | |
1442 | vvvv |= 1 << 3; | |
1443 | ||
1444 | /* | |
1445 | * 2nd byte of 3-byte VEX prefix | |
1446 | * ~ means bit inverted encoding | |
1447 | * | |
1448 | * 7 0 | |
1449 | * +---+---+---+---+---+---+---+---+ | |
1450 | * |~R |~X |~B | m | | |
1451 | * +---+---+---+---+---+---+---+---+ | |
1452 | */ | |
1453 | b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f); | |
1454 | /* | |
1455 | * 3rd byte of 3-byte VEX prefix | |
1456 | * | |
1457 | * 7 0 | |
1458 | * +---+---+---+---+---+---+---+---+ | |
1459 | * | W | ~vvvv | L | pp | | |
1460 | * +---+---+---+---+---+---+---+---+ | |
1461 | */ | |
1462 | b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3); | |
1463 | ||
1464 | EMIT3(b0, b1, b2); | |
1465 | *pprog = prog; | |
1466 | } | |
1467 | ||
1468 | /* emit BMI2 shift instruction */ | |
1469 | static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op) | |
1470 | { | |
1471 | u8 *prog = *pprog; | |
1472 | bool r = is_ereg(dst_reg); | |
1473 | u8 m = 2; /* escape code 0f38 */ | |
1474 | ||
1475 | emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op); | |
1476 | EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg)); | |
1477 | *pprog = prog; | |
1478 | } | |
1479 | ||
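A worked encoding, as a sketch: emit_shiftx(&prog, BPF_REG_0, BPF_REG_4, true, 1), where pp = 1 selects the 0x66 prefix (SHLX), dst is rax and the shift count is in rcx, emits:

```c
/*
 *   c4 e2 f1 f7 c0           shlx rax, rax, rcx
 *
 * 2nd VEX byte 0xe2: ~R=1 ~X=1 ~B=1, m = 2 (0f38 map);
 * 3rd VEX byte 0xf1: W=1 (64-bit), ~vvvv = 0b1110 (vvvv = rcx), L=0, pp=1.
 */
```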
7d1cd70d YS |
1480 | static void emit_priv_frame_ptr(u8 **pprog, void __percpu *priv_frame_ptr) |
1481 | { | |
1482 | u8 *prog = *pprog; | |
1483 | ||
1484 | /* movabs r9, priv_frame_ptr */ | |
1485 | emit_mov_imm64(&prog, X86_REG_R9, (__force long) priv_frame_ptr >> 32, | |
1486 | (u32) (__force long) priv_frame_ptr); | |
1487 | ||
1488 | #ifdef CONFIG_SMP | |
1489 | /* add <r9>, gs:[<off>] */ | |
1490 | EMIT2(0x65, 0x4c); | |
1491 | EMIT3(0x03, 0x0c, 0x25); | |
1492 | EMIT((u32)(unsigned long)&this_cpu_off, 4); | |
1493 | #endif | |
1494 | ||
1495 | *pprog = prog; | |
1496 | } | |
1497 | ||
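The resulting sequence (SMP case, a sketch) materializes this CPU's private frame pointer in r9: the movabs loads the per-CPU offset of the private stack area, and the gs-relative add of this_cpu_off converts it into the current CPU's linear address, so r9 can then serve as the frame pointer for the program's private stack:

```c
/*
 *   49 b9 <imm64>               movabs r9, priv_frame_ptr
 *   65 4c 03 0c 25 <imm32>      add    r9, QWORD PTR gs:[this_cpu_off]
 */
```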
93c5aecc GL |
1498 | #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) |
1499 | ||
116e04ba LH |
1500 | #define __LOAD_TCC_PTR(off) \ |
1501 | EMIT3_off32(0x48, 0x8B, 0x85, off) | |
1502 | /* mov rax, qword ptr [rbp - rounded_stack_depth - 16] */ | |
1503 | #define LOAD_TAIL_CALL_CNT_PTR(stack) \ | |
1504 | __LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack)) | |
2b5dcb31 | 1505 | |
7d1cd70d YS |
1506 | /* Memory size/value to protect private stack overflow/underflow */ |
1507 | #define PRIV_STACK_GUARD_SZ 8 | |
1508 | #define PRIV_STACK_GUARD_VAL 0xEB9F12345678eb9fULL | |
1509 | ||
d4e89d21 DS |
1510 | static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip, |
1511 | struct bpf_prog *bpf_prog) | |
1512 | { | |
1513 | u8 *prog = *pprog; | |
1514 | u8 *func; | |
1515 | ||
1516 | if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) { | |
1517 | /* The clearing sequence clobbers eax and ecx. */ | |
1518 | EMIT1(0x50); /* push rax */ | |
1519 | EMIT1(0x51); /* push rcx */ | |
1520 | ip += 2; | |
1521 | ||
1522 | func = (u8 *)clear_bhb_loop; | |
1523 | ip += x86_call_depth_emit_accounting(&prog, func, ip); | |
1524 | ||
1525 | if (emit_call(&prog, func, ip)) | |
1526 | return -EINVAL; | |
1527 | EMIT1(0x59); /* pop rcx */ | |
1528 | EMIT1(0x58); /* pop rax */ | |
1529 | } | |
9f725eec DS |
1530 | /* Insert IBHF instruction */ |
1531 | if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) && | |
1532 | cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) || | |
073fdbe0 | 1533 | cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) { |
9f725eec DS |
1534 | /* |
1535 | * Add an Indirect Branch History Fence (IBHF). IBHF acts as a | |
1536 | * fence preventing branch history from before the fence from | |
1537 | * affecting indirect branches after the fence. This is | |
1538 | * specifically used in cBPF jitted code to prevent Intra-mode | |
1539 | * BHI attacks. The IBHF instruction is designed to be a NOP on | |
1540 | * hardware that doesn't need or support it. The REP and REX.W | |
1541 | * prefixes are required by the microcode, and they also ensure | |
1542 | * that the NOP is unlikely to be used in existing code. | |
073fdbe0 PG |
1543 | * |
1544 | * IBHF is not a valid instruction in 32-bit mode. | |
9f725eec DS |
1545 | */ |
1546 | EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */ | |
1547 | } | |
d4e89d21 DS |
1548 | *pprog = prog; |
1549 | return 0; | |
1550 | } | |
1551 | ||
1022a549 | 1552 | static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, |
93c5aecc | 1553 | int oldproglen, struct jit_context *ctx, bool jmp_padding) |
b52f00e6 | 1554 | { |
ebf7d1f5 | 1555 | bool tail_call_reachable = bpf_prog->aux->tail_call_reachable; |
b52f00e6 | 1556 | struct bpf_insn *insn = bpf_prog->insnsi; |
ebf7d1f5 | 1557 | bool callee_regs_used[4] = {}; |
b52f00e6 | 1558 | int insn_cnt = bpf_prog->len; |
b52f00e6 AS |
1559 | bool seen_exit = false; |
1560 | u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; | |
7d1cd70d | 1561 | void __percpu *priv_frame_ptr = NULL; |
142fd4d2 | 1562 | u64 arena_vm_start, user_vm_start; |
7d1cd70d | 1563 | void __percpu *priv_stack_ptr; |
ced50fc4 | 1564 | int i, excnt = 0; |
93c5aecc | 1565 | int ilen, proglen = 0; |
b52f00e6 | 1566 | u8 *prog = temp; |
f4b21ed0 | 1567 | u32 stack_depth; |
91c960b0 | 1568 | int err; |
b52f00e6 | 1569 | |
f4b21ed0 | 1570 | stack_depth = bpf_prog->aux->stack_depth; |
7d1cd70d YS |
1571 | priv_stack_ptr = bpf_prog->aux->priv_stack_ptr; |
1572 | if (priv_stack_ptr) { | |
1573 | priv_frame_ptr = priv_stack_ptr + PRIV_STACK_GUARD_SZ + round_up(stack_depth, 8); | |
1574 | stack_depth = 0; | |
1575 | } | |
f4b21ed0 | 1576 | |
2fe99eb0 | 1577 | arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena); |
142fd4d2 | 1578 | user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena); |
2fe99eb0 | 1579 | |
f663a03c | 1580 | detect_reg_usage(insn, insn_cnt, callee_regs_used); |
ebf7d1f5 | 1581 | |
0c92385d | 1582 | emit_prologue(&prog, image, stack_depth, |
ebf7d1f5 | 1583 | bpf_prog_was_classic(bpf_prog), tail_call_reachable, |
f18b03fa KKD |
1584 | bpf_is_subprog(bpf_prog), bpf_prog->aux->exception_cb); |
1585 | /* Exception callback will clobber callee regs for its own use, and | |
1586 | * restore the original callee regs from main prog's stack frame. | |
1587 | */ | |
1588 | if (bpf_prog->aux->exception_boundary) { | |
1589 | /* We also need to save r12, which is not mapped to any BPF | |
1590 | * register, as we throw after entry into the kernel, which may | |
1591 | * overwrite r12. | |
1592 | */ | |
1593 | push_r12(&prog); | |
1594 | push_callee_regs(&prog, all_callee_regs_used); | |
1595 | } else { | |
2fe99eb0 AS |
1596 | if (arena_vm_start) |
1597 | push_r12(&prog); | |
f18b03fa KKD |
1598 | push_callee_regs(&prog, callee_regs_used); |
1599 | } | |
2fe99eb0 AS |
1600 | if (arena_vm_start) |
1601 | emit_mov_imm64(&prog, X86_REG_R12, | |
1602 | arena_vm_start >> 32, (u32) arena_vm_start); | |
93c5aecc | 1603 | |
7d1cd70d YS |
1604 | if (priv_frame_ptr) |
1605 | emit_priv_frame_ptr(&prog, priv_frame_ptr); | |
1606 | ||
93c5aecc | 1607 | ilen = prog - temp; |
1022a549 SL |
1608 | if (rw_image) |
1609 | memcpy(rw_image + proglen, temp, ilen); | |
93c5aecc GL |
1610 | proglen += ilen; |
1611 | addrs[0] = proglen; | |
1612 | prog = temp; | |
b52f00e6 | 1613 | |
7c2e988f | 1614 | for (i = 1; i <= insn_cnt; i++, insn++) { |
e430f34e AS |
1615 | const s32 imm32 = insn->imm; |
1616 | u32 dst_reg = insn->dst_reg; | |
1617 | u32 src_reg = insn->src_reg; | |
6fe8b9c1 | 1618 | u8 b2 = 0, b3 = 0; |
4c5de127 | 1619 | u8 *start_of_ldx; |
62258278 | 1620 | s64 jmp_offset; |
90156f4b | 1621 | s16 insn_off; |
62258278 | 1622 | u8 jmp_cond; |
62258278 | 1623 | u8 *func; |
93c5aecc | 1624 | int nops; |
62258278 | 1625 | |
7d1cd70d YS |
1626 | if (priv_frame_ptr) { |
1627 | if (src_reg == BPF_REG_FP) | |
1628 | src_reg = X86_REG_R9; | |
1629 | ||
1630 | if (dst_reg == BPF_REG_FP) | |
1631 | dst_reg = X86_REG_R9; | |
1632 | } | |
1633 | ||
62258278 AS |
1634 | switch (insn->code) { |
1635 | /* ALU */ | |
1636 | case BPF_ALU | BPF_ADD | BPF_X: | |
1637 | case BPF_ALU | BPF_SUB | BPF_X: | |
1638 | case BPF_ALU | BPF_AND | BPF_X: | |
1639 | case BPF_ALU | BPF_OR | BPF_X: | |
1640 | case BPF_ALU | BPF_XOR | BPF_X: | |
1641 | case BPF_ALU64 | BPF_ADD | BPF_X: | |
1642 | case BPF_ALU64 | BPF_SUB | BPF_X: | |
1643 | case BPF_ALU64 | BPF_AND | BPF_X: | |
1644 | case BPF_ALU64 | BPF_OR | BPF_X: | |
1645 | case BPF_ALU64 | BPF_XOR | BPF_X: | |
74007cfc BJ |
1646 | maybe_emit_mod(&prog, dst_reg, src_reg, |
1647 | BPF_CLASS(insn->code) == BPF_ALU64); | |
e5f02cac | 1648 | b2 = simple_alu_opcodes[BPF_OP(insn->code)]; |
e430f34e | 1649 | EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg)); |
62258278 | 1650 | break; |
0a14842f | 1651 | |
62258278 | 1652 | case BPF_ALU64 | BPF_MOV | BPF_X: |
770546ae | 1653 | if (insn_is_cast_user(insn)) { |
142fd4d2 AS |
1654 | if (dst_reg != src_reg) |
1655 | /* 32-bit mov */ | |
1656 | emit_mov_reg(&prog, false, dst_reg, src_reg); | |
1657 | /* shl dst_reg, 32 */ | |
1658 | maybe_emit_1mod(&prog, dst_reg, true); | |
1659 | EMIT3(0xC1, add_1reg(0xE0, dst_reg), 32); | |
1660 | ||
1661 | /* or dst_reg, user_vm_start */ | |
1662 | maybe_emit_1mod(&prog, dst_reg, true); | |
1663 | if (is_axreg(dst_reg)) | |
1664 | EMIT1_off32(0x0D, user_vm_start >> 32); | |
1665 | else | |
1666 | EMIT2_off32(0x81, add_1reg(0xC8, dst_reg), user_vm_start >> 32); | |
1667 | ||
1668 | /* rol dst_reg, 32 */ | |
1669 | maybe_emit_1mod(&prog, dst_reg, true); | |
1670 | EMIT3(0xC1, add_1reg(0xC0, dst_reg), 32); | |
1671 | ||
1672 | /* xor r11, r11 */ | |
1673 | EMIT3(0x4D, 0x31, 0xDB); | |
1674 | ||
1675 | /* test dst_reg32, dst_reg32; check if lower 32-bit are zero */ | |
1676 | maybe_emit_mod(&prog, dst_reg, dst_reg, false); | |
1677 | EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); | |
1678 | ||
1679 | /* cmove r11, dst_reg; if so, set dst_reg to zero */ | |
1680 | /* WARNING: Intel swapped src/dst register encoding in CMOVcc !!! */ | |
1681 | maybe_emit_mod(&prog, AUX_REG, dst_reg, true); | |
1682 | EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg)); | |
1683 | break; | |
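/*
 * In effect, the cast_user sequence above computes (illustrative
 * pseudocode, not part of the source):
 *
 *   if ((u32)src == 0)
 *           dst = 0;
 *   else
 *           dst = (user_vm_start & 0xffffffff00000000ULL) | (u32)src;
 *
 * i.e. a NULL arena pointer stays NULL, anything else gets the upper
 * 32 bits of the user VM base stitched onto the 32-bit arena offset.
 */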
7bdbf744 | 1684 | } else if (insn_is_mov_percpu_addr(insn)) { |
7bdbf744 AN |
1685 | /* mov <dst>, <src> (if necessary) */ |
1686 | EMIT_mov(dst_reg, src_reg); | |
1e9e0b85 | 1687 | #ifdef CONFIG_SMP |
7bdbf744 AN |
1688 | /* add <dst>, gs:[<off>] */ |
1689 | EMIT2(0x65, add_1mod(0x48, dst_reg)); | |
462e5e2a | 1690 | EMIT3(0x03, add_2reg(0x04, 0, dst_reg), 0x25); |
1e9e0b85 AN |
1691 | EMIT((u32)(unsigned long)&this_cpu_off, 4); |
1692 | #endif | |
7bdbf744 | 1693 | break; |
142fd4d2 AS |
1694 | } |
1695 | fallthrough; | |
62258278 | 1696 | case BPF_ALU | BPF_MOV | BPF_X: |
8100928c YS |
1697 | if (insn->off == 0) |
1698 | emit_mov_reg(&prog, | |
1699 | BPF_CLASS(insn->code) == BPF_ALU64, | |
1700 | dst_reg, src_reg); | |
1701 | else | |
1702 | emit_movsx_reg(&prog, insn->off, | |
1703 | BPF_CLASS(insn->code) == BPF_ALU64, | |
1704 | dst_reg, src_reg); | |
62258278 | 1705 | break; |
0a14842f | 1706 | |
e430f34e | 1707 | /* neg dst */ |
62258278 AS |
1708 | case BPF_ALU | BPF_NEG: |
1709 | case BPF_ALU64 | BPF_NEG: | |
6364d7d7 JM |
1710 | maybe_emit_1mod(&prog, dst_reg, |
1711 | BPF_CLASS(insn->code) == BPF_ALU64); | |
e430f34e | 1712 | EMIT2(0xF7, add_1reg(0xD8, dst_reg)); |
62258278 AS |
1713 | break; |
1714 | ||
1715 | case BPF_ALU | BPF_ADD | BPF_K: | |
1716 | case BPF_ALU | BPF_SUB | BPF_K: | |
1717 | case BPF_ALU | BPF_AND | BPF_K: | |
1718 | case BPF_ALU | BPF_OR | BPF_K: | |
1719 | case BPF_ALU | BPF_XOR | BPF_K: | |
1720 | case BPF_ALU64 | BPF_ADD | BPF_K: | |
1721 | case BPF_ALU64 | BPF_SUB | BPF_K: | |
1722 | case BPF_ALU64 | BPF_AND | BPF_K: | |
1723 | case BPF_ALU64 | BPF_OR | BPF_K: | |
1724 | case BPF_ALU64 | BPF_XOR | BPF_K: | |
6364d7d7 JM |
1725 | maybe_emit_1mod(&prog, dst_reg, |
1726 | BPF_CLASS(insn->code) == BPF_ALU64); | |
62258278 | 1727 | |
a2c7a983 IM |
1728 | /* |
1729 | * b3 holds the 'normal' opcode; the b2 short form is only valid |
de0a444d DB |
1730 | * when dst is eax/rax. |
1731 | */ | |
62258278 | 1732 | switch (BPF_OP(insn->code)) { |
de0a444d DB |
1733 | case BPF_ADD: |
1734 | b3 = 0xC0; | |
1735 | b2 = 0x05; | |
1736 | break; | |
1737 | case BPF_SUB: | |
1738 | b3 = 0xE8; | |
1739 | b2 = 0x2D; | |
1740 | break; | |
1741 | case BPF_AND: | |
1742 | b3 = 0xE0; | |
1743 | b2 = 0x25; | |
1744 | break; | |
1745 | case BPF_OR: | |
1746 | b3 = 0xC8; | |
1747 | b2 = 0x0D; | |
1748 | break; | |
1749 | case BPF_XOR: | |
1750 | b3 = 0xF0; | |
1751 | b2 = 0x35; | |
1752 | break; | |
62258278 AS |
1753 | } |
1754 | ||
e430f34e AS |
1755 | if (is_imm8(imm32)) |
1756 | EMIT3(0x83, add_1reg(b3, dst_reg), imm32); | |
de0a444d DB |
1757 | else if (is_axreg(dst_reg)) |
1758 | EMIT1_off32(b2, imm32); | |
62258278 | 1759 | else |
e430f34e | 1760 | EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); |
62258278 AS |
1761 | break; |
1762 | ||
1763 | case BPF_ALU64 | BPF_MOV | BPF_K: | |
62258278 | 1764 | case BPF_ALU | BPF_MOV | BPF_K: |
6fe8b9c1 DB |
1765 | emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64, |
1766 | dst_reg, imm32); | |
62258278 AS |
1767 | break; |
1768 | ||
02ab695b | 1769 | case BPF_LD | BPF_IMM | BPF_DW: |
6fe8b9c1 | 1770 | emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm); |
02ab695b AS |
1771 | insn++; |
1772 | i++; | |
1773 | break; | |
1774 | ||
e430f34e | 1775 | /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ |
62258278 AS |
1776 | case BPF_ALU | BPF_MOD | BPF_X: |
1777 | case BPF_ALU | BPF_DIV | BPF_X: | |
1778 | case BPF_ALU | BPF_MOD | BPF_K: | |
1779 | case BPF_ALU | BPF_DIV | BPF_K: | |
1780 | case BPF_ALU64 | BPF_MOD | BPF_X: | |
1781 | case BPF_ALU64 | BPF_DIV | BPF_X: | |
1782 | case BPF_ALU64 | BPF_MOD | BPF_K: | |
57a610f1 JM |
1783 | case BPF_ALU64 | BPF_DIV | BPF_K: { |
1784 | bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; | |
62258278 | 1785 | |
57a610f1 JM |
1786 | if (dst_reg != BPF_REG_0) |
1787 | EMIT1(0x50); /* push rax */ | |
1788 | if (dst_reg != BPF_REG_3) | |
1789 | EMIT1(0x52); /* push rdx */ | |
1790 | ||
1791 | if (BPF_SRC(insn->code) == BPF_X) { | |
1792 | if (src_reg == BPF_REG_0 || | |
1793 | src_reg == BPF_REG_3) { | |
1794 | /* mov r11, src_reg */ | |
1795 | EMIT_mov(AUX_REG, src_reg); | |
1796 | src_reg = AUX_REG; | |
1797 | } | |
1798 | } else { | |
e430f34e AS |
1799 | /* mov r11, imm32 */ |
1800 | EMIT3_off32(0x49, 0xC7, 0xC3, imm32); | |
57a610f1 JM |
1801 | src_reg = AUX_REG; |
1802 | } | |
62258278 | 1803 | |
57a610f1 JM |
1804 | if (dst_reg != BPF_REG_0) |
1805 | /* mov rax, dst_reg */ | |
1806 | emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg); | |
62258278 | 1807 | |
ec0e2da9 YS |
1808 | if (insn->off == 0) { |
1809 | /* | |
1810 | * xor edx, edx | |
1811 | * equivalent to 'xor rdx, rdx', but one byte shorter |
1812 | */ | |
1813 | EMIT2(0x31, 0xd2); | |
1814 | ||
1815 | /* div src_reg */ | |
1816 | maybe_emit_1mod(&prog, src_reg, is64); | |
1817 | EMIT2(0xF7, add_1reg(0xF0, src_reg)); | |
1818 | } else { | |
1819 | if (BPF_CLASS(insn->code) == BPF_ALU) | |
1820 | EMIT1(0x99); /* cdq */ | |
1821 | else | |
1822 | EMIT2(0x48, 0x99); /* cqo */ | |
1823 | ||
1824 | /* idiv src_reg */ | |
1825 | maybe_emit_1mod(&prog, src_reg, is64); | |
1826 | EMIT2(0xF7, add_1reg(0xF8, src_reg)); | |
1827 | } | |
62258278 | 1828 | |
57a610f1 JM |
1829 | if (BPF_OP(insn->code) == BPF_MOD && |
1830 | dst_reg != BPF_REG_3) | |
1831 | /* mov dst_reg, rdx */ | |
1832 | emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3); | |
1833 | else if (BPF_OP(insn->code) == BPF_DIV && | |
1834 | dst_reg != BPF_REG_0) | |
1835 | /* mov dst_reg, rax */ | |
1836 | emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0); | |
62258278 | 1837 | |
57a610f1 JM |
1838 | if (dst_reg != BPF_REG_3) |
1839 | EMIT1(0x5A); /* pop rdx */ | |
1840 | if (dst_reg != BPF_REG_0) | |
1841 | EMIT1(0x58); /* pop rax */ | |
62258278 | 1842 | break; |
57a610f1 | 1843 | } |
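/*
 * Illustrative example (not part of the source): for
 * BPF_ALU64 | BPF_DIV | BPF_X with dst_reg == BPF_REG_1 (rdi),
 * src_reg == BPF_REG_2 (rsi) and off == 0, the case above emits
 * roughly:
 *
 *   push rax
 *   push rdx
 *   mov  rax, rdi
 *   xor  edx, edx
 *   div  rsi
 *   mov  rdi, rax
 *   pop  rdx
 *   pop  rax
 *
 * rax/rdx are only saved/restored when they are not the destination,
 * since div/idiv implicitly use rdx:rax.
 */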
62258278 AS |
1844 | |
1845 | case BPF_ALU | BPF_MUL | BPF_K: | |
62258278 | 1846 | case BPF_ALU64 | BPF_MUL | BPF_K: |
6364d7d7 JM |
1847 | maybe_emit_mod(&prog, dst_reg, dst_reg, |
1848 | BPF_CLASS(insn->code) == BPF_ALU64); | |
62258278 | 1849 | |
c0354077 JM |
1850 | if (is_imm8(imm32)) |
1851 | /* imul dst_reg, dst_reg, imm8 */ | |
1852 | EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg), | |
1853 | imm32); | |
62258278 | 1854 | else |
c0354077 JM |
1855 | /* imul dst_reg, dst_reg, imm32 */ |
1856 | EMIT2_off32(0x69, | |
1857 | add_2reg(0xC0, dst_reg, dst_reg), | |
1858 | imm32); | |
1859 | break; | |
62258278 | 1860 | |
c0354077 JM |
1861 | case BPF_ALU | BPF_MUL | BPF_X: |
1862 | case BPF_ALU64 | BPF_MUL | BPF_X: | |
6364d7d7 JM |
1863 | maybe_emit_mod(&prog, src_reg, dst_reg, |
1864 | BPF_CLASS(insn->code) == BPF_ALU64); | |
62258278 | 1865 | |
c0354077 JM |
1866 | /* imul dst_reg, src_reg */ |
1867 | EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg)); | |
62258278 | 1868 | break; |
c0354077 | 1869 | |
a2c7a983 | 1870 | /* Shifts */ |
62258278 AS |
1871 | case BPF_ALU | BPF_LSH | BPF_K: |
1872 | case BPF_ALU | BPF_RSH | BPF_K: | |
1873 | case BPF_ALU | BPF_ARSH | BPF_K: | |
1874 | case BPF_ALU64 | BPF_LSH | BPF_K: | |
1875 | case BPF_ALU64 | BPF_RSH | BPF_K: | |
1876 | case BPF_ALU64 | BPF_ARSH | BPF_K: | |
6364d7d7 JM |
1877 | maybe_emit_1mod(&prog, dst_reg, |
1878 | BPF_CLASS(insn->code) == BPF_ALU64); | |
62258278 | 1879 | |
e5f02cac | 1880 | b3 = simple_alu_opcodes[BPF_OP(insn->code)]; |
88e69a1f DB |
1881 | if (imm32 == 1) |
1882 | EMIT2(0xD1, add_1reg(b3, dst_reg)); | |
1883 | else | |
1884 | EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); | |
62258278 AS |
1885 | break; |
1886 | ||
72b603ee AS |
1887 | case BPF_ALU | BPF_LSH | BPF_X: |
1888 | case BPF_ALU | BPF_RSH | BPF_X: | |
1889 | case BPF_ALU | BPF_ARSH | BPF_X: | |
1890 | case BPF_ALU64 | BPF_LSH | BPF_X: | |
1891 | case BPF_ALU64 | BPF_RSH | BPF_X: | |
1892 | case BPF_ALU64 | BPF_ARSH | BPF_X: | |
77d8f5d4 JM |
1893 | /* BMI2 shifts aren't better when shift count is already in rcx */ |
1894 | if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) { | |
1895 | /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */ | |
1896 | bool w = (BPF_CLASS(insn->code) == BPF_ALU64); | |
1897 | u8 op; | |
1898 | ||
1899 | switch (BPF_OP(insn->code)) { | |
1900 | case BPF_LSH: | |
1901 | op = 1; /* prefix 0x66 */ | |
1902 | break; | |
1903 | case BPF_RSH: | |
1904 | op = 3; /* prefix 0xf2 */ | |
1905 | break; | |
1906 | case BPF_ARSH: | |
1907 | op = 2; /* prefix 0xf3 */ | |
1908 | break; | |
1909 | } | |
72b603ee | 1910 | |
77d8f5d4 JM |
1911 | emit_shiftx(&prog, dst_reg, src_reg, w, op); |
1912 | ||
1913 | break; | |
72b603ee AS |
1914 | } |
1915 | ||
1916 | if (src_reg != BPF_REG_4) { /* common case */ | |
81b35e7c JM |
1917 | /* Check for bad case when dst_reg == rcx */ |
1918 | if (dst_reg == BPF_REG_4) { | |
1919 | /* mov r11, dst_reg */ | |
1920 | EMIT_mov(AUX_REG, dst_reg); | |
1921 | dst_reg = AUX_REG; | |
1922 | } else { | |
1923 | EMIT1(0x51); /* push rcx */ | |
1924 | } | |
72b603ee AS |
1925 | /* mov rcx, src_reg */ |
1926 | EMIT_mov(BPF_REG_4, src_reg); | |
1927 | } | |
1928 | ||
1929 | /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */ | |
6364d7d7 JM |
1930 | maybe_emit_1mod(&prog, dst_reg, |
1931 | BPF_CLASS(insn->code) == BPF_ALU64); | |
72b603ee | 1932 | |
e5f02cac | 1933 | b3 = simple_alu_opcodes[BPF_OP(insn->code)]; |
72b603ee AS |
1934 | EMIT2(0xD3, add_1reg(b3, dst_reg)); |
1935 | ||
81b35e7c JM |
1936 | if (src_reg != BPF_REG_4) { |
1937 | if (insn->dst_reg == BPF_REG_4) | |
1938 | /* mov dst_reg, r11 */ | |
1939 | EMIT_mov(insn->dst_reg, AUX_REG); | |
1940 | else | |
1941 | EMIT1(0x59); /* pop rcx */ | |
1942 | } | |
72b603ee | 1943 | |
72b603ee AS |
1944 | break; |
1945 | ||
62258278 | 1946 | case BPF_ALU | BPF_END | BPF_FROM_BE: |
0845c3db | 1947 | case BPF_ALU64 | BPF_END | BPF_FROM_LE: |
e430f34e | 1948 | switch (imm32) { |
62258278 | 1949 | case 16: |
a2c7a983 | 1950 | /* Emit 'ror %ax, 8' to swap lower 2 bytes */ |
62258278 | 1951 | EMIT1(0x66); |
e430f34e | 1952 | if (is_ereg(dst_reg)) |
62258278 | 1953 | EMIT1(0x41); |
e430f34e | 1954 | EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); |
343f845b | 1955 | |
a2c7a983 | 1956 | /* Emit 'movzwl eax, ax' */ |
343f845b AS |
1957 | if (is_ereg(dst_reg)) |
1958 | EMIT3(0x45, 0x0F, 0xB7); | |
1959 | else | |
1960 | EMIT2(0x0F, 0xB7); | |
1961 | EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); | |
62258278 AS |
1962 | break; |
1963 | case 32: | |
a2c7a983 | 1964 | /* Emit 'bswap eax' to swap lower 4 bytes */ |
e430f34e | 1965 | if (is_ereg(dst_reg)) |
62258278 | 1966 | EMIT2(0x41, 0x0F); |
0a14842f | 1967 | else |
62258278 | 1968 | EMIT1(0x0F); |
e430f34e | 1969 | EMIT1(add_1reg(0xC8, dst_reg)); |
0a14842f | 1970 | break; |
62258278 | 1971 | case 64: |
a2c7a983 | 1972 | /* Emit 'bswap rax' to swap 8 bytes */ |
e430f34e AS |
1973 | EMIT3(add_1mod(0x48, dst_reg), 0x0F, |
1974 | add_1reg(0xC8, dst_reg)); | |
3b58908a ED |
1975 | break; |
1976 | } | |
62258278 AS |
1977 | break; |
1978 | ||
1979 | case BPF_ALU | BPF_END | BPF_FROM_LE: | |
343f845b AS |
1980 | switch (imm32) { |
1981 | case 16: | |
a2c7a983 IM |
1982 | /* |
1983 | * Emit 'movzwl eax, ax' to zero extend 16-bit | |
343f845b AS |
1984 | * into 64 bit |
1985 | */ | |
1986 | if (is_ereg(dst_reg)) | |
1987 | EMIT3(0x45, 0x0F, 0xB7); | |
1988 | else | |
1989 | EMIT2(0x0F, 0xB7); | |
1990 | EMIT1(add_2reg(0xC0, dst_reg, dst_reg)); | |
1991 | break; | |
1992 | case 32: | |
a2c7a983 | 1993 | /* Emit 'mov eax, eax' to clear upper 32-bits */ |
343f845b AS |
1994 | if (is_ereg(dst_reg)) |
1995 | EMIT1(0x45); | |
1996 | EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg)); | |
1997 | break; | |
1998 | case 64: | |
1999 | /* nop */ | |
2000 | break; | |
2001 | } | |
62258278 AS |
2002 | break; |
2003 | ||
f5e81d11 DB |
2004 | /* speculation barrier */ |
2005 | case BPF_ST | BPF_NOSPEC: | |
2e309600 | 2006 | EMIT_LFENCE(); |
f5e81d11 DB |
2007 | break; |
2008 | ||
e430f34e | 2009 | /* ST: *(u8*)(dst_reg + off) = imm */ |
62258278 | 2010 | case BPF_ST | BPF_MEM | BPF_B: |
e430f34e | 2011 | if (is_ereg(dst_reg)) |
62258278 AS |
2012 | EMIT2(0x41, 0xC6); |
2013 | else | |
2014 | EMIT1(0xC6); | |
2015 | goto st; | |
2016 | case BPF_ST | BPF_MEM | BPF_H: | |
e430f34e | 2017 | if (is_ereg(dst_reg)) |
62258278 AS |
2018 | EMIT3(0x66, 0x41, 0xC7); |
2019 | else | |
2020 | EMIT2(0x66, 0xC7); | |
2021 | goto st; | |
2022 | case BPF_ST | BPF_MEM | BPF_W: | |
e430f34e | 2023 | if (is_ereg(dst_reg)) |
62258278 AS |
2024 | EMIT2(0x41, 0xC7); |
2025 | else | |
2026 | EMIT1(0xC7); | |
2027 | goto st; | |
2028 | case BPF_ST | BPF_MEM | BPF_DW: | |
e430f34e | 2029 | EMIT2(add_1mod(0x48, dst_reg), 0xC7); |
62258278 AS |
2030 | |
2031 | st: if (is_imm8(insn->off)) | |
e430f34e | 2032 | EMIT2(add_1reg(0x40, dst_reg), insn->off); |
62258278 | 2033 | else |
e430f34e | 2034 | EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); |
62258278 | 2035 | |
e430f34e | 2036 | EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); |
62258278 AS |
2037 | break; |
2038 | ||
e430f34e | 2039 | /* STX: *(u8*)(dst_reg + off) = src_reg */ |
62258278 | 2040 | case BPF_STX | BPF_MEM | BPF_B: |
62258278 | 2041 | case BPF_STX | BPF_MEM | BPF_H: |
62258278 | 2042 | case BPF_STX | BPF_MEM | BPF_W: |
62258278 | 2043 | case BPF_STX | BPF_MEM | BPF_DW: |
3b2744e6 | 2044 | emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); |
62258278 AS |
2045 | break; |
2046 | ||
2fe99eb0 AS |
2047 | case BPF_ST | BPF_PROBE_MEM32 | BPF_B: |
2048 | case BPF_ST | BPF_PROBE_MEM32 | BPF_H: | |
2049 | case BPF_ST | BPF_PROBE_MEM32 | BPF_W: | |
2050 | case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: | |
2051 | start_of_ldx = prog; | |
2052 | emit_st_r12(&prog, BPF_SIZE(insn->code), dst_reg, insn->off, insn->imm); | |
2053 | goto populate_extable; | |
2054 | ||
2055 | /* LDX: dst_reg = *(u8*)(src_reg + r12 + off) */ | |
2056 | case BPF_LDX | BPF_PROBE_MEM32 | BPF_B: | |
2057 | case BPF_LDX | BPF_PROBE_MEM32 | BPF_H: | |
2058 | case BPF_LDX | BPF_PROBE_MEM32 | BPF_W: | |
2059 | case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW: | |
2060 | case BPF_STX | BPF_PROBE_MEM32 | BPF_B: | |
2061 | case BPF_STX | BPF_PROBE_MEM32 | BPF_H: | |
2062 | case BPF_STX | BPF_PROBE_MEM32 | BPF_W: | |
2063 | case BPF_STX | BPF_PROBE_MEM32 | BPF_DW: | |
2064 | start_of_ldx = prog; | |
2065 | if (BPF_CLASS(insn->code) == BPF_LDX) | |
2066 | emit_ldx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); | |
2067 | else | |
2068 | emit_stx_r12(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off); | |
2069 | populate_extable: | |
2070 | { | |
2071 | struct exception_table_entry *ex; | |
2072 | u8 *_insn = image + proglen + (start_of_ldx - temp); | |
2073 | s64 delta; | |
2074 | ||
2075 | if (!bpf_prog->aux->extable) | |
2076 | break; | |
2077 | ||
2078 | if (excnt >= bpf_prog->aux->num_exentries) { | |
2079 | pr_err("mem32 extable bug\n"); | |
2080 | return -EFAULT; | |
2081 | } | |
2082 | ex = &bpf_prog->aux->extable[excnt++]; | |
2083 | ||
2084 | delta = _insn - (u8 *)&ex->insn; | |
2085 | /* switch ex to rw buffer for writes */ | |
2086 | ex = (void *)rw_image + ((void *)ex - (void *)image); | |
2087 | ||
2088 | ex->insn = delta; | |
2089 | ||
2090 | ex->data = EX_TYPE_BPF; | |
2091 | ||
2092 | ex->fixup = (prog - start_of_ldx) | | |
2093 | ((BPF_CLASS(insn->code) == BPF_LDX ? reg2pt_regs[dst_reg] : DONT_CLEAR) << 8); | |
2094 | } | |
2095 | break; | |
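/*
 * Note on the fixup word built above (inference from this encoding;
 * the exception handler itself is outside this excerpt): the low 8
 * bits carry the length of the faulting x86 insn so it can be
 * skipped, and the upper bits carry either the pt_regs offset of the
 * destination register to zero (loads) or DONT_CLEAR (stores), so a
 * faulting arena store is presumably just skipped.
 */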
2096 | ||
e430f34e | 2097 | /* LDX: dst_reg = *(u8*)(src_reg + off) */ |
62258278 | 2098 | case BPF_LDX | BPF_MEM | BPF_B: |
3dec541b | 2099 | case BPF_LDX | BPF_PROBE_MEM | BPF_B: |
62258278 | 2100 | case BPF_LDX | BPF_MEM | BPF_H: |
3dec541b | 2101 | case BPF_LDX | BPF_PROBE_MEM | BPF_H: |
62258278 | 2102 | case BPF_LDX | BPF_MEM | BPF_W: |
3dec541b | 2103 | case BPF_LDX | BPF_PROBE_MEM | BPF_W: |
62258278 | 2104 | case BPF_LDX | BPF_MEM | BPF_DW: |
3dec541b | 2105 | case BPF_LDX | BPF_PROBE_MEM | BPF_DW: |
1f9a1ea8 YS |
2106 | /* LDXS: dst_reg = *(s8*)(src_reg + off) */ |
2107 | case BPF_LDX | BPF_MEMSX | BPF_B: | |
2108 | case BPF_LDX | BPF_MEMSX | BPF_H: | |
2109 | case BPF_LDX | BPF_MEMSX | BPF_W: | |
2110 | case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: | |
2111 | case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: | |
2112 | case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: | |
90156f4b DM |
2113 | insn_off = insn->off; |
2114 | ||
1f9a1ea8 YS |
2115 | if (BPF_MODE(insn->code) == BPF_PROBE_MEM || |
2116 | BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { | |
90156f4b | 2117 | /* Conservatively check that src_reg + insn->off is a kernel address: |
b599d7d2 PM |
2118 | * src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE |
2119 | * and | |
2120 | * src_reg + insn->off < VSYSCALL_ADDR | |
588a25e9 | 2121 | */ |
588a25e9 | 2122 | |
b599d7d2 | 2123 | u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR; |
90156f4b DM |
2124 | u8 *end_of_jmp; |
2125 | ||
b599d7d2 PM |
2126 | /* movabsq r10, VSYSCALL_ADDR */ |
2127 | emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32, | |
2128 | (u32)(long)VSYSCALL_ADDR); | |
588a25e9 | 2129 | |
b599d7d2 PM |
2130 | /* mov src_reg, r11 */ |
2131 | EMIT_mov(AUX_REG, src_reg); | |
90156f4b DM |
2132 | |
2133 | if (insn->off) { | |
b599d7d2 PM |
2134 | /* add r11, insn->off */ |
2135 | maybe_emit_1mod(&prog, AUX_REG, true); | |
2136 | EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off); | |
90156f4b DM |
2137 | } |
2138 | ||
b599d7d2 PM |
2139 | /* sub r11, r10 */ |
2140 | maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true); | |
2141 | EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX)); | |
2142 | ||
2143 | /* movabsq r10, limit */ | |
2144 | emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32, | |
2145 | (u32)(long)limit); | |
2146 | ||
2147 | /* cmp r10, r11 */ | |
2148 | maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true); | |
2149 | EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX)); | |
90156f4b | 2150 | |
b599d7d2 PM |
2151 | /* if unsigned '>', goto load */ |
2152 | EMIT2(X86_JA, 0); | |
90156f4b | 2153 | end_of_jmp = prog; |
588a25e9 | 2154 | |
4c5de127 AS |
2155 | /* xor dst_reg, dst_reg */ |
2156 | emit_mov_imm32(&prog, false, dst_reg, 0); | |
2157 | /* jmp byte_after_ldx */ | |
2158 | EMIT2(0xEB, 0); | |
2159 | ||
90156f4b | 2160 | /* populate jmp_offset for JA above to jump to start_of_ldx */ |
4c5de127 | 2161 | start_of_ldx = prog; |
90156f4b | 2162 | end_of_jmp[-1] = start_of_ldx - end_of_jmp; |
4c5de127 | 2163 | } |
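/*
 * Why one unsigned compare suffices (editorial explanation): with
 * limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR wrapping to a
 * huge u64, "addr - VSYSCALL_ADDR > limit" (unsigned) holds exactly
 * when TASK_SIZE_MAX + PAGE_SIZE < addr < VSYSCALL_ADDR, so the
 * single JA above checks both bounds from the comment at the top of
 * this block.
 */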
1f9a1ea8 YS |
2164 | if (BPF_MODE(insn->code) == BPF_PROBE_MEMSX || |
2165 | BPF_MODE(insn->code) == BPF_MEMSX) | |
2166 | emit_ldsx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); | |
2167 | else | |
2168 | emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off); | |
2169 | if (BPF_MODE(insn->code) == BPF_PROBE_MEM || | |
2170 | BPF_MODE(insn->code) == BPF_PROBE_MEMSX) { | |
3dec541b | 2171 | struct exception_table_entry *ex; |
328aac5e | 2172 | u8 *_insn = image + proglen + (start_of_ldx - temp); |
3dec541b AS |
2173 | s64 delta; |
2174 | ||
4c5de127 AS |
2175 | /* populate jmp_offset for JMP above */ |
2176 | start_of_ldx[-1] = prog - start_of_ldx; | |
2177 | ||
3dec541b AS |
2178 | if (!bpf_prog->aux->extable) |
2179 | break; | |
2180 | ||
2181 | if (excnt >= bpf_prog->aux->num_exentries) { | |
2182 | pr_err("ex gen bug\n"); | |
2183 | return -EFAULT; | |
2184 | } | |
2185 | ex = &bpf_prog->aux->extable[excnt++]; | |
2186 | ||
2187 | delta = _insn - (u8 *)&ex->insn; | |
2188 | if (!is_simm32(delta)) { | |
2189 | pr_err("extable->insn doesn't fit into 32-bit\n"); | |
2190 | return -EFAULT; | |
2191 | } | |
1022a549 SL |
2192 | /* switch ex to rw buffer for writes */ |
2193 | ex = (void *)rw_image + ((void *)ex - (void *)image); | |
2194 | ||
3dec541b AS |
2195 | ex->insn = delta; |
2196 | ||
4b5305de | 2197 | ex->data = EX_TYPE_BPF; |
3dec541b AS |
2198 | |
2199 | if (dst_reg > BPF_REG_9) { | |
2200 | pr_err("verifier error\n"); | |
2201 | return -EFAULT; | |
2202 | } | |
2203 | /* | |
2204 | * Compute size of x86 insn and its target dest x86 register. | |
2205 | * ex_handler_bpf() will use lower 8 bits to adjust | |
2206 | * pt_regs->ip to jump over this x86 instruction | |
2207 | * and upper bits to figure out which pt_regs to zero out. | |
2208 | * End result: x86 insn "mov rbx, qword ptr [rax+0x14]" | |
2209 | * of 4 bytes will be skipped and rbx will be zero-initialized. |
2210 | */ | |
433956e9 | 2211 | ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8); |
3dec541b | 2212 | } |
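/*
 * Worked example (not part of the source): for the 4-byte
 * "mov rbx, qword ptr [rax+0x14]" mentioned above, the entry becomes
 * ex->fixup = 4 | (reg2pt_regs[BPF_REG_6] << 8), i.e. on a fault the
 * handler zeroes the rbx slot in pt_regs and advances the IP by 4.
 */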
62258278 AS |
2213 | break; |
2214 | ||
5341c9a4 PY |
2215 | case BPF_STX | BPF_ATOMIC | BPF_B: |
2216 | case BPF_STX | BPF_ATOMIC | BPF_H: | |
2217 | if (!bpf_atomic_is_load_store(insn)) { | |
2218 | pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n"); | |
2219 | return -EFAULT; | |
2220 | } | |
2221 | fallthrough; | |
91c960b0 BJ |
2222 | case BPF_STX | BPF_ATOMIC | BPF_W: |
2223 | case BPF_STX | BPF_ATOMIC | BPF_DW: | |
981f94c3 BJ |
2224 | if (insn->imm == (BPF_AND | BPF_FETCH) || |
2225 | insn->imm == (BPF_OR | BPF_FETCH) || | |
2226 | insn->imm == (BPF_XOR | BPF_FETCH)) { | |
981f94c3 | 2227 | bool is64 = BPF_SIZE(insn->code) == BPF_DW; |
b29dd96b | 2228 | u32 real_src_reg = src_reg; |
ced18582 JA |
2229 | u32 real_dst_reg = dst_reg; |
2230 | u8 *branch_target; | |
981f94c3 BJ |
2231 | |
2232 | /* | |
2233 | * Can't be implemented with a single x86 insn. | |
2234 | * Need to do a CMPXCHG loop. | |
2235 | */ | |
2236 | ||
2237 | /* Will need RAX as a CMPXCHG operand so save R0 */ | |
2238 | emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0); | |
b29dd96b BJ |
2239 | if (src_reg == BPF_REG_0) |
2240 | real_src_reg = BPF_REG_AX; | |
ced18582 JA |
2241 | if (dst_reg == BPF_REG_0) |
2242 | real_dst_reg = BPF_REG_AX; | |
b29dd96b | 2243 | |
981f94c3 BJ |
2244 | branch_target = prog; |
2245 | /* Load old value */ | |
2246 | emit_ldx(&prog, BPF_SIZE(insn->code), | |
ced18582 | 2247 | BPF_REG_0, real_dst_reg, insn->off); |
981f94c3 BJ |
2248 | /* |
2249 | * Perform the (commutative) operation locally, | |
2250 | * put the result in the AUX_REG. | |
2251 | */ | |
2252 | emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0); | |
b29dd96b | 2253 | maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64); |
981f94c3 | 2254 | EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)], |
b29dd96b | 2255 | add_2reg(0xC0, AUX_REG, real_src_reg)); |
981f94c3 | 2256 | /* Attempt to swap in new value */ |
5341c9a4 PY |
2257 | err = emit_atomic_rmw(&prog, BPF_CMPXCHG, |
2258 | real_dst_reg, AUX_REG, | |
2259 | insn->off, | |
2260 | BPF_SIZE(insn->code)); | |
981f94c3 BJ |
2261 | if (WARN_ON(err)) |
2262 | return err; | |
2263 | /* | |
2264 | * ZF tells us whether we won the race. If it's | |
2265 | * cleared we need to try again. | |
2266 | */ | |
2267 | EMIT2(X86_JNE, -(prog - branch_target) - 2); | |
2268 | /* Return the pre-modification value */ | |
b29dd96b | 2269 | emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0); |
981f94c3 BJ |
2270 | /* Restore R0 after clobbering RAX */ |
2271 | emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX); | |
2272 | break; | |
981f94c3 BJ |
2273 | } |
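/*
 * The CMPXCHG loop emitted above corresponds roughly to (illustrative
 * pseudocode, not part of the source):
 *
 *   retry:
 *           rax = *(u32/u64 *)(dst + off);    // load old value
 *           r11 = rax <op> src;               // and/or/xor
 *           lock cmpxchg [dst + off], r11;    // succeeds if mem still == rax
 *           if (!ZF)
 *                   goto retry;
 *           src = rax;                        // BPF_FETCH: return old value
 *
 * with rax saved to r10 (BPF_REG_AX) around the whole sequence so that
 * BPF R0 is preserved.
 */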
2274 | ||
5341c9a4 PY |
2275 | if (bpf_atomic_is_load_store(insn)) |
2276 | err = emit_atomic_ld_st(&prog, insn->imm, dst_reg, src_reg, | |
2277 | insn->off, BPF_SIZE(insn->code)); | |
2278 | else | |
2279 | err = emit_atomic_rmw(&prog, insn->imm, dst_reg, src_reg, | |
2280 | insn->off, BPF_SIZE(insn->code)); | |
91c960b0 BJ |
2281 | if (err) |
2282 | return err; | |
62258278 AS |
2283 | break; |
2284 | ||
5341c9a4 PY |
2285 | case BPF_STX | BPF_PROBE_ATOMIC | BPF_B: |
2286 | case BPF_STX | BPF_PROBE_ATOMIC | BPF_H: | |
2287 | if (!bpf_atomic_is_load_store(insn)) { | |
2288 | pr_err("bpf_jit: 1- and 2-byte RMW atomics are not supported\n"); | |
2289 | return -EFAULT; | |
2290 | } | |
2291 | fallthrough; | |
d503a04f AS |
2292 | case BPF_STX | BPF_PROBE_ATOMIC | BPF_W: |
2293 | case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW: | |
2294 | start_of_ldx = prog; | |
5341c9a4 PY |
2295 | |
2296 | if (bpf_atomic_is_load_store(insn)) | |
2297 | err = emit_atomic_ld_st_index(&prog, insn->imm, | |
2298 | BPF_SIZE(insn->code), dst_reg, | |
2299 | src_reg, X86_REG_R12, insn->off); | |
2300 | else | |
2301 | err = emit_atomic_rmw_index(&prog, insn->imm, BPF_SIZE(insn->code), | |
2302 | dst_reg, src_reg, X86_REG_R12, | |
2303 | insn->off); | |
d503a04f AS |
2304 | if (err) |
2305 | return err; | |
2306 | goto populate_extable; | |
2307 | ||
62258278 | 2308 | /* call */ |
b2e9dfe5 | 2309 | case BPF_JMP | BPF_CALL: { |
6a537453 | 2310 | u8 *ip = image + addrs[i - 1]; |
b2e9dfe5 | 2311 | |
e430f34e | 2312 | func = (u8 *) __bpf_call_base + imm32; |
a1087da9 | 2313 | if (src_reg == BPF_PSEUDO_CALL && tail_call_reachable) { |
f4b21ed0 | 2314 | LOAD_TAIL_CALL_CNT_PTR(stack_depth); |
6a537453 | 2315 | ip += 7; |
ebf7d1f5 | 2316 | } |
6a537453 JBM |
2317 | if (!imm32) |
2318 | return -EINVAL; | |
7d1cd70d YS |
2319 | if (priv_frame_ptr) { |
2320 | push_r9(&prog); | |
2321 | ip += 2; | |
2322 | } | |
6a537453 JBM |
2323 | ip += x86_call_depth_emit_accounting(&prog, func, ip); |
2324 | if (emit_call(&prog, func, ip)) | |
b2e9dfe5 | 2325 | return -EINVAL; |
7d1cd70d YS |
2326 | if (priv_frame_ptr) |
2327 | pop_r9(&prog); | |
62258278 | 2328 | break; |
b2e9dfe5 | 2329 | } |
62258278 | 2330 | |
71189fa9 | 2331 | case BPF_JMP | BPF_TAIL_CALL: |
428d5df1 | 2332 | if (imm32) |
f18b03fa KKD |
2333 | emit_bpf_tail_call_direct(bpf_prog, |
2334 | &bpf_prog->aux->poke_tab[imm32 - 1], | |
dceba081 | 2335 | &prog, image + addrs[i - 1], |
ebf7d1f5 | 2336 | callee_regs_used, |
f4b21ed0 | 2337 | stack_depth, |
dceba081 | 2338 | ctx); |
428d5df1 | 2339 | else |
f18b03fa KKD |
2340 | emit_bpf_tail_call_indirect(bpf_prog, |
2341 | &prog, | |
ebf7d1f5 | 2342 | callee_regs_used, |
f4b21ed0 | 2343 | stack_depth, |
dceba081 PZ |
2344 | image + addrs[i - 1], |
2345 | ctx); | |
b52f00e6 AS |
2346 | break; |
2347 | ||
62258278 AS |
2348 | /* cond jump */ |
2349 | case BPF_JMP | BPF_JEQ | BPF_X: | |
2350 | case BPF_JMP | BPF_JNE | BPF_X: | |
2351 | case BPF_JMP | BPF_JGT | BPF_X: | |
52afc51e | 2352 | case BPF_JMP | BPF_JLT | BPF_X: |
62258278 | 2353 | case BPF_JMP | BPF_JGE | BPF_X: |
52afc51e | 2354 | case BPF_JMP | BPF_JLE | BPF_X: |
62258278 | 2355 | case BPF_JMP | BPF_JSGT | BPF_X: |
52afc51e | 2356 | case BPF_JMP | BPF_JSLT | BPF_X: |
62258278 | 2357 | case BPF_JMP | BPF_JSGE | BPF_X: |
52afc51e | 2358 | case BPF_JMP | BPF_JSLE | BPF_X: |
3f5d6525 JW |
2359 | case BPF_JMP32 | BPF_JEQ | BPF_X: |
2360 | case BPF_JMP32 | BPF_JNE | BPF_X: | |
2361 | case BPF_JMP32 | BPF_JGT | BPF_X: | |
2362 | case BPF_JMP32 | BPF_JLT | BPF_X: | |
2363 | case BPF_JMP32 | BPF_JGE | BPF_X: | |
2364 | case BPF_JMP32 | BPF_JLE | BPF_X: | |
2365 | case BPF_JMP32 | BPF_JSGT | BPF_X: | |
2366 | case BPF_JMP32 | BPF_JSLT | BPF_X: | |
2367 | case BPF_JMP32 | BPF_JSGE | BPF_X: | |
2368 | case BPF_JMP32 | BPF_JSLE | BPF_X: | |
e430f34e | 2369 | /* cmp dst_reg, src_reg */ |
74007cfc BJ |
2370 | maybe_emit_mod(&prog, dst_reg, src_reg, |
2371 | BPF_CLASS(insn->code) == BPF_JMP); | |
3f5d6525 | 2372 | EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg)); |
62258278 AS |
2373 | goto emit_cond_jmp; |
2374 | ||
2375 | case BPF_JMP | BPF_JSET | BPF_X: | |
3f5d6525 | 2376 | case BPF_JMP32 | BPF_JSET | BPF_X: |
e430f34e | 2377 | /* test dst_reg, src_reg */ |
74007cfc BJ |
2378 | maybe_emit_mod(&prog, dst_reg, src_reg, |
2379 | BPF_CLASS(insn->code) == BPF_JMP); | |
3f5d6525 | 2380 | EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg)); |
62258278 AS |
2381 | goto emit_cond_jmp; |
2382 | ||
2383 | case BPF_JMP | BPF_JSET | BPF_K: | |
3f5d6525 | 2384 | case BPF_JMP32 | BPF_JSET | BPF_K: |
e430f34e | 2385 | /* test dst_reg, imm32 */ |
6364d7d7 JM |
2386 | maybe_emit_1mod(&prog, dst_reg, |
2387 | BPF_CLASS(insn->code) == BPF_JMP); | |
e430f34e | 2388 | EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); |
62258278 AS |
2389 | goto emit_cond_jmp; |
2390 | ||
2391 | case BPF_JMP | BPF_JEQ | BPF_K: | |
2392 | case BPF_JMP | BPF_JNE | BPF_K: | |
2393 | case BPF_JMP | BPF_JGT | BPF_K: | |
52afc51e | 2394 | case BPF_JMP | BPF_JLT | BPF_K: |
62258278 | 2395 | case BPF_JMP | BPF_JGE | BPF_K: |
52afc51e | 2396 | case BPF_JMP | BPF_JLE | BPF_K: |
62258278 | 2397 | case BPF_JMP | BPF_JSGT | BPF_K: |
52afc51e | 2398 | case BPF_JMP | BPF_JSLT | BPF_K: |
62258278 | 2399 | case BPF_JMP | BPF_JSGE | BPF_K: |
52afc51e | 2400 | case BPF_JMP | BPF_JSLE | BPF_K: |
3f5d6525 JW |
2401 | case BPF_JMP32 | BPF_JEQ | BPF_K: |
2402 | case BPF_JMP32 | BPF_JNE | BPF_K: | |
2403 | case BPF_JMP32 | BPF_JGT | BPF_K: | |
2404 | case BPF_JMP32 | BPF_JLT | BPF_K: | |
2405 | case BPF_JMP32 | BPF_JGE | BPF_K: | |
2406 | case BPF_JMP32 | BPF_JLE | BPF_K: | |
2407 | case BPF_JMP32 | BPF_JSGT | BPF_K: | |
2408 | case BPF_JMP32 | BPF_JSLT | BPF_K: | |
2409 | case BPF_JMP32 | BPF_JSGE | BPF_K: | |
2410 | case BPF_JMP32 | BPF_JSLE | BPF_K: | |
38f51c07 DB |
2411 | /* test dst_reg, dst_reg to save one extra byte */ |
2412 | if (imm32 == 0) { | |
74007cfc BJ |
2413 | maybe_emit_mod(&prog, dst_reg, dst_reg, |
2414 | BPF_CLASS(insn->code) == BPF_JMP); | |
38f51c07 DB |
2415 | EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg)); |
2416 | goto emit_cond_jmp; | |
2417 | } | |
2418 | ||
e430f34e | 2419 | /* cmp dst_reg, imm8/32 */ |
6364d7d7 JM |
2420 | maybe_emit_1mod(&prog, dst_reg, |
2421 | BPF_CLASS(insn->code) == BPF_JMP); | |
62258278 | 2422 | |
e430f34e AS |
2423 | if (is_imm8(imm32)) |
2424 | EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); | |
62258278 | 2425 | else |
e430f34e | 2426 | EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); |
62258278 | 2427 | |
a2c7a983 | 2428 | emit_cond_jmp: /* Convert BPF opcode to x86 */ |
62258278 AS |
2429 | switch (BPF_OP(insn->code)) { |
2430 | case BPF_JEQ: | |
2431 | jmp_cond = X86_JE; | |
2432 | break; | |
2433 | case BPF_JSET: | |
2434 | case BPF_JNE: | |
2435 | jmp_cond = X86_JNE; | |
2436 | break; | |
2437 | case BPF_JGT: | |
2438 | /* GT is unsigned '>', JA in x86 */ | |
2439 | jmp_cond = X86_JA; | |
2440 | break; | |
52afc51e DB |
2441 | case BPF_JLT: |
2442 | /* LT is unsigned '<', JB in x86 */ | |
2443 | jmp_cond = X86_JB; | |
2444 | break; | |
62258278 AS |
2445 | case BPF_JGE: |
2446 | /* GE is unsigned '>=', JAE in x86 */ | |
2447 | jmp_cond = X86_JAE; | |
2448 | break; | |
52afc51e DB |
2449 | case BPF_JLE: |
2450 | /* LE is unsigned '<=', JBE in x86 */ | |
2451 | jmp_cond = X86_JBE; | |
2452 | break; | |
62258278 | 2453 | case BPF_JSGT: |
a2c7a983 | 2454 | /* Signed '>', GT in x86 */ |
62258278 AS |
2455 | jmp_cond = X86_JG; |
2456 | break; | |
52afc51e | 2457 | case BPF_JSLT: |
a2c7a983 | 2458 | /* Signed '<', LT in x86 */ |
52afc51e DB |
2459 | jmp_cond = X86_JL; |
2460 | break; | |
62258278 | 2461 | case BPF_JSGE: |
a2c7a983 | 2462 | /* Signed '>=', GE in x86 */ |
62258278 AS |
2463 | jmp_cond = X86_JGE; |
2464 | break; | |
52afc51e | 2465 | case BPF_JSLE: |
a2c7a983 | 2466 | /* Signed '<=', LE in x86 */ |
52afc51e DB |
2467 | jmp_cond = X86_JLE; |
2468 | break; | |
a2c7a983 | 2469 | default: /* to silence GCC warning */ |
62258278 AS |
2470 | return -EFAULT; |
2471 | } | |
2472 | jmp_offset = addrs[i + insn->off] - addrs[i]; | |
c8831bdb | 2473 | if (is_imm8_jmp_offset(jmp_offset)) { |
93c5aecc GL |
2474 | if (jmp_padding) { |
2475 | /* To keep the jmp_offset valid, the extra bytes are | |
d9f6e12f | 2476 | * padded before the jump insn, so we subtract the |
93c5aecc GL |
2477 | * 2 bytes of jmp_cond insn from INSN_SZ_DIFF. |
2478 | * | |
2479 | * If the previous pass already emits an imm8 | |
2480 | * jmp_cond, then this BPF insn won't shrink, so | |
2481 | * "nops" is 0. | |
2482 | * | |
2483 | * On the other hand, if the previous pass emits an | |
2484 | * imm32 jmp_cond, the extra 4 bytes(*) is padded to | |
2485 | * keep the image from shrinking further. | |
2486 | * | |
2487 | * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond | |
2488 | * is 2 bytes, so the size difference is 4 bytes. | |
2489 | */ | |
2490 | nops = INSN_SZ_DIFF - 2; | |
2491 | if (nops != 0 && nops != 4) { | |
2492 | pr_err("unexpected jmp_cond padding: %d bytes\n", | |
2493 | nops); | |
2494 | return -EFAULT; | |
2495 | } | |
ced50fc4 | 2496 | emit_nops(&prog, nops); |
93c5aecc | 2497 | } |
62258278 AS |
2498 | EMIT2(jmp_cond, jmp_offset); |
2499 | } else if (is_simm32(jmp_offset)) { | |
2500 | EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); | |
2501 | } else { | |
2502 | pr_err("cond_jmp gen bug %llx\n", jmp_offset); | |
2503 | return -EFAULT; | |
2504 | } | |
2505 | ||
2506 | break; | |
0a14842f | 2507 | |
62258278 | 2508 | case BPF_JMP | BPF_JA: |
4cd58e9a YS |
2509 | case BPF_JMP32 | BPF_JA: |
2510 | if (BPF_CLASS(insn->code) == BPF_JMP) { | |
2511 | if (insn->off == -1) | |
2512 | /* -1 jmp instructions will always jump | |
2513 | * backwards two bytes. Explicitly handling | |
2514 | * this case avoids wasting too many passes | |
2515 | * when there are long sequences of replaced | |
2516 | * dead code. | |
2517 | */ | |
2518 | jmp_offset = -2; | |
2519 | else | |
2520 | jmp_offset = addrs[i + insn->off] - addrs[i]; | |
2521 | } else { | |
2522 | if (insn->imm == -1) | |
2523 | jmp_offset = -2; | |
2524 | else | |
2525 | jmp_offset = addrs[i + insn->imm] - addrs[i]; | |
2526 | } | |
1612a981 | 2527 | |
93c5aecc GL |
2528 | if (!jmp_offset) { |
2529 | /* | |
2530 | * If jmp_padding is enabled, the extra nops will | |
2531 | * be inserted. Otherwise, optimize out nop jumps. | |
2532 | */ | |
2533 | if (jmp_padding) { | |
2534 | /* There are 3 possible conditions. | |
2535 | * (1) This BPF_JA is already optimized out in | |
2536 | * the previous run, so there is no need | |
2537 | * to pad any extra byte (0 byte). | |
2538 | * (2) The previous pass emits an imm8 jmp, | |
2539 | * so we pad 2 bytes to match the previous | |
2540 | * insn size. | |
2541 | * (3) Similarly, the previous pass emits an | |
2542 | * imm32 jmp, and 5 bytes is padded. | |
2543 | */ | |
2544 | nops = INSN_SZ_DIFF; | |
2545 | if (nops != 0 && nops != 2 && nops != 5) { | |
2546 | pr_err("unexpected nop jump padding: %d bytes\n", | |
2547 | nops); | |
2548 | return -EFAULT; | |
2549 | } | |
ced50fc4 | 2550 | emit_nops(&prog, nops); |
93c5aecc | 2551 | } |
62258278 | 2552 | break; |
93c5aecc | 2553 | } |
62258278 | 2554 | emit_jmp: |
c8831bdb | 2555 | if (is_imm8_jmp_offset(jmp_offset)) { |
93c5aecc GL |
2556 | if (jmp_padding) { |
2557 | /* To avoid breaking jmp_offset, the extra bytes | |
2558 | * are padded before the actual jmp insn, so | |
d9f6e12f | 2559 | * 2 bytes is subtracted from INSN_SZ_DIFF. |
93c5aecc GL |
2560 | * |
2561 | * If the previous pass already emits an imm8 | |
2562 | * jmp, there is nothing to pad (0 byte). | |
2563 | * | |
2564 | * If it emits an imm32 jmp (5 bytes) previously | |
2565 | * and now an imm8 jmp (2 bytes), then we pad | |
2566 | * (5 - 2 = 3) bytes to stop the image from | |
2567 | * shrinking further. | |
2568 | */ | |
2569 | nops = INSN_SZ_DIFF - 2; | |
2570 | if (nops != 0 && nops != 3) { | |
2571 | pr_err("unexpected jump padding: %d bytes\n", | |
2572 | nops); | |
2573 | return -EFAULT; | |
2574 | } | |
ced50fc4 | 2575 | emit_nops(&prog, INSN_SZ_DIFF - 2); |
93c5aecc | 2576 | } |
62258278 AS |
2577 | EMIT2(0xEB, jmp_offset); |
2578 | } else if (is_simm32(jmp_offset)) { | |
2579 | EMIT1_off32(0xE9, jmp_offset); | |
2580 | } else { | |
2581 | pr_err("jmp gen bug %llx\n", jmp_offset); | |
2582 | return -EFAULT; | |
2583 | } | |
2584 | break; | |
2585 | ||
62258278 | 2586 | case BPF_JMP | BPF_EXIT: |
769e0de6 | 2587 | if (seen_exit) { |
62258278 AS |
2588 | jmp_offset = ctx->cleanup_addr - addrs[i]; |
2589 | goto emit_jmp; | |
2590 | } | |
769e0de6 | 2591 | seen_exit = true; |
a2c7a983 | 2592 | /* Update cleanup_addr */ |
62258278 | 2593 | ctx->cleanup_addr = proglen; |
d4e89d21 DS |
2594 | if (bpf_prog_was_classic(bpf_prog) && |
2595 | !capable(CAP_SYS_ADMIN)) { | |
2596 | u8 *ip = image + addrs[i - 1]; | |
2597 | ||
2598 | if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog)) | |
2599 | return -EINVAL; | |
2600 | } | |
f18b03fa KKD |
2601 | if (bpf_prog->aux->exception_boundary) { |
2602 | pop_callee_regs(&prog, all_callee_regs_used); | |
2603 | pop_r12(&prog); | |
2604 | } else { | |
2605 | pop_callee_regs(&prog, callee_regs_used); | |
2fe99eb0 AS |
2606 | if (arena_vm_start) |
2607 | pop_r12(&prog); | |
f18b03fa | 2608 | } |
fe8d9571 | 2609 | EMIT1(0xC9); /* leave */ |
d77cfe59 | 2610 | emit_return(&prog, image + addrs[i - 1] + (prog - temp)); |
62258278 AS |
2611 | break; |
2612 | ||
f3c2af7b | 2613 | default: |
a2c7a983 IM |
2614 | /* |
2615 | * By design x86-64 JIT should support all BPF instructions. | |
62258278 | 2616 | * This error will be seen if new instruction was added |
a2c7a983 IM |
2617 | * to the interpreter, but not to the JIT, or if there is |
2618 | * junk in bpf_prog. | |
62258278 AS |
2619 | */ |
2620 | pr_err("bpf_jit: unknown opcode %02x\n", insn->code); | |
f3c2af7b AS |
2621 | return -EINVAL; |
2622 | } | |
62258278 | 2623 | |
f3c2af7b | 2624 | ilen = prog - temp; |
e0ee9c12 | 2625 | if (ilen > BPF_MAX_INSN_SIZE) { |
9383191d | 2626 | pr_err("bpf_jit: fatal insn size error\n"); |
e0ee9c12 AS |
2627 | return -EFAULT; |
2628 | } | |
2629 | ||
f3c2af7b | 2630 | if (image) { |
e4d4d456 PK |
2631 | /* |
2632 | * When populating the image, assert that: | |
2633 | * | |
2634 | * i) We do not write beyond the allocated space, and | |
2635 | * ii) addrs[i] did not change from the prior run, in order | |
2636 | * to validate assumptions made for computing branch | |
2637 | * displacements. | |
2638 | */ | |
2639 | if (unlikely(proglen + ilen > oldproglen || | |
2640 | proglen + ilen != addrs[i])) { | |
9383191d | 2641 | pr_err("bpf_jit: fatal error\n"); |
f3c2af7b | 2642 | return -EFAULT; |
0a14842f | 2643 | } |
1022a549 | 2644 | memcpy(rw_image + proglen, temp, ilen); |
0a14842f | 2645 | } |
f3c2af7b AS |
2646 | proglen += ilen; |
2647 | addrs[i] = proglen; | |
2648 | prog = temp; | |
2649 | } | |
3dec541b AS |
2650 | |
2651 | if (image && excnt != bpf_prog->aux->num_exentries) { | |
2652 | pr_err("extable is not populated\n"); | |
2653 | return -EFAULT; | |
2654 | } | |
f3c2af7b AS |
2655 | return proglen; |
2656 | } | |
2657 | ||
473e3150 MD |
2658 | static void clean_stack_garbage(const struct btf_func_model *m, |
2659 | u8 **pprog, int nr_stack_slots, | |
2660 | int stack_size) | |
fec56f58 | 2661 | { |
473e3150 MD |
2662 | int arg_size, off; |
2663 | u8 *prog; | |
2664 | ||
2665 | /* Generally speaking, the compiler will pass the arguments | |
2666 | * on-stack with a "push" instruction, which takes 8 bytes |
2667 | * on the stack. In this case, there won't be garbage values |
2668 | * while we copy the arguments from the origin stack frame to |
2669 | * the current one in BPF_DW. |
2670 | * |
2671 | * However, sometimes the compiler will only allocate 4 bytes on |
2672 | * the stack for the arguments. For now, this case will only |
2673 | * happen if there is only one argument on-stack and its size is |
2674 | * not more than 4 bytes. In this case, there will be garbage |
2675 | * values in the upper 4 bytes where we store the argument in |
2676 | * the current stack frame. |
2677 | * | |
2678 | * arguments on origin stack: | |
2679 | * | |
2680 | * stack_arg_1(4-byte) xxx(4-byte) | |
2681 | * | |
2682 | * what we copy: | |
2683 | * | |
2684 | * stack_arg_1(8-byte): stack_arg_1(origin) xxx | |
2685 | * | |
2686 | * and the xxx is the garbage that we should clean here. |
2687 | */ | |
2688 | if (nr_stack_slots != 1) | |
2689 | return; | |
2690 | ||
2691 | /* the size of the last argument */ | |
2692 | arg_size = m->arg_size[m->nr_args - 1]; | |
2693 | if (arg_size <= 4) { | |
2694 | off = -(stack_size - 4); | |
2695 | prog = *pprog; | |
2696 | /* mov DWORD PTR [rbp + off], 0 */ | |
2697 | if (!is_imm8(off)) | |
2698 | EMIT2_off32(0xC7, 0x85, off); | |
2699 | else | |
2700 | EMIT3(0xC7, 0x45, off); | |
2701 | EMIT(0, 4); | |
2702 | *pprog = prog; | |
2703 | } | |
2704 | } | |
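/*
 * Hypothetical example (not from the source): for
 *
 *   int foo(u64 a, u64 b, u64 c, u64 d, u64 e, u64 f, int g);
 *
 * 'g' is the only on-stack argument and is 4 bytes wide, so the caller
 * may store it with a 4-byte mov. The 8-byte copy in save_args() then
 * drags 4 bytes of stale stack along, and the "mov DWORD PTR
 * [rbp + off], 0" above wipes that upper half.
 */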
2705 | ||
2706 | /* get the count of the regs that are used to pass arguments */ | |
2707 | static int get_nr_used_regs(const struct btf_func_model *m) | |
2708 | { | |
2709 | int i, arg_regs, nr_used_regs = 0; | |
2710 | ||
2711 | for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { | |
2712 | arg_regs = (m->arg_size[i] + 7) / 8; | |
2713 | if (nr_used_regs + arg_regs <= 6) | |
2714 | nr_used_regs += arg_regs; | |
2715 | ||
2716 | if (nr_used_regs >= 6) | |
2717 | break; | |
2718 | } | |
2719 | ||
2720 | return nr_used_regs; | |
2721 | } | |
2722 | ||
2723 | static void save_args(const struct btf_func_model *m, u8 **prog, | |
2724 | int stack_size, bool for_call_origin) | |
2725 | { | |
492e797f | 2726 | int arg_regs, first_off = 0, nr_regs = 0, nr_stack_slots = 0; |
473e3150 | 2727 | int i, j; |
7f788049 | 2728 | |
fec56f58 AS |
2729 | /* Store function arguments to stack. |
2730 | * For a function that accepts two pointers the sequence will be: | |
2731 | * mov QWORD PTR [rbp-0x10],rdi | |
2732 | * mov QWORD PTR [rbp-0x8],rsi | |
2733 | */ | |
473e3150 MD |
2734 | for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { |
2735 | arg_regs = (m->arg_size[i] + 7) / 8; | |
2736 | ||
2737 | /* According to the research of Yonghong, struct members | |
2738 | * should all be in registers or all on the stack. |
2739 | * Meanwhile, the compiler will pass the argument in regs |
2740 | * if the remaining regs can hold the argument. | |
2741 | * | |
2742 | * Disorder of the args can happen. For example: | |
2743 | * | |
2744 | * struct foo_struct { | |
2745 | * long a; | |
2746 | * int b; | |
2747 | * }; | |
2748 | * int foo(char, char, char, char, char, struct foo_struct, | |
2749 | * char); | |
2750 | * | |
2751 | * args 1-5 and arg 7 will be passed in regs, and arg 6 will |
2752 | * be passed on the stack. |
2753 | */ | |
2754 | if (nr_regs + arg_regs > 6) { | |
2755 | /* copy function arguments from origin stack frame | |
2756 | * into current stack frame. | |
2757 | * | |
2758 | * The starting address of the arguments on-stack | |
2759 | * is: | |
2760 | * rbp + 8(push rbp) + | |
2761 | * 8(return addr of origin call) + | |
2762 | * 8(return addr of the caller) | |
2763 | * which means: rbp + 24 | |
2764 | */ | |
2765 | for (j = 0; j < arg_regs; j++) { | |
2766 | emit_ldx(prog, BPF_DW, BPF_REG_0, BPF_REG_FP, | |
2767 | nr_stack_slots * 8 + 0x18); | |
2768 | emit_stx(prog, BPF_DW, BPF_REG_FP, BPF_REG_0, | |
2769 | -stack_size); | |
2770 | ||
2771 | if (!nr_stack_slots) | |
2772 | first_off = stack_size; | |
2773 | stack_size -= 8; | |
2774 | nr_stack_slots++; | |
2775 | } | |
2776 | } else { | |
2777 | /* When preparing the on-stack arguments for the origin |
2778 | * call, only copy them into the current 'stack_size' |
54aa699e | 2779 | * area and skip the register arguments. |
473e3150 MD |
2780 | */ |
2781 | if (for_call_origin) { | |
2782 | nr_regs += arg_regs; | |
2783 | continue; | |
2784 | } | |
2785 | ||
2786 | /* copy the arguments from regs into stack */ | |
2787 | for (j = 0; j < arg_regs; j++) { | |
2788 | emit_stx(prog, BPF_DW, BPF_REG_FP, | |
2789 | nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs, | |
2790 | -stack_size); | |
2791 | stack_size -= 8; | |
2792 | nr_regs++; | |
2793 | } | |
2794 | } | |
2795 | } | |
2796 | ||
2797 | clean_stack_garbage(m, prog, nr_stack_slots, first_off); | |
fec56f58 AS |
2798 | } |
2799 | ||
473e3150 | 2800 | static void restore_regs(const struct btf_func_model *m, u8 **prog, |
fec56f58 AS |
2801 | int stack_size) |
2802 | { | |
473e3150 | 2803 | int i, j, arg_regs, nr_regs = 0; |
fec56f58 AS |
2804 | |
2805 | /* Restore function arguments from stack. | |
2806 | * For a function that accepts two pointers the sequence will be: | |
2807 | * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] | |
2808 | * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] | |
473e3150 MD |
2809 | * |
2810 | * The logic here is similar to what we do in save_args() | |
fec56f58 | 2811 | */ |
473e3150 MD |
2812 | for (i = 0; i < min_t(int, m->nr_args, MAX_BPF_FUNC_ARGS); i++) { |
2813 | arg_regs = (m->arg_size[i] + 7) / 8; | |
2814 | if (nr_regs + arg_regs <= 6) { | |
2815 | for (j = 0; j < arg_regs; j++) { | |
2816 | emit_ldx(prog, BPF_DW, | |
2817 | nr_regs == 5 ? X86_REG_R9 : BPF_REG_1 + nr_regs, | |
2818 | BPF_REG_FP, | |
2819 | -stack_size); | |
2820 | stack_size -= 8; | |
2821 | nr_regs++; | |
2822 | } | |
2823 | } else { | |
2824 | stack_size -= 8 * arg_regs; | |
2825 | } | |
2826 | ||
2827 | if (nr_regs >= 6) | |
2828 | break; | |
2829 | } | |
fec56f58 AS |
2830 | } |
2831 | ||
7e639208 | 2832 | static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, |
f7e0beaf | 2833 | struct bpf_tramp_link *l, int stack_size, |
3ba026fc SL |
2834 | int run_ctx_off, bool save_ret, |
2835 | void *image, void *rw_image) | |
7e639208 KS |
2836 | { |
2837 | u8 *prog = *pprog; | |
ca06f55b | 2838 | u8 *jmp_insn; |
e384c7b7 | 2839 | int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie); |
f7e0beaf | 2840 | struct bpf_prog *p = l->link.prog; |
2fcc8241 | 2841 | u64 cookie = l->cookie; |
7e639208 | 2842 | |
2fcc8241 KFL |
2843 | /* mov rdi, cookie */ |
2844 | emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie); | |
e384c7b7 KFL |
2845 | |
2846 | /* Prepare struct bpf_tramp_run_ctx. | |
2847 | * | |
2848 | * bpf_tramp_run_ctx is already preserved by | |
2849 | * arch_prepare_bpf_trampoline(). | |
2850 | * | |
2851 | * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi | |
2852 | */ | |
2853 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off); | |
2854 | ||
ca06f55b AS |
2855 | /* arg1: mov rdi, progs[i] */ |
2856 | emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); | |
e384c7b7 | 2857 | /* arg2: lea rsi, [rbp - ctx_cookie_off] */ |
473e3150 MD |
2858 | if (!is_imm8(-run_ctx_off)) |
2859 | EMIT3_off32(0x48, 0x8D, 0xB5, -run_ctx_off); | |
2860 | else | |
2861 | EMIT4(0x48, 0x8D, 0x75, -run_ctx_off); | |
e384c7b7 | 2862 | |
3ba026fc | 2863 | if (emit_rsb_call(&prog, bpf_trampoline_enter(p), image + (prog - (u8 *)rw_image))) |
69fd337a | 2864 | return -EINVAL; |
f2dd3b39 AS |
2865 | /* remember prog start time returned by __bpf_prog_enter */ |
2866 | emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0); | |
7e639208 | 2867 | |
ca06f55b AS |
2868 | /* if (__bpf_prog_enter*(prog) == 0) |
2869 | * goto skip_exec_of_prog; | |
2870 | */ | |
2871 | EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ | |
2872 | /* emit 2 nops that will be replaced with JE insn */ | |
2873 | jmp_insn = prog; | |
2874 | emit_nops(&prog, 2); | |
2875 | ||
7e639208 | 2876 | /* arg1: lea rdi, [rbp - stack_size] */ |
473e3150 MD |
2877 | if (!is_imm8(-stack_size)) |
2878 | EMIT3_off32(0x48, 0x8D, 0xBD, -stack_size); | |
2879 | else | |
2880 | EMIT4(0x48, 0x8D, 0x7D, -stack_size); | |
7e639208 KS |
2881 | /* arg2: progs[i]->insnsi for interpreter */ |
2882 | if (!p->jited) | |
2883 | emit_mov_imm64(&prog, BPF_REG_2, | |
2884 | (long) p->insnsi >> 32, | |
2885 | (u32) (long) p->insnsi); | |
2886 | /* call JITed bpf program or interpreter */ | |
3ba026fc | 2887 | if (emit_rsb_call(&prog, p->bpf_func, image + (prog - (u8 *)rw_image))) |
7e639208 KS |
2888 | return -EINVAL; |
2889 | ||
356ed649 HT |
2890 | /* |
2891 | * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return | |
ae240823 KS |
2892 | * of the previous call which is then passed on the stack to |
2893 | * the next BPF program. | |
356ed649 HT |
2894 | * |
2895 | * BPF_TRAMP_FENTRY trampoline may need to return the return | |
2896 | * value of BPF_PROG_TYPE_STRUCT_OPS prog. | |
ae240823 | 2897 | */ |
356ed649 | 2898 | if (save_ret) |
ae240823 KS |
2899 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); |
2900 | ||
ca06f55b AS |
2901 | /* replace 2 nops with JE insn, since jmp target is known */ |
2902 | jmp_insn[0] = X86_JE; | |
2903 | jmp_insn[1] = prog - jmp_insn - 2; | |
2904 | ||
f2dd3b39 AS |
2905 | /* arg1: mov rdi, progs[i] */ |
2906 | emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p); | |
2907 | /* arg2: mov rsi, rbx <- start time in nsec */ | |
2908 | emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6); | |
e384c7b7 | 2909 | /* arg3: lea rdx, [rbp - run_ctx_off] */ |
473e3150 MD |
2910 | if (!is_imm8(-run_ctx_off)) |
2911 | EMIT3_off32(0x48, 0x8D, 0x95, -run_ctx_off); | |
2912 | else | |
2913 | EMIT4(0x48, 0x8D, 0x55, -run_ctx_off); | |
3ba026fc | 2914 | if (emit_rsb_call(&prog, bpf_trampoline_exit(p), image + (prog - (u8 *)rw_image))) |
69fd337a | 2915 | return -EINVAL; |
7e639208 KS |
2916 | |
2917 | *pprog = prog; | |
2918 | return 0; | |
2919 | } | |
2920 | ||
7e639208 KS |
2921 | static void emit_align(u8 **pprog, u32 align) |
2922 | { | |
2923 | u8 *target, *prog = *pprog; | |
2924 | ||
2925 | target = PTR_ALIGN(prog, align); | |
2926 | if (target != prog) | |
2927 | emit_nops(&prog, target - prog); | |
2928 | ||
2929 | *pprog = prog; | |
2930 | } | |
2931 | ||
2932 | static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond) | |
2933 | { | |
2934 | u8 *prog = *pprog; | |
7e639208 KS |
2935 | s64 offset; |
2936 | ||
2937 | offset = func - (ip + 2 + 4); | |
2938 | if (!is_simm32(offset)) { | |
2939 | pr_err("Target %p is out of range\n", func); | |
2940 | return -EINVAL; | |
2941 | } | |
2942 | EMIT2_off32(0x0F, jmp_cond + 0x10, offset); | |
2943 | *pprog = prog; | |
2944 | return 0; | |
2945 | } | |
2946 | ||
85d33df3 | 2947 | static int invoke_bpf(const struct btf_func_model *m, u8 **pprog, |
f7e0beaf | 2948 | struct bpf_tramp_links *tl, int stack_size, |
3ba026fc SL |
2949 | int run_ctx_off, bool save_ret, |
2950 | void *image, void *rw_image) | |
fec56f58 | 2951 | { |
7e639208 | 2952 | int i; |
fec56f58 | 2953 | u8 *prog = *pprog; |
fec56f58 | 2954 | |
f7e0beaf KFL |
2955 | for (i = 0; i < tl->nr_links; i++) { |
2956 | if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, | |
3ba026fc | 2957 | run_ctx_off, save_ret, image, rw_image)) |
ae240823 KS |
2958 | return -EINVAL; |
2959 | } | |
2960 | *pprog = prog; | |
2961 | return 0; | |
2962 | } | |
2963 | ||
2964 | static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog, | |
f7e0beaf | 2965 | struct bpf_tramp_links *tl, int stack_size, |
3ba026fc SL |
2966 | int run_ctx_off, u8 **branches, |
2967 | void *image, void *rw_image) | |
ae240823 KS |
2968 | { |
2969 | u8 *prog = *pprog; | |
ced50fc4 | 2970 | int i; |
ae240823 KS |
2971 | |
2972 | /* The first fmod_ret program will receive a garbage return value. | |
2973 | * Set this to 0 to avoid confusing the program. | |
2974 | */ | |
2975 | emit_mov_imm32(&prog, false, BPF_REG_0, 0); | |
2976 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); | |
f7e0beaf | 2977 | for (i = 0; i < tl->nr_links; i++) { |
3ba026fc SL |
2978 | if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true, |
2979 | image, rw_image)) | |
fec56f58 | 2980 | return -EINVAL; |
ae240823 | 2981 | |
13fac1d8 AS |
2982 | /* mod_ret prog stored return value into [rbp - 8]. Emit: |
2983 | * if (*(u64 *)(rbp - 8) != 0) | |
ae240823 | 2984 | * goto do_fexit; |
ae240823 | 2985 | */ |
13fac1d8 AS |
2986 | /* cmp QWORD PTR [rbp - 0x8], 0x0 */ |
2987 | EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00); | |
ae240823 KS |
2988 | |
2989 | /* Save the location of the branch and generate 6 nops |
2990 | * (2 bytes for the jump opcode and 4 bytes for the offset). These nops |
2991 | * are replaced with a conditional jump once do_fexit (i.e. the | |
2992 | * start of the fexit invocation) is finalized. | |
2993 | */ | |
2994 | branches[i] = prog; | |
2995 | emit_nops(&prog, 4 + 2); | |
fec56f58 | 2996 | } |
ae240823 | 2997 | |
fec56f58 AS |
2998 | *pprog = prog; |
2999 | return 0; | |
3000 | } | |
3001 | ||
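The six placeholder bytes recorded in branches[i] are later overwritten in place by emit_cond_near_jump(..., X86_JNE) from __arch_prepare_bpf_trampoline() below, turning each placeholder into a 0F 85 rel32 (jne near) that skips to do_fexit whenever the fmod_ret program stored a non-zero value at [rbp - 8]. For example, if a placeholder starts 0x60 bytes before do_fexit, the patched bytes are 0F 85 5A 00 00 00 (0x60 - 6 = 0x5a).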
116e04ba LH |
3002 | /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ |
3003 | #define LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack) \ | |
3004 | __LOAD_TCC_PTR(-round_up(stack, 8) - 8) | |
3005 | ||
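To make the macro concrete: with a trampoline stack_size of 40 bytes, LOAD_TRAMP_TAIL_CALL_CNT_PTR(40) loads rax from [rbp - 48], which is exactly the slot filled by the 'push rax' that follows 'sub rsp, stack_size' in the prologue below, i.e. the tail_call_cnt_ptr that the tail-call machinery handed to the trampoline in rax.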
fec56f58 AS |
3006 | /* Example: |
3007 | * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); | |
3008 | * its 'struct btf_func_model' will have nr_args=2 |
3009 | * The assembly code when eth_type_trans is executing after trampoline: | |
3010 | * | |
3011 | * push rbp | |
3012 | * mov rbp, rsp | |
3013 | * sub rsp, 16 // space for skb and dev | |
3014 | * push rbx // temp regs to pass start time | |
3015 | * mov qword ptr [rbp - 16], rdi // save skb pointer to stack | |
3016 | * mov qword ptr [rbp - 8], rsi // save dev pointer to stack | |
3017 | * call __bpf_prog_enter // rcu_read_lock and preempt_disable | |
3018 | * mov rbx, rax // remember start time if bpf stats are enabled |
3019 | * lea rdi, [rbp - 16] // R1==ctx of bpf prog | |
3020 | * call addr_of_jited_FENTRY_prog | |
3021 | * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off | |
3022 | * mov rsi, rbx // prog start time | |
3023 | * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math | |
3024 | * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack | |
3025 | * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack | |
3026 | * pop rbx | |
3027 | * leave | |
3028 | * ret | |
3029 | * | |
3030 | * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be |
3031 | * replaced with 'call generated_bpf_trampoline'. When it returns, |
3032 | * eth_type_trans will continue executing with the original skb and dev pointers. |
3033 | * | |
3034 | * The assembly code when eth_type_trans is called from trampoline: | |
3035 | * | |
3036 | * push rbp | |
3037 | * mov rbp, rsp | |
3038 | * sub rsp, 24 // space for skb, dev, return value | |
3039 | * push rbx // temp regs to pass start time | |
3040 | * mov qword ptr [rbp - 24], rdi // save skb pointer to stack | |
3041 | * mov qword ptr [rbp - 16], rsi // save dev pointer to stack | |
3042 | * call __bpf_prog_enter // rcu_read_lock and preempt_disable | |
3043 | * mov rbx, rax // remember start time if bpf stats are enabled | |
3044 | * lea rdi, [rbp - 24] // R1==ctx of bpf prog | |
3045 | * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev | |
3046 | * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off | |
3047 | * mov rsi, rbx // prog start time | |
3048 | * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math | |
3049 | * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack | |
3050 | * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack | |
3051 | * call eth_type_trans+5 // execute body of eth_type_trans | |
3052 | * mov qword ptr [rbp - 8], rax // save return value | |
3053 | * call __bpf_prog_enter // rcu_read_lock and preempt_disable | |
3054 | * mov rbx, rax // remember start time if bpf stats are enabled |
3055 | * lea rdi, [rbp - 24] // R1==ctx of bpf prog | |
3056 | * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value | |
3057 | * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off | |
3058 | * mov rsi, rbx // prog start time | |
3059 | * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math | |
3060 | * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value | |
3061 | * pop rbx | |
3062 | * leave | |
3063 | * add rsp, 8 // skip eth_type_trans's frame | |
3064 | * ret // return to its caller | |
3065 | */ | |
3ba026fc SL |
3066 | static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_image, |
3067 | void *rw_image_end, void *image, | |
96d1b7c0 SL |
3068 | const struct btf_func_model *m, u32 flags, |
3069 | struct bpf_tramp_links *tlinks, | |
3070 | void *func_addr) | |
fec56f58 | 3071 | { |
7f788049 | 3072 | int i, ret, nr_regs = m->nr_args, stack_size = 0; |
473e3150 | 3073 | int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off; |
f7e0beaf KFL |
3074 | struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; |
3075 | struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; | |
3076 | struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; | |
4d854f4f | 3077 | void *orig_call = func_addr; |
ae240823 | 3078 | u8 **branches = NULL; |
fec56f58 | 3079 | u8 *prog; |
356ed649 | 3080 | bool save_ret; |
fec56f58 | 3081 | |
2cd3e377 PZ |
3082 | /* |
3083 | * F_INDIRECT is only compatible with F_RET_FENTRY_RET; it is |
3084 | * explicitly incompatible with F_CALL_ORIG | F_SKIP_FRAME | F_IP_ARG |
3085 | * because @func_addr is not a patched kernel function entry in that case. |
3086 | */ | |
3087 | WARN_ON_ONCE((flags & BPF_TRAMP_F_INDIRECT) && | |
3088 | (flags & ~(BPF_TRAMP_F_INDIRECT | BPF_TRAMP_F_RET_FENTRY_RET))); | |
3089 | ||
7f788049 | 3090 | /* extra registers for struct arguments */ |
2cd3e377 | 3091 | for (i = 0; i < m->nr_args; i++) { |
a9c5ad31 | 3092 | if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) |
7f788049 | 3093 | nr_regs += (m->arg_size[i] + 7) / 8 - 1; |
2cd3e377 | 3094 | } |
7f788049 | 3095 | |
473e3150 MD |
3096 | /* x86-64 supports up to MAX_BPF_FUNC_ARGS arguments. Arguments 1-6 |
3097 | * are passed in registers, the rest are passed on the stack. |
3098 | */ | |
3099 | if (nr_regs > MAX_BPF_FUNC_ARGS) | |
a9c5ad31 | 3100 | return -ENOTSUPP; |
a9c5ad31 | 3101 | |
5edf6a19 JO |
3102 | /* Generated trampoline stack layout: |
3103 | * | |
3104 | * RBP + 8 [ return address ] | |
3105 | * RBP + 0 [ RBP ] | |
3106 | * | |
3107 | * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or | |
3108 | * BPF_TRAMP_F_RET_FENTRY_RET flags | |
3109 | * | |
3110 | * [ reg_argN ] always | |
3111 | * [ ... ] | |
3112 | * RBP - regs_off [ reg_arg1 ] program's ctx pointer | |
3113 | * | |
7f788049 | 3114 | * RBP - nregs_off [ regs count ] always |
f92c1e18 | 3115 | * |
5edf6a19 | 3116 | * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag |
e384c7b7 | 3117 | * |
473e3150 MD |
3118 | * RBP - rbx_off [ rbx value ] always |
3119 | * | |
e384c7b7 | 3120 | * RBP - run_ctx_off [ bpf_tramp_run_ctx ] |
473e3150 MD |
3121 | * |
3122 | * [ stack_argN ] BPF_TRAMP_F_CALL_ORIG | |
3123 | * [ ... ] | |
3124 | * [ stack_arg2 ] | |
3125 | * RBP - arg_stack_off [ stack_arg1 ] | |
116e04ba | 3126 | * RSP [ tail_call_cnt_ptr ] BPF_TRAMP_F_TAIL_CALL_CTX |
5edf6a19 JO |
3127 | */ |
3128 | ||
356ed649 HT |
3129 | /* room for return value of orig_call or fentry prog */ |
3130 | save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); | |
3131 | if (save_ret) | |
3132 | stack_size += 8; | |
fec56f58 | 3133 | |
7f788049 | 3134 | stack_size += nr_regs * 8; |
5edf6a19 JO |
3135 | regs_off = stack_size; |
3136 | ||
7f788049 | 3137 | /* regs count */ |
f92c1e18 | 3138 | stack_size += 8; |
7f788049 | 3139 | nregs_off = stack_size; |
f92c1e18 | 3140 | |
7e6f3cd8 JO |
3141 | if (flags & BPF_TRAMP_F_IP_ARG) |
3142 | stack_size += 8; /* room for IP address argument */ | |
3143 | ||
5edf6a19 JO |
3144 | ip_off = stack_size; |
3145 | ||
473e3150 MD |
3146 | stack_size += 8; |
3147 | rbx_off = stack_size; | |
3148 | ||
e384c7b7 KFL |
3149 | stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7; |
3150 | run_ctx_off = stack_size; | |
3151 | ||
473e3150 MD |
3152 | if (nr_regs > 6 && (flags & BPF_TRAMP_F_CALL_ORIG)) { |
3153 | /* the space used to pass arguments on the stack */ |
3154 | stack_size += (nr_regs - get_nr_used_regs(m)) * 8; | |
3155 | /* make sure the stack pointer is 16-byte aligned if we | |
3156 | * need to pass arguments on the stack, which means |
3157 | * [stack_size + 8(rbp) + 8(rip) + 8(original rip)] |
3158 | * should be 16-byte aligned. The following code depends on |
3159 | * stack_size already being 8-byte aligned. |
3160 | */ | |
3161 | stack_size += (stack_size % 16) ? 0 : 8; | |
3162 | } | |
3163 | ||
3164 | arg_stack_off = stack_size; | |
3165 | ||
58912710 | 3166 | if (flags & BPF_TRAMP_F_SKIP_FRAME) { |
fec56f58 AS |
3167 | /* Skip the patched call instruction and point orig_call to the |
3168 | * actual body of the kernel function. |
3169 | */ | |
72e213a7 | 3170 | if (is_endbr(orig_call)) |
58912710 | 3171 | orig_call += ENDBR_INSN_SIZE; |
4b3da77b | 3172 | orig_call += X86_PATCH_SIZE; |
58912710 | 3173 | } |
fec56f58 | 3174 | |
3ba026fc | 3175 | prog = rw_image; |
fec56f58 | 3176 | |
2cd3e377 PZ |
3177 | if (flags & BPF_TRAMP_F_INDIRECT) { |
3178 | /* | |
3179 | * Indirect call for bpf_struct_ops | |
3180 | */ | |
0c92385d PZ |
3181 | emit_cfi(&prog, image, |
3182 | cfi_get_func_hash(func_addr), | |
3183 | cfi_get_func_arity(func_addr)); | |
2cd3e377 PZ |
3184 | } else { |
3185 | /* | |
3186 | * Direct-call fentry stub, as such it needs accounting for the | |
3187 | * __fentry__ call. | |
3188 | */ | |
6a537453 | 3189 | x86_call_depth_emit_accounting(&prog, NULL, image); |
2cd3e377 | 3190 | } |
fec56f58 AS |
3191 | EMIT1(0x55); /* push rbp */ |
3192 | EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ | |
2cd3e377 | 3193 | if (!is_imm8(stack_size)) { |
473e3150 MD |
3194 | /* sub rsp, stack_size */ |
3195 | EMIT3_off32(0x48, 0x81, 0xEC, stack_size); | |
2cd3e377 | 3196 | } else { |
473e3150 MD |
3197 | /* sub rsp, stack_size */ |
3198 | EMIT4(0x48, 0x83, 0xEC, stack_size); | |
2cd3e377 | 3199 | } |
2b5dcb31 LH |
3200 | if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) |
3201 | EMIT1(0x50); /* push rax */ | |
473e3150 MD |
3202 | /* mov QWORD PTR [rbp - rbx_off], rbx */ |
3203 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off); | |
fec56f58 | 3204 | |
a9c5ad31 | 3205 | /* Store number of argument registers of the traced function: |
7f788049 PL |
3206 | * mov rax, nr_regs |
3207 | * mov QWORD PTR [rbp - nregs_off], rax | |
f92c1e18 | 3208 | */ |
7f788049 PL |
3209 | emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs); |
3210 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off); | |
f92c1e18 | 3211 | |
7e6f3cd8 JO |
3212 | if (flags & BPF_TRAMP_F_IP_ARG) { |
3213 | /* Store IP address of the traced function: | |
4d854f4f | 3214 | * movabsq rax, func_addr |
5edf6a19 | 3215 | * mov QWORD PTR [rbp - ip_off], rax |
7e6f3cd8 | 3216 | */ |
4d854f4f | 3217 | emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr); |
5edf6a19 | 3218 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off); |
7e6f3cd8 JO |
3219 | } |
3220 | ||
473e3150 | 3221 | save_args(m, &prog, regs_off, false); |
fec56f58 | 3222 | |
e21aa341 AS |
3223 | if (flags & BPF_TRAMP_F_CALL_ORIG) { |
3224 | /* arg1: mov rdi, im */ | |
3225 | emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); | |
3ba026fc SL |
3226 | if (emit_rsb_call(&prog, __bpf_tramp_enter, |
3227 | image + (prog - (u8 *)rw_image))) { | |
e21aa341 AS |
3228 | ret = -EINVAL; |
3229 | goto cleanup; | |
3230 | } | |
3231 | } | |
3232 | ||
2cd3e377 | 3233 | if (fentry->nr_links) { |
e384c7b7 | 3234 | if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off, |
3ba026fc | 3235 | flags & BPF_TRAMP_F_RET_FENTRY_RET, image, rw_image)) |
fec56f58 | 3236 | return -EINVAL; |
2cd3e377 | 3237 | } |
fec56f58 | 3238 | |
f7e0beaf KFL |
3239 | if (fmod_ret->nr_links) { |
3240 | branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *), | |
ae240823 KS |
3241 | GFP_KERNEL); |
3242 | if (!branches) | |
3243 | return -ENOMEM; | |
3244 | ||
5edf6a19 | 3245 | if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off, |
3ba026fc | 3246 | run_ctx_off, branches, image, rw_image)) { |
ae240823 KS |
3247 | ret = -EINVAL; |
3248 | goto cleanup; | |
3249 | } | |
3250 | } | |
3251 | ||
fec56f58 | 3252 | if (flags & BPF_TRAMP_F_CALL_ORIG) { |
473e3150 MD |
3253 | restore_regs(m, &prog, regs_off); |
3254 | save_args(m, &prog, arg_stack_off, true); | |
fec56f58 | 3255 | |
2cd3e377 | 3256 | if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { |
116e04ba LH |
3257 | /* Before calling the original function, load the |
3258 | * tail_call_cnt_ptr from stack to rax. | |
2b5dcb31 | 3259 | */ |
116e04ba | 3260 | LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); |
2cd3e377 | 3261 | } |
2b5dcb31 | 3262 | |
316cba62 | 3263 | if (flags & BPF_TRAMP_F_ORIG_STACK) { |
2b5dcb31 LH |
3264 | emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8); |
3265 | EMIT2(0xff, 0xd3); /* call *rbx */ | |
316cba62 JO |
3266 | } else { |
3267 | /* call original function */ | |
3ba026fc | 3268 | if (emit_rsb_call(&prog, orig_call, image + (prog - (u8 *)rw_image))) { |
316cba62 JO |
3269 | ret = -EINVAL; |
3270 | goto cleanup; | |
3271 | } | |
ae240823 | 3272 | } |
fec56f58 AS |
3273 | /* remember the return value on the stack for the bpf prog to access */ |
3274 | emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8); | |
3ba026fc | 3275 | im->ip_after_call = image + (prog - (u8 *)rw_image); |
00bc8988 | 3276 | emit_nops(&prog, X86_PATCH_SIZE); |
fec56f58 AS |
3277 | } |
3278 | ||
f7e0beaf | 3279 | if (fmod_ret->nr_links) { |
ae240823 KS |
3280 | /* From Intel 64 and IA-32 Architectures Optimization |
3281 | * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler | |
3282 | * Coding Rule 11: All branch targets should be 16-byte | |
3283 | * aligned. | |
3284 | */ | |
3285 | emit_align(&prog, 16); | |
3286 | /* Update the branches saved in invoke_bpf_mod_ret with the | |
3287 | * aligned address of do_fexit. | |
3288 | */ | |
2cd3e377 | 3289 | for (i = 0; i < fmod_ret->nr_links; i++) { |
3ba026fc SL |
3290 | emit_cond_near_jump(&branches[i], image + (prog - (u8 *)rw_image), |
3291 | image + (branches[i] - (u8 *)rw_image), X86_JNE); | |
2cd3e377 | 3292 | } |
ae240823 KS |
3293 | } |
3294 | ||
2cd3e377 | 3295 | if (fexit->nr_links) { |
3ba026fc SL |
3296 | if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, |
3297 | false, image, rw_image)) { | |
ae240823 KS |
3298 | ret = -EINVAL; |
3299 | goto cleanup; | |
3300 | } | |
2cd3e377 | 3301 | } |
fec56f58 AS |
3302 | |
3303 | if (flags & BPF_TRAMP_F_RESTORE_REGS) | |
473e3150 | 3304 | restore_regs(m, &prog, regs_off); |
fec56f58 | 3305 | |
ae240823 KS |
3306 | /* This needs to be done regardless. If there were fmod_ret programs, |
3307 | * the return value is only updated on the stack and still needs to be | |
3308 | * restored to R0. | |
3309 | */ | |
e21aa341 | 3310 | if (flags & BPF_TRAMP_F_CALL_ORIG) { |
3ba026fc | 3311 | im->ip_epilogue = image + (prog - (u8 *)rw_image); |
e21aa341 AS |
3312 | /* arg1: mov rdi, im */ |
3313 | emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im); | |
3ba026fc | 3314 | if (emit_rsb_call(&prog, __bpf_tramp_exit, image + (prog - (u8 *)rw_image))) { |
e21aa341 AS |
3315 | ret = -EINVAL; |
3316 | goto cleanup; | |
3317 | } | |
2cd3e377 | 3318 | } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) { |
116e04ba LH |
3319 | /* Before running the original function, load the |
3320 | * tail_call_cnt_ptr from stack to rax. | |
2b5dcb31 | 3321 | */ |
116e04ba | 3322 | LOAD_TRAMP_TAIL_CALL_CNT_PTR(stack_size); |
2cd3e377 | 3323 | } |
2b5dcb31 | 3324 | |
356ed649 HT |
3325 | /* restore return value of orig_call or fentry prog back into RAX */ |
3326 | if (save_ret) | |
3327 | emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); | |
fec56f58 | 3328 | |
473e3150 | 3329 | emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, -rbx_off); |
fec56f58 | 3330 | EMIT1(0xC9); /* leave */ |
2cd3e377 | 3331 | if (flags & BPF_TRAMP_F_SKIP_FRAME) { |
fec56f58 AS |
3332 | /* skip our return address and return to parent */ |
3333 | EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */ | |
2cd3e377 | 3334 | } |
3ba026fc | 3335 | emit_return(&prog, image + (prog - (u8 *)rw_image)); |
85d33df3 | 3336 | /* Make sure the trampoline generation logic doesn't overflow */ |
3ba026fc | 3337 | if (WARN_ON_ONCE(prog > (u8 *)rw_image_end - BPF_INSN_SAFETY)) { |
ae240823 KS |
3338 | ret = -EFAULT; |
3339 | goto cleanup; | |
3340 | } | |
3ba026fc | 3341 | ret = prog - (u8 *)rw_image + BPF_INSN_SAFETY; |
ae240823 KS |
3342 | |
3343 | cleanup: | |
3344 | kfree(branches); | |
3345 | return ret; | |
fec56f58 AS |
3346 | } |
3347 | ||
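As a worked example of the layout computed above, take a traced function with two register arguments (like the earlier eth_type_trans() example), BPF_TRAMP_F_CALL_ORIG set and no BPF_TRAMP_F_IP_ARG, and assume purely for illustration that sizeof(struct bpf_tramp_run_ctx) rounds up to 24 bytes (the real size depends on the kernel configuration):

    save_ret slot        stack_size =  8    return value at [rbp - 8]
    2 register args      stack_size = 24    regs_off  = 24 (arg1 at [rbp - 24], arg2 at [rbp - 16])
    regs count           stack_size = 32    nregs_off = 32
    no IP arg                               ip_off    = 32 (unused)
    rbx spill            stack_size = 40    rbx_off   = 40
    bpf_tramp_run_ctx    stack_size = 64    run_ctx_off = 64
    <= 6 args, no on-stack argument area    arg_stack_off = 64

With these numbers the prologue emits 'sub rsp, 64' and every offset used by the emit_stx()/emit_ldx() calls above is a small negative 8-bit displacement off rbp.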
3ba026fc SL |
3348 | void *arch_alloc_bpf_trampoline(unsigned int size) |
3349 | { | |
3350 | return bpf_prog_pack_alloc(size, jit_fill_hole); | |
3351 | } | |
3352 | ||
3353 | void arch_free_bpf_trampoline(void *image, unsigned int size) | |
3354 | { | |
3355 | bpf_prog_pack_free(image, size); | |
3356 | } | |
3357 | ||
c733239f | 3358 | int arch_protect_bpf_trampoline(void *image, unsigned int size) |
3ba026fc | 3359 | { |
c733239f | 3360 | return 0; |
3ba026fc SL |
3361 | } |
3362 | ||
96d1b7c0 SL |
3363 | int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, |
3364 | const struct btf_func_model *m, u32 flags, | |
3365 | struct bpf_tramp_links *tlinks, | |
3366 | void *func_addr) | |
3367 | { | |
3ba026fc SL |
3368 | void *rw_image, *tmp; |
3369 | int ret; | |
3370 | u32 size = image_end - image; | |
3371 | ||
3372 | /* rw_image doesn't need to be in module memory range, so we can | |
3373 | * use kvmalloc. | |
3374 | */ | |
3375 | rw_image = kvmalloc(size, GFP_KERNEL); | |
3376 | if (!rw_image) | |
3377 | return -ENOMEM; | |
3378 | ||
3379 | ret = __arch_prepare_bpf_trampoline(im, rw_image, rw_image + size, image, m, | |
3380 | flags, tlinks, func_addr); | |
3381 | if (ret < 0) | |
3382 | goto out; | |
3383 | ||
3384 | tmp = bpf_arch_text_copy(image, rw_image, size); | |
3385 | if (IS_ERR(tmp)) | |
3386 | ret = PTR_ERR(tmp); | |
3387 | out: | |
3388 | kvfree(rw_image); | |
3389 | return ret; | |
96d1b7c0 SL |
3390 | } |
3391 | ||
3392 | int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, | |
3393 | struct bpf_tramp_links *tlinks, void *func_addr) | |
3394 | { | |
3395 | struct bpf_tramp_image im; | |
3396 | void *image; | |
3397 | int ret; | |
3398 | ||
3399 | /* Allocate a temporary buffer for __arch_prepare_bpf_trampoline(). | |
3400 | * This will NOT cause fragmentation in direct map, as we do not | |
3401 | * call set_memory_*() on this buffer. | |
3402 | * | |
3403 | * We cannot use kvmalloc here, because we need image to be in | |
3404 | * module memory range. | |
3405 | */ | |
3406 | image = bpf_jit_alloc_exec(PAGE_SIZE); | |
3407 | if (!image) | |
3408 | return -ENOMEM; | |
3409 | ||
3ba026fc SL |
3410 | ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, image, |
3411 | m, flags, tlinks, func_addr); | |
96d1b7c0 SL |
3412 | bpf_jit_free_exec(image); |
3413 | return ret; | |
3414 | } | |
3415 | ||
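The five arch hooks above are driven by the generic trampoline code; below is a minimal sketch of how a caller can combine them, assuming it only needs the size/alloc/prepare/protect/free sequence. example_build_trampoline() is a made-up name and error handling is trimmed; the real driver is bpf_trampoline_update() in kernel/bpf/trampoline.c.

	static int example_build_trampoline(struct bpf_tramp_image *im,
					    const struct btf_func_model *m, u32 flags,
					    struct bpf_tramp_links *tlinks, void *func_addr)
	{
		int size, err;

		/* dry-run the JIT into a scratch page to learn the required size */
		size = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
		if (size <= 0)
			return size < 0 ? size : -EINVAL;

		/* executable destination carved out of the prog pack */
		im->image = arch_alloc_bpf_trampoline(size);
		if (!im->image)
			return -ENOMEM;

		/* JIT into a writable buffer, then text_poke it into im->image */
		err = arch_prepare_bpf_trampoline(im, im->image, im->image + size,
						  m, flags, tlinks, func_addr);
		if (err < 0) {
			arch_free_bpf_trampoline(im->image, size);
			return err;
		}
		return arch_protect_bpf_trampoline(im->image, size);
	}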
19c02415 | 3416 | static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf) |
75ccbef6 | 3417 | { |
7e639208 | 3418 | u8 *jg_reloc, *prog = *pprog; |
ced50fc4 | 3419 | int pivot, err, jg_bytes = 1; |
75ccbef6 BT |
3420 | s64 jg_offset; |
3421 | ||
3422 | if (a == b) { | |
3423 | /* Leaf node of recursion, i.e. not a range of indices | |
3424 | * anymore. | |
3425 | */ | |
3426 | EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ | |
3427 | if (!is_simm32(progs[a])) | |
3428 | return -1; | |
3429 | EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), | |
3430 | progs[a]); | |
3431 | err = emit_cond_near_jump(&prog, /* je func */ | |
19c02415 | 3432 | (void *)progs[a], image + (prog - buf), |
75ccbef6 BT |
3433 | X86_JE); |
3434 | if (err) | |
3435 | return err; | |
3436 | ||
19c02415 | 3437 | emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf)); |
75ccbef6 BT |
3438 | |
3439 | *pprog = prog; | |
3440 | return 0; | |
3441 | } | |
3442 | ||
3443 | /* Not a leaf node, so we pivot, and recursively descend into | |
3444 | * the lower and upper ranges. | |
3445 | */ | |
3446 | pivot = (b - a) / 2; | |
3447 | EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */ | |
3448 | if (!is_simm32(progs[a + pivot])) | |
3449 | return -1; | |
3450 | EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]); | |
3451 | ||
3452 | if (pivot > 2) { /* jg upper_part */ | |
3453 | /* Require near jump. */ | |
3454 | jg_bytes = 4; | |
3455 | EMIT2_off32(0x0F, X86_JG + 0x10, 0); | |
3456 | } else { | |
3457 | EMIT2(X86_JG, 0); | |
3458 | } | |
3459 | jg_reloc = prog; | |
3460 | ||
3461 | err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */ | |
19c02415 | 3462 | progs, image, buf); |
75ccbef6 BT |
3463 | if (err) |
3464 | return err; | |
3465 | ||
116eb788 BT |
3466 | /* From Intel 64 and IA-32 Architectures Optimization |
3467 | * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler | |
3468 | * Coding Rule 11: All branch targets should be 16-byte | |
3469 | * aligned. | |
3470 | */ | |
7e639208 | 3471 | emit_align(&prog, 16); |
75ccbef6 BT |
3472 | jg_offset = prog - jg_reloc; |
3473 | emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes); | |
3474 | ||
3475 | err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */ | |
19c02415 | 3476 | b, progs, image, buf); |
75ccbef6 BT |
3477 | if (err) |
3478 | return err; | |
3479 | ||
3480 | *pprog = prog; | |
3481 | return 0; | |
3482 | } | |
3483 | ||
3484 | static int cmp_ips(const void *a, const void *b) | |
3485 | { | |
3486 | const s64 *ipa = a; | |
3487 | const s64 *ipb = b; | |
3488 | ||
3489 | if (*ipa > *ipb) | |
3490 | return 1; | |
3491 | if (*ipa < *ipb) | |
3492 | return -1; | |
3493 | return 0; | |
3494 | } | |
3495 | ||
19c02415 | 3496 | int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs) |
75ccbef6 | 3497 | { |
19c02415 | 3498 | u8 *prog = buf; |
75ccbef6 BT |
3499 | |
3500 | sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL); | |
19c02415 | 3501 | return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf); |
75ccbef6 BT |
3502 | } |
3503 | ||
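The control flow that emit_bpf_dispatcher() generates can be written as ordinary C over the sorted funcs[] array; this is only a sketch of the logic (binary search, then either the direct 'je func' hit or the indirect 'jmp *rdx' fallback), not the machine code itself:

	static s64 example_dispatch(const s64 *funcs, int a, int b, s64 target)
	{
		while (a < b) {
			int pivot = a + (b - a) / 2;

			if (target > funcs[pivot])	/* the emitted "jg upper_part" */
				a = pivot + 1;
			else				/* fall through into lower_part */
				b = pivot;
		}
		/* leaf: direct jump on a match, otherwise the indirect-jump path */
		return funcs[a] == target ? funcs[a] : 0;
	}

Because funcs[] is sorted first, only about log2(num_funcs) compares separate the dispatcher entry point from the matched program.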
7d1cd70d YS |
3504 | static const char *bpf_get_prog_name(struct bpf_prog *prog) |
3505 | { | |
3506 | if (prog->aux->ksym.prog) | |
3507 | return prog->aux->ksym.name; | |
3508 | return prog->aux->name; | |
3509 | } | |
3510 | ||
3511 | static void priv_stack_init_guard(void __percpu *priv_stack_ptr, int alloc_size) | |
3512 | { | |
3513 | int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3; | |
3514 | u64 *stack_ptr; | |
3515 | ||
3516 | for_each_possible_cpu(cpu) { | |
3517 | stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu); | |
3518 | stack_ptr[0] = PRIV_STACK_GUARD_VAL; | |
3519 | stack_ptr[underflow_idx] = PRIV_STACK_GUARD_VAL; | |
3520 | } | |
3521 | } | |
3522 | ||
3523 | static void priv_stack_check_guard(void __percpu *priv_stack_ptr, int alloc_size, | |
3524 | struct bpf_prog *prog) | |
3525 | { | |
3526 | int cpu, underflow_idx = (alloc_size - PRIV_STACK_GUARD_SZ) >> 3; | |
3527 | u64 *stack_ptr; | |
3528 | ||
3529 | for_each_possible_cpu(cpu) { | |
3530 | stack_ptr = per_cpu_ptr(priv_stack_ptr, cpu); | |
3531 | if (stack_ptr[0] != PRIV_STACK_GUARD_VAL || | |
3532 | stack_ptr[underflow_idx] != PRIV_STACK_GUARD_VAL) { | |
3533 | pr_err("BPF private stack overflow/underflow detected for prog %sx\n", | |
3534 | bpf_get_prog_name(prog)); | |
3535 | break; | |
3536 | } | |
3537 | } | |
3538 | } | |
3539 | ||
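To put numbers on the guard layout (assuming PRIV_STACK_GUARD_SZ is a single 8-byte word, purely for illustration): a program whose rounded stack depth is 64 bytes gets alloc_size = 64 + 2 * 8 = 80 bytes per CPU, and underflow_idx = (80 - 8) >> 3 = 9, so u64 slots 0 and 9 hold PRIV_STACK_GUARD_VAL while slots 1..8 are the usable private stack; a JITed store that strays past either end of that window clobbers a guard word and is reported by priv_stack_check_guard() when the program is freed.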
1c2a088a | 3540 | struct x64_jit_data { |
1022a549 | 3541 | struct bpf_binary_header *rw_header; |
1c2a088a AS |
3542 | struct bpf_binary_header *header; |
3543 | int *addrs; | |
3544 | u8 *image; | |
3545 | int proglen; | |
3546 | struct jit_context ctx; | |
3547 | }; | |
3548 | ||
93c5aecc GL |
3549 | #define MAX_PASSES 20 |
3550 | #define PADDING_PASSES (MAX_PASSES - 5) | |
3551 | ||
d1c55ab5 | 3552 | struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) |
f3c2af7b | 3553 | { |
1022a549 | 3554 | struct bpf_binary_header *rw_header = NULL; |
f3c2af7b | 3555 | struct bpf_binary_header *header = NULL; |
959a7579 | 3556 | struct bpf_prog *tmp, *orig_prog = prog; |
7d1cd70d | 3557 | void __percpu *priv_stack_ptr = NULL; |
1c2a088a | 3558 | struct x64_jit_data *jit_data; |
7d1cd70d | 3559 | int priv_stack_alloc_sz; |
f3c2af7b AS |
3560 | int proglen, oldproglen = 0; |
3561 | struct jit_context ctx = {}; | |
959a7579 | 3562 | bool tmp_blinded = false; |
1c2a088a | 3563 | bool extra_pass = false; |
93c5aecc | 3564 | bool padding = false; |
1022a549 | 3565 | u8 *rw_image = NULL; |
f3c2af7b AS |
3566 | u8 *image = NULL; |
3567 | int *addrs; | |
3568 | int pass; | |
3569 | int i; | |
3570 | ||
60b58afc | 3571 | if (!prog->jit_requested) |
959a7579 DB |
3572 | return orig_prog; |
3573 | ||
3574 | tmp = bpf_jit_blind_constants(prog); | |
a2c7a983 IM |
3575 | /* |
3576 | * If blinding was requested and we failed during blinding, | |
959a7579 DB |
3577 | * we must fall back to the interpreter. |
3578 | */ | |
3579 | if (IS_ERR(tmp)) | |
3580 | return orig_prog; | |
3581 | if (tmp != prog) { | |
3582 | tmp_blinded = true; | |
3583 | prog = tmp; | |
3584 | } | |
0a14842f | 3585 | |
1c2a088a AS |
3586 | jit_data = prog->aux->jit_data; |
3587 | if (!jit_data) { | |
3588 | jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); | |
3589 | if (!jit_data) { | |
3590 | prog = orig_prog; | |
3591 | goto out; | |
3592 | } | |
3593 | prog->aux->jit_data = jit_data; | |
3594 | } | |
7d1cd70d YS |
3595 | priv_stack_ptr = prog->aux->priv_stack_ptr; |
3596 | if (!priv_stack_ptr && prog->aux->jits_use_priv_stack) { | |
3597 | /* Allocate the actual private stack: the verifier-calculated |
3598 | * stack size plus two memory guards to detect overflow and |
3599 | * underflow. |
3600 | */ | |
3601 | priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) + | |
3602 | 2 * PRIV_STACK_GUARD_SZ; | |
3603 | priv_stack_ptr = __alloc_percpu_gfp(priv_stack_alloc_sz, 8, GFP_KERNEL); | |
3604 | if (!priv_stack_ptr) { | |
3605 | prog = orig_prog; | |
3606 | goto out_priv_stack; | |
3607 | } | |
3608 | ||
3609 | priv_stack_init_guard(priv_stack_ptr, priv_stack_alloc_sz); | |
3610 | prog->aux->priv_stack_ptr = priv_stack_ptr; | |
3611 | } | |
1c2a088a AS |
3612 | addrs = jit_data->addrs; |
3613 | if (addrs) { | |
3614 | ctx = jit_data->ctx; | |
3615 | oldproglen = jit_data->proglen; | |
3616 | image = jit_data->image; | |
3617 | header = jit_data->header; | |
1022a549 SL |
3618 | rw_header = jit_data->rw_header; |
3619 | rw_image = (void *)rw_header + ((void *)image - (void *)header); | |
1c2a088a | 3620 | extra_pass = true; |
93c5aecc | 3621 | padding = true; |
1c2a088a AS |
3622 | goto skip_init_addrs; |
3623 | } | |
de920fc6 | 3624 | addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL); |
959a7579 DB |
3625 | if (!addrs) { |
3626 | prog = orig_prog; | |
1c2a088a | 3627 | goto out_addrs; |
959a7579 | 3628 | } |
f3c2af7b | 3629 | |
a2c7a983 IM |
3630 | /* |
3631 | * Before the first pass, make a rough estimate of addrs[]: |
3632 | * each BPF instruction is translated to less than 64 bytes. |
f3c2af7b | 3633 | */ |
7c2e988f | 3634 | for (proglen = 0, i = 0; i <= prog->len; i++) { |
f3c2af7b AS |
3635 | proglen += 64; |
3636 | addrs[i] = proglen; | |
3637 | } | |
3638 | ctx.cleanup_addr = proglen; | |
1c2a088a | 3639 | skip_init_addrs: |
f3c2af7b | 3640 | |
a2c7a983 IM |
3641 | /* |
3642 | * JITed image shrinks with every pass and the loop iterates | |
3643 | * until the image stops shrinking. Very large BPF programs | |
3f7352bf | 3644 | * may converge on the last pass. In such case do one more |
a2c7a983 | 3645 | * pass to emit the final image. |
3f7352bf | 3646 | */ |
93c5aecc GL |
3647 | for (pass = 0; pass < MAX_PASSES || image; pass++) { |
3648 | if (!padding && pass >= PADDING_PASSES) | |
3649 | padding = true; | |
1022a549 | 3650 | proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding); |
f3c2af7b | 3651 | if (proglen <= 0) { |
3aab8884 | 3652 | out_image: |
f3c2af7b | 3653 | image = NULL; |
676b2daa SL |
3654 | if (header) { |
3655 | bpf_arch_text_copy(&header->size, &rw_header->size, | |
3656 | sizeof(rw_header->size)); | |
1022a549 | 3657 | bpf_jit_binary_pack_free(header, rw_header); |
676b2daa | 3658 | } |
73e14451 | 3659 | /* Fall back to interpreter mode */ |
959a7579 | 3660 | prog = orig_prog; |
73e14451 HT |
3661 | if (extra_pass) { |
3662 | prog->bpf_func = NULL; | |
3663 | prog->jited = 0; | |
3664 | prog->jited_len = 0; | |
3665 | } | |
959a7579 | 3666 | goto out_addrs; |
f3c2af7b | 3667 | } |
0a14842f | 3668 | if (image) { |
e0ee9c12 | 3669 | if (proglen != oldproglen) { |
f3c2af7b AS |
3670 | pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", |
3671 | proglen, oldproglen); | |
3aab8884 | 3672 | goto out_image; |
e0ee9c12 | 3673 | } |
0a14842f ED |
3674 | break; |
3675 | } | |
3676 | if (proglen == oldproglen) { | |
3dec541b AS |
3677 | /* |
3678 | * The number of entries in extable is the number of BPF_LDX | |
3679 | * insns that access kernel memory via "pointer to BTF type". | |
3680 | * The verifier changed their opcode from LDX|MEM|size | |
3681 | * to LDX|PROBE_MEM|size to make JITing easier. | |
3682 | */ | |
3683 | u32 align = __alignof__(struct exception_table_entry); | |
3684 | u32 extable_size = prog->aux->num_exentries * | |
3685 | sizeof(struct exception_table_entry); | |
3686 | ||
3687 | /* allocate module memory for x86 insns and extable */ | |
1022a549 SL |
3688 | header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size, |
3689 | &image, align, &rw_header, &rw_image, | |
3690 | jit_fill_hole); | |
959a7579 DB |
3691 | if (!header) { |
3692 | prog = orig_prog; | |
3693 | goto out_addrs; | |
3694 | } | |
3dec541b | 3695 | prog->aux->extable = (void *) image + roundup(proglen, align); |
0a14842f ED |
3696 | } |
3697 | oldproglen = proglen; | |
6007b080 | 3698 | cond_resched(); |
0a14842f | 3699 | } |
79617801 | 3700 | |
0a14842f | 3701 | if (bpf_jit_enable > 1) |
ad96f1c9 | 3702 | bpf_jit_dump(prog->len, proglen, pass + 1, rw_image); |
0a14842f ED |
3703 | |
3704 | if (image) { | |
1c2a088a | 3705 | if (!prog->is_func || extra_pass) { |
1022a549 SL |
3706 | /* |
3707 | * bpf_jit_binary_pack_finalize fails in two scenarios: | |
3708 | * 1) header is not pointing to proper module memory; | |
3709 | * 2) the arch doesn't support bpf_arch_text_copy(). | |
3710 | * | |
f95f768f | 3711 | * Both cases are serious bugs and justify WARN_ON. |
1022a549 | 3712 | */ |
9919c5c9 | 3713 | if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) { |
73e14451 HT |
3714 | /* header has been freed */ |
3715 | header = NULL; | |
3716 | goto out_image; | |
f95f768f SL |
3717 | } |
3718 | ||
428d5df1 | 3719 | bpf_tail_call_direct_fixup(prog); |
1c2a088a AS |
3720 | } else { |
3721 | jit_data->addrs = addrs; | |
3722 | jit_data->ctx = ctx; | |
3723 | jit_data->proglen = proglen; | |
3724 | jit_data->image = image; | |
3725 | jit_data->header = header; | |
1022a549 | 3726 | jit_data->rw_header = rw_header; |
1c2a088a | 3727 | } |
4f9087f1 PZ |
3728 | /* |
3729 | * ctx.prog_offset is used when CFI preambles put code *before* | |
3730 | * the function. See emit_cfi(). For FineIBT specifically this code | |
3731 | * can also be executed and bpf_prog_kallsyms_add() will | |
3732 | * generate an additional symbol to cover this, hence also | |
3733 | * decrement proglen. | |
3734 | */ | |
3735 | prog->bpf_func = (void *)image + cfi_get_offset(); | |
a91263d5 | 3736 | prog->jited = 1; |
4f9087f1 | 3737 | prog->jited_len = proglen - cfi_get_offset(); |
9d5ecb09 DB |
3738 | } else { |
3739 | prog = orig_prog; | |
0a14842f | 3740 | } |
959a7579 | 3741 | |
39f56ca9 | 3742 | if (!image || !prog->is_func || extra_pass) { |
c454a46b | 3743 | if (image) |
7c2e988f | 3744 | bpf_prog_fill_jited_linfo(prog, addrs + 1); |
959a7579 | 3745 | out_addrs: |
de920fc6 | 3746 | kvfree(addrs); |
7d1cd70d YS |
3747 | if (!image && priv_stack_ptr) { |
3748 | free_percpu(priv_stack_ptr); | |
3749 | prog->aux->priv_stack_ptr = NULL; | |
3750 | } | |
3751 | out_priv_stack: | |
1c2a088a AS |
3752 | kfree(jit_data); |
3753 | prog->aux->jit_data = NULL; | |
3754 | } | |
959a7579 DB |
3755 | out: |
3756 | if (tmp_blinded) | |
3757 | bpf_jit_prog_release_other(prog, prog == orig_prog ? | |
3758 | tmp : orig_prog); | |
d1c55ab5 | 3759 | return prog; |
0a14842f | 3760 | } |
e6ac2450 MKL |
3761 | |
3762 | bool bpf_jit_supports_kfunc_call(void) | |
3763 | { | |
3764 | return true; | |
3765 | } | |
ebc1415d SL |
3766 | |
3767 | void *bpf_arch_text_copy(void *dst, void *src, size_t len) | |
3768 | { | |
3769 | if (text_poke_copy(dst, src, len) == NULL) | |
3770 | return ERR_PTR(-EINVAL); | |
3771 | return dst; | |
3772 | } | |
95acd881 TA |
3773 | |
3774 | /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */ | |
3775 | bool bpf_jit_supports_subprog_tailcalls(void) | |
3776 | { | |
3777 | return true; | |
3778 | } | |
1d5f82d9 | 3779 | |
7bdbf744 AN |
3780 | bool bpf_jit_supports_percpu_insn(void) |
3781 | { | |
3782 | return true; | |
3783 | } | |
3784 | ||
1d5f82d9 SL |
3785 | void bpf_jit_free(struct bpf_prog *prog) |
3786 | { | |
3787 | if (prog->jited) { | |
3788 | struct x64_jit_data *jit_data = prog->aux->jit_data; | |
3789 | struct bpf_binary_header *hdr; | |
7d1cd70d YS |
3790 | void __percpu *priv_stack_ptr; |
3791 | int priv_stack_alloc_sz; | |
1d5f82d9 SL |
3792 | |
3793 | /* | |
3794 | * If we fail the final pass of JIT (from jit_subprogs), | |
3795 | * the program may not be finalized yet. Call finalize here | |
3796 | * before freeing it. | |
3797 | */ | |
3798 | if (jit_data) { | |
9919c5c9 | 3799 | bpf_jit_binary_pack_finalize(jit_data->header, |
1d5f82d9 SL |
3800 | jit_data->rw_header); |
3801 | kvfree(jit_data->addrs); | |
3802 | kfree(jit_data); | |
3803 | } | |
4f9087f1 | 3804 | prog->bpf_func = (void *)prog->bpf_func - cfi_get_offset(); |
1d5f82d9 SL |
3805 | hdr = bpf_jit_binary_pack_hdr(prog); |
3806 | bpf_jit_binary_pack_free(hdr, NULL); | |
7d1cd70d YS |
3807 | priv_stack_ptr = prog->aux->priv_stack_ptr; |
3808 | if (priv_stack_ptr) { | |
3809 | priv_stack_alloc_sz = round_up(prog->aux->stack_depth, 8) + | |
3810 | 2 * PRIV_STACK_GUARD_SZ; | |
3811 | priv_stack_check_guard(priv_stack_ptr, priv_stack_alloc_sz, prog); | |
3812 | free_percpu(prog->aux->priv_stack_ptr); | |
3813 | } | |
1d5f82d9 SL |
3814 | WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog)); |
3815 | } | |
3816 | ||
3817 | bpf_prog_unlock_free(prog); | |
3818 | } | |
fd5d27b7 KKD |
3819 | |
3820 | bool bpf_jit_supports_exceptions(void) | |
3821 | { | |
3822 | /* We unwind through both kernel frames (starting from within bpf_throw | |
5bfdb4fb KKD |
3823 | * call) and BPF frames. Therefore we require ORC unwinder to be enabled |
3824 | * to walk kernel frames and reach BPF frames in the stack trace. | |
fd5d27b7 | 3825 | */ |
5bfdb4fb | 3826 | return IS_ENABLED(CONFIG_UNWINDER_ORC); |
fd5d27b7 KKD |
3827 | } |
3828 | ||
7d1cd70d YS |
3829 | bool bpf_jit_supports_private_stack(void) |
3830 | { | |
3831 | return true; | |
3832 | } | |
3833 | ||
fd5d27b7 KKD |
3834 | void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie) |
3835 | { | |
5bfdb4fb | 3836 | #if defined(CONFIG_UNWINDER_ORC) |
fd5d27b7 KKD |
3837 | struct unwind_state state; |
3838 | unsigned long addr; | |
3839 | ||
3840 | for (unwind_start(&state, current, NULL, NULL); !unwind_done(&state); | |
3841 | unwind_next_frame(&state)) { | |
3842 | addr = unwind_get_return_address(&state); | |
3843 | if (!addr || !consume_fn(cookie, (u64)addr, (u64)state.sp, (u64)state.bp)) | |
3844 | break; | |
3845 | } | |
3846 | return; | |
3847 | #endif | |
3848 | WARN(1, "verification of programs using bpf_throw should have failed\n"); | |
3849 | } | |
4b7de801 JO |
3850 | |
3851 | void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, | |
3852 | struct bpf_prog *new, struct bpf_prog *old) | |
3853 | { | |
3854 | u8 *old_addr, *new_addr, *old_bypass_addr; | |
3855 | int ret; | |
3856 | ||
3857 | old_bypass_addr = old ? NULL : poke->bypass_addr; | |
3858 | old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL; | |
3859 | new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL; | |
3860 | ||
3861 | /* | |
3862 | * On program loading or teardown, the program's kallsym entry | |
3863 | * might not be in place, so we use __bpf_arch_text_poke to skip | |
3864 | * the kallsyms check. | |
3865 | */ | |
3866 | if (new) { | |
3867 | ret = __bpf_arch_text_poke(poke->tailcall_target, | |
3868 | BPF_MOD_JUMP, | |
3869 | old_addr, new_addr); | |
3870 | BUG_ON(ret < 0); | |
3871 | if (!old) { | |
3872 | ret = __bpf_arch_text_poke(poke->tailcall_bypass, | |
3873 | BPF_MOD_JUMP, | |
3874 | poke->bypass_addr, | |
3875 | NULL); | |
3876 | BUG_ON(ret < 0); | |
3877 | } | |
3878 | } else { | |
3879 | ret = __bpf_arch_text_poke(poke->tailcall_bypass, | |
3880 | BPF_MOD_JUMP, | |
3881 | old_bypass_addr, | |
3882 | poke->bypass_addr); | |
3883 | BUG_ON(ret < 0); | |
3884 | /* Let other CPUs finish the execution of the program |
3885 | * so that it will not be possible to expose them |
3886 | * to an invalid nop, stack unwind or nop state. |
3887 | */ | |
3888 | if (!ret) | |
3889 | synchronize_rcu(); | |
3890 | ret = __bpf_arch_text_poke(poke->tailcall_target, | |
3891 | BPF_MOD_JUMP, | |
3892 | old_addr, NULL); | |
3893 | BUG_ON(ret < 0); | |
3894 | } | |
3895 | } | |
7c05e7f3 | 3896 | |
142fd4d2 AS |
3897 | bool bpf_jit_supports_arena(void) |
3898 | { | |
3899 | return true; | |
3900 | } | |
3901 | ||
d503a04f AS |
3902 | bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) |
3903 | { | |
3904 | if (!in_arena) | |
3905 | return true; | |
3906 | switch (insn->code) { | |
3907 | case BPF_STX | BPF_ATOMIC | BPF_W: | |
3908 | case BPF_STX | BPF_ATOMIC | BPF_DW: | |
3909 | if (insn->imm == (BPF_AND | BPF_FETCH) || | |
3910 | insn->imm == (BPF_OR | BPF_FETCH) || | |
3911 | insn->imm == (BPF_XOR | BPF_FETCH)) | |
3912 | return false; | |
3913 | } | |
3914 | return true; | |
3915 | } | |
3916 | ||
7c05e7f3 HT |
3917 | bool bpf_jit_supports_ptr_xchg(void) |
3918 | { | |
3919 | return true; | |
3920 | } | |
66e13b61 PM |
3921 | |
3922 | /* x86-64 JIT emits its own code to filter user addresses so return 0 here */ | |
3923 | u64 bpf_arch_uaddress_limit(void) | |
3924 | { | |
3925 | return 0; | |
3926 | } | |
2fb76182 KKD |
3927 | |
3928 | bool bpf_jit_supports_timed_may_goto(void) | |
3929 | { | |
3930 | return true; | |
3931 | } |