1 // SPDX-License-Identifier: GPL-2.0-only
3 * bpf_jit_comp.c: BPF JIT compiler
5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <linux/sort.h>
14 #include <asm/extable.h>
15 #include <asm/set_memory.h>
16 #include <asm/nospec-branch.h>
17 #include <asm/text-patching.h>
18 #include <asm/asm-prototypes.h>
20 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
33 #define EMIT(bytes, len) \
34 do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
36 #define EMIT1(b1) EMIT(b1, 1)
37 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
38 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
39 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
41 #define EMIT1_off32(b1, off) \
42 do { EMIT1(b1); EMIT(off, 4); } while (0)
43 #define EMIT2_off32(b1, b2, off) \
44 do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
45 #define EMIT3_off32(b1, b2, b3, off) \
46 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
47 #define EMIT4_off32(b1, b2, b3, b4, off) \
48 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
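/*
 * Worked example (illustrative): EMIT3_off32(0x48, 0x81, 0xEC, 32)
 * emits the seven bytes 48 81 EC 20 00 00 00, i.e. 'sub rsp, 32',
 * and advances cnt by 7.
 */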
50 static bool is_imm8(int value)
52 return value <= 127 && value >= -128;
55 static bool is_simm32(s64 value)
57 return value == (s64)(s32)value;
60 static bool is_uimm32(u64 value)
62 return value == (u64)(u32)value;
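/*
 * Examples: is_imm8(-128) and is_imm8(127) are true, is_imm8(128) is
 * false; is_simm32(S32_MIN) is true, while is_simm32(1LL << 31) is
 * false because 0x80000000 sign-extends to a different value.
 */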
66 #define EMIT_mov(DST, SRC) \
69 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
72 static int bpf_size_to_x86_bytes(int bpf_size)
74 if (bpf_size == BPF_W)
76 else if (bpf_size == BPF_H)
78 else if (bpf_size == BPF_B)
80 else if (bpf_size == BPF_DW)
87 * List of x86 cond jump opcodes (. + s8)
88 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
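 * For instance, X86_JE is 0x74 (je rel8); adding 0x10 and prefixing
 * 0x0f yields 0x0F 0x84 (je rel32).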
101 /* Pick a register outside of BPF range for JIT internal work */
102 #define AUX_REG (MAX_BPF_JIT_REG + 1)
103 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
106 * The following table maps BPF registers to x86-64 registers.
108 * x86-64 register R12 is unused, since if used as base address
109 * register in load/store instructions, it always needs an
110 * extra byte of encoding and is callee saved.
112 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
113 * trampoline. x86-64 register R10 is used for blinding (if enabled).
115 static const int reg2hex[] = {
116 [BPF_REG_0] = 0, /* RAX */
117 [BPF_REG_1] = 7, /* RDI */
118 [BPF_REG_2] = 6, /* RSI */
119 [BPF_REG_3] = 2, /* RDX */
120 [BPF_REG_4] = 1, /* RCX */
121 [BPF_REG_5] = 0, /* R8 */
122 [BPF_REG_6] = 3, /* RBX callee saved */
123 [BPF_REG_7] = 5, /* R13 callee saved */
124 [BPF_REG_8] = 6, /* R14 callee saved */
125 [BPF_REG_9] = 7, /* R15 callee saved */
126 [BPF_REG_FP] = 5, /* RBP readonly */
127 [BPF_REG_AX] = 2, /* R10 temp register */
128 [AUX_REG] = 3, /* R11 temp register */
129 [X86_REG_R9] = 1, /* R9 register, 6th function argument */
132 static const int reg2pt_regs[] = {
133 [BPF_REG_0] = offsetof(struct pt_regs, ax),
134 [BPF_REG_1] = offsetof(struct pt_regs, di),
135 [BPF_REG_2] = offsetof(struct pt_regs, si),
136 [BPF_REG_3] = offsetof(struct pt_regs, dx),
137 [BPF_REG_4] = offsetof(struct pt_regs, cx),
138 [BPF_REG_5] = offsetof(struct pt_regs, r8),
139 [BPF_REG_6] = offsetof(struct pt_regs, bx),
140 [BPF_REG_7] = offsetof(struct pt_regs, r13),
141 [BPF_REG_8] = offsetof(struct pt_regs, r14),
142 [BPF_REG_9] = offsetof(struct pt_regs, r15),
146 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
147 * which need extra byte of encoding.
148 * rax,rcx,...,rbp have simpler encoding
150 static bool is_ereg(u32 reg)
152 return (1 << reg) & (BIT(BPF_REG_5) |
162 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
163 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
164 * of encoding. al,cl,dl,bl have simpler encoding.
166 static bool is_ereg_8l(u32 reg)
168 return is_ereg(reg) ||
169 (1 << reg) & (BIT(BPF_REG_1) |
174 static bool is_axreg(u32 reg)
176 return reg == BPF_REG_0;
179 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
180 static u8 add_1mod(u8 byte, u32 reg)
187 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
196 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
197 static u8 add_1reg(u8 byte, u32 dst_reg)
199 return byte + reg2hex[dst_reg];
202 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
203 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
205 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
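/*
 * Worked example: add_2reg(0xC0, BPF_REG_1, BPF_REG_2) is
 * 0xC0 + 7 + (6 << 3) = 0xF7, so EMIT3(0x48, 0x89, 0xF7) encodes
 * 'mov rdi, rsi'.
 */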
208 /* Some 1-byte opcodes for binary ALU operations */
209 static u8 simple_alu_opcodes[] = {
220 static void jit_fill_hole(void *area, unsigned int size)
222 /* Fill whole space with INT3 instructions */
223 memset(area, 0xcc, size);
227 int cleanup_addr; /* Epilogue code offset */
230 /* Maximum number of bytes emitted while JITing one eBPF insn */
231 #define BPF_MAX_INSN_SIZE 128
232 #define BPF_INSN_SAFETY 64
234 /* Number of bytes emit_patch() needs to generate instructions */
235 #define X86_PATCH_SIZE 5
236 /* Number of bytes that will be skipped on tailcall */
237 #define X86_TAIL_CALL_OFFSET 11
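/*
 * The 11 skipped bytes are: 5 (patchable nops) + 2 (xor eax, eax or
 * nop2) + 1 (push rbp) + 3 (mov rbp, rsp); see emit_prologue() below.
 */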
239 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
244 if (callee_regs_used[0])
245 EMIT1(0x53); /* push rbx */
246 if (callee_regs_used[1])
247 EMIT2(0x41, 0x55); /* push r13 */
248 if (callee_regs_used[2])
249 EMIT2(0x41, 0x56); /* push r14 */
250 if (callee_regs_used[3])
251 EMIT2(0x41, 0x57); /* push r15 */
255 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
260 if (callee_regs_used[3])
261 EMIT2(0x41, 0x5F); /* pop r15 */
262 if (callee_regs_used[2])
263 EMIT2(0x41, 0x5E); /* pop r14 */
264 if (callee_regs_used[1])
265 EMIT2(0x41, 0x5D); /* pop r13 */
266 if (callee_regs_used[0])
267 EMIT1(0x5B); /* pop rbx */
272 * Emit x86-64 prologue code for BPF program.
273 * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
274 * while jumping to another program
276 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
277 bool tail_call_reachable, bool is_subprog)
280 int cnt = X86_PATCH_SIZE;
282 /* BPF trampoline can be made to work without these nops,
283 * but let's waste 5 bytes for now and optimize later
285 memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
287 if (!ebpf_from_cbpf) {
288 if (tail_call_reachable && !is_subprog)
289 EMIT2(0x31, 0xC0); /* xor eax, eax */
291 EMIT2(0x66, 0x90); /* nop2 */
293 EMIT1(0x55); /* push rbp */
294 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
295 /* sub rsp, rounded_stack_depth */
297 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
298 if (tail_call_reachable)
299 EMIT1(0x50); /* push rax */
303 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
309 offset = func - (ip + X86_PATCH_SIZE);
310 if (!is_simm32(offset)) {
311 pr_err("Target call %p is out of range\n", func);
314 EMIT1_off32(opcode, offset);
319 static int emit_call(u8 **pprog, void *func, void *ip)
321 return emit_patch(pprog, func, ip, 0xE8);
324 static int emit_jump(u8 **pprog, void *func, void *ip)
326 return emit_patch(pprog, func, ip, 0xE9);
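/*
 * Both helpers emit a fixed 5-byte instruction: one opcode byte
 * (0xE8 call / 0xE9 jmp) followed by a rel32 measured from the end
 * of the instruction, so e.g. a call to the very next byte encodes
 * as E8 00 00 00 00.
 */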
329 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
330 void *old_addr, void *new_addr,
331 const bool text_live)
333 const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
334 u8 old_insn[X86_PATCH_SIZE];
335 u8 new_insn[X86_PATCH_SIZE];
339 memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
342 ret = t == BPF_MOD_CALL ?
343 emit_call(&prog, old_addr, ip) :
344 emit_jump(&prog, old_addr, ip);
349 memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
352 ret = t == BPF_MOD_CALL ?
353 emit_call(&prog, new_addr, ip) :
354 emit_jump(&prog, new_addr, ip);
360 mutex_lock(&text_mutex);
361 if (memcmp(ip, old_insn, X86_PATCH_SIZE))
364 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
366 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
368 memcpy(ip, new_insn, X86_PATCH_SIZE);
372 mutex_unlock(&text_mutex);
376 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
377 void *old_addr, void *new_addr)
379 if (!is_kernel_text((long)ip) &&
380 !is_bpf_text_address((long)ip))
381 /* BPF poking in modules is not supported */
384 return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
387 static int get_pop_bytes(bool *callee_regs_used)
391 if (callee_regs_used[3])
393 if (callee_regs_used[2])
395 if (callee_regs_used[1])
397 if (callee_regs_used[0])
404 * Generate the following code:
406 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
407 * if (index >= array->map.max_entries)
409 * if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
411 * prog = array->ptrs[index];
414 * goto *(prog->bpf_func + prologue_size);
417 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
420 int tcc_off = -4 - round_up(stack_depth, 8);
428 /* count the additional bytes used for popping callee regs from stack
429 * that need to be taken into account for each of the offsets that
430 * are used for bailing out of the tail call
432 pop_bytes = get_pop_bytes(callee_regs_used);
444 * rdi - pointer to ctx
445 * rsi - pointer to bpf_array
446 * rdx - index in bpf_array
450 * if (index >= array->map.max_entries)
453 EMIT2(0x89, 0xD2); /* mov edx, edx */
454 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
455 offsetof(struct bpf_array, map.max_entries));
456 #define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
457 EMIT2(X86_JBE, OFFSET1); /* jbe out */
460 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
463 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
464 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
465 #define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
466 EMIT2(X86_JA, OFFSET2); /* ja out */
467 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
468 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
470 /* prog = array->ptrs[index]; */
471 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
472 offsetof(struct bpf_array, ptrs));
478 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
479 #define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
480 EMIT2(X86_JE, OFFSET3); /* je out */
483 pop_callee_regs(pprog, callee_regs_used);
486 EMIT1(0x58); /* pop rax */
488 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
489 round_up(stack_depth, 8));
491 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
492 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
493 offsetof(struct bpf_prog, bpf_func));
494 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
495 X86_TAIL_CALL_OFFSET);
497 * Now we're ready to jump into the next BPF program
498 * rdi == ctx (1st arg)
499 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
501 RETPOLINE_RCX_BPF_JIT();
507 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
508 u8 **pprog, int addr, u8 *image,
509 bool *callee_regs_used, u32 stack_depth)
511 int tcc_off = -4 - round_up(stack_depth, 8);
518 /* count the additional bytes used for popping callee regs from stack
519 * that need to be taken into account for the jump offset that is used for
520 * bailing out of the tail call when the limit is reached
522 pop_bytes = get_pop_bytes(callee_regs_used);
529 * - sub rsp, $val if depth > 0
532 poke_off = X86_PATCH_SIZE + pop_bytes + 1;
539 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
542 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
543 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
544 EMIT2(X86_JA, off1); /* ja out */
545 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
546 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
548 poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
549 poke->adj_off = X86_TAIL_CALL_OFFSET;
550 poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
551 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
553 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
554 poke->tailcall_bypass);
557 pop_callee_regs(pprog, callee_regs_used);
559 EMIT1(0x58); /* pop rax */
561 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
563 memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
564 prog += X86_PATCH_SIZE;
570 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
572 struct bpf_jit_poke_descriptor *poke;
573 struct bpf_array *array;
574 struct bpf_prog *target;
577 for (i = 0; i < prog->aux->size_poke_tab; i++) {
578 poke = &prog->aux->poke_tab[i];
579 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
581 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
584 array = container_of(poke->tail_call.map, struct bpf_array, map);
585 mutex_lock(&array->aux->poke_mutex);
586 target = array->ptrs[poke->tail_call.key];
588 /* Plain memcpy is used when the image is not live yet
589 * and not yet locked as read-only. Once the poke
590 * location is active (poke->tailcall_target_stable),
591 * any parallel bpf_arch_text_poke() might still occur
592 * on the read-write image until we finally lock it
593 * as read-only. Both modifications on the given image
594 * are done under text_mutex to avoid
597 ret = __bpf_arch_text_poke(poke->tailcall_target,
599 (u8 *)target->bpf_func +
600 poke->adj_off, false);
602 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
604 (u8 *)poke->tailcall_target +
605 X86_PATCH_SIZE, NULL, false);
608 WRITE_ONCE(poke->tailcall_target_stable, true);
609 mutex_unlock(&array->aux->poke_mutex);
613 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
614 u32 dst_reg, const u32 imm32)
621 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
622 * (which zero-extends imm32) to save 2 bytes.
624 if (sign_propagate && (s32)imm32 < 0) {
625 /* 'mov %rax, imm32' sign extends imm32 */
626 b1 = add_1mod(0x48, dst_reg);
629 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
634 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
638 if (is_ereg(dst_reg))
639 EMIT1(add_2mod(0x40, dst_reg, dst_reg));
642 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
646 /* mov %eax, imm32 */
647 if (is_ereg(dst_reg))
648 EMIT1(add_1mod(0x40, dst_reg));
649 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
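/*
 * Resulting encodings for dst_reg == BPF_REG_0 (rax), illustratively:
 *   imm32 == -1, sign_propagate: 48 C7 C0 FF FF FF FF (mov rax, -1)
 *   imm32 == 0:                  31 C0                (xor eax, eax)
 *   imm32 == 5:                  B8 05 00 00 00       (mov eax, 5)
 */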
654 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
655 const u32 imm32_hi, const u32 imm32_lo)
660 if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
662 * For emitting plain u32, where the sign bit must not be
663 * propagated, LLVM tends to load imm64 over mov32
664 * directly, so save a couple of bytes by just doing
665 * 'mov %eax, imm32' instead.
667 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
669 /* movabsq %rax, imm64 */
670 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
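/* e.g. loading a 64-bit address into rdi encodes as 48 BF imm64 */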
678 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
685 EMIT_mov(dst_reg, src_reg);
688 if (is_ereg(dst_reg) || is_ereg(src_reg))
689 EMIT1(add_2mod(0x40, dst_reg, src_reg));
690 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
696 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
697 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
703 /* 1-byte signed displacement.
705 * If off == 0 we could skip this and save one extra byte, but
706 * the special case of x86 R13, which always needs an offset, is not
709 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
711 /* 4-byte signed displacement */
712 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
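/*
 * e.g. addressing *(rax + 8) takes the disp8 form (ModRM 0x40-based,
 * two bytes), while *(rax + 0x1000) takes the disp32 form (ModRM
 * 0x80-based, five bytes).
 */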
718 * Emit a REX byte if it will be necessary to address these registers
720 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
726 EMIT1(add_2mod(0x48, dst_reg, src_reg));
727 else if (is_ereg(dst_reg) || is_ereg(src_reg))
728 EMIT1(add_2mod(0x40, dst_reg, src_reg));
732 /* LDX: dst_reg = *(u8*)(src_reg + off) */
733 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
740 /* Emit 'movzx rax, byte ptr [rax + off]' */
741 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
744 /* Emit 'movzx rax, word ptr [rax + off]' */
745 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
748 /* Emit 'mov eax, dword ptr [rax+0x14]' */
749 if (is_ereg(dst_reg) || is_ereg(src_reg))
750 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
755 /* Emit 'mov rax, qword ptr [rax+0x14]' */
756 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
759 emit_insn_suffix(&prog, src_reg, dst_reg, off);
763 /* STX: *(u8*)(dst_reg + off) = src_reg */
764 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
771 /* Emit 'mov byte ptr [rax + off], al' */
772 if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
773 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
774 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
779 if (is_ereg(dst_reg) || is_ereg(src_reg))
780 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
785 if (is_ereg(dst_reg) || is_ereg(src_reg))
786 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
791 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
794 emit_insn_suffix(&prog, dst_reg, src_reg, off);
798 static int emit_atomic(u8 **pprog, u8 atomic_op,
799 u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
804 EMIT1(0xF0); /* lock prefix */
806 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
815 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
816 EMIT1(simple_alu_opcodes[atomic_op]);
818 case BPF_ADD | BPF_FETCH:
819 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
823 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
827 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
831 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
835 emit_insn_suffix(&prog, dst_reg, src_reg, off);
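/*
 * Illustrative results: BPF_ADD on a u64 becomes
 * 'lock add qword ptr [dst + off], src' (F0 48 01 /r); the fetching
 * variants use XADD (0F C1), XCHG (87) and CMPXCHG (0F B1) instead.
 */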
841 static bool ex_handler_bpf(const struct exception_table_entry *x,
842 struct pt_regs *regs, int trapnr,
843 unsigned long error_code, unsigned long fault_addr)
845 u32 reg = x->fixup >> 8;
847 /* jump over faulting load and clear dest register */
848 *(unsigned long *)((void *)regs + reg) = 0;
849 regs->ip += x->fixup & 0xff;
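/*
 * Worked example: for the 4-byte 'mov rbx, qword ptr [rax+0x14]' the
 * JIT stores fixup = 4 | (offsetof(struct pt_regs, bx) << 8), so the
 * handler zeroes regs->bx and advances regs->ip past the load.
 */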
853 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
854 bool *regs_used, bool *tail_call_seen)
858 for (i = 1; i <= insn_cnt; i++, insn++) {
859 if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
860 *tail_call_seen = true;
861 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
863 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
865 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
867 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
872 static int emit_nops(u8 **pprog, int len)
875 int i, noplen, cnt = 0;
880 if (noplen > ASM_NOP_MAX)
881 noplen = ASM_NOP_MAX;
883 for (i = 0; i < noplen; i++)
884 EMIT1(ideal_nops[noplen][i]);
893 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
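/*
 * INSN_SZ_DIFF is the size this BPF insn occupied in the previous
 * pass minus the bytes emitted for it so far in this pass; padding
 * with that many nops keeps the image from shrinking once it has
 * converged.
 */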
895 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
896 int oldproglen, struct jit_context *ctx, bool jmp_padding)
898 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
899 struct bpf_insn *insn = bpf_prog->insnsi;
900 bool callee_regs_used[4] = {};
901 int insn_cnt = bpf_prog->len;
902 bool tail_call_seen = false;
903 bool seen_exit = false;
904 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
905 int i, cnt = 0, excnt = 0;
906 int ilen, proglen = 0;
910 detect_reg_usage(insn, insn_cnt, callee_regs_used,
913 /* tail call's presence in current prog implies it is reachable */
914 tail_call_reachable |= tail_call_seen;
916 emit_prologue(&prog, bpf_prog->aux->stack_depth,
917 bpf_prog_was_classic(bpf_prog), tail_call_reachable,
918 bpf_prog->aux->func_idx != 0);
919 push_callee_regs(&prog, callee_regs_used);
923 memcpy(image + proglen, temp, ilen);
928 for (i = 1; i <= insn_cnt; i++, insn++) {
929 const s32 imm32 = insn->imm;
930 u32 dst_reg = insn->dst_reg;
931 u32 src_reg = insn->src_reg;
939 switch (insn->code) {
941 case BPF_ALU | BPF_ADD | BPF_X:
942 case BPF_ALU | BPF_SUB | BPF_X:
943 case BPF_ALU | BPF_AND | BPF_X:
944 case BPF_ALU | BPF_OR | BPF_X:
945 case BPF_ALU | BPF_XOR | BPF_X:
946 case BPF_ALU64 | BPF_ADD | BPF_X:
947 case BPF_ALU64 | BPF_SUB | BPF_X:
948 case BPF_ALU64 | BPF_AND | BPF_X:
949 case BPF_ALU64 | BPF_OR | BPF_X:
950 case BPF_ALU64 | BPF_XOR | BPF_X:
951 maybe_emit_mod(&prog, dst_reg, src_reg,
952 BPF_CLASS(insn->code) == BPF_ALU64);
953 b2 = simple_alu_opcodes[BPF_OP(insn->code)];
954 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
957 case BPF_ALU64 | BPF_MOV | BPF_X:
958 case BPF_ALU | BPF_MOV | BPF_X:
960 BPF_CLASS(insn->code) == BPF_ALU64,
965 case BPF_ALU | BPF_NEG:
966 case BPF_ALU64 | BPF_NEG:
967 if (BPF_CLASS(insn->code) == BPF_ALU64)
968 EMIT1(add_1mod(0x48, dst_reg));
969 else if (is_ereg(dst_reg))
970 EMIT1(add_1mod(0x40, dst_reg));
971 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
974 case BPF_ALU | BPF_ADD | BPF_K:
975 case BPF_ALU | BPF_SUB | BPF_K:
976 case BPF_ALU | BPF_AND | BPF_K:
977 case BPF_ALU | BPF_OR | BPF_K:
978 case BPF_ALU | BPF_XOR | BPF_K:
979 case BPF_ALU64 | BPF_ADD | BPF_K:
980 case BPF_ALU64 | BPF_SUB | BPF_K:
981 case BPF_ALU64 | BPF_AND | BPF_K:
982 case BPF_ALU64 | BPF_OR | BPF_K:
983 case BPF_ALU64 | BPF_XOR | BPF_K:
984 if (BPF_CLASS(insn->code) == BPF_ALU64)
985 EMIT1(add_1mod(0x48, dst_reg));
986 else if (is_ereg(dst_reg))
987 EMIT1(add_1mod(0x40, dst_reg));
990 * b3 holds the 'normal' opcode; the b2 short form is only valid
991 * when dst is eax/rax.
993 switch (BPF_OP(insn->code)) {
1017 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1018 else if (is_axreg(dst_reg))
1019 EMIT1_off32(b2, imm32);
1021 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1024 case BPF_ALU64 | BPF_MOV | BPF_K:
1025 case BPF_ALU | BPF_MOV | BPF_K:
1026 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1030 case BPF_LD | BPF_IMM | BPF_DW:
1031 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1036 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1037 case BPF_ALU | BPF_MOD | BPF_X:
1038 case BPF_ALU | BPF_DIV | BPF_X:
1039 case BPF_ALU | BPF_MOD | BPF_K:
1040 case BPF_ALU | BPF_DIV | BPF_K:
1041 case BPF_ALU64 | BPF_MOD | BPF_X:
1042 case BPF_ALU64 | BPF_DIV | BPF_X:
1043 case BPF_ALU64 | BPF_MOD | BPF_K:
1044 case BPF_ALU64 | BPF_DIV | BPF_K:
1045 EMIT1(0x50); /* push rax */
1046 EMIT1(0x52); /* push rdx */
1048 if (BPF_SRC(insn->code) == BPF_X)
1049 /* mov r11, src_reg */
1050 EMIT_mov(AUX_REG, src_reg);
1052 /* mov r11, imm32 */
1053 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1055 /* mov rax, dst_reg */
1056 EMIT_mov(BPF_REG_0, dst_reg);
1060 * equivalent to 'xor rdx, rdx', but one byte less
1064 if (BPF_CLASS(insn->code) == BPF_ALU64)
1066 EMIT3(0x49, 0xF7, 0xF3);
1069 EMIT3(0x41, 0xF7, 0xF3);
1071 if (BPF_OP(insn->code) == BPF_MOD)
1073 EMIT3(0x49, 0x89, 0xD3);
1076 EMIT3(0x49, 0x89, 0xC3);
1078 EMIT1(0x5A); /* pop rdx */
1079 EMIT1(0x58); /* pop rax */
1081 /* mov dst_reg, r11 */
1082 EMIT_mov(dst_reg, AUX_REG);
1085 case BPF_ALU | BPF_MUL | BPF_K:
1086 case BPF_ALU | BPF_MUL | BPF_X:
1087 case BPF_ALU64 | BPF_MUL | BPF_K:
1088 case BPF_ALU64 | BPF_MUL | BPF_X:
1090 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1092 if (dst_reg != BPF_REG_0)
1093 EMIT1(0x50); /* push rax */
1094 if (dst_reg != BPF_REG_3)
1095 EMIT1(0x52); /* push rdx */
1097 /* mov r11, dst_reg */
1098 EMIT_mov(AUX_REG, dst_reg);
1100 if (BPF_SRC(insn->code) == BPF_X)
1101 emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
1103 emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
1106 EMIT1(add_1mod(0x48, AUX_REG));
1107 else if (is_ereg(AUX_REG))
1108 EMIT1(add_1mod(0x40, AUX_REG));
1110 EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
1112 if (dst_reg != BPF_REG_3)
1113 EMIT1(0x5A); /* pop rdx */
1114 if (dst_reg != BPF_REG_0) {
1115 /* mov dst_reg, rax */
1116 EMIT_mov(dst_reg, BPF_REG_0);
1117 EMIT1(0x58); /* pop rax */
1122 case BPF_ALU | BPF_LSH | BPF_K:
1123 case BPF_ALU | BPF_RSH | BPF_K:
1124 case BPF_ALU | BPF_ARSH | BPF_K:
1125 case BPF_ALU64 | BPF_LSH | BPF_K:
1126 case BPF_ALU64 | BPF_RSH | BPF_K:
1127 case BPF_ALU64 | BPF_ARSH | BPF_K:
1128 if (BPF_CLASS(insn->code) == BPF_ALU64)
1129 EMIT1(add_1mod(0x48, dst_reg));
1130 else if (is_ereg(dst_reg))
1131 EMIT1(add_1mod(0x40, dst_reg));
1133 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1135 EMIT2(0xD1, add_1reg(b3, dst_reg));
1137 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1140 case BPF_ALU | BPF_LSH | BPF_X:
1141 case BPF_ALU | BPF_RSH | BPF_X:
1142 case BPF_ALU | BPF_ARSH | BPF_X:
1143 case BPF_ALU64 | BPF_LSH | BPF_X:
1144 case BPF_ALU64 | BPF_RSH | BPF_X:
1145 case BPF_ALU64 | BPF_ARSH | BPF_X:
1147 /* Check for bad case when dst_reg == rcx */
1148 if (dst_reg == BPF_REG_4) {
1149 /* mov r11, dst_reg */
1150 EMIT_mov(AUX_REG, dst_reg);
1154 if (src_reg != BPF_REG_4) { /* common case */
1155 EMIT1(0x51); /* push rcx */
1157 /* mov rcx, src_reg */
1158 EMIT_mov(BPF_REG_4, src_reg);
1161 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1162 if (BPF_CLASS(insn->code) == BPF_ALU64)
1163 EMIT1(add_1mod(0x48, dst_reg));
1164 else if (is_ereg(dst_reg))
1165 EMIT1(add_1mod(0x40, dst_reg));
1167 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1168 EMIT2(0xD3, add_1reg(b3, dst_reg));
1170 if (src_reg != BPF_REG_4)
1171 EMIT1(0x59); /* pop rcx */
1173 if (insn->dst_reg == BPF_REG_4)
1174 /* mov dst_reg, r11 */
1175 EMIT_mov(insn->dst_reg, AUX_REG);
1178 case BPF_ALU | BPF_END | BPF_FROM_BE:
1181 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1183 if (is_ereg(dst_reg))
1185 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1187 /* Emit 'movzwl eax, ax' */
1188 if (is_ereg(dst_reg))
1189 EMIT3(0x45, 0x0F, 0xB7);
1192 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1195 /* Emit 'bswap eax' to swap lower 4 bytes */
1196 if (is_ereg(dst_reg))
1200 EMIT1(add_1reg(0xC8, dst_reg));
1203 /* Emit 'bswap rax' to swap 8 bytes */
1204 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1205 add_1reg(0xC8, dst_reg));
1210 case BPF_ALU | BPF_END | BPF_FROM_LE:
1214 * Emit 'movzwl eax, ax' to zero extend 16-bit
1217 if (is_ereg(dst_reg))
1218 EMIT3(0x45, 0x0F, 0xB7);
1221 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1224 /* Emit 'mov eax, eax' to clear upper 32-bits */
1225 if (is_ereg(dst_reg))
1227 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1235 /* ST: *(u8*)(dst_reg + off) = imm */
1236 case BPF_ST | BPF_MEM | BPF_B:
1237 if (is_ereg(dst_reg))
1242 case BPF_ST | BPF_MEM | BPF_H:
1243 if (is_ereg(dst_reg))
1244 EMIT3(0x66, 0x41, 0xC7);
1248 case BPF_ST | BPF_MEM | BPF_W:
1249 if (is_ereg(dst_reg))
1254 case BPF_ST | BPF_MEM | BPF_DW:
1255 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1257 st: if (is_imm8(insn->off))
1258 EMIT2(add_1reg(0x40, dst_reg), insn->off);
1260 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1262 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1265 /* STX: *(u8*)(dst_reg + off) = src_reg */
1266 case BPF_STX | BPF_MEM | BPF_B:
1267 case BPF_STX | BPF_MEM | BPF_H:
1268 case BPF_STX | BPF_MEM | BPF_W:
1269 case BPF_STX | BPF_MEM | BPF_DW:
1270 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1273 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1274 case BPF_LDX | BPF_MEM | BPF_B:
1275 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1276 case BPF_LDX | BPF_MEM | BPF_H:
1277 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1278 case BPF_LDX | BPF_MEM | BPF_W:
1279 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1280 case BPF_LDX | BPF_MEM | BPF_DW:
1281 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1282 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1283 /* test src_reg, src_reg */
1284 maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
1285 EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
1286 /* jne start_of_ldx */
1288 /* xor dst_reg, dst_reg */
1289 emit_mov_imm32(&prog, false, dst_reg, 0);
1290 /* jmp byte_after_ldx */
1293 /* populate jmp_offset for JNE above */
1294 temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
1295 start_of_ldx = prog;
1297 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1298 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1299 struct exception_table_entry *ex;
1300 u8 *_insn = image + proglen + (start_of_ldx - temp);
1303 /* populate jmp_offset for JMP above */
1304 start_of_ldx[-1] = prog - start_of_ldx;
1306 if (!bpf_prog->aux->extable)
1309 if (excnt >= bpf_prog->aux->num_exentries) {
1310 pr_err("ex gen bug\n");
1313 ex = &bpf_prog->aux->extable[excnt++];
1315 delta = _insn - (u8 *)&ex->insn;
1316 if (!is_simm32(delta)) {
1317 pr_err("extable->insn doesn't fit into 32-bit\n");
1322 delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
1323 if (!is_simm32(delta)) {
1324 pr_err("extable->handler doesn't fit into 32-bit\n");
1327 ex->handler = delta;
1329 if (dst_reg > BPF_REG_9) {
1330 pr_err("verifier error\n");
1334 * Compute size of x86 insn and its target dest x86 register.
1335 * ex_handler_bpf() will use lower 8 bits to adjust
1336 * pt_regs->ip to jump over this x86 instruction
1337 * and upper bits to figure out which pt_regs to zero out.
1338 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1339 * of 4 bytes will be ignored and rbx will be zero inited.
1341 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
1345 case BPF_STX | BPF_ATOMIC | BPF_W:
1346 case BPF_STX | BPF_ATOMIC | BPF_DW:
1347 if (insn->imm == (BPF_AND | BPF_FETCH) ||
1348 insn->imm == (BPF_OR | BPF_FETCH) ||
1349 insn->imm == (BPF_XOR | BPF_FETCH)) {
1351 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1354 * Can't be implemented with a single x86 insn.
1355 * Need to do a CMPXCHG loop.
1358 /* Will need RAX as a CMPXCHG operand so save R0 */
1359 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1360 branch_target = prog;
1361 /* Load old value */
1362 emit_ldx(&prog, BPF_SIZE(insn->code),
1363 BPF_REG_0, dst_reg, insn->off);
1365 * Perform the (commutative) operation locally,
1366 * put the result in the AUX_REG.
1368 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1369 maybe_emit_mod(&prog, AUX_REG, src_reg, is64);
1370 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1371 add_2reg(0xC0, AUX_REG, src_reg));
1372 /* Attempt to swap in new value */
1373 err = emit_atomic(&prog, BPF_CMPXCHG,
1374 dst_reg, AUX_REG, insn->off,
1375 BPF_SIZE(insn->code));
1379 * ZF tells us whether we won the race. If it's
1380 * cleared we need to try again.
1382 EMIT2(X86_JNE, -(prog - branch_target) - 2);
1383 /* Return the pre-modification value */
1384 emit_mov_reg(&prog, is64, src_reg, BPF_REG_0);
1385 /* Restore R0 after clobbering RAX */
1386 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
1391 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1392 insn->off, BPF_SIZE(insn->code));
1398 case BPF_JMP | BPF_CALL:
1399 func = (u8 *) __bpf_call_base + imm32;
1400 if (tail_call_reachable) {
1401 EMIT3_off32(0x48, 0x8B, 0x85,
1402 -(bpf_prog->aux->stack_depth + 8));
1403 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1406 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1411 case BPF_JMP | BPF_TAIL_CALL:
1413 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1414 &prog, addrs[i], image,
1416 bpf_prog->aux->stack_depth);
1418 emit_bpf_tail_call_indirect(&prog,
1420 bpf_prog->aux->stack_depth);
1424 case BPF_JMP | BPF_JEQ | BPF_X:
1425 case BPF_JMP | BPF_JNE | BPF_X:
1426 case BPF_JMP | BPF_JGT | BPF_X:
1427 case BPF_JMP | BPF_JLT | BPF_X:
1428 case BPF_JMP | BPF_JGE | BPF_X:
1429 case BPF_JMP | BPF_JLE | BPF_X:
1430 case BPF_JMP | BPF_JSGT | BPF_X:
1431 case BPF_JMP | BPF_JSLT | BPF_X:
1432 case BPF_JMP | BPF_JSGE | BPF_X:
1433 case BPF_JMP | BPF_JSLE | BPF_X:
1434 case BPF_JMP32 | BPF_JEQ | BPF_X:
1435 case BPF_JMP32 | BPF_JNE | BPF_X:
1436 case BPF_JMP32 | BPF_JGT | BPF_X:
1437 case BPF_JMP32 | BPF_JLT | BPF_X:
1438 case BPF_JMP32 | BPF_JGE | BPF_X:
1439 case BPF_JMP32 | BPF_JLE | BPF_X:
1440 case BPF_JMP32 | BPF_JSGT | BPF_X:
1441 case BPF_JMP32 | BPF_JSLT | BPF_X:
1442 case BPF_JMP32 | BPF_JSGE | BPF_X:
1443 case BPF_JMP32 | BPF_JSLE | BPF_X:
1444 /* cmp dst_reg, src_reg */
1445 maybe_emit_mod(&prog, dst_reg, src_reg,
1446 BPF_CLASS(insn->code) == BPF_JMP);
1447 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1450 case BPF_JMP | BPF_JSET | BPF_X:
1451 case BPF_JMP32 | BPF_JSET | BPF_X:
1452 /* test dst_reg, src_reg */
1453 maybe_emit_mod(&prog, dst_reg, src_reg,
1454 BPF_CLASS(insn->code) == BPF_JMP);
1455 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1458 case BPF_JMP | BPF_JSET | BPF_K:
1459 case BPF_JMP32 | BPF_JSET | BPF_K:
1460 /* test dst_reg, imm32 */
1461 if (BPF_CLASS(insn->code) == BPF_JMP)
1462 EMIT1(add_1mod(0x48, dst_reg));
1463 else if (is_ereg(dst_reg))
1464 EMIT1(add_1mod(0x40, dst_reg));
1465 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1468 case BPF_JMP | BPF_JEQ | BPF_K:
1469 case BPF_JMP | BPF_JNE | BPF_K:
1470 case BPF_JMP | BPF_JGT | BPF_K:
1471 case BPF_JMP | BPF_JLT | BPF_K:
1472 case BPF_JMP | BPF_JGE | BPF_K:
1473 case BPF_JMP | BPF_JLE | BPF_K:
1474 case BPF_JMP | BPF_JSGT | BPF_K:
1475 case BPF_JMP | BPF_JSLT | BPF_K:
1476 case BPF_JMP | BPF_JSGE | BPF_K:
1477 case BPF_JMP | BPF_JSLE | BPF_K:
1478 case BPF_JMP32 | BPF_JEQ | BPF_K:
1479 case BPF_JMP32 | BPF_JNE | BPF_K:
1480 case BPF_JMP32 | BPF_JGT | BPF_K:
1481 case BPF_JMP32 | BPF_JLT | BPF_K:
1482 case BPF_JMP32 | BPF_JGE | BPF_K:
1483 case BPF_JMP32 | BPF_JLE | BPF_K:
1484 case BPF_JMP32 | BPF_JSGT | BPF_K:
1485 case BPF_JMP32 | BPF_JSLT | BPF_K:
1486 case BPF_JMP32 | BPF_JSGE | BPF_K:
1487 case BPF_JMP32 | BPF_JSLE | BPF_K:
1488 /* test dst_reg, dst_reg to save one extra byte */
1490 maybe_emit_mod(&prog, dst_reg, dst_reg,
1491 BPF_CLASS(insn->code) == BPF_JMP);
1492 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1496 /* cmp dst_reg, imm8/32 */
1497 if (BPF_CLASS(insn->code) == BPF_JMP)
1498 EMIT1(add_1mod(0x48, dst_reg));
1499 else if (is_ereg(dst_reg))
1500 EMIT1(add_1mod(0x40, dst_reg));
1503 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1505 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1507 emit_cond_jmp: /* Convert BPF opcode to x86 */
1508 switch (BPF_OP(insn->code)) {
1517 /* GT is unsigned '>', JA in x86 */
1521 /* LT is unsigned '<', JB in x86 */
1525 /* GE is unsigned '>=', JAE in x86 */
1529 /* LE is unsigned '<=', JBE in x86 */
1533 /* Signed '>', GT in x86 */
1537 /* Signed '<', LT in x86 */
1541 /* Signed '>=', GE in x86 */
1545 /* Signed '<=', LE in x86 */
1548 default: /* to silence GCC warning */
1551 jmp_offset = addrs[i + insn->off] - addrs[i];
1552 if (is_imm8(jmp_offset)) {
1554 /* To keep the jmp_offset valid, the extra bytes are
1555 * padded before the jump insn, so we subtract the
1556 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
1558 * If the previous pass already emits an imm8
1559 * jmp_cond, then this BPF insn won't shrink, so
1562 * On the other hand, if the previous pass emits an
1563 * imm32 jmp_cond, the extra 4 bytes(*) are padded to
1564 * keep the image from shrinking further.
1566 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1567 * is 2 bytes, so the size difference is 4 bytes.
1569 nops = INSN_SZ_DIFF - 2;
1570 if (nops != 0 && nops != 4) {
1571 pr_err("unexpected jmp_cond padding: %d bytes\n",
1575 cnt += emit_nops(&prog, nops);
1577 EMIT2(jmp_cond, jmp_offset);
1578 } else if (is_simm32(jmp_offset)) {
1579 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1581 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1587 case BPF_JMP | BPF_JA:
1588 if (insn->off == -1)
1589 /* -1 jmp instructions will always jump
1590 * backwards two bytes. Explicitly handling
1591 * this case avoids wasting too many passes
1592 * when there are long sequences of replaced
1597 jmp_offset = addrs[i + insn->off] - addrs[i];
1601 * If jmp_padding is enabled, the extra nops will
1602 * be inserted. Otherwise, optimize out nop jumps.
1605 /* There are 3 possible conditions.
1606 * (1) This BPF_JA is already optimized out in
1607 * the previous run, so there is no need
1608 * to pad any extra byte (0 byte).
1609 * (2) The previous pass emits an imm8 jmp,
1610 * so we pad 2 bytes to match the previous
1612 * (3) Similarly, the previous pass emits an
1613 * imm32 jmp, and 5 bytes is padded.
1615 nops = INSN_SZ_DIFF;
1616 if (nops != 0 && nops != 2 && nops != 5) {
1617 pr_err("unexpected nop jump padding: %d bytes\n",
1621 cnt += emit_nops(&prog, nops);
1626 if (is_imm8(jmp_offset)) {
1628 /* To avoid breaking jmp_offset, the extra bytes
1629 * are padded before the actual jmp insn, so
1630 * 2 bytes are subtracted from INSN_SZ_DIFF.
1632 * If the previous pass already emits an imm8
1633 * jmp, there is nothing to pad (0 byte).
1635 * If it emits an imm32 jmp (5 bytes) previously
1636 * and now an imm8 jmp (2 bytes), then we pad
1637 * (5 - 2 = 3) bytes to stop the image from
1638 * shrinking further.
1640 nops = INSN_SZ_DIFF - 2;
1641 if (nops != 0 && nops != 3) {
1642 pr_err("unexpected jump padding: %d bytes\n",
1646 cnt += emit_nops(&prog, INSN_SZ_DIFF - 2);
1648 EMIT2(0xEB, jmp_offset);
1649 } else if (is_simm32(jmp_offset)) {
1650 EMIT1_off32(0xE9, jmp_offset);
1652 pr_err("jmp gen bug %llx\n", jmp_offset);
1657 case BPF_JMP | BPF_EXIT:
1659 jmp_offset = ctx->cleanup_addr - addrs[i];
1663 /* Update cleanup_addr */
1664 ctx->cleanup_addr = proglen;
1665 pop_callee_regs(&prog, callee_regs_used);
1666 EMIT1(0xC9); /* leave */
1667 EMIT1(0xC3); /* ret */
1672 * By design x86-64 JIT should support all BPF instructions.
1673 * This error will be seen if a new instruction was added
1674 * to the interpreter, but not to the JIT, or if there is
1677 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1682 if (ilen > BPF_MAX_INSN_SIZE) {
1683 pr_err("bpf_jit: fatal insn size error\n");
1688 if (unlikely(proglen + ilen > oldproglen)) {
1689 pr_err("bpf_jit: fatal error\n");
1692 memcpy(image + proglen, temp, ilen);
1699 if (image && excnt != bpf_prog->aux->num_exentries) {
1700 pr_err("extable is not populated\n");
1706 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1710 /* Store function arguments to stack.
1711 * For a function that accepts two pointers the sequence will be:
1712 * mov QWORD PTR [rbp-0x10],rdi
1713 * mov QWORD PTR [rbp-0x8],rsi
1715 for (i = 0; i < min(nr_args, 6); i++)
1716 emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1718 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1719 -(stack_size - i * 8));
1722 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1727 /* Restore function arguments from stack.
1728 * For a function that accepts two pointers the sequence will be:
1729 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1730 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1732 for (i = 0; i < min(nr_args, 6); i++)
1733 emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1734 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1736 -(stack_size - i * 8));
1739 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1740 struct bpf_prog *p, int stack_size, bool mod_ret)
1745 if (p->aux->sleepable) {
1746 if (emit_call(&prog, __bpf_prog_enter_sleepable, prog))
1749 if (emit_call(&prog, __bpf_prog_enter, prog))
1751 /* remember prog start time returned by __bpf_prog_enter */
1752 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1755 /* arg1: lea rdi, [rbp - stack_size] */
1756 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1757 /* arg2: progs[i]->insnsi for interpreter */
1759 emit_mov_imm64(&prog, BPF_REG_2,
1760 (long) p->insnsi >> 32,
1761 (u32) (long) p->insnsi);
1762 /* call JITed bpf program or interpreter */
1763 if (emit_call(&prog, p->bpf_func, prog))
1766 /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1767 * value of the previous call, which is then passed on the stack
1768 * to the next BPF program.
1771 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1773 if (p->aux->sleepable) {
1774 if (emit_call(&prog, __bpf_prog_exit_sleepable, prog))
1777 /* arg1: mov rdi, progs[i] */
1778 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
1780 /* arg2: mov rsi, rbx <- start time in nsec */
1781 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1782 if (emit_call(&prog, __bpf_prog_exit, prog))
1790 static void emit_align(u8 **pprog, u32 align)
1792 u8 *target, *prog = *pprog;
1794 target = PTR_ALIGN(prog, align);
1796 emit_nops(&prog, target - prog);
1801 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1807 offset = func - (ip + 2 + 4);
1808 if (!is_simm32(offset)) {
1809 pr_err("Target %p is out of range\n", func);
1812 EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1817 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1818 struct bpf_tramp_progs *tp, int stack_size)
1823 for (i = 0; i < tp->nr_progs; i++) {
1824 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
1831 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1832 struct bpf_tramp_progs *tp, int stack_size,
1838 /* The first fmod_ret program will receive a garbage return value.
1839 * Set this to 0 to avoid confusing the program.
1841 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1842 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1843 for (i = 0; i < tp->nr_progs; i++) {
1844 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
1847 /* mod_ret prog stored return value into [rbp - 8]. Emit:
1848 * if (*(u64 *)(rbp - 8) != 0)
1851 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
1852 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1854 /* Save the location of the branch and generate 6 nops
1855 * (4 bytes for an offset and 2 bytes for the jump). These nops
1856 * are replaced with a conditional jump once do_fexit (i.e. the
1857 * start of the fexit invocation) is finalized.
1860 emit_nops(&prog, 4 + 2);
1868 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1869 * its 'struct btf_func_model' will have nr_args=2
1870 * The assembly code when eth_type_trans is executing after trampoline:
1874 * sub rsp, 16 // space for skb and dev
1875 * push rbx // temp regs to pass start time
1876 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
1877 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
1878 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1879 * mov rbx, rax // remember start time if bpf stats are enabled
1880 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
1881 * call addr_of_jited_FENTRY_prog
1882 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1883 * mov rsi, rbx // prog start time
1884 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1885 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
1886 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
1891 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
1892 * replaced with 'call generated_bpf_trampoline'. When it returns
1893 * eth_type_trans will continue executing with original skb and dev pointers.
1895 * The assembly code when eth_type_trans is called from trampoline:
1899 * sub rsp, 24 // space for skb, dev, return value
1900 * push rbx // temp regs to pass start time
1901 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
1902 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
1903 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1904 * mov rbx, rax // remember start time if bpf stats are enabled
1905 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
1906 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
1907 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1908 * mov rsi, rbx // prog start time
1909 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1910 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
1911 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
1912 * call eth_type_trans+5 // execute body of eth_type_trans
1913 * mov qword ptr [rbp - 8], rax // save return value
1914 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1915 * mov rbx, rax // remember start time if bpf stats are enabled
1916 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
1917 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
1918 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1919 * mov rsi, rbx // prog start time
1920 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1921 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
1924 * add rsp, 8 // skip eth_type_trans's frame
1925 * ret // return to its caller
1927 int arch_prepare_bpf_trampoline(void *image, void *image_end,
1928 const struct btf_func_model *m, u32 flags,
1929 struct bpf_tramp_progs *tprogs,
1932 int ret, i, cnt = 0, nr_args = m->nr_args;
1933 int stack_size = nr_args * 8;
1934 struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
1935 struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
1936 struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
1937 u8 **branches = NULL;
1940 /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
1944 if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1945 (flags & BPF_TRAMP_F_SKIP_FRAME))
1948 if (flags & BPF_TRAMP_F_CALL_ORIG)
1949 stack_size += 8; /* room for return value of orig_call */
1951 if (flags & BPF_TRAMP_F_SKIP_FRAME)
1952 /* skip patched call instruction and point orig_call to actual
1953 * body of the kernel function.
1955 orig_call += X86_PATCH_SIZE;
1959 EMIT1(0x55); /* push rbp */
1960 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
1961 EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
1962 EMIT1(0x53); /* push rbx */
1964 save_regs(m, &prog, nr_args, stack_size);
1966 if (fentry->nr_progs)
1967 if (invoke_bpf(m, &prog, fentry, stack_size))
1970 if (fmod_ret->nr_progs) {
1971 branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
1976 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size,
1983 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1984 if (fentry->nr_progs || fmod_ret->nr_progs)
1985 restore_regs(m, &prog, nr_args, stack_size);
1987 /* call original function */
1988 if (emit_call(&prog, orig_call, prog)) {
1992 /* remember the return value on the stack for the bpf prog to access */
1993 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1996 if (fmod_ret->nr_progs) {
1997 /* From Intel 64 and IA-32 Architectures Optimization
1998 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
1999 * Coding Rule 11: All branch targets should be 16-byte
2002 emit_align(&prog, 16);
2003 /* Update the branches saved in invoke_bpf_mod_ret with the
2004 * aligned address of do_fexit.
2006 for (i = 0; i < fmod_ret->nr_progs; i++)
2007 emit_cond_near_jump(&branches[i], prog, branches[i],
2011 if (fexit->nr_progs)
2012 if (invoke_bpf(m, &prog, fexit, stack_size)) {
2017 if (flags & BPF_TRAMP_F_RESTORE_REGS)
2018 restore_regs(m, &prog, nr_args, stack_size);
2020 /* This needs to be done regardless. If there were fmod_ret programs,
2021 * the return value is only updated on the stack and still needs to be
2024 if (flags & BPF_TRAMP_F_CALL_ORIG)
2025 /* restore original return value back into RAX */
2026 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2028 EMIT1(0x5B); /* pop rbx */
2029 EMIT1(0xC9); /* leave */
2030 if (flags & BPF_TRAMP_F_SKIP_FRAME)
2031 /* skip our return address and return to parent */
2032 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2033 EMIT1(0xC3); /* ret */
2034 /* Make sure the trampoline generation logic doesn't overflow */
2035 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2039 ret = prog - (u8 *)image;
2046 static int emit_fallback_jump(u8 **pprog)
2051 #ifdef CONFIG_RETPOLINE
2052 /* Note that this assumes that the compiler uses external
2053 * thunks for indirect calls. Both clang and GCC use the same
2054 * naming convention for external thunks.
2056 err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
2060 EMIT2(0xFF, 0xE2); /* jmp rdx */
2066 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
2068 u8 *jg_reloc, *prog = *pprog;
2069 int pivot, err, jg_bytes = 1, cnt = 0;
2073 /* Leaf node of recursion, i.e. not a range of indices
2076 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
2077 if (!is_simm32(progs[a]))
2079 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
2081 err = emit_cond_near_jump(&prog, /* je func */
2082 (void *)progs[a], prog,
2087 err = emit_fallback_jump(&prog); /* jmp thunk/indirect */
2095 /* Not a leaf node, so we pivot, and recursively descend into
2096 * the lower and upper ranges.
2098 pivot = (b - a) / 2;
2099 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
2100 if (!is_simm32(progs[a + pivot]))
2102 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2104 if (pivot > 2) { /* jg upper_part */
2105 /* Require near jump. */
2107 EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2113 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
2118 /* From Intel 64 and IA-32 Architectures Optimization
2119 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2120 * Coding Rule 11: All branch targets should be 16-byte
2123 emit_align(&prog, 16);
2124 jg_offset = prog - jg_reloc;
2125 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2127 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
2136 static int cmp_ips(const void *a, const void *b)
2148 int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
2152 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2153 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
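/*
 * Illustrative shape of the emitted search: each non-leaf level does
 * 'cmp rdx, progs[pivot]; jg upper_part' and recurses into both
 * halves; each leaf does 'cmp rdx, progs[a]; je progs[a]' followed by
 * the retpoline-safe indirect jump through rdx as the fallback.
 */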
2156 struct x64_jit_data {
2157 struct bpf_binary_header *header;
2161 struct jit_context ctx;
2164 #define MAX_PASSES 20
2165 #define PADDING_PASSES (MAX_PASSES - 5)
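/*
 * If the image has not converged after PADDING_PASSES iterations,
 * the remaining passes run do_jit() with jmp_padding enabled so that
 * instructions can no longer shrink, which guarantees convergence
 * before MAX_PASSES is reached.
 */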
2167 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2169 struct bpf_binary_header *header = NULL;
2170 struct bpf_prog *tmp, *orig_prog = prog;
2171 struct x64_jit_data *jit_data;
2172 int proglen, oldproglen = 0;
2173 struct jit_context ctx = {};
2174 bool tmp_blinded = false;
2175 bool extra_pass = false;
2176 bool padding = false;
2182 if (!prog->jit_requested)
2185 tmp = bpf_jit_blind_constants(prog);
2187 * If blinding was requested and we failed during blinding,
2188 * we must fall back to the interpreter.
2197 jit_data = prog->aux->jit_data;
2199 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2204 prog->aux->jit_data = jit_data;
2206 addrs = jit_data->addrs;
2208 ctx = jit_data->ctx;
2209 oldproglen = jit_data->proglen;
2210 image = jit_data->image;
2211 header = jit_data->header;
2214 goto skip_init_addrs;
2216 addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2223 * Before the first pass, make a rough estimate of addrs[]:
2224 * each BPF instruction is translated to fewer than 64 bytes
2226 for (proglen = 0, i = 0; i <= prog->len; i++) {
2230 ctx.cleanup_addr = proglen;
2234 * JITed image shrinks with every pass and the loop iterates
2235 * until the image stops shrinking. Very large BPF programs
2236 * may converge on the last pass. In such a case, do one more
2237 * pass to emit the final image.
2239 for (pass = 0; pass < MAX_PASSES || image; pass++) {
2240 if (!padding && pass >= PADDING_PASSES)
2242 proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
2247 bpf_jit_binary_free(header);
2252 if (proglen != oldproglen) {
2253 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2254 proglen, oldproglen);
2259 if (proglen == oldproglen) {
2261 * The number of entries in extable is the number of BPF_LDX
2262 * insns that access kernel memory via "pointer to BTF type".
2263 * The verifier changed their opcode from LDX|MEM|size
2264 * to LDX|PROBE_MEM|size to make JITing easier.
2266 u32 align = __alignof__(struct exception_table_entry);
2267 u32 extable_size = prog->aux->num_exentries *
2268 sizeof(struct exception_table_entry);
2270 /* allocate module memory for x86 insns and extable */
2271 header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
2272 &image, align, jit_fill_hole);
2277 prog->aux->extable = (void *) image + roundup(proglen, align);
2279 oldproglen = proglen;
2283 if (bpf_jit_enable > 1)
2284 bpf_jit_dump(prog->len, proglen, pass + 1, image);
2287 if (!prog->is_func || extra_pass) {
2288 bpf_tail_call_direct_fixup(prog);
2289 bpf_jit_binary_lock_ro(header);
2291 jit_data->addrs = addrs;
2292 jit_data->ctx = ctx;
2293 jit_data->proglen = proglen;
2294 jit_data->image = image;
2295 jit_data->header = header;
2297 prog->bpf_func = (void *)image;
2299 prog->jited_len = proglen;
2304 if (!image || !prog->is_func || extra_pass) {
2306 bpf_prog_fill_jited_linfo(prog, addrs + 1);
2310 prog->aux->jit_data = NULL;
2314 bpf_jit_prog_release_other(prog, prog == orig_prog ?