// SPDX-License-Identifier: GPL-2.0-only
 * bpf_jit_comp.c: BPF JIT compiler
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
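/*
 * For illustration: emit_code() stores the packed 'bytes' argument
 * little-endian, so EMIT3(0x48, 0x89, 0xE5) appends exactly the byte
 * sequence 48 89 e5 ("mov rbp, rsp") to the image, and the EMITn_off32()
 * variants append a 32-bit immediate or displacement right after the
 * opcode bytes.
 */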
static bool is_imm8(int value)
	return value <= 127 && value >= -128;
static bool is_simm32(s64 value)
	return value == (s64)(s32)value;
static bool is_uimm32(u64 value)
	return value == (u64)(u32)value;
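/*
 * For illustration: these predicates pick the shortest usable encoding.
 * is_imm8() accepts values such as -128..127 that fit a sign-extended 8-bit
 * immediate or displacement, while is_simm32()/is_uimm32() test whether a
 * 64-bit value survives sign or zero extension from 32 bits.
 */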
#define EMIT_mov(DST, SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
static int bpf_size_to_x86_bytes(int bpf_size)
	if (bpf_size == BPF_W)
	else if (bpf_size == BPF_H)
	else if (bpf_size == BPF_B)
	else if (bpf_size == BPF_DW)
 * List of x86 conditional jump opcodes (jump target is . + s8)
 * Add 0x10 (and an extra 0x0f prefix) to generate the far form (. + s32)
/* Pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_JIT_REG + 1)
 * The following table maps BPF registers to x86-64 registers.
 * x86-64 register R12 is unused, since if used as base address
 * register in load/store instructions, it always needs an
 * extra byte of encoding and is callee saved.
 * Also x86-64 register R9 is unused. x86-64 register R10 is
 * used for blinding (if enabled).
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* RAX */
	[BPF_REG_1] = 7,  /* RDI */
	[BPF_REG_2] = 6,  /* RSI */
	[BPF_REG_3] = 2,  /* RDX */
	[BPF_REG_4] = 1,  /* RCX */
	[BPF_REG_5] = 0,  /* R8  */
	[BPF_REG_6] = 3,  /* RBX callee saved */
	[BPF_REG_7] = 5,  /* R13 callee saved */
	[BPF_REG_8] = 6,  /* R14 callee saved */
	[BPF_REG_9] = 7,  /* R15 callee saved */
	[BPF_REG_FP] = 5, /* RBP readonly */
	[BPF_REG_AX] = 2, /* R10 temp register */
	[AUX_REG] = 3,    /* R11 temp register */
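/*
 * For illustration: the values above are the low three bits of the x86-64
 * register number as used in ModRM encoding.  BPF_REG_1 maps to 7 (RDI);
 * BPF_REG_9 also maps to 7, but is_ereg() flags it, so a REX prefix bit
 * supplies the missing fourth bit and selects R15 instead.
 */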
static const int reg2pt_regs[] = {
	[BPF_REG_0] = offsetof(struct pt_regs, ax),
	[BPF_REG_1] = offsetof(struct pt_regs, di),
	[BPF_REG_2] = offsetof(struct pt_regs, si),
	[BPF_REG_3] = offsetof(struct pt_regs, dx),
	[BPF_REG_4] = offsetof(struct pt_regs, cx),
	[BPF_REG_5] = offsetof(struct pt_regs, r8),
	[BPF_REG_6] = offsetof(struct pt_regs, bx),
	[BPF_REG_7] = offsetof(struct pt_regs, r13),
	[BPF_REG_8] = offsetof(struct pt_regs, r14),
	[BPF_REG_9] = offsetof(struct pt_regs, r15),
 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15,
 * which need an extra byte of encoding (a REX prefix).
 * rax, rcx, ..., rbp have the simpler encoding.
static bool is_ereg(u32 reg)
	return (1 << reg) & (BIT(BPF_REG_5) |
static bool is_axreg(u32 reg)
	return reg == BPF_REG_0;
/* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
static u8 add_1mod(u8 byte, u32 reg)
static u8 add_2mod(u8 byte, u32 r1, u32 r2)
/* Encode 'dst_reg' register into x86-64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
	return byte + reg2hex[dst_reg];
/* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
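/*
 * A worked example of the helpers above: EMIT_mov(BPF_REG_1, BPF_REG_0)
 * produces
 *
 *	add_2mod(0x48, dst, src) = 0x48    (REX.W, neither register is "ereg")
 *	0x89                               (MOV r/m64, r64)
 *	add_2reg(0xC0, dst, src) = 0xC0 + 7 + (0 << 3) = 0xC7
 *
 * i.e. the byte sequence 48 89 c7, which disassembles to 'mov rdi, rax'.
 */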
static void jit_fill_hole(void *area, unsigned int size)
	/* Fill whole space with INT3 instructions */
	memset(area, 0xcc, size);
	int cleanup_addr; /* Epilogue code offset */
/* Maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64
/* number of bytes emit_call() needs to generate call instruction */
#define X86_CALL_SIZE		5
#define PROLOGUE_SIZE		20
 * Emit x86-64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
	EMIT1(0x55);             /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
	/* sub rsp, rounded_stack_depth */
	EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
	EMIT1(0x53);             /* push rbx */
	EMIT2(0x41, 0x55);       /* push r13 */
	EMIT2(0x41, 0x56);       /* push r14 */
	EMIT2(0x41, 0x57);       /* push r15 */
	if (!ebpf_from_cbpf) {
		/* zero init tail_call_cnt */
		BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
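/*
 * For illustration, the PROLOGUE_SIZE accounting (assuming the elided
 * zero-init of tail_call_cnt is the usual 2-byte "push 0", 6a 00):
 *
 *	push rbp		1 byte
 *	mov rbp, rsp		3 bytes
 *	sub rsp, imm32		7 bytes
 *	push rbx		1 byte
 *	push r13/r14/r15	2 bytes each, 6 total
 *	push 0			2 bytes
 *
 * which adds up to the 20 bytes checked by BUILD_BUG_ON() and skipped over
 * by emit_bpf_tail_call() when jumping into another program.
 */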
 * Generate the following code:
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *   prog = array->ptrs[index];
 *   goto *(prog->bpf_func + prologue_size);
static void emit_bpf_tail_call(u8 **pprog)
	int label1, label2, label3;
	 * rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 * if (index >= array->map.max_entries)
	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
	      offsetof(struct bpf_array, map.max_entries));
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8B, 0x84, 0xD6,       /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));
	EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */
	 * Now we're ready to jump into the next BPF program:
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	RETPOLINE_RAX_BPF_JIT();
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
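/*
 * A note on the OFFSETn constants used above: each is the distance, in
 * bytes, from the end of the corresponding conditional jump to the single
 * "out" label at the end of the sequence, so all three early exits land on
 * the same place.  They are hand-counted, which is why each one is
 * cross-checked against a labelN snapshot of cnt by the BUILD_BUG_ON()s.
 */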
static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
			   u32 dst_reg, const u32 imm32)
	 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
	 * (which zero-extends imm32) to save 2 bytes.
	if (sign_propagate && (s32)imm32 < 0) {
		/* 'mov %rax, imm32' sign extends imm32 */
		b1 = add_1mod(0x48, dst_reg);
		EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
	 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
	if (is_ereg(dst_reg))
		EMIT1(add_2mod(0x40, dst_reg, dst_reg));
	EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
	/* mov %eax, imm32 */
	if (is_ereg(dst_reg))
		EMIT1(add_1mod(0x40, dst_reg));
	EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
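/*
 * For illustration, with dst_reg == BPF_REG_0 (rax) the three paths above
 * correspond to the usual x86 encodings:
 *
 *	48 c7 c0 imm32		mov rax, imm32	(7 bytes, sign-extending)
 *	31 c0			xor eax, eax	(2 bytes, imm32 == 0)
 *	b8 imm32		mov eax, imm32	(5 bytes, zero-extending)
 *
 * which is why the zero and non-negative cases are worth special-casing.
 */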
static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
			   const u32 imm32_hi, const u32 imm32_lo)
	if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
		 * For emitting a plain u32, where the sign bit must not be
		 * propagated, LLVM tends to load an imm64 rather than use a
		 * mov32 directly, so save a couple of bytes by just doing
		 * 'mov %eax, imm32' instead.
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
		/* movabsq %rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
		EMIT_mov(dst_reg, src_reg);
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT1(add_2mod(0x40, dst_reg, src_reg));
		EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
/* LDX: dst_reg = *(u8*)(src_reg + off) */
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
		/* Emit 'movzx rax, byte ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
		/* Emit 'movzx rax, word ptr [rax + off]' */
		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
		/* Emit 'mov eax, dword ptr [rax+0x14]' */
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
		/* Emit 'mov rax, qword ptr [rax+0x14]' */
		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
	 * If insn->off == 0 we could save one extra byte, but the special
	 * case of x86 R13, which always needs an offset, makes it not worth
	 * the hassle.
		EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
		EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
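/*
 * The two forms above are the standard ModRM addressing modes: mod=01
 * (0x40 | reg) takes an 8-bit displacement and mod=10 (0x80 | reg) a 32-bit
 * one, so small insn->off values cost three fewer bytes per load.
 */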
/* STX: *(u8*)(dst_reg + off) = src_reg */
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
		/* Emit 'mov byte ptr [rax + off], al' */
		if (is_ereg(dst_reg) || is_ereg(src_reg) ||
		    /* We have to add extra byte for x86 SIL, DIL regs */
		    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
		if (is_ereg(dst_reg) || is_ereg(src_reg))
			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
		EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
		EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
static int emit_call(u8 **pprog, void *func, void *ip)
	offset = func - (ip + X86_CALL_SIZE);
	if (!is_simm32(offset)) {
		pr_err("Target call %p is out of range\n", func);
	EMIT1_off32(0xE8, offset);
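/*
 * The call is emitted in the 5-byte near form (e8 rel32), where rel32 is
 * relative to the end of the call instruction; that is why the offset is
 * computed against ip + X86_CALL_SIZE.  A call to the immediately
 * following instruction would encode as e8 00 00 00 00.
 */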
static bool ex_handler_bpf(const struct exception_table_entry *x,
			   struct pt_regs *regs, int trapnr,
			   unsigned long error_code, unsigned long fault_addr)
	u32 reg = x->fixup >> 8;
	/* jump over faulting load and clear dest register */
	*(unsigned long *)((void *)regs + reg) = 0;
	regs->ip += x->fixup & 0xff;
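/*
 * In other words, ex->fixup packs two things (see the BPF_PROBE_MEM case
 * in do_jit()): the low 8 bits hold the length of the faulting load, so the
 * handler can skip it by advancing regs->ip, and the remaining bits hold
 * the pt_regs offset of the destination register, which is zeroed so the
 * BPF program observes a 0 result instead of faulting.
 */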
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0, excnt = 0;
	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog));
	addrs[0] = prog - temp;
	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		switch (insn->code) {
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
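			/*
			 * Example: BPF_ALU64 | BPF_ADD | BPF_X with dst_reg = R1
			 * and src_reg = R2 takes the b2 = 0x01 branch and emits
			 * 48 01 f7, i.e. 'add rdi, rsi'; the 32-bit BPF_ALU
			 * variant for the same registers simply drops the REX.W
			 * prefix.
			 */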
		case BPF_ALU64 | BPF_MOV | BPF_X:
		case BPF_ALU | BPF_MOV | BPF_X:
				     BPF_CLASS(insn->code) == BPF_ALU64,
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			 * b3 holds the 'normal' opcode; b2 is the short form,
			 * valid only when dst is eax/rax.
			switch (BPF_OP(insn->code)) {
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else if (is_axreg(dst_reg))
				EMIT1_off32(b2, imm32);
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
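			/*
			 * Three encodings are chosen from above: opcode 0x83
			 * with an 8-bit immediate when the constant passes
			 * is_imm8(), the one-byte accumulator short form (b2)
			 * with a 32-bit immediate when the destination is
			 * eax/rax, and otherwise the general 0x81 form with a
			 * 32-bit immediate.
			 */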
		case BPF_ALU64 | BPF_MOV | BPF_K:
		case BPF_ALU | BPF_MOV | BPF_K:
			emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
		case BPF_LD | BPF_IMM | BPF_DW:
			emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */
			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);
			 * equivalent to 'xor rdx, rdx', but one byte less
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT3(0x49, 0xF7, 0xF3);
				EMIT3(0x41, 0xF7, 0xF3);
			if (BPF_OP(insn->code) == BPF_MOD)
				EMIT3(0x49, 0x89, 0xD3);
				EMIT3(0x49, 0x89, 0xC3);
			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */
			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
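			/*
			 * To recap the sequence above: rax and rdx are the
			 * registers x86 'div' uses implicitly, so they are
			 * saved first, the divisor is parked in r11, the
			 * dividend is moved into rax, rdx is zeroed, and after
			 * the divide the wanted result (rdx for MOD, rax for
			 * DIV) is stashed in r11 before rax/rdx are restored
			 * and r11 is copied into dst_reg.
			 */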
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
			if (dst_reg != BPF_REG_0)
				EMIT1(0x50); /* push rax */
			if (dst_reg != BPF_REG_3)
				EMIT1(0x52); /* push rdx */
			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);
			if (BPF_SRC(insn->code) == BPF_X)
				emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
				emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
			if (dst_reg != BPF_REG_3)
				EMIT1(0x5A); /* pop rdx */
			if (dst_reg != BPF_REG_0) {
				/* mov dst_reg, rax */
				EMIT_mov(dst_reg, BPF_REG_0);
				EMIT1(0x58); /* pop rax */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
				EMIT2(0xD1, add_1reg(b3, dst_reg));
				EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:
			/* Check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */
				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			EMIT2(0xD3, add_1reg(b3, dst_reg));
			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */
			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
		case BPF_ALU | BPF_END | BPF_FROM_BE:
				/* Emit 'ror %ax, 8' to swap lower 2 bytes */
				if (is_ereg(dst_reg))
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
				/* Emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				/* Emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
				EMIT1(add_1reg(0xC8, dst_reg));
				/* Emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
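			/*
			 * The three widths use different instructions: 16-bit
			 * swaps via 'ror reg16, 8' followed by a zero-extending
			 * movzwl, 32-bit via the one-byte-register form
			 * 'bswap r32' (0f c8+r), and 64-bit via the same bswap
			 * with a REX.W prefix.
			 */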
		case BPF_ALU | BPF_END | BPF_FROM_LE:
				 * Emit 'movzwl eax, ax' to zero extend 16-bit
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				/* Emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
		/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);
st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
		/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
		case BPF_STX | BPF_MEM | BPF_H:
		case BPF_STX | BPF_MEM | BPF_W:
		case BPF_STX | BPF_MEM | BPF_DW:
			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
		/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
				struct exception_table_entry *ex;
				u8 *_insn = image + proglen;
				if (!bpf_prog->aux->extable)
				if (excnt >= bpf_prog->aux->num_exentries) {
					pr_err("ex gen bug\n");
				ex = &bpf_prog->aux->extable[excnt++];
				delta = _insn - (u8 *)&ex->insn;
				if (!is_simm32(delta)) {
					pr_err("extable->insn doesn't fit into 32-bit\n");
				delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
				if (!is_simm32(delta)) {
					pr_err("extable->handler doesn't fit into 32-bit\n");
				if (dst_reg > BPF_REG_9) {
					pr_err("verifier error\n");
				 * Compute size of x86 insn and its target dest x86 register.
				 * ex_handler_bpf() will use lower 8 bits to adjust
				 * pt_regs->ip to jump over this x86 instruction
				 * and upper bits to figure out which pt_regs to zero out.
				 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
				 * of 4 bytes will be ignored and rbx will be zero inited.
				ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* Emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
		case BPF_JMP | BPF_TAIL_CALL:
			emit_bpf_tail_call(&prog);
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			/* cmp dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
			/* test dst_reg, dst_reg to save one extra byte */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_2mod(0x48, dst_reg, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_2mod(0x40, dst_reg, dst_reg));
			EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
			/* cmp dst_reg, imm8/32 */
			if (BPF_CLASS(insn->code) == BPF_JMP)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
emit_cond_jmp:		/* Convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
				/* GT is unsigned '>', JA in x86 */
				/* LT is unsigned '<', JB in x86 */
				/* GE is unsigned '>=', JAE in x86 */
				/* LE is unsigned '<=', JBE in x86 */
				/* Signed '>', GT in x86 */
				/* Signed '<', LT in x86 */
				/* Signed '>=', GE in x86 */
				/* Signed '<=', LE in x86 */
			default: /* to silence GCC warning */
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
		case BPF_JMP | BPF_JA:
			if (insn->off == -1)
				/* -1 jmp instructions will always jump
				 * backwards two bytes. Explicitly handling
				 * this case avoids wasting too many passes
				 * when there are long sequences of replaced
				jmp_offset = addrs[i + insn->off] - addrs[i];
				/* Optimize out nop jumps */
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
				pr_err("jmp gen bug %llx\n", jmp_offset);
		case BPF_JMP | BPF_EXIT:
				jmp_offset = ctx->cleanup_addr - addrs[i];
			/* Update cleanup_addr */
			ctx->cleanup_addr = proglen;
			if (!bpf_prog_was_classic(bpf_prog))
				EMIT1(0x5B); /* get rid of tail_call_cnt */
			EMIT2(0x41, 0x5F);   /* pop r15 */
			EMIT2(0x41, 0x5E);   /* pop r14 */
			EMIT2(0x41, 0x5D);   /* pop r13 */
			EMIT1(0x5B);         /* pop rbx */
			EMIT1(0xC9);         /* leave */
			EMIT1(0xC3);         /* ret */
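			/*
			 * The epilogue mirrors emit_prologue() in reverse: the
			 * bare 'pop rbx' emitted only for non-classic programs
			 * reuses the 1-byte 0x5b encoding just to drop the
			 * 8-byte tail_call_cnt slot, then the callee-saved
			 * registers are restored in the opposite order they
			 * were pushed, and 'leave' undoes the rbp/rsp frame
			 * before 'ret'.
			 */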
			 * By design the x86-64 JIT should support all BPF instructions.
			 * This error will be seen if a new instruction was added
			 * to the interpreter but not to the JIT, or if there is
			 * junk in bpf_prog.
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit: fatal insn size error\n");
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit: fatal error\n");
			memcpy(image + proglen, temp, ilen);
	if (image && excnt != bpf_prog->aux->num_exentries) {
		pr_err("extable is not populated\n");
struct x64_jit_data {
	struct bpf_binary_header *header;
	struct jit_context ctx;
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
	struct bpf_binary_header *header = NULL;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct x64_jit_data *jit_data;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	bool tmp_blinded = false;
	bool extra_pass = false;
	if (!prog->jit_requested)
	tmp = bpf_jit_blind_constants(prog);
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	jit_data = prog->aux->jit_data;
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		prog->aux->jit_data = jit_data;
	addrs = jit_data->addrs;
		ctx = jit_data->ctx;
		oldproglen = jit_data->proglen;
		image = jit_data->image;
		header = jit_data->header;
		goto skip_init_addrs;
	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	 * Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to less than 64 bytes.
	for (proglen = 0, i = 0; i <= prog->len; i++) {
	ctx.cleanup_addr = proglen;
	 * JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large BPF programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image.
	for (pass = 0; pass < 20 || image; pass++) {
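		/*
		 * A note on the loop condition: while image is still NULL the
		 * passes only measure instruction sizes and jump offsets can
		 * only tighten, so proglen shrinks from pass to pass; once two
		 * consecutive passes agree (proglen == oldproglen below) the
		 * binary image is allocated and one final pass actually writes
		 * the bytes into it.
		 */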
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
				bpf_jit_binary_free(header);
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
		if (proglen == oldproglen) {
			 * The number of entries in extable is the number of BPF_LDX
			 * insns that access kernel memory via "pointer to BTF type".
			 * The verifier changed their opcode from LDX|MEM|size
			 * to LDX|PROBE_MEM|size to make JITing easier.
			u32 align = __alignof__(struct exception_table_entry);
			u32 extable_size = prog->aux->num_exentries *
					   sizeof(struct exception_table_entry);
			/* allocate module memory for x86 insns and extable */
			header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
						      &image, align, jit_fill_hole);
			prog->aux->extable = (void *) image + roundup(proglen, align);
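			/*
			 * Note the resulting layout: the JITed x86 code and its
			 * exception table live in one bpf_jit_binary_alloc()
			 * region, with the extable placed right after the
			 * image, rounded up to the extable entry alignment.
			 */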
		oldproglen = proglen;
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);
		if (!prog->is_func || extra_pass) {
			bpf_jit_binary_lock_ro(header);
			jit_data->addrs = addrs;
			jit_data->ctx = ctx;
			jit_data->proglen = proglen;
			jit_data->image = image;
			jit_data->header = header;
		prog->bpf_func = (void *)image;
		prog->jited_len = proglen;
	if (!image || !prog->is_func || extra_pass) {
			bpf_prog_fill_jited_linfo(prog, addrs + 1);
		prog->aux->jit_data = NULL;
	bpf_jit_prog_release_other(prog, prog == orig_prog ?