/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>

#include "bpf_jit_32.h"

/*
 * ABI:
 *
 * r0	scratch register
 * r4	BPF register A
 * r5	BPF register X
 * r6	pointer to the skb
 * r7	skb->data
 * r8	skb_headlen(skb)
 */

#define r_scratch	ARM_R0
/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
#define r_off		ARM_R1
#define r_A		ARM_R4
#define r_X		ARM_R5
#define r_skb		ARM_R6
#define r_skb_data	ARM_R7
#define r_skb_hl	ARM_R8

#define SCRATCH_SP_OFFSET	0
#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))

#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
#define SEEN_MEM_WORD(k)	(1 << (k))
#define SEEN_X			(1 << BPF_MEMWORDS)
#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))
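/*
 * The low BPF_MEMWORDS bits of ctx->seen track which scratch words the
 * filter touches; the bits above flag reads of X, helper calls, and skb
 * or packet-data accesses, which drive the register-saving and stack
 * layout decisions in saved_regs() and build_prologue().
 */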
#define FLAG_NEED_X_RESET	(1 << 0)

struct jit_ctx {
        const struct bpf_prog *skf;
        unsigned idx;
        unsigned prologue_bytes;
        int ret0_fp_idx;
        u32 seen;
        u32 flags;
        u32 *offsets;
        u32 *target;
#if __LINUX_ARM_ARCH__ < 7
        u16 epilogue_bytes;
        u16 imm_count;
        u32 *imms;
#endif
};

int bpf_jit_enable __read_mostly;

static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
{
        u8 ret;
        int err;

        err = skb_copy_bits(skb, offset, &ret, 1);

        return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
{
        u16 ret;
        int err;

        err = skb_copy_bits(skb, offset, &ret, 2);

        return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
{
        u32 ret;
        int err;

        err = skb_copy_bits(skb, offset, &ret, 4);

        return (u64)err << 32 | ntohl(ret);
}

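/*
 * The helpers above pack the skb_copy_bits() error code into the upper
 * 32 bits of the return value and the loaded value, converted to host
 * byte order, into the lower 32 bits.  Per the AAPCS a u64 is returned
 * in r0/r1, so on little-endian ARM the JITed slowpath can test r1 for
 * a non-zero error and take the value from r0.
 */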
/*
 * Wrapper that handles both OABI and EABI and ensures Thumb2 interworking
 * (where assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv(u32 dividend, u32 divisor)
{
        return dividend / divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
        inst |= (cond << 28);
        inst = __opcode_to_mem_arm(inst);

        if (ctx->target != NULL)
                ctx->target[ctx->idx] = inst;

        ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
        _emit(ARM_COND_AL, inst, ctx);
}

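/*
 * Note that _emit() advances ctx->idx even when ctx->target is NULL:
 * the first ("fake") pass only measures how many instructions each
 * filter op expands to, and the second pass writes them out at the
 * offsets recorded during the first.
 */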
static u16 saved_regs(struct jit_ctx *ctx)
{
        u16 ret = 0;

        if ((ctx->skf->len > 1) ||
            (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
                ret |= 1 << r_A;

#ifdef CONFIG_FRAME_POINTER
        ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
#else
        if (ctx->seen & SEEN_CALL)
                ret |= 1 << ARM_LR;
#endif

        if (ctx->seen & (SEEN_DATA | SEEN_SKB))
                ret |= 1 << r_skb;
        if (ctx->seen & SEEN_DATA)
                ret |= (1 << r_skb_data) | (1 << r_skb_hl);
        if (ctx->seen & SEEN_X)
                ret |= 1 << r_X;

        return ret;
}

static inline int mem_words_used(struct jit_ctx *ctx)
{
        /* yes, we do waste some stack space IF there are "holes" in the set */
        return fls(ctx->seen & SEEN_MEM);
}

static inline bool is_load_to_a(u16 inst)
{
        switch (inst) {
        case BPF_LD | BPF_W | BPF_LEN:
        case BPF_LD | BPF_W | BPF_ABS:
        case BPF_LD | BPF_H | BPF_ABS:
        case BPF_LD | BPF_B | BPF_ABS:
                return true;
        default:
                return false;
        }
}

static void jit_fill_hole(void *area, unsigned int size)
{
        u32 *ptr;
        /* We are guaranteed to have aligned memory. */
        for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
                *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

static void build_prologue(struct jit_ctx *ctx)
{
        u16 reg_set = saved_regs(ctx);
        u16 first_inst = ctx->skf->insns[0].code;
        u16 off;

#ifdef CONFIG_FRAME_POINTER
        emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
        emit(ARM_PUSH(reg_set), ctx);
        emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
        if (reg_set)
                emit(ARM_PUSH(reg_set), ctx);
#endif

        if (ctx->seen & (SEEN_DATA | SEEN_SKB))
                emit(ARM_MOV_R(r_skb, ARM_R0), ctx);

        if (ctx->seen & SEEN_DATA) {
                off = offsetof(struct sk_buff, data);
                emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
                /* headlen = len - data_len */
                off = offsetof(struct sk_buff, len);
                emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
                off = offsetof(struct sk_buff, data_len);
                emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
                emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
        }

        if (ctx->flags & FLAG_NEED_X_RESET)
                emit(ARM_MOV_I(r_X, 0), ctx);

        /* do not leak kernel data to userspace */
        if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
                emit(ARM_MOV_I(r_A, 0), ctx);

        /* stack space for the BPF_MEM words */
        if (ctx->seen & SEEN_MEM)
                emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
        u16 reg_set = saved_regs(ctx);

        if (ctx->seen & SEEN_MEM)
                emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);

        reg_set &= ~(1 << ARM_LR);

#ifdef CONFIG_FRAME_POINTER
        /* the first instruction of the prologue was: mov ip, sp */
        reg_set &= ~(1 << ARM_IP);
        reg_set |= (1 << ARM_SP);
        emit(ARM_LDM(ARM_SP, reg_set), ctx);
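        /*
         * In effect this reloads sp from the slot where the prologue
         * saved ip (the pre-push sp) and pc from the slot holding lr,
         * so a single LDM both tears down the frame and returns.
         */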
#else
        if (reg_set) {
                if (ctx->seen & SEEN_CALL)
                        reg_set |= 1 << ARM_PC;
                emit(ARM_POP(reg_set), ctx);
        }

        if (!(ctx->seen & SEEN_CALL))
                emit(ARM_BX(ARM_LR), ctx);
#endif
}

/*
 * Encode an immediate as an ARM imm12 (an 8-bit value with a 4-bit
 * even rotation), or return a negative value if that is not possible.
 */
static int16_t imm8m(u32 x)
{
        u32 rot;

        for (rot = 0; rot < 16; rot++)
                if ((x & ~ror32(0xff, 2 * rot)) == 0)
                        return rol32(x, 2 * rot) | (rot << 8);

        return -1;
}

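/*
 * Example: imm8m(0x00ff0000) yields 0x8ff (0xff rotated right by
 * 2 * 8 = 16 bits), while imm8m(0x101) yields -1, since bits 0 and 8
 * span nine bit positions and fit no rotated 8-bit window.
 */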
#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
        unsigned i = 0, offset;
        u16 imm;

        /* on the "fake" run we just count them (duplicates included) */
        if (ctx->target == NULL) {
                ctx->imm_count++;
                return 0;
        }

        while ((i < ctx->imm_count) && ctx->imms[i]) {
                if (ctx->imms[i] == k)
                        break;
                i++;
        }

        if (ctx->imms[i] == 0)
                ctx->imms[i] = k;

        /* constants go just after the epilogue */
        offset = ctx->offsets[ctx->skf->len];
        offset += ctx->prologue_bytes;
        offset += ctx->epilogue_bytes;
        offset += i * 4;

        ctx->target[offset / 4] = k;

        /* PC in ARM mode == address of the instruction + 8 */
        imm = offset - (8 + ctx->idx * 4);

        return imm;
}
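
/*
 * Example: with the literal pool at byte offset 64 and the current LDR
 * at idx 8 (byte offset 32), imm is 64 - (8 + 32) = 24: the constant
 * sits 24 bytes past the PC value the instruction reads.
 */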

#endif /* __LINUX_ARM_ARCH__ */

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
        emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
        emit(ARM_MOVW(rd, val & 0xffff), ctx);
        if (val > 0xffff)
                emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
{
        int imm12 = imm8m(val);

        if (imm12 >= 0)
                emit(ARM_MOV_I(rd, imm12), ctx);
        else
                emit_mov_i_no8m(rd, val, ctx);
}

#if __LINUX_ARM_ARCH__ < 6

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
        _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
        _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
        _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
        _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
        _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
        _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
        _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
        _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
        _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
        _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
        _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
}

static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
{
        /* r_dst = (r_src << 8) | (r_src >> 8) */
        emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
        emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);

        /*
         * we need to mask out the bits set in r_dst[23:16] due to
         * the first shift instruction.
         *
         * note that 0x8ff is the encoded immediate 0x00ff0000.
         */
        emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
}
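
/*
 * Example: for r_src = 0x0000aabb the two shifts produce
 * 0x00aabb00 | 0x000000aa = 0x00aabbaa; clearing bits [23:16] leaves
 * the byte-swapped halfword 0x0000bbaa.
 */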

#else /* ARMv6+ */

static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
        _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
        _emit(cond, ARM_REV(r_res, r_res), ctx);
#endif
}

static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
{
        _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
#ifdef __LITTLE_ENDIAN
        _emit(cond, ARM_REV16(r_res, r_res), ctx);
#endif
}

static inline void emit_swap16(u8 r_dst __maybe_unused,
                               u8 r_src __maybe_unused,
                               struct jit_ctx *ctx __maybe_unused)
{
#ifdef __LITTLE_ENDIAN
        emit(ARM_REV16(r_dst, r_src), ctx);
#endif
}

#endif /* __LINUX_ARM_ARCH__ < 6 */

/* Compute the immediate value for a PC-relative branch. */
static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
{
        u32 imm;

        if (ctx->target == NULL)
                return 0;
        /*
         * BPF allows only forward jumps and the offset of the target is
         * still the one computed during the first pass.
         */
        imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);

        return imm >> 2;
}
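
/*
 * Example: a branch at idx 10 (byte offset 40, so its PC reads as 48)
 * to a target at byte offset 64 (prologue included) encodes
 * (64 - 48) >> 2 = 4 words.
 */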

#define OP_IMM3(op, r1, r2, imm_val, ctx)                               \
        do {                                                            \
                imm12 = imm8m(imm_val);                                 \
                if (imm12 < 0) {                                        \
                        emit_mov_i_no8m(r_scratch, imm_val, ctx);       \
                        emit(op ## _R((r1), (r2), r_scratch), ctx);     \
                } else {                                                \
                        emit(op ## _I((r1), (r2), imm12), ctx);         \
                }                                                       \
        } while (0)
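
/*
 * OP_IMM3 expects a local imm12 variable in scope (see build_body).
 * It emits a single ALU instruction when imm_val is encodable, e.g.
 * OP_IMM3(ARM_ADD, r_A, r_A, k, ctx), and otherwise materializes the
 * constant in r_scratch and uses the register form of the operation.
 */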

static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
{
        if (ctx->ret0_fp_idx >= 0) {
                _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
                /* NOP to keep the size constant between passes */
                emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
        } else {
                _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
                _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
        }
}
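
/*
 * On pre-ARMv5 cores there is no BLX, so the return address is set up
 * by hand: "mov lr, pc" reads PC as the mov's address + 8, which is
 * exactly the instruction following the jump emitted right after it.
 */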
static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
        emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);

        if (elf_hwcap & HWCAP_THUMB)
                emit(ARM_BX(tgt_reg), ctx);
        else
                emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
#else
        emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ == 7
        if (elf_hwcap & HWCAP_IDIVA) {
                emit(ARM_UDIV(rd, rm, rn), ctx);
                return;
        }
#endif

        /*
         * For the BPF_ALU | BPF_DIV | BPF_K case the caller passes
         * rn == r_scratch == ARM_R0, so move the divisor into ARM_R1
         * first; moving rm into ARM_R0 beforehand would clobber it.
         */
        if (rn != ARM_R1)
                emit(ARM_MOV_R(ARM_R1, rn), ctx);
        if (rm != ARM_R0)
                emit(ARM_MOV_R(ARM_R0, rm), ctx);

        ctx->seen |= SEEN_CALL;
        emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
        emit_blx_r(ARM_R3, ctx);

        if (rd != ARM_R0)
                emit(ARM_MOV_R(rd, ARM_R0), ctx);
}

static inline void update_on_xread(struct jit_ctx *ctx)
{
        if (!(ctx->seen & SEEN_X))
                ctx->flags |= FLAG_NEED_X_RESET;

        ctx->seen |= SEEN_X;
}
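
/*
 * If the first access the program makes to X is a read, the flag makes
 * build_prologue() zero r_X, so the filter sees X == 0 (as with the
 * interpreter) rather than whatever the register happened to hold.
 */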

static int build_body(struct jit_ctx *ctx)
{
        void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
        const struct bpf_prog *prog = ctx->skf;
        const struct sock_filter *inst;
        unsigned i, load_order, off, condt;
        int imm12;
        u32 k;

        for (i = 0; i < prog->len; i++) {
                u16 code;

                inst = &(prog->insns[i]);
                /* K as an immediate value operand */
                k = inst->k;
                code = bpf_anc_helper(inst);

                /* compute offsets only in the fake pass */
                if (ctx->target == NULL)
                        ctx->offsets[i] = ctx->idx * 4;

                switch (code) {
                case BPF_LD | BPF_IMM:
                        emit_mov_i(r_A, k, ctx);
                        break;
                case BPF_LD | BPF_W | BPF_LEN:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                        emit(ARM_LDR_I(r_A, r_skb,
                                       offsetof(struct sk_buff, len)), ctx);
                        break;
                case BPF_LD | BPF_MEM:
                        /* A = scratch[k] */
                        ctx->seen |= SEEN_MEM_WORD(k);
                        emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
                        break;
                case BPF_LD | BPF_W | BPF_ABS:
                        load_order = 2;
                        goto load;
                case BPF_LD | BPF_H | BPF_ABS:
                        load_order = 1;
                        goto load;
                case BPF_LD | BPF_B | BPF_ABS:
                        load_order = 0;
load:
                        /* the interpreter will deal with the negative K */
                        if ((int)k < 0)
                                return -1;
                        emit_mov_i(r_off, k, ctx);
load_common:
                        ctx->seen |= SEEN_DATA | SEEN_CALL;

                        if (load_order > 0) {
                                emit(ARM_SUB_I(r_scratch, r_skb_hl,
                                               1 << load_order), ctx);
                                emit(ARM_CMP_R(r_scratch, r_off), ctx);
                                condt = ARM_COND_HS;
                        } else {
                                emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
                                condt = ARM_COND_HI;
                        }

                        _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
                              ctx);

                        if (load_order == 0)
                                _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
                                      ctx);
                        else if (load_order == 1)
                                emit_load_be16(condt, r_A, r_scratch, ctx);
                        else if (load_order == 2)
                                emit_load_be32(condt, r_A, r_scratch, ctx);

                        _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);

                        /* the slowpath */
                        emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
                        emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
                        /* the offset is already in R1 */
                        emit_blx_r(ARM_R3, ctx);
                        /* check the result of skb_copy_bits */
                        emit(ARM_CMP_I(ARM_R1, 0), ctx);
                        emit_err_ret(ARM_COND_NE, ctx);
                        emit(ARM_MOV_R(r_A, ARM_R0), ctx);
                        break;
                case BPF_LD | BPF_W | BPF_IND:
                        load_order = 2;
                        goto load_ind;
                case BPF_LD | BPF_H | BPF_IND:
                        load_order = 1;
                        goto load_ind;
                case BPF_LD | BPF_B | BPF_IND:
                        load_order = 0;
load_ind:
                        OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
                        goto load_common;
                case BPF_LDX | BPF_IMM:
                        ctx->seen |= SEEN_X;
                        emit_mov_i(r_X, k, ctx);
                        break;
                case BPF_LDX | BPF_W | BPF_LEN:
                        ctx->seen |= SEEN_X | SEEN_SKB;
                        emit(ARM_LDR_I(r_X, r_skb,
                                       offsetof(struct sk_buff, len)), ctx);
                        break;
                case BPF_LDX | BPF_MEM:
                        ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
                        emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
                        break;
                case BPF_LDX | BPF_B | BPF_MSH:
                        /* x = ((*(frame + k)) & 0xf) << 2; */
                        ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
                        /* the interpreter should deal with the negative K */
                        if ((int)k < 0)
                                return -1;
                        /* offset in r1: we might have to take the slow path */
                        emit_mov_i(r_off, k, ctx);
                        emit(ARM_CMP_R(r_skb_hl, r_off), ctx);

                        /* load in r0: common with the slowpath */
                        _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
                                                      ARM_R1), ctx);
                        /*
                         * emit_mov_i() might generate one or two instructions,
                         * the same holds for emit_blx_r()
                         */
                        _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);

                        emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
                        /* r_off is r1 */
                        emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
                        emit_blx_r(ARM_R3, ctx);
                        /* check the return value of skb_copy_bits */
                        emit(ARM_CMP_I(ARM_R1, 0), ctx);
                        emit_err_ret(ARM_COND_NE, ctx);

                        emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
                        emit(ARM_LSL_I(r_X, r_X, 2), ctx);
                        break;
                case BPF_ST:
                        ctx->seen |= SEEN_MEM_WORD(k);
                        emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
                        break;
                case BPF_STX:
                        update_on_xread(ctx);
                        ctx->seen |= SEEN_MEM_WORD(k);
                        emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
                        break;
                case BPF_ALU | BPF_ADD | BPF_K:
                        /* A += K */
                        OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
                        break;
                case BPF_ALU | BPF_ADD | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_SUB | BPF_K:
                        /* A -= K */
                        OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
                        break;
                case BPF_ALU | BPF_SUB | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_MUL | BPF_K:
                        /* A *= K */
                        emit_mov_i(r_scratch, k, ctx);
                        emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
                        break;
                case BPF_ALU | BPF_MUL | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_MUL(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_DIV | BPF_K:
                        if (k == 1)
                                break;
                        emit_mov_i(r_scratch, k, ctx);
                        emit_udiv(r_A, r_A, r_scratch, ctx);
                        break;
                case BPF_ALU | BPF_DIV | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_CMP_I(r_X, 0), ctx);
                        emit_err_ret(ARM_COND_EQ, ctx);
                        emit_udiv(r_A, r_A, r_X, ctx);
                        break;
                case BPF_ALU | BPF_OR | BPF_K:
                        /* A |= K */
                        OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
                        break;
                case BPF_ALU | BPF_OR | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_XOR | BPF_K:
                        /* A ^= K */
                        OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
                        break;
                case BPF_ANC | SKF_AD_ALU_XOR_X:
                case BPF_ALU | BPF_XOR | BPF_X:
                        /* A ^= X */
                        update_on_xread(ctx);
                        emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_AND | BPF_K:
                        /* A &= K */
                        OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
                        break;
                case BPF_ALU | BPF_AND | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_AND_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_LSH | BPF_K:
                        if (unlikely(k > 31))
                                return -1;
                        emit(ARM_LSL_I(r_A, r_A, k), ctx);
                        break;
                case BPF_ALU | BPF_LSH | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_RSH | BPF_K:
                        if (unlikely(k > 31))
                                return -1;
                        emit(ARM_LSR_I(r_A, r_A, k), ctx);
                        break;
                case BPF_ALU | BPF_RSH | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
                        break;
                case BPF_ALU | BPF_NEG:
                        /* A = -A */
                        emit(ARM_RSB_I(r_A, r_A, 0), ctx);
                        break;
                case BPF_JMP | BPF_JA:
                        /* pc += K */
                        emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
                        break;
                case BPF_JMP | BPF_JEQ | BPF_K:
                        /* pc += (A == K) ? pc->jt : pc->jf */
                        condt = ARM_COND_EQ;
                        goto cmp_imm;
                case BPF_JMP | BPF_JGT | BPF_K:
                        /* pc += (A > K) ? pc->jt : pc->jf */
                        condt = ARM_COND_HI;
                        goto cmp_imm;
                case BPF_JMP | BPF_JGE | BPF_K:
                        /* pc += (A >= K) ? pc->jt : pc->jf */
                        condt = ARM_COND_HS;
cmp_imm:
                        imm12 = imm8m(k);
                        if (imm12 < 0) {
                                emit_mov_i_no8m(r_scratch, k, ctx);
                                emit(ARM_CMP_R(r_A, r_scratch), ctx);
                        } else {
                                emit(ARM_CMP_I(r_A, imm12), ctx);
                        }
cond_jump:
                        /* generate the branches */
                        if (inst->jt)
                                _emit(condt, ARM_B(b_imm(i + inst->jt + 1,
                                                         ctx)), ctx);
                        if (inst->jf)
                                _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
                                                             ctx)), ctx);
                        break;
                case BPF_JMP | BPF_JEQ | BPF_X:
                        /* pc += (A == X) ? pc->jt : pc->jf */
                        condt = ARM_COND_EQ;
                        goto cmp_x;
                case BPF_JMP | BPF_JGT | BPF_X:
                        /* pc += (A > X) ? pc->jt : pc->jf */
                        condt = ARM_COND_HI;
                        goto cmp_x;
                case BPF_JMP | BPF_JGE | BPF_X:
                        /* pc += (A >= X) ? pc->jt : pc->jf */
                        condt = ARM_COND_HS;
cmp_x:
                        update_on_xread(ctx);
                        emit(ARM_CMP_R(r_A, r_X), ctx);
                        goto cond_jump;
                case BPF_JMP | BPF_JSET | BPF_K:
                        /* pc += (A & K) ? pc->jt : pc->jf */
                        condt = ARM_COND_NE;
                        /* not set iff all zeroes iff Z==1 iff EQ */
                        imm12 = imm8m(k);
                        if (imm12 < 0) {
                                emit_mov_i_no8m(r_scratch, k, ctx);
                                emit(ARM_TST_R(r_A, r_scratch), ctx);
                        } else {
                                emit(ARM_TST_I(r_A, imm12), ctx);
                        }
                        goto cond_jump;
                case BPF_JMP | BPF_JSET | BPF_X:
                        /* pc += (A & X) ? pc->jt : pc->jf */
                        update_on_xread(ctx);
                        condt = ARM_COND_NE;
                        emit(ARM_TST_R(r_A, r_X), ctx);
                        goto cond_jump;
                case BPF_RET | BPF_A:
                        emit(ARM_MOV_R(ARM_R0, r_A), ctx);
                        goto b_epilogue;
                case BPF_RET | BPF_K:
                        if ((k == 0) && (ctx->ret0_fp_idx < 0))
                                ctx->ret0_fp_idx = i;
                        emit_mov_i(ARM_R0, k, ctx);
b_epilogue:
                        if (i != ctx->skf->len - 1)
                                emit(ARM_B(b_imm(prog->len, ctx)), ctx);
                        break;
                case BPF_MISC | BPF_TAX:
                        /* X = A */
                        ctx->seen |= SEEN_X;
                        emit(ARM_MOV_R(r_X, r_A), ctx);
                        break;
                case BPF_MISC | BPF_TXA:
                        /* A = X */
                        update_on_xread(ctx);
                        emit(ARM_MOV_R(r_A, r_X), ctx);
                        break;
                case BPF_ANC | SKF_AD_PROTOCOL:
                        /* A = ntohs(skb->protocol) */
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  protocol) != 2);
                        off = offsetof(struct sk_buff, protocol);
                        emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
                        emit_swap16(r_A, r_scratch, ctx);
                        break;
                case BPF_ANC | SKF_AD_CPU:
                        /* r_scratch = current_thread_info() */
                        OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
                        /* A = current_thread_info()->cpu */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
                        off = offsetof(struct thread_info, cpu);
                        emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
                        break;
                case BPF_ANC | SKF_AD_IFINDEX:
                        /* A = skb->dev->ifindex */
                        ctx->seen |= SEEN_SKB;
                        off = offsetof(struct sk_buff, dev);
                        emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);

                        emit(ARM_CMP_I(r_scratch, 0), ctx);
                        emit_err_ret(ARM_COND_EQ, ctx);

                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                  ifindex) != 4);
                        off = offsetof(struct net_device, ifindex);
                        emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
                        break;
                case BPF_ANC | SKF_AD_MARK:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
                        off = offsetof(struct sk_buff, mark);
                        emit(ARM_LDR_I(r_A, r_skb, off), ctx);
                        break;
                case BPF_ANC | SKF_AD_RXHASH:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
                        off = offsetof(struct sk_buff, hash);
                        emit(ARM_LDR_I(r_A, r_skb, off), ctx);
                        break;
                case BPF_ANC | SKF_AD_VLAN_TAG:
                case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
                        off = offsetof(struct sk_buff, vlan_tci);
                        emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                        if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
                                OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
                        else
                                OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
                        break;
                case BPF_ANC | SKF_AD_QUEUE:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  queue_mapping) != 2);
                        BUILD_BUG_ON(offsetof(struct sk_buff,
                                              queue_mapping) > 0xff);
                        off = offsetof(struct sk_buff, queue_mapping);
                        emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                        break;
                default:
                        return -1;
                }
        }

        /* compute offsets only during the first pass */
        if (ctx->target == NULL)
                ctx->offsets[i] = ctx->idx * 4;

        return 0;
}
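
/*
 * Compilation runs in two passes over the filter: the first, with
 * ctx.target == NULL, records per-instruction offsets, sets the seen
 * flags and (pre-ARMv7) counts literal-pool constants; once the sizes
 * of the body, prologue and epilogue are known, the image is allocated
 * and the second pass emits the final instructions.
 */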
void bpf_jit_compile(struct bpf_prog *fp)
{
        struct bpf_binary_header *header;
        struct jit_ctx ctx;
        unsigned tmp_idx;
        unsigned alloc_size;
        u8 *target_ptr;

        if (!bpf_jit_enable)
                return;

        memset(&ctx, 0, sizeof(ctx));
        ctx.skf = fp;
        ctx.ret0_fp_idx = -1;

        ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
        if (ctx.offsets == NULL)
                return;

        /* fake pass to fill in the ctx->seen */
        if (unlikely(build_body(&ctx)))
                goto out;

        tmp_idx = ctx.idx;
        build_prologue(&ctx);
        ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;

#if __LINUX_ARM_ARCH__ < 7
        tmp_idx = ctx.idx;
        build_epilogue(&ctx);
        ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;

        ctx.idx += ctx.imm_count;
        if (ctx.imm_count) {
                ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
                if (ctx.imms == NULL)
                        goto out;
        }
#else
        /* there's nothing after the epilogue on ARMv7 */
        build_epilogue(&ctx);
#endif

        alloc_size = 4 * ctx.idx;
        header = bpf_jit_binary_alloc(alloc_size, &target_ptr,
                                      4, jit_fill_hole);
        if (header == NULL)
                goto out;

        ctx.target = (u32 *) target_ptr;
        ctx.idx = 0;

        build_prologue(&ctx);
        build_body(&ctx);
        build_epilogue(&ctx);

        flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));

#if __LINUX_ARM_ARCH__ < 7
        if (ctx.imm_count)
                kfree(ctx.imms);
#endif

        if (bpf_jit_enable > 1)
                /* there are 2 passes here */
                bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

        set_memory_ro((unsigned long)header, header->pages);
        fp->bpf_func = (void *)ctx.target;
        fp->jited = 1;
out:
        kfree(ctx.offsets);
}

void bpf_jit_free(struct bpf_prog *fp)
{
        unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
        struct bpf_binary_header *header = (void *)addr;

        if (!fp->jited)
                goto free_filter;

        set_memory_rw(addr, header->pages);
        bpf_jit_binary_free(header);

free_filter:
        bpf_prog_unlock_free(fp);
}