/*
 * Just-In-Time compiler for BPF filters on MIPS
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/kconfig.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>

#include "bpf_jit.h"
/* ABI
 *
 * s0	1st scratch register
 * s1	2nd scratch register
 *
 * On entry (*bpf_func)(*skb, *filter)
 * a0 = MIPS_R_A0 = skb;
 * a1 = MIPS_R_A1 = filter;
 *
 * Stack (grows down; only the registers actually used get a slot)
 * ...
 * saved reg 0	<-- r_sp
 * ...
 *
 * Packet layout in memory:
 *
 * <--------------------- len ------------------------>
 * <--skb-len(r_skb_hl)--><----- skb->data_len ------->
 * ----------------------------------------------------
 * |                  skb->data                        |
 * ----------------------------------------------------
 */
#define RSIZE	(sizeof(unsigned long))
#define ptr	typeof(unsigned long)
/* ABI specific return values */
#ifdef CONFIG_32BIT /* O32 */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define r_err	MIPS_R_V1
#define r_val	MIPS_R_V0
#else /* CONFIG_CPU_LITTLE_ENDIAN */
#define r_err	MIPS_R_V0
#define r_val	MIPS_R_V1
#endif
#else /* N64 */
#define r_err	MIPS_R_V0
#define r_val	MIPS_R_V0
#endif

#define r_ret	MIPS_R_V0
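/*
 * A sketch of why r_err/r_val depend on endianness (inferred from the
 * jit_get_skb_*() helpers below, which pack "err << 32 | value" into a u64):
 * on O32 a 64-bit return value comes back in the $v0/$v1 register pair, so
 * the error half lands in $v1 on little endian but in $v0 on big endian.
 * On N64 the whole u64 fits in $v0, hence r_err == r_val there and the
 * error code is extracted with a 32-bit right shift instead (see the
 * dsrl32 users further down).
 */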
/*
 * Use 2 scratch registers to avoid pipeline interlocks.
 * There is no overhead during epilogue and prologue since
 * any of the $s0-$s6 registers will only be preserved if
 * they are going to actually be used.
 */
#define r_s0		MIPS_R_S0 /* scratch reg 1 */
#define r_s1		MIPS_R_S1 /* scratch reg 2 */
#define r_off		MIPS_R_S2
#define r_A		MIPS_R_S3
#define r_X		MIPS_R_S4
#define r_skb		MIPS_R_S5
#define r_M		MIPS_R_S6
#define r_tmp_imm	MIPS_R_T6 /* No need to preserve this */
#define r_tmp		MIPS_R_T7 /* No need to preserve this */
#define r_zero		MIPS_R_ZERO
#define r_sp		MIPS_R_SP
#define r_ra		MIPS_R_RA
#define SCRATCH_OFF(k)		(4 * (k))

#define SEEN_CALL		(1 << BPF_MEMWORDS)
#define SEEN_SREG_SFT		(BPF_MEMWORDS + 1)
#define SEEN_SREG_BASE		(1 << SEEN_SREG_SFT)
#define SEEN_SREG(x)		(SEEN_SREG_BASE << (x))
#define SEEN_S0			SEEN_SREG(0)
#define SEEN_S1			SEEN_SREG(1)
#define SEEN_OFF		SEEN_SREG(2)
#define SEEN_A			SEEN_SREG(3)
#define SEEN_X			SEEN_SREG(4)
#define SEEN_SKB		SEEN_SREG(5)
#define SEEN_MEM		SEEN_SREG(6)
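/*
 * Resulting flag layout, as a sketch (BPF_MEMWORDS is 16 in the classic
 * BPF ABI, so the bit positions below assume that value):
 *
 *   bits  0-15: reserved, one per BPF_MEMWORDS scratch word (this file
 *               only uses the coarse SEEN_MEM flag above instead)
 *   bit     16: SEEN_CALL - the JIT code calls out to a helper
 *   bits 17-23: SEEN_SREG(0..6) - callee-saved $s0..$s6 actually used
 *
 * ctx->flags >> SEEN_SREG_SFT therefore yields a plain bitmap of the $sN
 * registers the prologue must save and the epilogue must restore.
 */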
/* Arguments used by JIT */
#define ARGS_USED_BY_JIT	2 /* only applicable to 64-bit */

#define SBIT(x)			(1 << (x)) /* Signed version of BIT() */
/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @prologue_bytes:	Number of bytes for prologue
 * @idx:		Instruction index
 * @flags:		JIT status flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned int prologue_bytes;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
};
static inline int optimize_div(u32 *k)
{
	/* power of 2 divides can be implemented with right shift */
	if (!(*k & (*k - 1))) {
		*k = ilog2(*k);
		return 1;
	}

	return 0;
}
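/*
 * Worked example (mine, not from the original source): a filter doing
 * "A /= 8" hits optimize_div() with *k == 8; since 8 & 7 == 0, *k becomes
 * ilog2(8) == 3 and the BPF_DIV case below emits a single
 * "srl r_A, r_A, 3" instead of a divu/mflo pair.
 */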
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	/* Always count the instruction so the sizing pass is correct */ \
	(ctx)->idx++;					\
} while (0)
/* Determine whether an immediate is within the 16-bit signed range */
static inline bool is_range16(s32 imm)
{
	return !(imm >= SBIT(15) || imm < -SBIT(15));
}
static inline void emit_addu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, addu, dst, src1, src2);
}

static inline void emit_nop(struct jit_ctx *ctx)
{
	emit_instr(ctx, nop);
}
/* Load a u32 immediate into a register */
static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		/* addiu can only handle s16 */
		if (!is_range16(imm)) {
			u32 *p = &ctx->target[ctx->idx];

			uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
			p = &ctx->target[ctx->idx + 1];
			uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
		} else {
			u32 *p = &ctx->target[ctx->idx];

			uasm_i_addiu(&p, dst, r_zero, imm);
		}
	}
	ctx->idx++;

	/* A wide immediate costs a second (ori) instruction */
	if (!is_range16(imm))
		ctx->idx++;
}
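/*
 * Sketch of what emit_load_imm() produces (illustrative values):
 *
 *   emit_load_imm(r_A, 0x1234, ctx)     ->  addiu r_A, $zero, 0x1234
 *   emit_load_imm(r_A, 0xdeadbeef, ctx) ->  lui   r_tmp_imm, 0xdead
 *                                           ori   r_A, r_tmp_imm, 0xbeef
 *
 * which is why the wide-immediate path bumps ctx->idx twice.
 */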
static inline void emit_or(unsigned int dst, unsigned int src1,
			   unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, or, dst, src1, src2);
}

static inline void emit_ori(unsigned int dst, unsigned int src, u32 imm,
			    struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_or(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, ori, dst, src, imm);
	}
}
static inline void emit_daddu(unsigned int dst, unsigned int src1,
			      unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, daddu, dst, src1, src2);
}

static inline void emit_daddiu(unsigned int dst, unsigned int src,
			       int imm, struct jit_ctx *ctx)
{
	/*
	 * Only used for stack, so the imm is relatively small
	 * and it fits in 15 bits
	 */
	emit_instr(ctx, daddiu, dst, src, imm);
}
static inline void emit_addiu(unsigned int dst, unsigned int src,
			      u32 imm, struct jit_ctx *ctx)
{
	if (!is_range16(imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_addu(dst, r_tmp, src, ctx);
	} else {
		emit_instr(ctx, addiu, dst, src, imm);
	}
}

static inline void emit_and(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, and, dst, src1, src2);
}
static inline void emit_andi(unsigned int dst, unsigned int src,
			     u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_and(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, andi, dst, src, imm);
	}
}

static inline void emit_xor(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, xor, dst, src1, src2);
}
static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it to a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_xor(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, xori, dst, src, imm);
	}
}
static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
{
	if (config_enabled(CONFIG_64BIT))
		emit_instr(ctx, daddiu, r_sp, r_sp, offset);
	else
		emit_instr(ctx, addiu, r_sp, r_sp, offset);
}
static inline void emit_subu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, subu, dst, src1, src2);
}

static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
{
	emit_subu(reg, r_zero, reg, ctx);
}

static inline void emit_sllv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, sllv, dst, src, sa);
}
static inline void emit_sll(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5 bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, sll, dst, src, sa);
}
static inline void emit_srlv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, srlv, dst, src, sa);
}

static inline void emit_srl(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5 bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, srl, dst, src, sa);
}
static inline void emit_slt(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, slt, dst, src1, src2);
}

static inline void emit_sltu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, sltu, dst, src1, src2);
}
static inline void emit_sltiu(unsigned int dst, unsigned int src,
			      unsigned int imm, struct jit_ctx *ctx)
{
	/* sltiu only takes a 16-bit immediate */
	if (!is_range16((s32)imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_sltu(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, sltiu, dst, src, imm);
	}
}
/* Store a register on the stack */
static inline void emit_store_stack_reg(ptr reg, ptr base,
					unsigned int offset,
					struct jit_ctx *ctx)
{
	if (config_enabled(CONFIG_64BIT))
		emit_instr(ctx, sd, reg, offset, base);
	else
		emit_instr(ctx, sw, reg, offset, base);
}

static inline void emit_store(ptr reg, ptr base, unsigned int offset,
			      struct jit_ctx *ctx)
{
	emit_instr(ctx, sw, reg, offset, base);
}
static inline void emit_load_stack_reg(ptr reg, ptr base,
				       unsigned int offset,
				       struct jit_ctx *ctx)
{
	if (config_enabled(CONFIG_64BIT))
		emit_instr(ctx, ld, reg, offset, base);
	else
		emit_instr(ctx, lw, reg, offset, base);
}

static inline void emit_load(unsigned int reg, unsigned int base,
			     unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lw, reg, offset, base);
}
static inline void emit_load_byte(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lb, reg, offset, base);
}

static inline void emit_half_load(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lh, reg, offset, base);
}
static inline void emit_mul(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, mul, dst, src1, src2);
}
static inline void emit_div(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];

		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mflo(&p, dst);
	}
	ctx->idx += 2; /* 2 insts */
}
static inline void emit_mod(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];

		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		/* The remainder of a divu lands in HI, not LO */
		uasm_i_mfhi(&p, dst);
	}
	ctx->idx += 2; /* 2 insts */
}
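/*
 * divu leaves the quotient in LO and the remainder in HI, e.g. for
 * dst = 7, src = 3: LO = 2, HI = 1. That is why emit_div() reads LO via
 * mflo while emit_mod() must read HI via mfhi (reading LO here would
 * silently turn A % X into A / X).
 */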
static inline void emit_dsll(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsll, dst, src, sa);
}

static inline void emit_dsrl32(unsigned int dst, unsigned int src,
			       unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsrl32, dst, src, sa);
}

static inline void emit_wsbh(unsigned int dst, unsigned int src,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, wsbh, dst, src);
}
/* Load a pointer to a register */
static inline void emit_load_ptr(unsigned int dst, unsigned int src,
				 int imm, struct jit_ctx *ctx)
{
	/* src contains the base addr of the 32/64-bit pointer */
	if (config_enabled(CONFIG_64BIT))
		emit_instr(ctx, ld, dst, imm, src);
	else
		emit_instr(ctx, lw, dst, imm, src);
}
/* Load a function pointer to a register */
static inline void emit_load_func(unsigned int reg, ptr imm,
				  struct jit_ctx *ctx)
{
	if (config_enabled(CONFIG_64BIT)) {
		/* At this point imm is always 64-bit */
		emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
	} else {
		emit_load_imm(reg, imm, ctx);
	}
}
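/*
 * 64-bit address materialization, sketched with a made-up address
 * 0x0000abcd12345678:
 *
 *   lui/ori  r_tmp, 0x0000abcd      # top 32 bits via emit_load_imm()
 *   dsll     r_tmp_imm, r_tmp, 16   # 0x00000000abcd0000
 *   ori      r_tmp, r_tmp_imm, 0x1234
 *   dsll     r_tmp_imm, r_tmp, 16   # 0x0000abcd12340000
 *   ori      reg, r_tmp_imm, 0x5678
 *
 * i.e. the classic shift-and-or expansion of a 64-bit constant.
 */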
/* Move to a real MIPS register */
static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	if (config_enabled(CONFIG_64BIT))
		emit_daddu(dst, src, r_zero, ctx);
	else
		emit_addu(dst, src, r_zero, ctx);
}

/* Move to a JIT (32-bit) register */
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_addu(dst, src, r_zero, ctx);
}
/* Compute the immediate value for PC-relative branches. */
static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (ctx->target == NULL)
		return 0;

	/*
	 * We want a pc-relative branch. We only do forward branches
	 * so tgt is always after pc. tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I:   target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return ctx->offsets[tgt] -
		(ctx->idx * 4 - ctx->prologue_bytes) - 4;
}
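/*
 * Worked example (illustrative numbers): suppose the branch sits at
 * ctx->idx == 10 with a 16-byte prologue, so its body offset is
 * 10 * 4 - 16 = 24, and the target instruction starts at
 * ctx->offsets[tgt] == 40. The delay slot lives at offset 28, and
 * 40 - 24 - 4 == 12 is exactly the displacement the hardware adds to
 * the delay-slot PC to land on the target.
 */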
static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
			      unsigned int imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];

		switch (cond) {
		case MIPS_COND_EQ:
			uasm_i_beq(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_NE:
			uasm_i_bne(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_ALL:
			uasm_i_b(&p, imm);
			break;
		default:
			pr_warn("%s: Unhandled branch conditional: %d\n",
				__func__, cond);
		}
	}
	ctx->idx++;
}
static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
{
	emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
}

static inline void emit_jalr(unsigned int link, unsigned int reg,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, jalr, link, reg);
}

static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
{
	emit_instr(ctx, jr, reg);
}
static inline u16 align_sp(unsigned int num)
{
	/* Double word alignment for 32-bit, quadword for 64-bit */
	unsigned int align = config_enabled(CONFIG_64BIT) ? 16 : 8;

	num = (num + (align - 1)) & -align;
	return num;
}
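/*
 * Example: align_sp(20) is 24 on a 32-bit kernel (8-byte alignment) and
 * 32 on a 64-bit kernel (16-byte alignment), matching the respective
 * o32/n64 stack frame requirements.
 */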
static bool is_load_to_a(u16 inst)
{
	switch (inst) {
	case BPF_LD | BPF_W | BPF_LEN:
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		/* These instructions overwrite A */
		return true;
	default:
		return false;
	}
}
static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
{
	int i = 0, real_off = 0;
	u32 sflags, tmp_flags;

	/* Adjust the stack pointer */
	emit_stack_offset(-align_sp(offset), ctx);

	if (ctx->flags & SEEN_CALL) {
		/* Argument save area */
		if (config_enabled(CONFIG_64BIT))
			/* Bottom of current frame */
			real_off = align_sp(offset) - RSIZE;
		else
			/* Top of previous frame */
			real_off = align_sp(offset) + RSIZE;
		emit_store_stack_reg(MIPS_R_A0, r_sp, real_off, ctx);
		emit_store_stack_reg(MIPS_R_A1, r_sp, real_off + RSIZE, ctx);

		real_off = 0;
	}

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is essentially a bitmap */
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					     ctx);
			real_off += RSIZE;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* save return address */
	if (ctx->flags & SEEN_CALL) {
		emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
		real_off += RSIZE;
	}

	/* Setup r_M leaving the alignment gap if necessary */
	if (ctx->flags & SEEN_MEM) {
		if (real_off % (RSIZE * 2))
			real_off += RSIZE;
		if (config_enabled(CONFIG_64BIT))
			emit_daddiu(r_M, r_sp, real_off, ctx);
		else
			emit_addiu(r_M, r_sp, real_off, ctx);
	}
}
static void restore_bpf_jit_regs(struct jit_ctx *ctx,
				 unsigned int offset)
{
	int i = 0, real_off = 0;
	u32 sflags, tmp_flags;

	if (ctx->flags & SEEN_CALL) {
		if (config_enabled(CONFIG_64BIT))
			/* Bottom of current frame */
			real_off = align_sp(offset) - RSIZE;
		else
			/* Top of previous frame */
			real_off = align_sp(offset) + RSIZE;
		emit_load_stack_reg(MIPS_R_A0, r_sp, real_off, ctx);
		emit_load_stack_reg(MIPS_R_A1, r_sp, real_off + RSIZE, ctx);

		real_off = 0;
	}

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is a bitmap */
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					    ctx);
			real_off += RSIZE;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* restore return address */
	if (ctx->flags & SEEN_CALL)
		emit_load_stack_reg(r_ra, r_sp, real_off, ctx);

	/* Restore the sp and discard the scratch memory */
	emit_stack_offset(align_sp(offset), ctx);
}
static unsigned int get_stack_depth(struct jit_ctx *ctx)
{
	int sp_off = 0;

	/* How many s* regs do we need to preserve? */
	sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * RSIZE;

	if (ctx->flags & SEEN_MEM)
		sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */

	if (ctx->flags & SEEN_CALL)
		/*
		 * The JIT code makes calls to external functions using 2
		 * arguments. Therefore, for o32 we don't need to allocate
		 * space because we don't care if the arguments are lost
		 * across calls. We do need however to preserve incoming
		 * arguments but the space is already allocated for us by
		 * the caller. On the other hand, for n64, we need to allocate
		 * this space ourselves. We need to preserve $ra as well.
		 */
		sp_off += config_enabled(CONFIG_64BIT) ?
			(ARGS_USED_BY_JIT + 1) * RSIZE : RSIZE;

	/*
	 * Subtract the bytes for the last register since we only care about
	 * the location on the stack pointer.
	 */
	return sp_off - RSIZE;
}
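/*
 * Frame sketch for a filter that uses $s0/$s1, scratch memory and makes
 * helper calls on a 32-bit kernel (RSIZE == 4); the numbers are what the
 * helpers above would compute, not taken from the original source:
 *
 *   sp_off = 2 * 4      ($s0, $s1)
 *          + 4 * 16     (M[0..15])
 *          + 4          ($ra, o32 case)
 *          - 4          (last slot sits at the final sp offset itself)
 *          = 72, which the callers round via align_sp(72) == 72.
 */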
static void build_prologue(struct jit_ctx *ctx)
{
	u16 first_inst = ctx->skf->insns[0].code;
	int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	save_bpf_jit_regs(ctx, sp_off);

	if (ctx->flags & SEEN_SKB)
		emit_reg_move(r_skb, MIPS_R_A0, ctx);

	if (ctx->flags & SEEN_X)
		emit_jit_reg_move(r_X, r_zero, ctx);

	/* Do not leak kernel data to userspace */
	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
		emit_jit_reg_move(r_A, r_zero, ctx);
}
static void build_epilogue(struct jit_ctx *ctx)
{
	unsigned int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	restore_bpf_jit_regs(ctx, sp_off);

	/* Return */
	emit_jr(r_ra, ctx);
	emit_nop(ctx);
}
static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
{
	u8 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 1);

	return (u64)err << 32 | ret;
}

static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
{
	u16 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 2);

	return (u64)err << 32 | ntohs(ret);
}

static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
{
	u32 ret;
	int err;

	err = skb_copy_bits(skb, offset, &ret, 4);

	return (u64)err << 32 | ntohl(ret);
}
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
static int pkt_type_offset(void)
{
	struct sk_buff skb_probe = {
		.pkt_type = ~0,
	};
	u8 *ct = (u8 *)&skb_probe;
	unsigned int off;

	for (off = 0; off < sizeof(struct sk_buff); off++) {
		if (ct[off] == PKT_TYPE_MAX)
			return off;
	}
	pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
	return -1;
}
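/*
 * The probe above works because pkt_type is a bitfield whose in-memory
 * position the C ABI hides: filling a zeroed sk_buff with pkt_type = ~0
 * (all 3 bits set) and scanning for the PKT_TYPE_MAX byte pattern
 * recovers its byte offset at runtime. E.g. on little endian the byte
 * holding the bitfield reads 0x07, on big endian 0xe0 (7 << 5).
 */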
static int build_body(struct jit_ctx *ctx)
{
	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned int i, off, load_order, condt;
	u32 k, b_off __maybe_unused;

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
			 __func__, inst->code, inst->jt, inst->jf, inst->k);

		k = inst->k;
		code = bpf_anc_helper(inst);

		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			/* A <- k ==> li r_A, k */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			/* A <- len ==> lw r_A, offset(skb) */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, len);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A <- M[k] ==> lw r_A, offset(M) */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			/* A <- P[k:4] */
			load_order = 2;
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			/* A <- P[k:2] */
			load_order = 1;
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			/* A <- P[k:1] */
			load_order = 0;
load:
			/* the interpreter will deal with the negative K */
			if ((int)k < 0)
				return -ENOTSUPP;

			emit_load_imm(r_off, k, ctx);
load_common:
			/*
			 * We may get here from the indirect loads so
			 * return if the offset is negative.
			 */
			emit_slt(r_s0, r_off, r_zero, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx); /* delay slot */

			ctx->flags |= SEEN_CALL | SEEN_OFF | SEEN_S0 |
				SEEN_SKB | SEEN_A;

			emit_load_func(r_s0, (ptr)load_func[load_order],
				       ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			/* Load second argument to delay slot */
			emit_reg_move(MIPS_R_A1, r_off, ctx);
			/* Check the error value */
			if (config_enabled(CONFIG_64BIT)) {
				/* Get error code from the top 32-bits */
				emit_dsrl32(r_s0, r_val, 0, ctx);
				/* Branch to 3 instructions ahead */
				emit_bcond(MIPS_COND_NE, r_s0, r_zero, 3 << 2,
					   ctx);
			} else {
				/* Branch to 3 instructions ahead */
				emit_bcond(MIPS_COND_NE, r_err, r_zero, 3 << 2,
					   ctx);
			}
			emit_nop(ctx); /* delay slot */
			/* We are good */
			emit_b(b_imm(i + 1, ctx), ctx);
			emit_jit_reg_move(r_A, r_val, ctx); /* delay slot */
			/* Return with error */
			emit_b(b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx); /* delay slot */
			break;
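		/*
		 * Sketch of what the common load path above emits for
		 * "A <- P[k:4]" around the helper call (register names as
		 * defined at the top of this file, offsets illustrative):
		 *
		 *   jalr  $ra, r_s0          # call jit_get_skb_w(skb, off)
		 *    move a1, r_off          # delay slot: second argument
		 *   bne   r_err, $zero, +12  # error? skip the success pair
		 *    nop
		 *   b     <next BPF insn>
		 *    move r_A, r_val         # delay slot: A = loaded word
		 *   b     <epilogue>         # error path
		 *    move r_ret, $zero       # delay slot: return 0
		 */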
		case BPF_LD | BPF_W | BPF_IND:
			/* A <- P[X + k:4] */
			load_order = 2;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			/* A <- P[X + k:2] */
			load_order = 1;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			/* A <- P[X + k:1] */
			load_order = 0;
load_ind:
			ctx->flags |= SEEN_OFF | SEEN_X;
			emit_addiu(r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			/* X <- k */
			ctx->flags |= SEEN_X;
			emit_load_imm(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_MEM:
			/* X <- M[k] */
			ctx->flags |= SEEN_X | SEEN_MEM;
			emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			/* X <- len */
			ctx->flags |= SEEN_X | SEEN_SKB;
			off = offsetof(struct sk_buff, len);
			emit_load(r_X, r_skb, off, ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* the interpreter will deal with the negative K */
			if ((int)k < 0)
				return -ENOTSUPP;

			/* X <- 4 * (P[k:1] & 0xf) */
			ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB;
			/* Load offset to a1 */
			emit_load_func(r_s0, (ptr)jit_get_skb_b, ctx);
			/*
			 * This may emit two instructions so it may not fit
			 * in the delay slot. So use a0 in the delay slot.
			 */
			emit_load_imm(MIPS_R_A1, k, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
			/* Check the error value */
			if (config_enabled(CONFIG_64BIT)) {
				/* Top 32-bits of $v0 on 64-bit */
				emit_dsrl32(r_s0, r_val, 0, ctx);
				emit_bcond(MIPS_COND_NE, r_s0, r_zero,
					   3 << 2, ctx);
			} else {
				emit_bcond(MIPS_COND_NE, r_err, r_zero,
					   3 << 2, ctx);
			}
			/* No need for delay slot */
			emit_nop(ctx);
			/* X <- P[k:1] & 0xf */
			emit_andi(r_X, r_val, 0xf, ctx);
			/* X << 2 */
			emit_b(b_imm(i + 1, ctx), ctx);
			emit_sll(r_X, r_X, 2, ctx); /* delay slot */
			/* Return with error */
			emit_b(b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_ret, 0, ctx); /* delay slot */
			break;
		case BPF_ST:
			/* M[k] <- A */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_STX:
			/* M[k] <- X */
			ctx->flags |= SEEN_MEM | SEEN_X;
			emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			/* A += X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_addu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, -k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			/* A -= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_subu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			/* Load K to scratch register before MUL */
			ctx->flags |= SEEN_A | SEEN_S0;
			emit_load_imm(r_s0, k, ctx);
			emit_mul(r_A, r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			/* A *= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_mul(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			/* A /= k */
			if (k == 1)
				break;
			if (optimize_div(&k)) {
				ctx->flags |= SEEN_A;
				emit_srl(r_A, r_A, k, ctx);
				break;
			}
			ctx->flags |= SEEN_A | SEEN_S0;
			emit_load_imm(r_s0, k, ctx);
			emit_div(r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_K:
			/* A %= k */
			if (k == 1) {
				ctx->flags |= SEEN_A;
				emit_jit_reg_move(r_A, r_zero, ctx);
			} else if (!(k & (k - 1))) {
				/* A %= 2^n just masks the low n bits */
				ctx->flags |= SEEN_A;
				emit_andi(r_A, r_A, k - 1, ctx);
			} else {
				ctx->flags |= SEEN_A | SEEN_S0;
				emit_load_imm(r_s0, k, ctx);
				emit_mod(r_A, r_s0, ctx);
			}
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			/* A /= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_val, 0, ctx); /* delay slot */
			emit_div(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_X:
			/* A %= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_val, 0, ctx); /* delay slot */
			emit_mod(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			ctx->flags |= SEEN_A;
			emit_ori(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			/* A |= X */
			ctx->flags |= SEEN_A | SEEN_X;
			/* r_X is a register operand, not an immediate */
			emit_or(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= k */
			ctx->flags |= SEEN_A;
			emit_xori(r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_xor(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			ctx->flags |= SEEN_A;
			emit_andi(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			/* A &= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_and(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			/* A <<= K */
			ctx->flags |= SEEN_A;
			emit_sll(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			/* A <<= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_sllv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			/* A >>= K */
			ctx->flags |= SEEN_A;
			emit_srl(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			/* A >>= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_srlv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			ctx->flags |= SEEN_A;
			emit_neg(r_A, ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit_b(b_imm(i + k + 1, ctx), ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += ( A == K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JEQ | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A == X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += ( A >= K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A >= X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += ( A > K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A > X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_X;
jmp_cmp:
			/* Greater or Equal */
			if ((condt & MIPS_COND_GE) ||
			    (condt & MIPS_COND_GT)) {
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_S0 | SEEN_A;
					emit_sltiu(r_s0, r_A, k, ctx);
				} else { /* X */
					ctx->flags |= SEEN_S0 | SEEN_A |
						SEEN_X;
					emit_sltu(r_s0, r_A, r_X, ctx);
				}
				/* A < (K|X) ? scratch = 1 */
				b_off = b_imm(i + inst->jf + 1, ctx);
				emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
					   ctx);
				emit_nop(ctx);
				/* A > (K|X) ? scratch = 0 */
				if (condt & MIPS_COND_GT) {
					/* Checking for equality */
					ctx->flags |= SEEN_S0 | SEEN_A | SEEN_X;
					if (condt & MIPS_COND_K)
						emit_load_imm(r_s0, k, ctx);
					else
						emit_jit_reg_move(r_s0, r_X,
								  ctx);
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* Finally, A > K|X */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				} else {
					/* A >= (K|X) so jump */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				}
			} else {
				/* A == K|X */
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_S0 | SEEN_A;
					emit_load_imm(r_s0, k, ctx);
					/* jump true */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1,
						      ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
				} else { /* X */
					/* jump true */
					ctx->flags |= SEEN_A | SEEN_X;
					b_off = b_imm(i + inst->jt + 1,
						      ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
				}
			}
			break;
		case BPF_JMP | BPF_JSET | BPF_K:
			ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A;
			/* pc += (A & K) ? pc -> jt : pc -> jf */
			emit_load_imm(r_s1, k, ctx);
			emit_and(r_s0, r_A, r_s1, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JSET | BPF_X:
			ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A;
			/* pc += (A & X) ? pc -> jt : pc -> jf */
			emit_and(r_s0, r_A, r_X, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_RET | BPF_A:
			ctx->flags |= SEEN_A;
			if (i != prog->len - 1)
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
			break;
		case BPF_RET | BPF_K:
			/*
			 * It can emit two instructions so it does not fit on
			 * the delay slot.
			 */
			emit_load_imm(r_ret, k, ctx);
			if (i != prog->len - 1) {
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
				emit_nop(ctx);
			}
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->flags |= SEEN_X | SEEN_A;
			emit_jit_reg_move(r_X, r_A, ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_jit_reg_move(r_A, r_X, ctx);
			break;
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit_half_load(r_A, r_skb, off, ctx);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
			/* This needs little endian fixup */
			if (cpu_has_mips_r2) {
				/* R2 and later have the wsbh instruction */
				emit_wsbh(r_A, r_A, ctx);
			} else {
				/* Get first byte */
				emit_andi(r_tmp_imm, r_A, 0xff, ctx);
				/* Shift it */
				emit_sll(r_tmp, r_tmp_imm, 8, ctx);
				/* Get second byte */
				emit_srl(r_tmp_imm, r_A, 8, ctx);
				emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
				/* Put everything together in r_A */
				emit_or(r_A, r_tmp, r_tmp_imm, ctx);
			}
#endif
			break;
		case BPF_ANC | SKF_AD_CPU:
			ctx->flags |= SEEN_A | SEEN_OFF;
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
						  cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			/* $28/gp points to the thread_info struct */
			emit_load(r_A, 28, off, ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
			/* A = skb->dev->ifindex */
			ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0;
			off = offsetof(struct sk_buff, dev);
			/* Load *dev pointer */
			emit_load_ptr(r_s0, r_skb, off, ctx);
			/* error (0) in the delay slot */
			emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			off = offsetof(struct net_device, ifindex);
			emit_load(r_A, r_s0, off, ctx);
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit_half_load(r_s0, r_skb, off, ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
			} else {
				emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
				/* return 1 if present */
				emit_sltu(r_A, r_zero, r_A, ctx);
			}
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->flags |= SEEN_SKB;

			off = pkt_type_offset();

			if (off < 0)
				return -1;
			emit_load_byte(r_tmp, r_skb, off, ctx);
			/* Keep only the last 3 bits */
			emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			/* Get the actual packet type to the lower 3 bits */
			emit_srl(r_A, r_A, 5, ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit_half_load(r_A, r_skb, off, ctx);
			break;
		default:
			pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
				 inst->code);
			return -1;
		}
	}
	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
int bpf_jit_enable __read_mostly;
void bpf_jit_compile(struct bpf_prog *fp)
{
	struct jit_ctx ctx;
	unsigned int alloc_size, tmp_idx;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));

	/* offsets[prog->len] is used as the epilogue target, hence the +1 */
	ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	ctx.skf = fp;

	if (build_body(&ctx))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
	/* just to complete the ctx.idx count */
	build_epilogue(&ctx);
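	/*
	 * At this point ctx.idx holds the instruction count of body +
	 * prologue + epilogue from the sizing pass (ctx.target is still
	 * NULL, so nothing was written); the same three build_* calls
	 * run again below against the real buffer.
	 */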
	alloc_size = 4 * ctx.idx;
	ctx.target = module_alloc(alloc_size);
	if (ctx.target == NULL)
		goto out;

	/* Clean it */
	memset(ctx.target, 0, alloc_size);

	ctx.idx = 0;

	/* Generate the actual JIT code */
	build_prologue(&ctx);
	build_body(&ctx);
	build_epilogue(&ctx);

	/* Update the icache */
	flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	fp->bpf_func = (void *)ctx.target;
	fp->jited = 1;

out:
	kfree(ctx.offsets);
}
void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_free(NULL, fp->bpf_func);

	bpf_prog_unlock_free(fp);
}