/* bpf_jit_comp.c: BPF JIT compiler for PPC64
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit.h"
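/*
 * bpf_jit.h supplies the pieces used throughout this file: struct
 * codegen_context, the SEEN_* feature flags, the r_* register assignments
 * and the PPC_*()/EMIT() instruction-emission macros.
 */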
int bpf_jit_enable __read_mostly;

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}
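/*
 * Code generation is driven by ctx->seen: the sizing pass records which
 * features the filter actually uses (SEEN_XREG, SEEN_DATAREF, SEEN_MEM plus
 * one bit per M[] slot), so the prologue and epilogue only save and restore
 * the state that is really needed.
 */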
static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	/* Make stackframe, if we need one at all. */
	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_STD(0, 1, 16);

			/* Back up non-volatile regs. */
			PPC_STD(r_D, 1, -(8*(32-r_D)));
			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_STD(i, 1, -(8*(32-i)));
			}
		}
		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
		     (-BPF_PPC_STACKFRAME & 0xfffc));
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 *  r_HL = skb->len - skb->data_len
		 *  r_D  = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		PPC_SUB(r_HL, r_HL, r_scratch1);
		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		PPC_LI(r_X, 0);
	}

	switch (filter[0].code) {
	case BPF_S_RET_K:
	case BPF_S_LD_W_LEN:
	case BPF_S_ANC_PROTOCOL:
	case BPF_S_ANC_IFINDEX:
	case BPF_S_ANC_MARK:
	case BPF_S_ANC_RXHASH:
	case BPF_S_ANC_VLAN_TAG:
	case BPF_S_ANC_VLAN_TAG_PRESENT:
	case BPF_S_ANC_CPU:
	case BPF_S_ANC_QUEUE:
	case BPF_S_LD_W_ABS:
	case BPF_S_LD_H_ABS:
	case BPF_S_LD_B_ABS:
		/* first instruction sets A register (or is RET 'constant') */
		break;
	default:
		/* make sure we don't leak kernel information to the user */
		PPC_LI(r_A, 0);
	}
}
static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
		if (ctx->seen & SEEN_DATAREF) {
			/* Restore LR and the saved non-volatile regs. */
			PPC_LD(0, 1, 16);
			PPC_MTLR(0);
			PPC_LD(r_D, 1, -(8*(32-r_D)));
			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_LD(i, 1, -(8*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */
	PPC_BLR();
}
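/*
 * For packet loads where the offset K is known at compile time we can pick
 * the right sk_load_*() helper variant up front: the plain fast path for
 * K >= 0, the "negative offset" variant for ancillary/link-layer offsets
 * down to SKF_LL_OFF, and the generic helper for anything else.
 */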
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;
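		/*
		 * e.g. a conditional jump at insn i with jt == 2 becomes a
		 * branch to addrs[i + 1 + 2]; addrs[flen] is the epilogue
		 * (exit_addr above).
		 */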
		switch (filter[i].code) {
			/*** ALU ops ***/
		case BPF_S_ALU_ADD_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			PPC_ADD(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_ADD_K: /* A += K; */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(K));
			break;
		case BPF_S_ALU_SUB_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SUB(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_SUB_K: /* A -= K */
			if (!K)
				break;
			PPC_ADDI(r_A, r_A, IMM_L(-K));
			if (K >= 32768)
				PPC_ADDIS(r_A, r_A, IMM_HA(-K));
			break;
		case BPF_S_ALU_MUL_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			PPC_MUL(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_MUL_K: /* A *= K */
			if (K < 32768)
				PPC_MULI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_MUL(r_A, r_A, r_scratch1);
			}
			break;
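			/*
			 * The divide/modulo cases below must guard against a
			 * zero divisor: if the filter has a "ret #0" handy
			 * (its index is cached in ctx->pc_ret0) we branch
			 * there, otherwise we emit a small local return-0
			 * exit sequence.
			 */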
		case BPF_S_ALU_MOD_X: /* A %= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_scratch1, r_A, r_X);
			PPC_MUL(r_scratch1, r_X, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_S_ALU_MOD_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			PPC_DIVWU(r_scratch1, r_A, r_scratch2);
			PPC_MUL(r_scratch1, r_scratch2, r_scratch1);
			PPC_SUB(r_A, r_A, r_scratch1);
			break;
		case BPF_S_ALU_DIV_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
			PPC_CMPWI(r_X, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/*
				 * Exit, returning 0; first pass hits here
				 * (longer worst-case code size).
				 */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			PPC_DIVWU(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
			/* K has already been replaced by the checker with its
			 * reciprocal_value(), so a 32x32 "multiply high"
			 * yields A / K (see reciprocal_divide()).
			 */
			PPC_LI32(r_scratch1, K);
			/* Top 32 bits of 64bit result -> A */
			PPC_MULHWU(r_A, r_A, r_scratch1);
			break;
		case BPF_S_ALU_AND_X:
			ctx->seen |= SEEN_XREG;
			PPC_AND(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_AND_K:
			if (!IMM_H(K))
				PPC_ANDI(r_A, r_A, K);
			else {
				PPC_LI32(r_scratch1, K);
				PPC_AND(r_A, r_A, r_scratch1);
			}
			break;
		case BPF_S_ALU_OR_X:
			ctx->seen |= SEEN_XREG;
			PPC_OR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_OR_K:
			if (IMM_L(K))
				PPC_ORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_ORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ANC_ALU_XOR_X:
		case BPF_S_ALU_XOR_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			PPC_XOR(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_XOR_K: /* A ^= K */
			if (IMM_L(K))
				PPC_XORI(r_A, r_A, IMM_L(K));
			if (K >= 65536)
				PPC_XORIS(r_A, r_A, IMM_H(K));
			break;
		case BPF_S_ALU_LSH_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SLW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_LSH_K:
			if (K == 0)
				break;
			else
				PPC_SLWI(r_A, r_A, K);
			break;
		case BPF_S_ALU_RSH_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			PPC_SRW(r_A, r_A, r_X);
			break;
		case BPF_S_ALU_RSH_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				PPC_SRWI(r_A, r_A, K);
			break;
			/*** Return codes ***/
		case BPF_S_RET_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up.  Otherwise,
			 * if there's nothing to tidy, just return.  If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2.  Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					PPC_BLR();
			}
			break;
		case BPF_S_MISC_TAX: /* X = A */
			PPC_MR(r_X, r_A);
			break;
		case BPF_S_MISC_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			PPC_MR(r_A, r_X);
			break;

			/*** Constant loads/M[] access ***/
		case BPF_S_LD_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_S_LDX_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_S_LD_MEM: /* A = mem[K] */
			PPC_MR(r_A, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LDX_MEM: /* X = mem[K] */
			PPC_MR(r_X, r_M + (K & 0xf));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_ST: /* mem[K] = A */
			PPC_MR(r_M + (K & 0xf), r_A);
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_STX: /* mem[K] = X */
			PPC_MR(r_M + (K & 0xf), r_X);
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_S_LD_W_LEN: /* A = skb->len; */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_S_LDX_W_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;
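			/*
			 * The BUILD_BUG_ON()s here and below catch layout
			 * changes at compile time: the JIT hard-codes field
			 * offsets and sizes into the generated loads.
			 */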
			/*** Ancillary info loads ***/
		case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_S_ANC_IFINDEX:
			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			PPC_CMPDI(r_scratch1, 0);
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				PPC_LI(r_ret, 0);
				PPC_JMP(exit_addr);
			}
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			PPC_LWZ_OFFS(r_A, r_scratch1,
				     offsetof(struct net_device, ifindex));
			break;
		case BPF_S_ANC_MARK:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_S_ANC_RXHASH:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  rxhash));
			break;
		case BPF_S_ANC_VLAN_TAG:
		case BPF_S_ANC_VLAN_TAG_PRESENT:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			if (filter[i].code == BPF_S_ANC_VLAN_TAG)
				PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
			else
				PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
			break;
		case BPF_S_ANC_QUEUE:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
			/*
			 * PACA ptr is r13:
			 * raw_smp_processor_id() = local_paca->paca_index
			 */
			BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct,
						  paca_index) != 2);
			PPC_LHZ_OFFS(r_A, 13,
				     offsetof(struct paca_struct, paca_index));
#else
			PPC_LI(r_A, 0);
#endif
			break;
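			/*
			 * The sk_load_{word,half,byte}* helpers are written in
			 * assembler (bpf_jit_64.S) and use a lightweight
			 * private calling convention, hence the mtlr/blrl
			 * sequences below and the LR save in the prologue for
			 * SEEN_DATAREF.  On error they set cr0 "lt" with the
			 * return value already in r3.
			 */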
			/*** Absolute loads from packet header/data ***/
		case BPF_S_LD_W_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_S_LD_H_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_S_LD_B_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_LI32(r_addr, K);
			PPC_BLRL();
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;
			/*** Indirect loads from packet header/data ***/
		case BPF_S_LD_W_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_S_LD_H_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_S_LD_B_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K].  Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_LI64(r_scratch1, func);
			PPC_MTLR(r_scratch1);
			PPC_ADDI(r_addr, r_X, IMM_L(K));
			if (K >= 32768)
				PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
			PPC_BLRL();
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;
		case BPF_S_LDX_B_MSH:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

			/*** Jump and branches ***/
		case BPF_S_JMP_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_S_JMP_JSET_K:
		case BPF_S_JMP_JSET_X:
			true_cond = COND_NE;
			/* Fall through */
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}
			switch (filter[i].code) {
			case BPF_S_JMP_JGT_X:
			case BPF_S_JMP_JGE_X:
			case BPF_S_JMP_JEQ_X:
				ctx->seen |= SEEN_XREG;
				PPC_CMPLW(r_A, r_X);
				break;
			case BPF_S_JMP_JSET_X:
				ctx->seen |= SEEN_XREG;
				PPC_AND_DOT(r_scratch1, r_A, r_X);
				break;
			case BPF_S_JMP_JEQ_K:
			case BPF_S_JMP_JGT_K:
			case BPF_S_JMP_JGE_K:
				if (K < 32768)
					PPC_CMPLWI(r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_CMPLW(r_A, r_scratch1);
				}
				break;
			case BPF_S_JMP_JSET_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					PPC_ANDI(r_scratch1, r_A, K);
				else {
					PPC_LI32(r_scratch1, K);
					PPC_AND_DOT(r_scratch1, r_A,
						    r_scratch1);
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
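			/*
			 * e.g. for "jgt #5, jt=0, jf=3" the true path falls
			 * through, so the code above emits a single "ble" to
			 * the false target instead of a "bgt" around an
			 * unconditional branch.
			 */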
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}
void bpf_jit_compile(struct sk_filter *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes.  Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction.  Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in).  Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used.  Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter.  On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code.  On subsequent passes, branches will be
	 * generated short or long and code size will reduce.  With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real.  Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long.  The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */
	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen.  This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

	code_base = image + (FUNCTION_DESCR_SIZE/4);
	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we dump code_base (where the opcodes live) rather
		 * than image, which starts with the function descriptor.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);
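	/*
	 * A ppc64 (ELFv1) function pointer points at a function descriptor,
	 * not at code: doubleword 0 is the entry address, doubleword 1 the
	 * TOC value for r2.  The descriptor is built at the start of the
	 * allocation (the FUNCTION_DESCR_SIZE reserved above) so fp->bpf_func
	 * can be called like any other kernel function.
	 */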
	if (image) {
		bpf_flush_icache(code_base, code_base + (proglen/4));
		/* Function descriptor nastiness: Address + TOC */
		((u64 *)image)[0] = (u64)code_base;
		((u64 *)image)[1] = local_paca->kernel_toc;
		fp->bpf_func = (void *)image;
	}
out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter)
		module_free(NULL, fp->bpf_func);
}