// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit64.h"
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]);
}
/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}
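/*
 * Without a stack frame, the scratch slots above live in the caller's
 * redzone: local_tmp_var starts at -(BPF_PPC_STACK_SAVE + 24) from r1 and
 * the tail call counter sits 16 bytes above it, i.e. at
 * -(BPF_PPC_STACK_SAVE + 8) -- the same slot the prologue initializes.
 */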
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}
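/* Nothing to do for the 64-bit JIT: the default register assignment is kept. */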
void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

#define BPF_TAILCALL_PROLOGUE_SIZE	8
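	/*
	 * BPF_TAILCALL_PROLOGUE_SIZE covers the two instructions above (the
	 * li/std pair, or the two nops): tail calls branch past them so that
	 * an already-initialized tail_call_cnt is not reset -- see
	 * bpf_jit_emit_tail_call().
	 */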
	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}
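	/*
	 * From here on r1 points at our own frame (when one was created);
	 * the NVR save offsets below are relative to its top, see
	 * bpf_jit_stack_offsetof().
	 */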
	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, b2p[i]))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, b2p[i]))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			EMIT(PPC_RAW_MTLR(0));
		}
	}
}
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));
	EMIT(PPC_RAW_BLR());
}
static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
				       u64 func)
{
#ifdef PPC64_ELF_ABI_v1
	/* func points to the function descriptor */
	PPC_LI64(b2p[TMP_REG_2], func);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
	/* ... and move it to CTR */
	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
	/* We can clobber r12 */
	PPC_FUNC_ADDR(12, func);
	EMIT(PPC_RAW_MTCTR(12));
#endif
	EMIT(PPC_RAW_BCTRL());
}
void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
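	/*
	 * For example, an address that happens to fit in 16 bits would be
	 * loaded with a single instruction and padded with four nops below,
	 * while a full 64-bit constant uses all five slots with no padding.
	 */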
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		EMIT(PPC_RAW_NOP());

#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

	EMIT(PPC_RAW_MTCTR(12));
	EMIT(PPC_RAW_BCTRL());
}
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
	PPC_BCC(COND_GE, out);

	/* tail_call_cnt++; */
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
	EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
#else
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
#endif
	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}
/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"		.global bpf_stf_barrier		;"
"	bpf_stf_barrier:			;"
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, int pass)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];
	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		u32 size = BPF_SIZE(code);
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, src_reg);

		switch (code) {
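		/*
		 * Each case below emits the powerpc sequence for one eBPF
		 * instruction; the 32-bit ALU cases funnel through the
		 * bpf_alu32_trunc label to clear the upper half of the
		 * destination register.
		 */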
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			else
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				else
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
				EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else
				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else
				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}
			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2], dst_reg, b2p[TMP_REG_1]));
					EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p[TMP_REG_2]));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
				} else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, b2p[TMP_REG_1]));
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2], dst_reg, b2p[TMP_REG_1]));
					EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p[TMP_REG_2]));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
				} else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, b2p[TMP_REG_1]));
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			goto bpf_alu32_trunc;
		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (imm == 1) {
				/* special mov32 for zext */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
				break;
			}
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
bpf_alu32_trunc:
		/* Truncate to 32-bits */
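		/*
		 * e.g. a 32-bit ADD of 0xffffffff and 1 must wrap to 0, so the
		 * upper word left behind by the 64-bit instruction is cleared
		 * here (unless the verifier already guarantees zero-extension).
		 */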
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
		break;
		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
				/* Rotate 8 bits right & insert LSB to reg */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
				/* Move result back to dst_reg */
				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
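				/*
				 * Example: dst = 0x11223344. The rotate below
				 * gives 0x22334411; the first rlwimi patches
				 * 0x44 into the top byte and the second patches
				 * 0x22 into bits 16-23, leaving 0x44332211.
				 */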
				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
				break;
			case 64:
				/*
				 * Way easier and faster(?) to store the value
				 * into stack and then use ldbrx
				 *
				 * ctx->seen will be reliable in pass2, but
				 * the instructions generated will remain the
				 * same across all passes
				 */
				PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
				EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			case 32:
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
				break;
			case 64:
				/* nop */
				break;
			}
			break;
		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(12));
				EMIT(PPC_RAW_BCTRL());
				EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_BPF_STL(src_reg, dst_reg, off);
			break;
		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
			if (imm != BPF_ADD) {
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -ENOTSUPP;
			}

			/* *(u32 *)(dst + off) += src */

			/* Get EA into TMP_REG_1 */
			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
			/* add value from src_reg into this */
			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
			/* store result back */
			EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (imm != BPF_ADD) {
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -ENOTSUPP;
			}
			/* *(u64 *)(dst + off) += src */
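			/*
			 * Same ldarx/stdcx. retry loop as the 32-bit case above:
			 * if the reservation is lost before the store succeeds,
			 * the conditional branch below goes back to tmp_idx and
			 * the load-add-store is retried.
			 */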
			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
			tmp_idx = ctx->idx * 4;
			EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
			EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
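			/*
			 * Rough shape of the guard emitted below: compute the EA
			 * into TMP_REG_1, load PAGE_OFFSET (or 0x8000000000000000
			 * on BOOK3E) into TMP_REG_2, and if the EA is not a kernel
			 * address, set dst_reg to 0 and branch past the load.
			 */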
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], src_reg, off));
				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
					PPC_LI64(b2p[TMP_REG_2], 0x8000000000000000ul);
				else
					PPC_LI64(b2p[TMP_REG_2], PAGE_OFFSET);
				EMIT(PPC_RAW_CMPLD(b2p[TMP_REG_1], b2p[TMP_REG_2]));
				PPC_BCC(COND_GT, (ctx->idx + 4) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * Check if 'off' is word aligned because PPC_BPF_LL()
				 * (BPF_DW case) generates two instructions if 'off' is not
				 * word-aligned and one instruction otherwise.
				 */
				if (BPF_SIZE(code) == BPF_DW && (off & 3))
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
				PPC_BPF_LL(dst_reg, src_reg, off);
				break;
			}

			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;

			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
							    4, dst_reg);
				if (ret)
					return ret;
			}
			break;
		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				    (((u64)(u32) insn[i+1].imm) << 32);
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			PPC_LI64(dst_reg, imm64);
			break;
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;
		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], false,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (func_addr_fixed)
				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
			else
				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
			/* move return value from r3 to BPF_REG_0 */
			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
			break;
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
			case BPF_JMP32 | BPF_JSET | BPF_X:
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg));
				} else {
					int tmp_reg = b2p[TMP_REG_1];

					EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0, 31));
				}
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
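				/*
				 * e.g. imm = -1 sign-extends to 0xffffffffffffffff,
				 * which cannot be encoded in cmpldi's 16-bit field,
				 * so negative values take the PPC_LI32 + register
				 * compare path below.
				 */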
				if (imm >= 0 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLW(dst_reg, b2p[TMP_REG_1]));
					else
						EMIT(PPC_RAW_CMPLD(dst_reg, b2p[TMP_REG_1]));
				}
				break;
			}
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
				} else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					if (is_jmp32)
						EMIT(PPC_RAW_CMPW(dst_reg, b2p[TMP_REG_1]));
					else
						EMIT(PPC_RAW_CMPD(dst_reg, b2p[TMP_REG_1]));
				}
				break;
			}
			case BPF_JMP | BPF_JSET | BPF_K:
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
				else {
					int tmp_reg = b2p[TMP_REG_1];

					PPC_LI32(tmp_reg, imm);
					if (BPF_CLASS(code) == BPF_JMP) {
						EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg, tmp_reg));
					} else {
						EMIT(PPC_RAW_AND(tmp_reg, dst_reg, tmp_reg));
						EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0, 31));
					}
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					code, i);
			return -ENOTSUPP;
		}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;