// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit64.h"
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
    /*
     * We only need a stack frame if:
     * - we call other functions (kernel helpers), or
     * - the bpf program uses its stack area
     * The latter condition is deduced from the usage of BPF_REG_FP
     */
    return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]);
}
/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *              [       prev sp         ] <-------------
 *              [         ...           ]               |
 * sp (r1) ---> [    stack pointer      ] --------------
 *              [   nv gpr save area    ] 5*8
 *              [    tail_call_cnt      ] 8
 *              [    local_tmp_var      ] 16
 *              [   unused red zone     ] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
    if (bpf_has_stack_frame(ctx))
        return STACK_FRAME_MIN_SIZE + ctx->stack_size;
    else
        return -(BPF_PPC_STACK_SAVE + 24);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
    return bpf_jit_stack_local(ctx) + 16;
}

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
    if (reg >= BPF_PPC_NVR_MIN && reg < 32)
        return (bpf_has_stack_frame(ctx) ?
            (BPF_PPC_STACKFRAME + ctx->stack_size) : 0) - (8 * (32 - reg));

    pr_err("BPF JIT is asking about unknown registers");
    BUG();
}
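
/*
 * For the helpers above (following from the layout comment): with no
 * dedicated stack frame, the 16-byte local_tmp_var sits at
 * r1 - (BPF_PPC_STACK_SAVE + 24) and the tail call count at
 * r1 - (BPF_PPC_STACK_SAVE + 8), both within the redzone. With a stack
 * frame, a non-volatile GPR 'reg' is saved at
 * (BPF_PPC_STACKFRAME + ctx->stack_size) - 8 * (32 - reg), i.e. r31 gets
 * the highest slot just below the frame top.
 */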
void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
    int i;

    if (__is_defined(PPC64_ELF_ABI_v2))
        EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));

    /*
     * Initialize tail_call_cnt if we do tail calls.
     * Otherwise, put in NOPs so that it can be skipped when we are
     * invoked through a tail call.
     */
    if (ctx->seen & SEEN_TAILCALL) {
        EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
        /* this goes in the redzone */
        EMIT(PPC_RAW_STD(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8)));
    } else {
        EMIT(PPC_RAW_NOP());
        EMIT(PPC_RAW_NOP());
    }

    if (bpf_has_stack_frame(ctx)) {
        /*
         * We need a stack frame, but we don't necessarily need to
         * save/restore LR unless we call other functions
         */
        if (ctx->seen & SEEN_FUNC) {
            EMIT(PPC_RAW_MFLR(_R0));
            EMIT(PPC_RAW_STD(0, 1, PPC_LR_STKOFF));
        }

        EMIT(PPC_RAW_STDU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
    }

    /*
     * Back up non-volatile regs -- BPF registers 6-10
     * If we haven't created our own stack frame, we save these
     * in the protected zone below the previous stack frame
     */
    for (i = BPF_REG_6; i <= BPF_REG_10; i++)
        if (bpf_is_seen_register(ctx, b2p[i]))
            EMIT(PPC_RAW_STD(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i])));

    /* Setup frame pointer to point to the bpf stack area */
    if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
        EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
                STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
    int i;

    /* Restore NVRs */
    for (i = BPF_REG_6; i <= BPF_REG_10; i++)
        if (bpf_is_seen_register(ctx, b2p[i]))
            EMIT(PPC_RAW_LD(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i])));

    /* Tear down our stack frame */
    if (bpf_has_stack_frame(ctx)) {
        EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
        if (ctx->seen & SEEN_FUNC) {
            EMIT(PPC_RAW_LD(0, 1, PPC_LR_STKOFF));
            EMIT(PPC_RAW_MTLR(0));
        }
    }
}
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
    bpf_jit_emit_common_epilogue(image, ctx);

    /* Move result to r3 */
    EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));

    EMIT(PPC_RAW_BLR());
}
static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
{
    unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
    long reladdr;

    if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
        return -EINVAL;

    reladdr = func_addr - kernel_toc_addr();
    if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
        pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
        return -ERANGE;
    }

    EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
    EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
    EMIT(PPC_RAW_MTCTR(_R12));
    EMIT(PPC_RAW_BCTRL());

    return 0;
}
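
/*
 * The sequence above is a standard TOC-relative call: reladdr is split into
 * a high-adjusted and a low 16-bit half (PPC_HA()/PPC_LO()), combined with
 * the kernel TOC pointer in r2 to rebuild the helper's entry point in r12,
 * which is then called through CTR. This only works while the helper lies
 * within +/- 2GB of the kernel TOC, hence the range check above.
 */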
int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
    unsigned int i, ctx_idx = ctx->idx;

    if (WARN_ON_ONCE(func && is_module_text_address(func)))
        return -EINVAL;

    /* skip past descriptor if elf v1 */
    func += FUNCTION_DESCR_SIZE;

    /* Load function address into r12 */
    PPC_LI64(12, func);

    /* For bpf-to-bpf function calls, the callee's address is unknown
     * until the last extra pass. As seen above, we use PPC_LI64() to
     * load the callee's address, but PPC_LI64() may emit fewer
     * instructions depending on the value being loaded.
     *
     * Since we don't want the number of instructions emitted to change,
     * we pad the optimized PPC_LI64() call with NOPs to guarantee that
     * we always have a five-instruction sequence, which is the maximum
     * that PPC_LI64() can emit.
     */
    for (i = ctx->idx - ctx_idx; i < 5; i++)
        EMIT(PPC_RAW_NOP());

    EMIT(PPC_RAW_MTCTR(12));
    EMIT(PPC_RAW_BCTRL());

    return 0;
}
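
/*
 * A full 64-bit immediate load needs at most five instructions on ppc64
 * (typically lis/ori/sldi/oris/ori), so padding to exactly five slots lets
 * a later pass patch in the final callee address without shifting any of
 * the code that follows.
 */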
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
    /*
     * By now, the eBPF program has already setup parameters in r3, r4 and r5
     * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
     * r4/BPF_REG_2 - pointer to bpf_array
     * r5/BPF_REG_3 - index in bpf_array
     */
    int b2p_bpf_array = b2p[BPF_REG_2];
    int b2p_index = b2p[BPF_REG_3];
    int bpf_tailcall_prologue_size = 8;

    if (__is_defined(PPC64_ELF_ABI_v2))
        bpf_tailcall_prologue_size += 4; /* skip past the toc load */

    /*
     * if (index >= array->map.max_entries)
     *   goto out;
     */
    EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
    EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
    EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
    PPC_BCC_SHORT(COND_GE, out);

    /*
     * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
     *   goto out;
     */
    EMIT(PPC_RAW_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)));
    EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
    PPC_BCC_SHORT(COND_GE, out);

    /*
     * tail_call_cnt++;
     */
    EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
    EMIT(PPC_RAW_STD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx)));

    /* prog = array->ptrs[index]; */
    EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
    EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
    EMIT(PPC_RAW_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs)));

    /*
     * if (prog == NULL)
     *   goto out;
     */
    EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
    PPC_BCC_SHORT(COND_EQ, out);

    /* goto *(prog->bpf_func + prologue_size); */
    EMIT(PPC_RAW_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func)));
    EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
            FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
    EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));

    /* tear down stack, restore NVRs, ... */
    bpf_jit_emit_common_epilogue(image, ctx);

    EMIT(PPC_RAW_BCTR());

    /* out: */
    return 0;
}
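
/*
 * 'out' is the JITed address of the BPF instruction following the tail call
 * (the caller passes addrs[i + 1]), so each failed check above falls back to
 * simply continuing with the rest of the program.
 */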
/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
"        .global bpf_stf_barrier        ;"
"    bpf_stf_barrier:                   ;"
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
               u32 *addrs, int pass)
{
    enum stf_barrier_type stf_barrier = stf_barrier_type_get();
    const struct bpf_insn *insn = fp->insnsi;
    int flen = fp->len;
    int i, ret;

    /* Start of epilogue code - will only be valid 2nd pass onwards */
    u32 exit_addr = addrs[flen];
    for (i = 0; i < flen; i++) {
        u32 code = insn[i].code;
        u32 dst_reg = b2p[insn[i].dst_reg];
        u32 src_reg = b2p[insn[i].src_reg];
        u32 size = BPF_SIZE(code);
        s16 off = insn[i].off;
        s32 imm = insn[i].imm;
        bool func_addr_fixed;
        u64 func_addr;
        u64 imm64;
        u32 true_cond;
        u32 tmp_idx;
        int j;
        /*
         * addrs[] maps a BPF bytecode address into a real offset from
         * the start of the body code.
         */
        addrs[i] = ctx->idx * 4;

        /*
         * As an optimization, we note down which non-volatile registers
         * are used so that we can only save/restore those in our
         * prologue and epilogue. We do this here regardless of whether
         * the actual BPF instruction uses src/dst registers or not
         * (for instance, BPF_CALL does not use them). The expectation
         * is that those instructions will have src_reg/dst_reg set to
         * 0. Even otherwise, we just lose some prologue/epilogue
         * optimization but everything else should work without
         * any issues.
         */
        if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
            bpf_set_seen_register(ctx, dst_reg);
        if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
            bpf_set_seen_register(ctx, src_reg);

        switch (code) {
        /*
         * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
         */
        case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
        case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
            EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
        case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
            EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
        case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
            if (!imm) {
                goto bpf_alu32_trunc;
            } else if (imm >= -32768 && imm < 32768) {
                EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
            } else {
                PPC_LI32(b2p[TMP_REG_1], imm);
                EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
            }
            goto bpf_alu32_trunc;
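        /*
         * Note: addi takes a 16-bit signed immediate (-32768..32767), hence
         * the range checks here and in the cases below; the SUB case reuses
         * addi with a negated immediate, and anything larger is first
         * materialized in TMP_REG_1 via PPC_LI32().
         */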
        case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
        case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
            if (!imm) {
                goto bpf_alu32_trunc;
            } else if (imm > -32768 && imm <= 32768) {
                EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
            } else {
                PPC_LI32(b2p[TMP_REG_1], imm);
                EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
        case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
            if (BPF_CLASS(code) == BPF_ALU)
                EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
            else
                EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
        case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
            if (imm >= -32768 && imm < 32768)
                EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
            else {
                PPC_LI32(b2p[TMP_REG_1], imm);
                if (BPF_CLASS(code) == BPF_ALU)
                    EMIT(PPC_RAW_MULW(dst_reg, dst_reg, b2p[TMP_REG_1]));
                else
                    EMIT(PPC_RAW_MULD(dst_reg, dst_reg, b2p[TMP_REG_1]));
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
        case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
            if (BPF_OP(code) == BPF_MOD) {
                EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
                EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg, b2p[TMP_REG_1]));
                EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
            } else
                EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
            goto bpf_alu32_trunc;
        case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
        case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
            if (BPF_OP(code) == BPF_MOD) {
                EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
                EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg, b2p[TMP_REG_1]));
                EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
            } else
                EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
            break;
        case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
        case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
        case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
        case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
            if (imm == 0)
                return -EINVAL;
            if (imm == 1) {
                if (BPF_OP(code) == BPF_DIV) {
                    goto bpf_alu32_trunc;
                } else {
                    EMIT(PPC_RAW_LI(dst_reg, 0));
                    break;
                }
            }

            PPC_LI32(b2p[TMP_REG_1], imm);
            switch (BPF_CLASS(code)) {
            case BPF_ALU:
                if (BPF_OP(code) == BPF_MOD) {
                    EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2], dst_reg, b2p[TMP_REG_1]));
                    EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p[TMP_REG_2]));
                    EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
                } else
                    EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, b2p[TMP_REG_1]));
                break;
            case BPF_ALU64:
                if (BPF_OP(code) == BPF_MOD) {
                    EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2], dst_reg, b2p[TMP_REG_1]));
                    EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p[TMP_REG_2]));
                    EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
                } else
                    EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, b2p[TMP_REG_1]));
                break;
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
        case BPF_ALU64 | BPF_NEG: /* dst = -dst */
            EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
            goto bpf_alu32_trunc;
        /*
         * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
         */
        case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
        case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
            EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
        case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
            if (!IMM_H(imm))
                EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
            else {
                /* Sign-extended */
                PPC_LI32(b2p[TMP_REG_1], imm);
                EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
        case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
            EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
        case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
            if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
                /* Sign-extended */
                PPC_LI32(b2p[TMP_REG_1], imm);
                EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
            } else {
                if (IMM_L(imm))
                    EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
                if (IMM_H(imm))
                    EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
        case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
            EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
        case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
            if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
                /* Sign-extended */
                PPC_LI32(b2p[TMP_REG_1], imm);
                EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
            } else {
                if (IMM_L(imm))
                    EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
                if (IMM_H(imm))
                    EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
            }
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
            /* slw clears top 32 bits */
            EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
            /* skip zero extension move, but set address map. */
            if (insn_is_zext(&insn[i + 1]))
                addrs[++i] = ctx->idx * 4;
            break;
        case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
            EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
            break;
        case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
            /* with imm 0, we still need to clear top 32 bits */
            EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
            if (insn_is_zext(&insn[i + 1]))
                addrs[++i] = ctx->idx * 4;
            break;
        case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
            if (imm != 0)
                EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
            break;
        case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
            EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
            if (insn_is_zext(&insn[i + 1]))
                addrs[++i] = ctx->idx * 4;
            break;
        case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
            EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
            break;
        case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
            EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
            if (insn_is_zext(&insn[i + 1]))
                addrs[++i] = ctx->idx * 4;
            break;
        case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
            if (imm != 0)
                EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
            break;
        case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
            EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
            goto bpf_alu32_trunc;
        case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
            EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
            break;
        case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
            EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
            goto bpf_alu32_trunc;
        case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
            if (imm != 0)
                EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
            break;
        /*
         * MOV
         */
        case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
        case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
            if (imm == 1) {
                /* special mov32 for zext */
                EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
                break;
            }
            EMIT(PPC_RAW_MR(dst_reg, src_reg));
            goto bpf_alu32_trunc;
        case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
        case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
            PPC_LI32(dst_reg, imm);
            if (imm < 0)
                goto bpf_alu32_trunc;
            else if (insn_is_zext(&insn[i + 1]))
                addrs[++i] = ctx->idx * 4;
            break;

bpf_alu32_trunc:
        /* Truncate to 32-bits */
        if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
            EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
        break;
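        /*
         * rlwinm dst, dst, 0, 0, 31 keeps only the low 32 bits of the 64-bit
         * GPR, giving 32-bit BPF_ALU results their expected truncation; it is
         * skipped when the verifier already guarantees zero-extension.
         */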
        /*
         * BPF_FROM_BE/LE
         */
        case BPF_ALU | BPF_END | BPF_FROM_LE:
        case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
            if (BPF_SRC(code) == BPF_FROM_BE)
                goto emit_clear;
#else /* !__BIG_ENDIAN__ */
            if (BPF_SRC(code) == BPF_FROM_LE)
                goto emit_clear;
#endif
            switch (imm) {
            case 16:
                /* Rotate 8 bits left & mask with 0x0000ff00 */
                EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
                /* Rotate 8 bits right & insert LSB to reg */
                EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
                /* Move result back to dst_reg */
                EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
                break;
            case 32:
                /*
                 * Rotate word left by 8 bits:
                 * 2 bytes are already in their final position
                 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
                 */
                EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
                /* Rotate 24 bits and insert byte 1 */
                EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
                /* Rotate 24 bits and insert byte 3 */
                EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
                EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
                break;
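            /*
             * Example for the 32-bit swap above: with dst = 0x11223344, the
             * rotate-left-by-8 yields 0x22334411 (bytes 2 and 4 already in
             * place); the first rlwimi inserts 0x44 into the top byte and
             * the second inserts 0x22 into byte 3, giving 0x44332211.
             */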
            case 64:
                /* Store the value to stack and then use byte-reverse loads */
                EMIT(PPC_RAW_STD(dst_reg, 1, bpf_jit_stack_local(ctx)));
                EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
                if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                    EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
                } else {
                    EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1]));
                    if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
                        EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
                    EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4));
                    EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1]));
                    if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                        EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32));
                    EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2]));
                }
                break;
            }
            break;
emit_clear:
            switch (imm) {
            case 16:
                /* zero-extend 16 bits into 64 bits */
                EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
                if (insn_is_zext(&insn[i + 1]))
                    addrs[++i] = ctx->idx * 4;
                break;
            case 32:
                if (!fp->aux->verifier_zext)
                    /* zero-extend 32 bits into 64 bits */
                    EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
                break;
            case 64:
                /* nothing to do */
                break;
            }
            break;
        /*
         * BPF_ST NOSPEC (speculation barrier)
         */
        case BPF_ST | BPF_NOSPEC:
            if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
                    !security_ftr_enabled(SEC_FTR_STF_BARRIER))
                break;

            switch (stf_barrier) {
            case STF_BARRIER_EIEIO:
                EMIT(PPC_RAW_EIEIO() | 0x02000000);
                break;
            case STF_BARRIER_SYNC_ORI:
                EMIT(PPC_RAW_SYNC());
                EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
                EMIT(PPC_RAW_ORI(_R31, _R31, 0));
                break;
            case STF_BARRIER_FALLBACK:
                ctx->seen |= SEEN_FUNC;
                PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
                EMIT(PPC_RAW_MTCTR(12));
                EMIT(PPC_RAW_BCTRL());
                break;
            case STF_BARRIER_NONE:
                break;
            }
            break;
        /*
         * BPF_STX MEM (store)
         */
        case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
        case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
            if (BPF_CLASS(code) == BPF_ST) {
                EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
                src_reg = b2p[TMP_REG_1];
            }
            EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
            break;
        case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
        case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
            if (BPF_CLASS(code) == BPF_ST) {
                EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
                src_reg = b2p[TMP_REG_1];
            }
            EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
            break;
        case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
        case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
            if (BPF_CLASS(code) == BPF_ST) {
                PPC_LI32(b2p[TMP_REG_1], imm);
                src_reg = b2p[TMP_REG_1];
            }
            EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
            break;
        case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
        case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
            if (BPF_CLASS(code) == BPF_ST) {
                PPC_LI32(b2p[TMP_REG_1], imm);
                src_reg = b2p[TMP_REG_1];
            }
            if (off % 4) {
                EMIT(PPC_RAW_LI(b2p[TMP_REG_2], off));
                EMIT(PPC_RAW_STDX(src_reg, dst_reg, b2p[TMP_REG_2]));
            } else {
                EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
            }
            break;
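        /*
         * std is a DS-form instruction whose displacement must be a multiple
         * of 4, so an unaligned 'off' is loaded into a register and the
         * indexed form (stdx) is used instead.
         */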
        /*
         * BPF_STX ATOMIC (atomic ops)
         */
        case BPF_STX | BPF_ATOMIC | BPF_W:
            if (imm != BPF_ADD) {
                pr_err_ratelimited(
                    "eBPF filter atomic op code %02x (@%d) unsupported\n",
                    code, i);
                return -ENOTSUPP;
            }

            /* *(u32 *)(dst + off) += src */

            /* Get EA into TMP_REG_1 */
            EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
            tmp_idx = ctx->idx * 4;
            /* load value from memory into TMP_REG_2 */
            EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
            /* add value from src_reg into this */
            EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
            /* store result back */
            EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
            /* we're done if this succeeded */
            PPC_BCC_SHORT(COND_NE, tmp_idx);
            break;
        case BPF_STX | BPF_ATOMIC | BPF_DW:
            if (imm != BPF_ADD) {
                pr_err_ratelimited(
                    "eBPF filter atomic op code %02x (@%d) unsupported\n",
                    code, i);
                return -ENOTSUPP;
            }
            /* *(u64 *)(dst + off) += src */

            EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
            tmp_idx = ctx->idx * 4;
            EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
            EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
            EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
            PPC_BCC_SHORT(COND_NE, tmp_idx);
            break;
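        /*
         * Both atomic sequences above are the usual load-reserve /
         * store-conditional retry loop, roughly:
         *
         *    do {
         *        tmp = *addr;        // lwarx/ldarx
         *        tmp += src;
         *    } while (store-conditional fails);    // stwcx./stdcx. + bne-
         */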
        /*
         * BPF_LDX
         */
        /* dst = *(u8 *)(ul) (src + off) */
        case BPF_LDX | BPF_MEM | BPF_B:
        case BPF_LDX | BPF_PROBE_MEM | BPF_B:
        /* dst = *(u16 *)(ul) (src + off) */
        case BPF_LDX | BPF_MEM | BPF_H:
        case BPF_LDX | BPF_PROBE_MEM | BPF_H:
        /* dst = *(u32 *)(ul) (src + off) */
        case BPF_LDX | BPF_MEM | BPF_W:
        case BPF_LDX | BPF_PROBE_MEM | BPF_W:
        /* dst = *(u64 *)(ul) (src + off) */
        case BPF_LDX | BPF_MEM | BPF_DW:
        case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
            /*
             * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
             * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
             * load only if addr is kernel address (see is_kernel_addr()), otherwise
             * set dst_reg=0 and move on.
             */
            if (BPF_MODE(code) == BPF_PROBE_MEM) {
                EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], src_reg, off));
                if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
                    PPC_LI64(b2p[TMP_REG_2], 0x8000000000000000ul);
                else /* BOOK3S_64 */
                    PPC_LI64(b2p[TMP_REG_2], PAGE_OFFSET);
                EMIT(PPC_RAW_CMPLD(b2p[TMP_REG_1], b2p[TMP_REG_2]));
                PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
                EMIT(PPC_RAW_LI(dst_reg, 0));
                /*
                 * Check if 'off' is word aligned for BPF_DW, because
                 * we might generate two instructions.
                 */
                if (BPF_SIZE(code) == BPF_DW && (off & 3))
                    PPC_JMP((ctx->idx + 3) * 4);
                else
                    PPC_JMP((ctx->idx + 2) * 4);
            }
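            /*
             * Net effect of the guard above: if the address compares above
             * PAGE_OFFSET (i.e. a kernel address), the conditional branch
             * skips the 'li dst_reg, 0' and the jump so the real load below
             * is executed; otherwise dst_reg is zeroed and the load is
             * jumped over entirely.
             */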
            switch (size) {
            case BPF_B:
                EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
                break;
            case BPF_H:
                EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
                break;
            case BPF_W:
                EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
                break;
            case BPF_DW:
                if (off % 4) {
                    EMIT(PPC_RAW_LI(b2p[TMP_REG_1], off));
                    EMIT(PPC_RAW_LDX(dst_reg, src_reg, b2p[TMP_REG_1]));
                } else {
                    EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
                }
                break;
            }

            if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
                addrs[++i] = ctx->idx * 4;

            if (BPF_MODE(code) == BPF_PROBE_MEM) {
                ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
                                4, dst_reg);
                if (ret)
                    return ret;
            }
            break;
        /*
         * Doubleword load
         * 16 byte instruction that uses two 'struct bpf_insn'
         */
        case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
            imm64 = ((u64)(u32) insn[i].imm) |
                (((u64)(u32) insn[i+1].imm) << 32);
            tmp_idx = ctx->idx;
            PPC_LI64(dst_reg, imm64);
            /* padding to allow full 5 instructions for later patching */
            for (j = ctx->idx - tmp_idx; j < 5; j++)
                EMIT(PPC_RAW_NOP());
            /* Adjust for two bpf instructions */
            addrs[++i] = ctx->idx * 4;
            break;
        /*
         * Return/Exit
         */
        case BPF_JMP | BPF_EXIT:
            /*
             * If this isn't the very last instruction, branch to
             * the epilogue. If we _are_ the last instruction,
             * we'll just fall through to the epilogue.
             */
            if (i != flen - 1) {
                ret = bpf_jit_emit_exit_insn(image, ctx, b2p[TMP_REG_1], exit_addr);
                if (ret)
                    return ret;
            }
            /* else fall through to the epilogue */
            break;
        /*
         * Call kernel helper or bpf function
         */
        case BPF_JMP | BPF_CALL:
            ctx->seen |= SEEN_FUNC;

            ret = bpf_jit_get_func_addr(fp, &insn[i], false,
                            &func_addr, &func_addr_fixed);
            if (ret < 0)
                return ret;

            if (func_addr_fixed)
                ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
            else
                ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
            if (ret)
                return ret;

            /* move return value from r3 to BPF_REG_0 */
            EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
            break;
        /*
         * Jumps and branches
         */
        case BPF_JMP | BPF_JA:
            PPC_JMP(addrs[i + 1 + off]);
            break;
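        /*
         * BPF jump offsets are relative to the instruction that follows the
         * jump, hence the addrs[i + 1 + off] target used here and for the
         * conditional branches below.
         */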
        case BPF_JMP | BPF_JGT | BPF_K:
        case BPF_JMP | BPF_JGT | BPF_X:
        case BPF_JMP | BPF_JSGT | BPF_K:
        case BPF_JMP | BPF_JSGT | BPF_X:
        case BPF_JMP32 | BPF_JGT | BPF_K:
        case BPF_JMP32 | BPF_JGT | BPF_X:
        case BPF_JMP32 | BPF_JSGT | BPF_K:
        case BPF_JMP32 | BPF_JSGT | BPF_X:
            true_cond = COND_GT;
            goto cond_branch;
        case BPF_JMP | BPF_JLT | BPF_K:
        case BPF_JMP | BPF_JLT | BPF_X:
        case BPF_JMP | BPF_JSLT | BPF_K:
        case BPF_JMP | BPF_JSLT | BPF_X:
        case BPF_JMP32 | BPF_JLT | BPF_K:
        case BPF_JMP32 | BPF_JLT | BPF_X:
        case BPF_JMP32 | BPF_JSLT | BPF_K:
        case BPF_JMP32 | BPF_JSLT | BPF_X:
            true_cond = COND_LT;
            goto cond_branch;
        case BPF_JMP | BPF_JGE | BPF_K:
        case BPF_JMP | BPF_JGE | BPF_X:
        case BPF_JMP | BPF_JSGE | BPF_K:
        case BPF_JMP | BPF_JSGE | BPF_X:
        case BPF_JMP32 | BPF_JGE | BPF_K:
        case BPF_JMP32 | BPF_JGE | BPF_X:
        case BPF_JMP32 | BPF_JSGE | BPF_K:
        case BPF_JMP32 | BPF_JSGE | BPF_X:
            true_cond = COND_GE;
            goto cond_branch;
        case BPF_JMP | BPF_JLE | BPF_K:
        case BPF_JMP | BPF_JLE | BPF_X:
        case BPF_JMP | BPF_JSLE | BPF_K:
        case BPF_JMP | BPF_JSLE | BPF_X:
        case BPF_JMP32 | BPF_JLE | BPF_K:
        case BPF_JMP32 | BPF_JLE | BPF_X:
        case BPF_JMP32 | BPF_JSLE | BPF_K:
        case BPF_JMP32 | BPF_JSLE | BPF_X:
            true_cond = COND_LE;
            goto cond_branch;
        case BPF_JMP | BPF_JEQ | BPF_K:
        case BPF_JMP | BPF_JEQ | BPF_X:
        case BPF_JMP32 | BPF_JEQ | BPF_K:
        case BPF_JMP32 | BPF_JEQ | BPF_X:
            true_cond = COND_EQ;
            goto cond_branch;
        case BPF_JMP | BPF_JNE | BPF_K:
        case BPF_JMP | BPF_JNE | BPF_X:
        case BPF_JMP32 | BPF_JNE | BPF_K:
        case BPF_JMP32 | BPF_JNE | BPF_X:
            true_cond = COND_NE;
            goto cond_branch;
        case BPF_JMP | BPF_JSET | BPF_K:
        case BPF_JMP | BPF_JSET | BPF_X:
        case BPF_JMP32 | BPF_JSET | BPF_K:
        case BPF_JMP32 | BPF_JSET | BPF_X:
            true_cond = COND_NE;
            /* Fall through */

cond_branch:
            switch (code) {
            case BPF_JMP | BPF_JGT | BPF_X:
            case BPF_JMP | BPF_JLT | BPF_X:
            case BPF_JMP | BPF_JGE | BPF_X:
            case BPF_JMP | BPF_JLE | BPF_X:
            case BPF_JMP | BPF_JEQ | BPF_X:
            case BPF_JMP | BPF_JNE | BPF_X:
            case BPF_JMP32 | BPF_JGT | BPF_X:
            case BPF_JMP32 | BPF_JLT | BPF_X:
            case BPF_JMP32 | BPF_JGE | BPF_X:
            case BPF_JMP32 | BPF_JLE | BPF_X:
            case BPF_JMP32 | BPF_JEQ | BPF_X:
            case BPF_JMP32 | BPF_JNE | BPF_X:
                /* unsigned comparison */
                if (BPF_CLASS(code) == BPF_JMP32)
                    EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
                else
                    EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
                break;
            case BPF_JMP | BPF_JSGT | BPF_X:
            case BPF_JMP | BPF_JSLT | BPF_X:
            case BPF_JMP | BPF_JSGE | BPF_X:
            case BPF_JMP | BPF_JSLE | BPF_X:
            case BPF_JMP32 | BPF_JSGT | BPF_X:
            case BPF_JMP32 | BPF_JSLT | BPF_X:
            case BPF_JMP32 | BPF_JSGE | BPF_X:
            case BPF_JMP32 | BPF_JSLE | BPF_X:
                /* signed comparison */
                if (BPF_CLASS(code) == BPF_JMP32)
                    EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
                else
                    EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
                break;
            case BPF_JMP | BPF_JSET | BPF_X:
            case BPF_JMP32 | BPF_JSET | BPF_X:
                if (BPF_CLASS(code) == BPF_JMP) {
                    EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg));
                } else {
                    int tmp_reg = b2p[TMP_REG_1];

                    EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
                    EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0, 31));
                }
                break;
            case BPF_JMP | BPF_JNE | BPF_K:
            case BPF_JMP | BPF_JEQ | BPF_K:
            case BPF_JMP | BPF_JGT | BPF_K:
            case BPF_JMP | BPF_JLT | BPF_K:
            case BPF_JMP | BPF_JGE | BPF_K:
            case BPF_JMP | BPF_JLE | BPF_K:
            case BPF_JMP32 | BPF_JNE | BPF_K:
            case BPF_JMP32 | BPF_JEQ | BPF_K:
            case BPF_JMP32 | BPF_JGT | BPF_K:
            case BPF_JMP32 | BPF_JLT | BPF_K:
            case BPF_JMP32 | BPF_JGE | BPF_K:
            case BPF_JMP32 | BPF_JLE | BPF_K:
            {
                bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

                /*
                 * Need sign-extended load, so only positive
                 * values can be used as imm in cmpldi
                 */
                if (imm >= 0 && imm < 32768) {
                    if (is_jmp32)
                        EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
                    else
                        EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
                } else {
                    /* sign-extending load */
                    PPC_LI32(b2p[TMP_REG_1], imm);
                    /* ... but unsigned comparison */
                    if (is_jmp32)
                        EMIT(PPC_RAW_CMPLW(dst_reg, b2p[TMP_REG_1]));
                    else
                        EMIT(PPC_RAW_CMPLD(dst_reg, b2p[TMP_REG_1]));
                }
                break;
            }
            case BPF_JMP | BPF_JSGT | BPF_K:
            case BPF_JMP | BPF_JSLT | BPF_K:
            case BPF_JMP | BPF_JSGE | BPF_K:
            case BPF_JMP | BPF_JSLE | BPF_K:
            case BPF_JMP32 | BPF_JSGT | BPF_K:
            case BPF_JMP32 | BPF_JSLT | BPF_K:
            case BPF_JMP32 | BPF_JSGE | BPF_K:
            case BPF_JMP32 | BPF_JSLE | BPF_K:
            {
                bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

                /*
                 * signed comparison, so any 16-bit value
                 * can be used in cmpdi
                 */
                if (imm >= -32768 && imm < 32768) {
                    if (is_jmp32)
                        EMIT(PPC_RAW_CMPWI(dst_reg, imm));
                    else
                        EMIT(PPC_RAW_CMPDI(dst_reg, imm));
                } else {
                    PPC_LI32(b2p[TMP_REG_1], imm);
                    if (is_jmp32)
                        EMIT(PPC_RAW_CMPW(dst_reg, b2p[TMP_REG_1]));
                    else
                        EMIT(PPC_RAW_CMPD(dst_reg, b2p[TMP_REG_1]));
                }
                break;
            }
            case BPF_JMP | BPF_JSET | BPF_K:
            case BPF_JMP32 | BPF_JSET | BPF_K:
                /* andi does not sign-extend the immediate */
                if (imm >= 0 && imm < 32768)
                    /* PPC_ANDI is _only/always_ dot-form */
                    EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
                else {
                    int tmp_reg = b2p[TMP_REG_1];

                    PPC_LI32(tmp_reg, imm);
                    if (BPF_CLASS(code) == BPF_JMP) {
                        EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg, tmp_reg));
                    } else {
                        EMIT(PPC_RAW_AND(tmp_reg, dst_reg, tmp_reg));
                        EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0, 31));
                    }
                }
                break;
            }
            PPC_BCC(true_cond, addrs[i + 1 + off]);
            break;
        /*
         * Tail call
         */
        case BPF_JMP | BPF_TAIL_CALL:
            ctx->seen |= SEEN_TAILCALL;
            ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
            if (ret < 0)
                return ret;
            break;

        default:
            /*
             * The filter contains something cruel & unusual.
             * We don't handle it, but also there shouldn't be
             * anything missing from our list.
             */
            pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
                       code, i);
            return -ENOTSUPP;
        }
    }

    /* Set end-of-body-code address for exit. */
    addrs[i] = ctx->idx * 4;

    return 0;
}