1 // SPDX-License-Identifier: GPL-2.0-only
3 * bpf_jit_comp64.c: eBPF JIT compiler
5 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
8 * Based on the powerpc classic BPF JIT compiler by Matt Evans
10 #include <linux/moduleloader.h>
11 #include <asm/cacheflush.h>
12 #include <asm/asm-compat.h>
13 #include <linux/netdevice.h>
14 #include <linux/filter.h>
15 #include <linux/if_vlan.h>
16 #include <asm/kprobes.h>
17 #include <linux/bpf.h>
18 #include <asm/security_features.h>
24 * Ensure the top half (up to local_tmp_var) stays consistent
25 * with our redzone usage.
27 * [ prev sp ] <-------------
28 * [ nv gpr save area ] 5*8 |
29 * [ tail_call_cnt ] 8 |
30 * [ local_tmp_var ] 16 |
31 * fp (r31) --> [ ebpf stack space ] up to 512 |
32 * [ frame header ] 32/112 |
33 * sp (r1) ---> [ stack pointer ] --------------
36 /* for gpr non-volatile registers BPF_REG_6 to 10 */
37 #define BPF_PPC_STACK_SAVE (5*8)
38 /* for bpf JIT code internal usage */
39 #define BPF_PPC_STACK_LOCALS 24
40 /* stack frame excluding BPF stack, ensure this is quadword aligned */
41 #define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
42 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
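/*
 * For illustration (assuming the ELFv2 32-byte frame header shown in the
 * layout above): BPF_PPC_STACKFRAME = 32 + 24 + 40 = 96 bytes, which keeps
 * the quadword alignment required above. The program's own BPF stack
 * (ctx->stack_size, up to 512 bytes) is added on top of this when the
 * frame is created in the prologue.
 */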
44 /* BPF register usage */
45 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
46 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
48 /* BPF to ppc register mappings */
49 void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
51 /* function return value */
52 ctx->b2p[BPF_REG_0] = _R8;
53 /* function arguments */
54 ctx->b2p[BPF_REG_1] = _R3;
55 ctx->b2p[BPF_REG_2] = _R4;
56 ctx->b2p[BPF_REG_3] = _R5;
57 ctx->b2p[BPF_REG_4] = _R6;
58 ctx->b2p[BPF_REG_5] = _R7;
59 /* non volatile registers */
60 ctx->b2p[BPF_REG_6] = _R27;
61 ctx->b2p[BPF_REG_7] = _R28;
62 ctx->b2p[BPF_REG_8] = _R29;
63 ctx->b2p[BPF_REG_9] = _R30;
64 /* frame pointer aka BPF_REG_10 */
65 ctx->b2p[BPF_REG_FP] = _R31;
66 /* eBPF jit internal registers */
67 ctx->b2p[BPF_REG_AX] = _R12;
68 ctx->b2p[TMP_REG_1] = _R9;
69 ctx->b2p[TMP_REG_2] = _R10;
72 /* PPC NVR range -- update this if we ever use NVRs below r27 */
73 #define BPF_PPC_NVR_MIN _R27
75 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
78 * We only need a stack frame if:
79 * - we call other functions (kernel helpers), or
80 * - the bpf program uses its stack area
81 * The latter condition is deduced from the usage of BPF_REG_FP
83 return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
87 * When not setting up our own stackframe, the redzone usage is:
89 * [ prev sp ] <-------------
91 * sp (r1) ---> [ stack pointer ] --------------
92 * [ nv gpr save area ] 5*8
93 * [ tail_call_cnt ] 8
94 * [ local_tmp_var ] 16
95 * [ unused red zone ] 224 bytes protected
97 static int bpf_jit_stack_local(struct codegen_context *ctx)
99 if (bpf_has_stack_frame(ctx))
100 return STACK_FRAME_MIN_SIZE + ctx->stack_size;
102 return -(BPF_PPC_STACK_SAVE + 24);
105 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
107 return bpf_jit_stack_local(ctx) + 16;
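/*
 * Rough illustration of the offsets these helpers produce: without a
 * stack frame the scratch slots sit in the caller's red zone, with
 * local_tmp_var at r1 - 64 and tail_call_cnt at r1 - 48, just below the
 * 40-byte NVR save area. With a stack frame they sit above the frame
 * header and the BPF stack area, at STACK_FRAME_MIN_SIZE + ctx->stack_size
 * and STACK_FRAME_MIN_SIZE + ctx->stack_size + 16 respectively.
 */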
110 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
112 if (reg >= BPF_PPC_NVR_MIN && reg < 32)
113 return (bpf_has_stack_frame(ctx) ?
114 (BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
117 pr_err("BPF JIT is asking about unknown registers");
121 void bpf_jit_realloc_regs(struct codegen_context *ctx)
125 void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
129 #ifndef CONFIG_PPC_KERNEL_PCREL
130 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
131 EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
135 * Initialize tail_call_cnt if we do tail calls.
136 * Otherwise, put in NOPs so that it can be skipped when we are
137 * invoked through a tail call.
139 if (ctx->seen & SEEN_TAILCALL) {
140 EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
141 /* this goes in the redzone */
142 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
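/*
 * Note that -(BPF_PPC_STACK_SAVE + 8) = -48 is the red-zone slot that
 * bpf_jit_stack_tailcallcnt() resolves to before the frame exists; once
 * the stdu below shifts r1, the very same slot is reached through
 * STACK_FRAME_MIN_SIZE + ctx->stack_size + 16, so the count survives
 * frame creation.
 */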
148 if (bpf_has_stack_frame(ctx)) {
150 * We need a stack frame, but we don't necessarily need to
151 * save/restore LR unless we call other functions
153 if (ctx->seen & SEEN_FUNC) {
154 EMIT(PPC_RAW_MFLR(_R0));
155 EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
158 EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
162 * Back up non-volatile regs -- BPF registers 6-10
163 * If we haven't created our own stack frame, we save these
164 * in the protected zone below the previous stack frame
166 for (i = BPF_REG_6; i <= BPF_REG_10; i++)
167 if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
168 EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
170 /* Setup frame pointer to point to the bpf stack area */
171 if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
172 EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
173 STACK_FRAME_MIN_SIZE + ctx->stack_size));
176 static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
181 for (i = BPF_REG_6; i <= BPF_REG_10; i++)
182 if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
183 EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
185 /* Tear down our stack frame */
186 if (bpf_has_stack_frame(ctx)) {
187 EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
188 if (ctx->seen & SEEN_FUNC) {
189 EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
190 EMIT(PPC_RAW_MTLR(_R0));
195 void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
197 bpf_jit_emit_common_epilogue(image, ctx);
199 /* Move result to r3 */
200 EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
206 bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
208 unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
211 if (WARN_ON_ONCE(!kernel_text_address(func_addr)))
214 #ifdef CONFIG_PPC_KERNEL_PCREL
215 reladdr = func_addr - local_paca->kernelbase;
217 if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
218 EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
219 /* Align for subsequent prefix instruction */
220 if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
222 /* paddi r12,r12,addr */
223 EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
224 EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
226 unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
227 bool alignment_needed = !IS_ALIGNED(pc, 8);
229 reladdr = func_addr - (alignment_needed ? pc + 4 : pc);
231 if (reladdr < (long)SZ_8G && reladdr >= -(long)SZ_8G) {
232 if (alignment_needed)
235 EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
236 EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
238 /* We can clobber r12 */
239 PPC_LI64(_R12, func);
242 EMIT(PPC_RAW_MTCTR(_R12));
243 EMIT(PPC_RAW_BCTRL());
245 if (core_kernel_text(func_addr)) {
246 reladdr = func_addr - kernel_toc_addr();
247 if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
248 pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
252 EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
253 EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
254 EMIT(PPC_RAW_MTCTR(_R12));
255 EMIT(PPC_RAW_BCTRL());
257 if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1)) {
258 /* func points to the function descriptor */
259 PPC_LI64(bpf_to_ppc(TMP_REG_2), func);
260 /* Load actual entry point from function descriptor */
261 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_2), 0));
262 /* ... and move it to CTR */
263 EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
265 * Load TOC from function descriptor at offset 8.
266 * We can clobber r2 since we get called through a
267 * function pointer (so caller will save/restore r2).
269 EMIT(PPC_RAW_LD(_R2, bpf_to_ppc(TMP_REG_2), 8));
271 PPC_LI64(_R12, func);
272 EMIT(PPC_RAW_MTCTR(_R12));
274 EMIT(PPC_RAW_BCTRL());
276 * Load r2 with kernel TOC as kernel TOC is used if function address falls
277 * within core kernel text.
279 EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
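/*
 * To summarize the strategies above for calling a helper at a fixed
 * address: a PCREL kernel materializes the target in r12 with a prefixed
 * paddi (pc- or kernelbase-relative); otherwise, addresses inside core
 * kernel text are formed TOC-relative via addis/addi, and anything else
 * goes through a full PPC_LI64 (dereferencing the function descriptor
 * for the entry point and TOC on ELF ABI v1). The call itself is always
 * mtctr + bctrl, with r2 reloaded from the PACA where required.
 */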
286 int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
288 unsigned int i, ctx_idx = ctx->idx;
290 if (WARN_ON_ONCE(func && is_module_text_address(func)))
293 /* skip past descriptor if elf v1 */
294 func += FUNCTION_DESCR_SIZE;
296 /* Load function address into r12 */
297 PPC_LI64(_R12, func);
299 /* For bpf-to-bpf function calls, the callee's address is unknown
300 * until the last extra pass. As seen above, we use PPC_LI64() to
301 * load the callee's address, but this may optimize the number of
302 * instructions required based on the nature of the address.
304 * Since we don't want the number of instructions emitted to increase,
305 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
306 * we always have a five-instruction sequence, which is the maximum
307 * that PPC_LI64() can emit.
310 for (i = ctx->idx - ctx_idx; i < 5; i++)
313 EMIT(PPC_RAW_MTCTR(_R12));
314 EMIT(PPC_RAW_BCTRL());
319 static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
322 * By now, the eBPF program has already setup parameters in r3, r4 and r5
323 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
324 * r4/BPF_REG_2 - pointer to bpf_array
325 * r5/BPF_REG_3 - index in bpf_array
327 int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
328 int b2p_index = bpf_to_ppc(BPF_REG_3);
329 int bpf_tailcall_prologue_size = 8;
331 if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
332 bpf_tailcall_prologue_size += 4; /* skip past the toc load */
335 * if (index >= array->map.max_entries)
338 EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
339 EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
340 EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
341 PPC_BCC_SHORT(COND_GE, out);
344 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
347 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
348 EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
349 PPC_BCC_SHORT(COND_GE, out);
354 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
355 EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
357 /* prog = array->ptrs[index]; */
358 EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
359 EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
360 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
366 EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
367 PPC_BCC_SHORT(COND_EQ, out);
369 /* goto *(prog->bpf_func + prologue_size); */
370 EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
371 EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
372 FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
373 EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
375 /* tear down stack, restore NVRs, ... */
376 bpf_jit_emit_common_epilogue(image, ctx);
378 EMIT(PPC_RAW_BCTR());
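/*
 * Pseudo-code for the sequence emitted above (a sketch, mirroring the
 * comments in this function):
 *
 *	if (index >= array->map.max_entries)
 *		goto out;
 *	if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
 *		goto out;
 *	tail_call_cnt++;
 *	prog = array->ptrs[index];
 *	if (prog == NULL)
 *		goto out;
 *	goto *(prog->bpf_func + prologue_size);
 */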
385 * We spill into the redzone always, even if the bpf program has its own stackframe.
386 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
388 void bpf_stf_barrier(void);
391 " .global bpf_stf_barrier ;"
392 " bpf_stf_barrier: ;"
406 /* Assemble the body code between the prologue & epilogue */
407 int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
408 u32 *addrs, int pass, bool extra_pass)
410 enum stf_barrier_type stf_barrier = stf_barrier_type_get();
411 const struct bpf_insn *insn = fp->insnsi;
415 /* Start of epilogue code - will only be valid 2nd pass onwards */
416 u32 exit_addr = addrs[flen];
418 for (i = 0; i < flen; i++) {
419 u32 code = insn[i].code;
420 u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
421 u32 src_reg = bpf_to_ppc(insn[i].src_reg);
422 u32 size = BPF_SIZE(code);
423 u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
424 u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
425 u32 save_reg, ret_reg;
426 s16 off = insn[i].off;
427 s32 imm = insn[i].imm;
428 bool func_addr_fixed;
436 * addrs[] maps a BPF bytecode address into a real offset from
437 * the start of the body code.
439 addrs[i] = ctx->idx * 4;
442 * As an optimization, we note down which non-volatile registers
443 * are used so that we can only save/restore those in our
444 * prologue and epilogue. We do this here regardless of whether
445 * the actual BPF instruction uses src/dst registers or not
446 * (for instance, BPF_CALL does not use them). The expectation
447 * is that those instructions will have src_reg/dst_reg set to
448 * 0. Even otherwise, we just lose some prologue/epilogue
449 optimization but everything else should work without any issues.
452 if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
453 bpf_set_seen_register(ctx, dst_reg);
454 if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
455 bpf_set_seen_register(ctx, src_reg);
459 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
461 case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
462 case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
463 EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
464 goto bpf_alu32_trunc;
465 case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
466 case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
467 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
468 goto bpf_alu32_trunc;
469 case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
470 case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
472 goto bpf_alu32_trunc;
473 } else if (imm >= -32768 && imm < 32768) {
474 EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
476 PPC_LI32(tmp1_reg, imm);
477 EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
479 goto bpf_alu32_trunc;
480 case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
481 case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
483 goto bpf_alu32_trunc;
484 } else if (imm > -32768 && imm <= 32768) {
485 EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
487 PPC_LI32(tmp1_reg, imm);
488 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
490 goto bpf_alu32_trunc;
491 case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
492 case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
493 if (BPF_CLASS(code) == BPF_ALU)
494 EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
496 EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
497 goto bpf_alu32_trunc;
498 case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
499 case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
500 if (imm >= -32768 && imm < 32768)
501 EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
503 PPC_LI32(tmp1_reg, imm);
504 if (BPF_CLASS(code) == BPF_ALU)
505 EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
507 EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
509 goto bpf_alu32_trunc;
510 case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
511 case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
512 if (BPF_OP(code) == BPF_MOD) {
513 EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
514 EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
515 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
517 EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
518 goto bpf_alu32_trunc;
519 case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
520 case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
521 if (BPF_OP(code) == BPF_MOD) {
522 EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
523 EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
524 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
526 EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
528 case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
529 case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
530 case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
531 case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
535 if (BPF_OP(code) == BPF_DIV) {
536 goto bpf_alu32_trunc;
538 EMIT(PPC_RAW_LI(dst_reg, 0));
543 PPC_LI32(tmp1_reg, imm);
544 switch (BPF_CLASS(code)) {
546 if (BPF_OP(code) == BPF_MOD) {
547 EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
548 EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
549 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
551 EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
554 if (BPF_OP(code) == BPF_MOD) {
555 EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
556 EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
557 EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
559 EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
562 goto bpf_alu32_trunc;
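/*
 * Note on the div/mod handling in this file: BPF_MOD is open-coded as
 * dst - (dst / src) * src using divwu/divdu, mullw/mulld and subf, since
 * the dedicated modulo instructions (moduw/modud) only appeared in
 * ISA v3.0 and are not assumed here.
 */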
563 case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
564 case BPF_ALU64 | BPF_NEG: /* dst = -dst */
565 EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
566 goto bpf_alu32_trunc;
569 * Logical operations: AND/OR/XOR/LSH/RSH/ARSH
571 case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
572 case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
573 EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
574 goto bpf_alu32_trunc;
575 case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
576 case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
578 EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
581 PPC_LI32(tmp1_reg, imm);
582 EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
584 goto bpf_alu32_trunc;
585 case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
586 case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
587 EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
588 goto bpf_alu32_trunc;
589 case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
590 case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
591 if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
593 PPC_LI32(tmp1_reg, imm);
594 EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
597 EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
599 EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
601 goto bpf_alu32_trunc;
602 case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
603 case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
604 EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
605 goto bpf_alu32_trunc;
606 case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
607 case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
608 if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
610 PPC_LI32(tmp1_reg, imm);
611 EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
614 EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
616 EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
618 goto bpf_alu32_trunc;
619 case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
620 /* slw clears top 32 bits */
621 EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
622 /* skip zero extension move, but set address map. */
623 if (insn_is_zext(&insn[i + 1]))
624 addrs[++i] = ctx->idx * 4;
626 case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
627 EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
629 case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
630 /* with imm 0, we still need to clear top 32 bits */
631 EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
632 if (insn_is_zext(&insn[i + 1]))
633 addrs[++i] = ctx->idx * 4;
635 case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
637 EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
639 case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
640 EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
641 if (insn_is_zext(&insn[i + 1]))
642 addrs[++i] = ctx->idx * 4;
644 case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
645 EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
647 case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
648 EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
649 if (insn_is_zext(&insn[i + 1]))
650 addrs[++i] = ctx->idx * 4;
652 case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
654 EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
656 case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
657 EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
658 goto bpf_alu32_trunc;
659 case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
660 EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
662 case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
663 EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
664 goto bpf_alu32_trunc;
665 case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
667 EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
673 case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
674 case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
676 /* special mov32 for zext */
677 EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
680 EMIT(PPC_RAW_MR(dst_reg, src_reg));
681 goto bpf_alu32_trunc;
682 case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
683 case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
684 PPC_LI32(dst_reg, imm);
686 goto bpf_alu32_trunc;
687 else if (insn_is_zext(&insn[i + 1]))
688 addrs[++i] = ctx->idx * 4;
692 /* Truncate to 32-bits */
693 if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
694 EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
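/*
 * rlwinm dst,dst,0,0,31 keeps the low 32 bits and clears the upper 32,
 * which is how the eBPF requirement that 32-bit ALU results be
 * zero-extended is met; it is skipped when the verifier has already
 * inserted an explicit zero-extension (verifier_zext).
 */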
700 case BPF_ALU | BPF_END | BPF_FROM_LE:
701 case BPF_ALU | BPF_END | BPF_FROM_BE:
702 #ifdef __BIG_ENDIAN__
703 if (BPF_SRC(code) == BPF_FROM_BE)
705 #else /* !__BIG_ENDIAN__ */
706 if (BPF_SRC(code) == BPF_FROM_LE)
711 /* Rotate 8 bits left & mask with 0x0000ff00 */
712 EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
713 /* Rotate 8 bits right & insert LSB to reg */
714 EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
715 /* Move result back to dst_reg */
716 EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
720 * Rotate word left by 8 bits:
721 * 2 bytes are already in their final position
722 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
724 EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
725 /* Rotate 24 bits and insert byte 1 */
726 EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
727 /* Rotate 24 bits and insert byte 3 */
728 EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
729 EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
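/*
 * Worked example for the 32-bit swap above, with dst = 0x01020304:
 *	rlwinm tmp,dst,8,0,31	-> tmp = 0x02030401 (bytes 2 and 4 final)
 *	rlwimi tmp,dst,24,0,7	-> tmp = 0x04030401 (byte 1 placed)
 *	rlwimi tmp,dst,24,16,23	-> tmp = 0x04030201 (byte 3 placed)
 */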
732 /* Store the value to stack and then use byte-reverse loads */
733 EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
734 EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
735 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
736 EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
738 EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
739 if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
740 EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
741 EMIT(PPC_RAW_LI(tmp2_reg, 4));
742 EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
743 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
744 EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
745 EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
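/*
 * For the 64-bit swap the value is bounced through the local_tmp_var
 * stack slot and reloaded with the byte-reversed load ldbrx (a single
 * instruction on ISA v2.06 and later), or with two lwbrx loads that are
 * shifted and OR-ed together on older CPUs.
 */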
754 /* zero-extend 16 bits into 64 bits */
755 EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
756 if (insn_is_zext(&insn[i + 1]))
757 addrs[++i] = ctx->idx * 4;
760 if (!fp->aux->verifier_zext)
761 /* zero-extend 32 bits into 64 bits */
762 EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
771 * BPF_ST NOSPEC (speculation barrier)
773 case BPF_ST | BPF_NOSPEC:
774 if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
775 !security_ftr_enabled(SEC_FTR_STF_BARRIER))
778 switch (stf_barrier) {
779 case STF_BARRIER_EIEIO:
780 EMIT(PPC_RAW_EIEIO() | 0x02000000);
782 case STF_BARRIER_SYNC_ORI:
783 EMIT(PPC_RAW_SYNC());
784 EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
785 EMIT(PPC_RAW_ORI(_R31, _R31, 0));
787 case STF_BARRIER_FALLBACK:
788 ctx->seen |= SEEN_FUNC;
789 PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
790 EMIT(PPC_RAW_MTCTR(_R12));
791 EMIT(PPC_RAW_BCTRL());
793 case STF_BARRIER_NONE:
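/*
 * In short: the barrier flavour chosen at boot (stf_barrier_type_get())
 * decides what BPF_ST | BPF_NOSPEC expands to -- an eieio variant, the
 * sync + load-from-PACA + ori 31,31,0 sequence, or a call to the
 * bpf_stf_barrier() fallback -- and nothing at all when the STF barrier
 * security feature is not enabled.
 */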
801 case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
802 case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
803 if (BPF_CLASS(code) == BPF_ST) {
804 EMIT(PPC_RAW_LI(tmp1_reg, imm));
807 EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
809 case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
810 case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
811 if (BPF_CLASS(code) == BPF_ST) {
812 EMIT(PPC_RAW_LI(tmp1_reg, imm));
815 EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
817 case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
818 case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
819 if (BPF_CLASS(code) == BPF_ST) {
820 PPC_LI32(tmp1_reg, imm);
823 EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
825 case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
826 case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
827 if (BPF_CLASS(code) == BPF_ST) {
828 PPC_LI32(tmp1_reg, imm);
832 EMIT(PPC_RAW_LI(tmp2_reg, off));
833 EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
835 EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
840 * BPF_STX ATOMIC (atomic ops)
842 case BPF_STX | BPF_ATOMIC | BPF_W:
843 case BPF_STX | BPF_ATOMIC | BPF_DW:
847 /* Get offset into TMP_REG_1 */
848 EMIT(PPC_RAW_LI(tmp1_reg, off));
850 * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
851 * before and after the operation.
853 * This is a requirement in the Linux Kernel Memory Model.
854 * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
856 if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
857 EMIT(PPC_RAW_SYNC());
858 tmp_idx = ctx->idx * 4;
859 /* load value from memory into TMP_REG_2 */
861 EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
863 EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
865 /* Save old value in _R0 */
867 EMIT(PPC_RAW_MR(_R0, tmp2_reg));
871 case BPF_ADD | BPF_FETCH:
872 EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
875 case BPF_AND | BPF_FETCH:
876 EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
879 case BPF_OR | BPF_FETCH:
880 EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
883 case BPF_XOR | BPF_FETCH:
884 EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
888 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
889 * in src_reg for other cases.
891 ret_reg = bpf_to_ppc(BPF_REG_0);
893 /* Compare with old value in BPF_R0 */
895 EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
897 EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
898 /* Don't set if different from old value */
899 PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
906 "eBPF filter atomic op code %02x (@%d) unsupported\n",
911 /* store new value */
913 EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
915 EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
916 /* we're done if this succeeded */
917 PPC_BCC_SHORT(COND_NE, tmp_idx);
919 if (imm & BPF_FETCH) {
920 /* Emit 'sync' to enforce full ordering */
921 if (IS_ENABLED(CONFIG_SMP))
922 EMIT(PPC_RAW_SYNC());
923 EMIT(PPC_RAW_MR(ret_reg, _R0));
925 * Skip unnecessary zero-extension for 32-bit cmpxchg.
926 * For context, see commit 39491867ace5.
928 if (size != BPF_DW && imm == BPF_CMPXCHG &&
929 insn_is_zext(&insn[i + 1]))
930 addrs[++i] = ctx->idx * 4;
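/*
 * Sketch of the ll/sc loop emitted above (DW shown; the W case uses
 * lwarx/stwcx.):
 *
 *	[sync]				; only for BPF_FETCH on SMP
 * retry:
 *	ldarx	tmp2, tmp1, dst		; load-reserve current value
 *	[mr	r0, tmp2]		; keep old value for BPF_FETCH
 *	<op>	tmp2, tmp2, src		; add/and/or/xor, or pick new value
 *	stdcx.	save, tmp1, dst		; store-conditional
 *	bne-	retry			; reservation lost, try again
 *	[sync; mr ret, r0]		; BPF_FETCH: return the old value
 */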
937 /* dst = *(u8 *)(ul) (src + off) */
938 case BPF_LDX | BPF_MEM | BPF_B:
939 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
940 /* dst = *(u16 *)(ul) (src + off) */
941 case BPF_LDX | BPF_MEM | BPF_H:
942 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
943 /* dst = *(u32 *)(ul) (src + off) */
944 case BPF_LDX | BPF_MEM | BPF_W:
945 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
946 /* dst = *(u64 *)(ul) (src + off) */
947 case BPF_LDX | BPF_MEM | BPF_DW:
948 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
950 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
951 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
952 * load only if addr is kernel address (see is_kernel_addr()), otherwise
953 * set dst_reg=0 and move on.
955 if (BPF_MODE(code) == BPF_PROBE_MEM) {
956 EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
957 if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
958 PPC_LI64(tmp2_reg, 0x8000000000000000ul);
960 PPC_LI64(tmp2_reg, PAGE_OFFSET);
961 EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
962 PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
963 EMIT(PPC_RAW_LI(dst_reg, 0));
965 * Check if 'off' is word aligned for BPF_DW, because
966 * we might generate two instructions.
968 if (BPF_SIZE(code) == BPF_DW && (off & 3))
969 PPC_JMP((ctx->idx + 3) * 4);
971 PPC_JMP((ctx->idx + 2) * 4);
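/*
 * The guard above compares the effective address against PAGE_OFFSET
 * (or 0x8000000000000000 on BOOK3E). For a kernel address the branch
 * skips the "li dst,0; jump past the load" fault path and falls through
 * to the real load; the jump distance differs because an unaligned
 * BPF_DW load is emitted as two instructions (li + ldx) rather than a
 * single ld.
 */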
976 EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
979 EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
982 EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
986 EMIT(PPC_RAW_LI(tmp1_reg, off));
987 EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
989 EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
994 if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
995 addrs[++i] = ctx->idx * 4;
997 if (BPF_MODE(code) == BPF_PROBE_MEM) {
998 ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx,
999 ctx->idx - 1, 4, dst_reg);
1007 * 16 byte instruction that uses two 'struct bpf_insn'
1009 case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
1010 imm64 = ((u64)(u32) insn[i].imm) |
1011 (((u64)(u32) insn[i+1].imm) << 32);
1013 PPC_LI64(dst_reg, imm64);
1014 /* padding to allow full 5 instructions for later patching */
1016 for (j = ctx->idx - tmp_idx; j < 5; j++)
1017 EMIT(PPC_RAW_NOP());
1018 /* Adjust for two bpf instructions */
1019 addrs[++i] = ctx->idx * 4;
1025 case BPF_JMP | BPF_EXIT:
1027 * If this isn't the very last instruction, branch to
1028 * the epilogue. If we _are_ the last instruction,
1029 * we'll just fall through to the epilogue.
1031 if (i != flen - 1) {
1032 ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
1036 /* else fall through to the epilogue */
1040 * Call kernel helper or bpf function
1042 case BPF_JMP | BPF_CALL:
1043 ctx->seen |= SEEN_FUNC;
1045 ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
1046 &func_addr, &func_addr_fixed);
1050 if (func_addr_fixed)
1051 ret = bpf_jit_emit_func_call_hlp(image, fimage, ctx, func_addr);
1053 ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
1058 /* move return value from r3 to BPF_REG_0 */
1059 EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
1063 * Jumps and branches
1065 case BPF_JMP | BPF_JA:
1066 PPC_JMP(addrs[i + 1 + off]);
1069 case BPF_JMP | BPF_JGT | BPF_K:
1070 case BPF_JMP | BPF_JGT | BPF_X:
1071 case BPF_JMP | BPF_JSGT | BPF_K:
1072 case BPF_JMP | BPF_JSGT | BPF_X:
1073 case BPF_JMP32 | BPF_JGT | BPF_K:
1074 case BPF_JMP32 | BPF_JGT | BPF_X:
1075 case BPF_JMP32 | BPF_JSGT | BPF_K:
1076 case BPF_JMP32 | BPF_JSGT | BPF_X:
1077 true_cond = COND_GT;
1079 case BPF_JMP | BPF_JLT | BPF_K:
1080 case BPF_JMP | BPF_JLT | BPF_X:
1081 case BPF_JMP | BPF_JSLT | BPF_K:
1082 case BPF_JMP | BPF_JSLT | BPF_X:
1083 case BPF_JMP32 | BPF_JLT | BPF_K:
1084 case BPF_JMP32 | BPF_JLT | BPF_X:
1085 case BPF_JMP32 | BPF_JSLT | BPF_K:
1086 case BPF_JMP32 | BPF_JSLT | BPF_X:
1087 true_cond = COND_LT;
1089 case BPF_JMP | BPF_JGE | BPF_K:
1090 case BPF_JMP | BPF_JGE | BPF_X:
1091 case BPF_JMP | BPF_JSGE | BPF_K:
1092 case BPF_JMP | BPF_JSGE | BPF_X:
1093 case BPF_JMP32 | BPF_JGE | BPF_K:
1094 case BPF_JMP32 | BPF_JGE | BPF_X:
1095 case BPF_JMP32 | BPF_JSGE | BPF_K:
1096 case BPF_JMP32 | BPF_JSGE | BPF_X:
1097 true_cond = COND_GE;
1099 case BPF_JMP | BPF_JLE | BPF_K:
1100 case BPF_JMP | BPF_JLE | BPF_X:
1101 case BPF_JMP | BPF_JSLE | BPF_K:
1102 case BPF_JMP | BPF_JSLE | BPF_X:
1103 case BPF_JMP32 | BPF_JLE | BPF_K:
1104 case BPF_JMP32 | BPF_JLE | BPF_X:
1105 case BPF_JMP32 | BPF_JSLE | BPF_K:
1106 case BPF_JMP32 | BPF_JSLE | BPF_X:
1107 true_cond = COND_LE;
1109 case BPF_JMP | BPF_JEQ | BPF_K:
1110 case BPF_JMP | BPF_JEQ | BPF_X:
1111 case BPF_JMP32 | BPF_JEQ | BPF_K:
1112 case BPF_JMP32 | BPF_JEQ | BPF_X:
1113 true_cond = COND_EQ;
1115 case BPF_JMP | BPF_JNE | BPF_K:
1116 case BPF_JMP | BPF_JNE | BPF_X:
1117 case BPF_JMP32 | BPF_JNE | BPF_K:
1118 case BPF_JMP32 | BPF_JNE | BPF_X:
1119 true_cond = COND_NE;
1121 case BPF_JMP | BPF_JSET | BPF_K:
1122 case BPF_JMP | BPF_JSET | BPF_X:
1123 case BPF_JMP32 | BPF_JSET | BPF_K:
1124 case BPF_JMP32 | BPF_JSET | BPF_X:
1125 true_cond = COND_NE;
1130 case BPF_JMP | BPF_JGT | BPF_X:
1131 case BPF_JMP | BPF_JLT | BPF_X:
1132 case BPF_JMP | BPF_JGE | BPF_X:
1133 case BPF_JMP | BPF_JLE | BPF_X:
1134 case BPF_JMP | BPF_JEQ | BPF_X:
1135 case BPF_JMP | BPF_JNE | BPF_X:
1136 case BPF_JMP32 | BPF_JGT | BPF_X:
1137 case BPF_JMP32 | BPF_JLT | BPF_X:
1138 case BPF_JMP32 | BPF_JGE | BPF_X:
1139 case BPF_JMP32 | BPF_JLE | BPF_X:
1140 case BPF_JMP32 | BPF_JEQ | BPF_X:
1141 case BPF_JMP32 | BPF_JNE | BPF_X:
1142 /* unsigned comparison */
1143 if (BPF_CLASS(code) == BPF_JMP32)
1144 EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
1146 EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
1148 case BPF_JMP | BPF_JSGT | BPF_X:
1149 case BPF_JMP | BPF_JSLT | BPF_X:
1150 case BPF_JMP | BPF_JSGE | BPF_X:
1151 case BPF_JMP | BPF_JSLE | BPF_X:
1152 case BPF_JMP32 | BPF_JSGT | BPF_X:
1153 case BPF_JMP32 | BPF_JSLT | BPF_X:
1154 case BPF_JMP32 | BPF_JSGE | BPF_X:
1155 case BPF_JMP32 | BPF_JSLE | BPF_X:
1156 /* signed comparison */
1157 if (BPF_CLASS(code) == BPF_JMP32)
1158 EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
1160 EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
1162 case BPF_JMP | BPF_JSET | BPF_X:
1163 case BPF_JMP32 | BPF_JSET | BPF_X:
1164 if (BPF_CLASS(code) == BPF_JMP) {
1165 EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
1167 EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
1168 EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
1171 case BPF_JMP | BPF_JNE | BPF_K:
1172 case BPF_JMP | BPF_JEQ | BPF_K:
1173 case BPF_JMP | BPF_JGT | BPF_K:
1174 case BPF_JMP | BPF_JLT | BPF_K:
1175 case BPF_JMP | BPF_JGE | BPF_K:
1176 case BPF_JMP | BPF_JLE | BPF_K:
1177 case BPF_JMP32 | BPF_JNE | BPF_K:
1178 case BPF_JMP32 | BPF_JEQ | BPF_K:
1179 case BPF_JMP32 | BPF_JGT | BPF_K:
1180 case BPF_JMP32 | BPF_JLT | BPF_K:
1181 case BPF_JMP32 | BPF_JGE | BPF_K:
1182 case BPF_JMP32 | BPF_JLE | BPF_K:
1184 bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1187 * Need sign-extended load, so only positive
1188 * values can be used as imm in cmpldi
1190 if (imm >= 0 && imm < 32768) {
1192 EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
1194 EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
1196 /* sign-extending load */
1197 PPC_LI32(tmp1_reg, imm);
1198 /* ... but unsigned comparison */
1200 EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
1202 EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
1206 case BPF_JMP | BPF_JSGT | BPF_K:
1207 case BPF_JMP | BPF_JSLT | BPF_K:
1208 case BPF_JMP | BPF_JSGE | BPF_K:
1209 case BPF_JMP | BPF_JSLE | BPF_K:
1210 case BPF_JMP32 | BPF_JSGT | BPF_K:
1211 case BPF_JMP32 | BPF_JSLT | BPF_K:
1212 case BPF_JMP32 | BPF_JSGE | BPF_K:
1213 case BPF_JMP32 | BPF_JSLE | BPF_K:
1215 bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
1218 * signed comparison, so any 16-bit value
1219 * can be used in cmpdi
1221 if (imm >= -32768 && imm < 32768) {
1223 EMIT(PPC_RAW_CMPWI(dst_reg, imm));
1225 EMIT(PPC_RAW_CMPDI(dst_reg, imm));
1227 PPC_LI32(tmp1_reg, imm);
1229 EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
1231 EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
1235 case BPF_JMP | BPF_JSET | BPF_K:
1236 case BPF_JMP32 | BPF_JSET | BPF_K:
1237 /* andi does not sign-extend the immediate */
1238 if (imm >= 0 && imm < 32768)
1239 /* PPC_ANDI is _only/always_ dot-form */
1240 EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
1242 PPC_LI32(tmp1_reg, imm);
1243 if (BPF_CLASS(code) == BPF_JMP) {
1244 EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
1247 EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
1248 EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
1254 PPC_BCC(true_cond, addrs[i + 1 + off]);
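/*
 * Conditional jumps are thus emitted in two steps: the first switch only
 * records the branch condition (true_cond), the second emits the
 * appropriate cmpw/cmpd/cmplw/cmpld (or and./andi. for BPF_JSET), and
 * the bcc above branches to addrs[i + 1 + off] when the condition holds.
 */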
1260 case BPF_JMP | BPF_TAIL_CALL:
1261 ctx->seen |= SEEN_TAILCALL;
1262 ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
1269 * The filter contains something cruel & unusual.
1270 * We don't handle it, but also there shouldn't be
1271 * anything missing from our list.
1273 pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
1279 /* Set end-of-body-code address for exit. */
1280 addrs[i] = ctx->idx * 4;