// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include "bpf_jit.h"

#define REG_TCC		LOONGARCH_GPR_A6
#define TCC_SAVED	LOONGARCH_GPR_S5

#define SAVE_RA		BIT(0)
#define SAVE_TCC	BIT(1)
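/*
 * REG_TCC ($a6) carries the tail-call count into a program; when the program
 * both makes calls and performs tail calls, the prologue copies it to the
 * callee-saved TCC_SAVED ($s5) so the count survives across calls. SAVE_RA
 * and SAVE_TCC are ctx->flags bits recorded during the first build_body()
 * pass by mark_call()/mark_tail_call() below.
 */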

static const int regmap[] = {
	/* return value from in-kernel function, and exit value for eBPF program */
	[BPF_REG_0] = LOONGARCH_GPR_A5,
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = LOONGARCH_GPR_A0,
	[BPF_REG_2] = LOONGARCH_GPR_A1,
	[BPF_REG_3] = LOONGARCH_GPR_A2,
	[BPF_REG_4] = LOONGARCH_GPR_A3,
	[BPF_REG_5] = LOONGARCH_GPR_A4,
	/* callee-saved registers that in-kernel function will preserve */
	[BPF_REG_6] = LOONGARCH_GPR_S0,
	[BPF_REG_7] = LOONGARCH_GPR_S1,
	[BPF_REG_8] = LOONGARCH_GPR_S2,
	[BPF_REG_9] = LOONGARCH_GPR_S3,
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = LOONGARCH_GPR_S4,
	/* temporary register for blinding constants */
	[BPF_REG_AX] = LOONGARCH_GPR_T0,
};

static void mark_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_RA;
}

static void mark_tail_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_TCC;
}

static bool seen_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_RA);
}

static bool seen_tail_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_TCC);
}

static u8 tail_call_reg(struct jit_ctx *ctx)
{
	/* Once calls are emitted, the live tail-call count is the saved copy. */
	if (seen_call(ctx))
		return TCC_SAVED;

	return REG_TCC;
}

/*
 * eBPF prog stack layout:
 *
 * original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP
 *                            |           $ra           |
 *                            +-------------------------+
 *                            |           $fp           |
 *                            +-------------------------+
 *                            |           $s0           |
 *                            +-------------------------+
 *                            |           $s1           |
 *                            +-------------------------+
 *                            |           $s2           |
 *                            +-------------------------+
 *                            |           $s3           |
 *                            +-------------------------+
 *                            |           $s4           |
 *                            +-------------------------+
 *                            |           $s5           |
 *                            +-------------------------+ <--BPF_REG_FP
 *                            |  prog->aux->stack_depth |
 *                            |        (optional)       |
 * current $sp -------------> +-------------------------+
 */
static void build_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0, store_offset, bpf_stack_adjust;

	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);

	/* To store ra, fp, s0, s1, s2, s3, s4 and s5. */
	stack_adjust += sizeof(long) * 8;

	stack_adjust = round_up(stack_adjust, 16);
	stack_adjust += bpf_stack_adjust;
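	/*
	 * Illustrative example: for prog->aux->stack_depth == 40,
	 * bpf_stack_adjust = round_up(40, 16) = 48 and the eight saved GPRs
	 * take 8 * sizeof(long) = 64 bytes, so stack_adjust = 64 + 48 = 112
	 * and $sp stays 16-byte aligned.
	 */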

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in REG_TCC from the caller.
	 */
	emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);

	store_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);

	if (bpf_stack_adjust)
		emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);

	/*
	 * Program contains calls and tail calls, so REG_TCC needs
	 * to be saved across calls.
	 */
	if (seen_tail_call(ctx) && seen_call(ctx))
		move_reg(ctx, TCC_SAVED, REG_TCC);

	ctx->stack_size = stack_adjust;
}

static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
{
	int stack_adjust = ctx->stack_size;
	int load_offset;

	load_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust);

	if (!is_tail_call) {
		/* Set return value */
		move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]);
		/* Return to the caller */
		emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
	} else {
		/*
		 * Call the next bpf prog and skip the first instruction
		 * of TCC initialization.
		 */
		emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1);
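		/*
		 * The jirl offset is counted in 4-byte instructions, so the
		 * target is $t3 + 4, i.e. prog->bpf_func + 4: this lands just
		 * past the "addi.d REG_TCC, $zero, MAX_TAIL_CALL_CNT" emitted
		 * by build_prologue(), keeping the caller's tail-call count.
		 */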
	}
}

static void build_epilogue(struct jit_ctx *ctx)
{
	__build_epilogue(ctx, false);
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

/* initialized on the first pass of build_body() */
static int out_offset = -1;
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	int off;
	u8 tcc = tail_call_reg(ctx);
	u8 a1 = LOONGARCH_GPR_A1;
	u8 a2 = LOONGARCH_GPR_A2;
	u8 t1 = LOONGARCH_GPR_T1;
	u8 t2 = LOONGARCH_GPR_T2;
	u8 t3 = LOONGARCH_GPR_T3;
	const int idx0 = ctx->idx;

#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
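	/*
	 * out_offset is recorded on the first pass as the number of
	 * instructions emitted before the fall-through ("out") point, so
	 * jmp_offset is only meaningful if every later pass emits exactly
	 * the same sequence; the cur_offset check at the end enforces that.
	 */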

	/*
	 * if (index >= array->map.max_entries)
	 *	 goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_insn(ctx, ldwu, t1, a1, off);
	/* bgeu $a2, $t1, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JGE, a2, t1, jmp_offset) < 0)
		goto toofar;

	/*
	 * if (--TCC < 0)
	 *	 goto out;
	 */
	emit_insn(ctx, addid, REG_TCC, tcc, -1);
	if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/*
	 * prog = array->ptrs[index];
	 * if (!prog)
	 *	 goto out;
	 */
	emit_insn(ctx, alsld, t2, a2, a1, 2);
	off = offsetof(struct bpf_array, ptrs);
	emit_insn(ctx, ldd, t2, t2, off);
	/* beq $t2, $zero, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JEQ, t2, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_insn(ctx, ldd, t3, t2, off);
	__build_epilogue(ctx, true);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}

	return 0;

toofar:
	pr_info_once("tail_call: jump too far\n");
	return -1;
#undef cur_offset
#undef jmp_offset
}

static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 t3 = LOONGARCH_GPR_T3;
	const u8 r0 = regmap[BPF_REG_0];
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const bool isdw = BPF_SIZE(insn->code) == BPF_DW;

	move_imm(ctx, t1, off, false);
	emit_insn(ctx, addd, t1, dst, t1);
	move_reg(ctx, t3, src);
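	/*
	 * $t1 now holds the target address (dst + off) and $t3 a copy of src:
	 * the fetch/xchg AM* forms below write the old memory value into the
	 * src register, so the operand has to be supplied from $t3.
	 */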

	switch (imm) {
	/* lock *(size *)(dst + off) <op>= src */
	case BPF_ADD:
		if (isdw)
			emit_insn(ctx, amaddd, t2, t1, src);
		else
			emit_insn(ctx, amaddw, t2, t1, src);
		break;
	case BPF_AND:
		if (isdw)
			emit_insn(ctx, amandd, t2, t1, src);
		else
			emit_insn(ctx, amandw, t2, t1, src);
		break;
	case BPF_OR:
		if (isdw)
			emit_insn(ctx, amord, t2, t1, src);
		else
			emit_insn(ctx, amorw, t2, t1, src);
		break;
	case BPF_XOR:
		if (isdw)
			emit_insn(ctx, amxord, t2, t1, src);
		else
			emit_insn(ctx, amxorw, t2, t1, src);
		break;
	/* src = atomic_fetch_<op>(dst + off, src) */
	case BPF_ADD | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amaddd, src, t1, t3);
		} else {
			emit_insn(ctx, amaddw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_AND | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amandd, src, t1, t3);
		} else {
			emit_insn(ctx, amandw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_OR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amord, src, t1, t3);
		} else {
			emit_insn(ctx, amorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_XOR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amxord, src, t1, t3);
		} else {
			emit_insn(ctx, amxorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	/* src = atomic_xchg(dst + off, src); */
	case BPF_XCHG:
		if (isdw) {
			emit_insn(ctx, amswapd, src, t1, t3);
		} else {
			emit_insn(ctx, amswapw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	/* r0 = atomic_cmpxchg(dst + off, r0, src); */
	case BPF_CMPXCHG:
		move_reg(ctx, t2, r0);
		if (isdw) {
			/* LL/SC loop: bne exits on compare mismatch, beq retries when SC fails */
			emit_insn(ctx, lld, r0, t1, 0);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scd, t3, t1, 0);
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4);
		} else {
			emit_insn(ctx, llw, r0, t1, 0);
			emit_zext_32(ctx, t2, true);
			emit_zext_32(ctx, r0, true);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scw, t3, t1, 0);
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6);
			emit_zext_32(ctx, r0, true);
		}
		break;
	}
}

static bool is_signed_bpf_cond(u8 cond)
{
	return cond == BPF_JSGT || cond == BPF_JSLT ||
	       cond == BPF_JSGE || cond == BPF_JSLE;
}

#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)
#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)

bool ex_handler_bpf(const struct exception_table_entry *ex,
		    struct pt_regs *regs)
{
	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);

	regs->regs[dst_reg] = 0;
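	/*
	 * The fixup offset was stored as (&ex->fixup - target), so subtracting
	 * it from &ex->fixup below resumes at the instruction following the
	 * faulting load; together with zeroing the destination register above,
	 * a faulting BPF_PROBE_MEM* load simply reads as 0.
	 */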
	regs->csr_era = (unsigned long)&ex->fixup - offset;

	return true;
}

/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct jit_ctx *ctx,
				 int dst_reg)
{
	unsigned long pc;
	off_t offset;
	struct exception_table_entry *ex;

	if (!ctx->image || !ctx->prog->aux->extable)
		return 0;

	if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
	    BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
		return 0;

	if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->num_exentries];
	pc = (unsigned long)&ctx->image[ctx->idx - 1];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;

	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
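	/*
	 * Resulting fixup word layout (per the masks defined above):
	 *   bits 31..27: dst_reg to be zeroed by ex_handler_bpf()
	 *   bits 26..0:  positive byte distance from the faulting
	 *                instruction's successor (pc + 4) to &ex->fixup
	 */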
	offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->type = EX_TYPE_BPF;
	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

	ctx->num_exentries++;

	return 0;
}

static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
{
	u8 tm = -1;
	u64 func_addr;
	bool func_addr_fixed, sign_extend;
	int i = insn - ctx->prog->insnsi;
	int ret, jmp_offset;
	const u8 code = insn->code;
	const u8 cond = BPF_OP(code);
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
	const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;
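	/*
	 * is32 selects BPF_ALU/BPF_JMP32 semantics: 32-bit results must leave
	 * the upper 32 bits of the destination cleared, which is what the
	 * emit_zext_32(ctx, dst, is32) calls after each operation do (they
	 * are no-ops when is32 is false).
	 */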

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		move_reg(ctx, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		move_imm(ctx, dst, imm, is32);
		break;

	/* dst = dst + src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_insn(ctx, addd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst + imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_signed_imm12(imm)) {
			emit_insn(ctx, addid, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, addd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - src */
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit_insn(ctx, subd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - imm */
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_signed_imm12(-imm)) {
			emit_insn(ctx, addid, dst, dst, -imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, subd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * src */
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit_insn(ctx, muld, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * imm */
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, muld, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst / src */
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit_zext_32(ctx, dst, is32);
		move_reg(ctx, t1, src);
		emit_zext_32(ctx, t1, is32);
		emit_insn(ctx, divdu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;
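	/*
	 * BPF division is unsigned, hence divdu/moddu: for 32-bit ops both
	 * operands are zero-extended first so stale upper bits cannot affect
	 * the 64-bit divide.
	 */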

	/* dst = dst / imm */
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_zext_32(ctx, dst, is32);
		emit_insn(ctx, divdu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst % src */
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit_zext_32(ctx, dst, is32);
		move_reg(ctx, t1, src);
		emit_zext_32(ctx, t1, is32);
		emit_insn(ctx, moddu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst % imm */
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_zext_32(ctx, dst, is32);
		emit_insn(ctx, moddu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, subd, dst, LOONGARCH_GPR_ZERO, dst);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & src */
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_insn(ctx, and, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & imm */
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, andi, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, and, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | src */
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_insn(ctx, or, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | imm */
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, ori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, or, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ src */
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_insn(ctx, xor, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ imm */
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, xori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, xor, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst << src (logical) */
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_insn(ctx, sllw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_insn(ctx, slld, dst, dst, src);
		break;

	/* dst = dst << imm (logical) */
	case BPF_ALU | BPF_LSH | BPF_K:
		emit_insn(ctx, slliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_insn(ctx, sllid, dst, dst, imm);
		break;

	/* dst = dst >> src (logical) */
	case BPF_ALU | BPF_RSH | BPF_X:
		emit_insn(ctx, srlw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_insn(ctx, srld, dst, dst, src);
		break;

	/* dst = dst >> imm (logical) */
	case BPF_ALU | BPF_RSH | BPF_K:
		emit_insn(ctx, srliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit_insn(ctx, srlid, dst, dst, imm);
		break;

	/* dst = dst >> src (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_X:
		emit_insn(ctx, sraw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_insn(ctx, srad, dst, dst, src);
		break;

	/* dst = dst >> imm (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraiw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraid, dst, dst, imm);
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit_zext_32(ctx, dst, is32);
			break;
		case 64:
			/* do nothing */
			break;
		}
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
		switch (imm) {
		case 16:
			emit_insn(ctx, revb2h, dst, dst);
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			emit_insn(ctx, revb2w, dst, dst);
			/* zero-extend 32 bits into 64 bits */
			emit_zext_32(ctx, dst, is32);
			break;
		case 64:
			emit_insn(ctx, revbd, dst, dst);
			break;
		}
		break;

	/* PC += off if dst cond src */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_reg(ctx, t1, dst);
		move_reg(ctx, t2, src);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, t1, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, t1, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0)
			goto toofar;
		break;
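	/*
	 * The operands are widened in temporaries ($t1/$t2) rather than in
	 * place: for BPF_JMP32 only the low 32 bits take part in the compare,
	 * so they are sign- or zero-extended according to the condition
	 * without clobbering dst/src. The immediate forms below follow the
	 * same scheme.
	 */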

	/* PC += off if dst cond imm */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		if (imm) {
			move_imm(ctx, t1, imm, false);
			tm = t1;
		} else {
			/* If imm is 0, simply use zero register. */
			tm = LOONGARCH_GPR_ZERO;
		}
		move_reg(ctx, t2, dst);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, tm, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, tm, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & src */
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		emit_insn(ctx, and, t1, dst, src);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & imm */
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, and, t1, dst, t1);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2la_offset(i, off, ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;

	/* function call */
	case BPF_JMP | BPF_CALL:
		mark_call(ctx);
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;

		move_addr(ctx, t1, func_addr);
		emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
		move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
		break;
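	/*
	 * BPF R0 is mapped to $a5 rather than the LoongArch ABI return
	 * register, so the callee's result has to be copied from $a0 into
	 * regmap[BPF_REG_0] after the call above.
	 */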

	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		mark_tail_call(ctx);
		if (emit_bpf_tail_call(ctx) < 0)
			return -EINVAL;
		break;

	/* function return */
	case BPF_JMP | BPF_EXIT:
		emit_sext_32(ctx, regmap[BPF_REG_0], true);

		if (i == ctx->prog->len - 1)
			break;

		jmp_offset = epilogue_offset(ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
		move_imm(ctx, dst, imm64, is32);
		return 1;

	/* dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
	/* dst_reg = (s64)*(signed size *)(src_reg + off) */
	case BPF_LDX | BPF_MEMSX | BPF_B:
	case BPF_LDX | BPF_MEMSX | BPF_H:
	case BPF_LDX | BPF_MEMSX | BPF_W:
	case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
	case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
	case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
		sign_extend = BPF_MODE(insn->code) == BPF_MEMSX ||
			      BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				if (sign_extend)
					emit_insn(ctx, ldb, dst, src, off);
				else
					emit_insn(ctx, ldbu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				if (sign_extend)
					emit_insn(ctx, ldxb, dst, src, t1);
				else
					emit_insn(ctx, ldxbu, dst, src, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				if (sign_extend)
					emit_insn(ctx, ldh, dst, src, off);
				else
					emit_insn(ctx, ldhu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				if (sign_extend)
					emit_insn(ctx, ldxh, dst, src, t1);
				else
					emit_insn(ctx, ldxhu, dst, src, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				if (sign_extend)
					emit_insn(ctx, ldw, dst, src, off);
				else
					emit_insn(ctx, ldwu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				if (sign_extend)
					emit_insn(ctx, ldxw, dst, src, t1);
				else
					emit_insn(ctx, ldxwu, dst, src, t1);
			}
			break;
		case BPF_DW:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldd, dst, src, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, ldptrd, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxd, dst, src, t1);
			}
			break;
		}

		ret = add_exception_handler(insn, ctx, dst);
		if (ret < 0)
			return ret;
		break;

	/* *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxb, t1, dst, t2);
			}
			break;
		case BPF_H:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxh, t1, dst, t2);
			}
			break;
		case BPF_W:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxw, t1, dst, t2);
			}
			break;
		case BPF_DW:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxd, t1, dst, t2);
			}
			break;
		}
		break;

	/* *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxb, src, dst, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxh, src, dst, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxw, src, dst, t1);
			}
			break;
		case BPF_DW:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxd, src, dst, t1);
			}
			break;
		}
		break;

	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		emit_atomic(insn, ctx);
		break;

	/* Speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		break;

	default:
		pr_err("bpf_jit: unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;

toofar:
	pr_info_once("bpf_jit: opcode %02x, jump too far\n", code);
	return -E2BIG;
}

static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	int i;
	const struct bpf_prog *prog = ctx->prog;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			/* A double-word insn (BPF_LD | BPF_IMM | BPF_DW) consumed two slots. */
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ret)
			return ret;
	}

	if (ctx->image == NULL)
		ctx->offset[i] = ctx->idx;

	return 0;
}

/* Fill space with break instructions */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;

	/* We are guaranteed to have aligned memory */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = INSN_BREAK;
}

static int validate_code(struct jit_ctx *ctx)
{
	int i;
	union loongarch_instruction insn;

	for (i = 0; i < ctx->idx; i++) {
		insn = ctx->image[i];
		/* Check INSN_BREAK */
		if (insn.word == INSN_BREAK)
			return -1;
	}

	if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries))
		return -1;

	return 0;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	bool tmp_blinded = false, extra_pass = false;
	u8 *image_ptr;
	int image_size, prog_size, extable_size;
	struct jit_ctx ctx;
	struct jit_data *jit_data;
	struct bpf_binary_header *header;
	struct bpf_prog *tmp, *orig_prog = prog;

	/*
	 * If BPF JIT was not enabled then we must fall back to
	 * the interpreter.
	 */
	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	if (IS_ERR(tmp))
		return orig_prog;

	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		prog_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 1. Initial fake pass to compute ctx->idx and set ctx->flags */
	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_offset;
	}
	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry);

	/* Now we know the actual image size.
	 * As each LoongArch instruction is of length 32bit,
	 * we are translating number of JITed instructions into
	 * the size required to store this JITed code.
	 */
	prog_size = sizeof(u32) * ctx.idx;
	image_size = prog_size + extable_size;
	/* Now we know the size of the structure to make */
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 2. Now, the actual pass to generate final JIT code */
	ctx.image = (union loongarch_instruction *)image_ptr;
	if (extable_size)
		prog->aux->extable = (void *)image_ptr + prog_size;

skip_init_ctx:
	ctx.idx = 0;
	ctx.num_exentries = 0;

	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}
	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}

	/* And we're done */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);

	/* Update the icache */
	flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));

	if (!prog->is_func || extra_pass) {
		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
			bpf_jit_binary_free(header);
			prog->bpf_func = NULL;
			prog->jited = 0;
			prog->jited_len = 0;
			goto out_offset;
		}
		bpf_jit_binary_lock_ro(header);
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}
	prog->jited = 1;
	prog->jited_len = prog_size;
	prog->bpf_func = (void *)ctx.image;

	if (!prog->is_func || extra_pass) {
		int i;

		/* offset[prog->len] is the size of program */
		for (i = 0; i <= prog->len; i++)
			ctx.offset[i] *= LOONGARCH_INSN_SIZE;
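		/*
		 * ctx.offset[] held instruction indexes up to this point;
		 * after scaling by LOONGARCH_INSN_SIZE it holds byte offsets,
		 * and "+ 1" passes, for each BPF insn, the offset of the
		 * JITed code that follows it, as bpf_prog_fill_jited_linfo()
		 * expects.
		 */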
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);

out_offset:
		kvfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}

out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);

	out_offset = -1;

	return prog;
}

/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
	return true;
}