LoongArch: BPF: Fix jump offset calculation in tailcall
authorHaoran Jiang <jianghaoran@kylinos.cn>
Tue, 5 Aug 2025 11:00:22 +0000 (19:00 +0800)
committerHuacai Chen <chenhuacai@loongson.cn>
Tue, 5 Aug 2025 11:00:22 +0000 (19:00 +0800)
The extra pass of bpf_int_jit_compile() skips JIT context initialization,
which essentially skips the offset calculation and leaves out_offset = -1,
so the jmp_offset in emit_bpf_tail_call(), calculated by

"#define jmp_offset (out_offset - (cur_offset))"

is a negative number, which is wrong. The final generated assembly is as
follows.

54: bgeu         $a2, $t1, -8     # 0x0000004c
58: addi.d       $a6, $s5, -1
5c: bltz         $a6, -16     # 0x0000004c
60: alsl.d       $t2, $a2, $a1, 0x3
64: ld.d         $t2, $t2, 264
68: beq          $t2, $zero, -28     # 0x0000004c

Before applying this patch, the following test case reveals a soft lockup issue.

cd tools/testing/selftests/bpf/
./test_progs --allow=tailcalls/tailcall_bpf2bpf_1

dmesg:
watchdog: BUG: soft lockup - CPU#2 stuck for 26s! [test_progs:25056]

Cc: stable@vger.kernel.org
Fixes: 5dc615520c4d ("LoongArch: Add BPF JIT support")
Reviewed-by: Hengqi Chen <hengqi.chen@gmail.com>
Signed-off-by: Haoran Jiang <jianghaoran@kylinos.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
arch/loongarch/net/bpf_jit.c

index 0d11d90dc4c91616065e79dcb4f5d2b21c5b5910..f4f12ed16d2f21e62bef32985bf040ba7434d5f7 100644 (file)
@@ -222,11 +222,9 @@ bool bpf_jit_supports_far_kfunc_call(void)
        return true;
 }
 
-/* initialized on the first pass of build_body() */
-static int out_offset = -1;
-static int emit_bpf_tail_call(struct jit_ctx *ctx)
+static int emit_bpf_tail_call(struct jit_ctx *ctx, int insn)
 {
-       int off;
+       int off, tc_ninsn = 0;
        u8 tcc = tail_call_reg(ctx);
        u8 a1 = LOONGARCH_GPR_A1;
        u8 a2 = LOONGARCH_GPR_A2;
@@ -236,7 +234,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
        const int idx0 = ctx->idx;
 
 #define cur_offset (ctx->idx - idx0)
-#define jmp_offset (out_offset - (cur_offset))
+#define jmp_offset (tc_ninsn - (cur_offset))
 
        /*
         * a0: &ctx
@@ -246,6 +244,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
         * if (index >= array->map.max_entries)
         *       goto out;
         */
+       tc_ninsn = insn ? ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0];
        off = offsetof(struct bpf_array, map.max_entries);
        emit_insn(ctx, ldwu, t1, a1, off);
        /* bgeu $a2, $t1, jmp_offset */
@@ -277,15 +276,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
        emit_insn(ctx, ldd, t3, t2, off);
        __build_epilogue(ctx, true);
 
-       /* out: */
-       if (out_offset == -1)
-               out_offset = cur_offset;
-       if (cur_offset != out_offset) {
-               pr_err_once("tail_call out_offset = %d, expected %d!\n",
-                           cur_offset, out_offset);
-               return -1;
-       }
-
        return 0;
 
 toofar:
@@ -930,7 +920,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
        /* tail call */
        case BPF_JMP | BPF_TAIL_CALL:
                mark_tail_call(ctx);
-               if (emit_bpf_tail_call(ctx) < 0)
+               if (emit_bpf_tail_call(ctx, i) < 0)
                        return -EINVAL;
                break;
 
@@ -1855,7 +1845,6 @@ out:
        if (tmp_blinded)
                bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);
 
-       out_offset = -1;
 
        return prog;