arm64: bpf: Add BHB mitigation to the epilogue for cBPF programs
authorJames Morse <james.morse@arm.com>
Thu, 9 Dec 2021 15:13:24 +0000 (15:13 +0000)
committerJames Morse <james.morse@arm.com>
Thu, 8 May 2025 14:28:35 +0000 (15:28 +0100)
A malicious BPF program may manipulate the branch history to influence
what the hardware speculates will happen next.

On exit from a BPF program, emit the BHB mitigation sequence.

This is only applied for 'classic' cBPF programs that are loaded by
seccomp.

Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
arch/arm64/include/asm/spectre.h
arch/arm64/kernel/proton-pack.c
arch/arm64/net/bpf_jit_comp.c

index bca12134245c6a46e96ab5833f3ed159735f4fd2..8fef12626090112fdb308eae1d88977643b63cbd 100644 (file)
@@ -97,6 +97,7 @@ enum mitigation_state arm64_get_meltdown_state(void);
 
 enum mitigation_state arm64_get_spectre_bhb_state(void);
 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
+extern bool __nospectre_bhb;
 u8 get_spectre_bhb_loop_value(void);
 bool is_spectre_bhb_fw_mitigated(void);
 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
index 3154094a9e33d447a6461198c595afa6e15e5f9a..4459b613077e93bacda4aebdd45e3ec9e7513776 100644 (file)
@@ -1021,7 +1021,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
        isb();
 }
 
-static bool __read_mostly __nospectre_bhb;
+bool __read_mostly __nospectre_bhb;
 static int __init parse_spectre_bhb_param(char *str)
 {
        __nospectre_bhb = true;
index 70d7c89d3ac907798e86e0051e7b472c252c1412..0ab8e47062d9a15f6fa2802f21e8a813417a1b7a 100644 (file)
@@ -7,6 +7,7 @@
 
 #define pr_fmt(fmt) "bpf_jit: " fmt
 
+#include <linux/arm-smccc.h>
 #include <linux/bitfield.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
@@ -17,6 +18,7 @@
 #include <asm/asm-extable.h>
 #include <asm/byteorder.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
 #include <asm/text-patching.h>
@@ -939,7 +941,48 @@ static void build_plt(struct jit_ctx *ctx)
                plt->target = (u64)&dummy_tramp;
 }
 
-static void build_epilogue(struct jit_ctx *ctx)
+/* Clobbers BPF registers 1-4, aka x0-x3 */
+static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx)
+{
+       const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */
+       u8 k = get_spectre_bhb_loop_value();
+
+       if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
+           cpu_mitigations_off() || __nospectre_bhb ||
+           arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
+               return;
+
+       if (supports_clearbhb(SCOPE_SYSTEM)) {
+               emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx);
+               return;
+       }
+
+       if (k) {
+               emit_a64_mov_i64(r1, k, ctx);
+               emit(A64_B(1), ctx);
+               emit(A64_SUBS_I(true, r1, r1, 1), ctx);
+               emit(A64_B_(A64_COND_NE, -2), ctx);
+               emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx);
+               emit(aarch64_insn_get_isb_value(), ctx);
+       }
+
+       if (is_spectre_bhb_fw_mitigated()) {
+               emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR,
+                              ARM_SMCCC_ARCH_WORKAROUND_3), ctx);
+               switch (arm_smccc_1_1_get_conduit()) {
+               case SMCCC_CONDUIT_HVC:
+                       emit(aarch64_insn_get_hvc_value(), ctx);
+                       break;
+               case SMCCC_CONDUIT_SMC:
+                       emit(aarch64_insn_get_smc_value(), ctx);
+                       break;
+               default:
+                       pr_err_once("Firmware mitigation enabled with unknown conduit\n");
+               }
+       }
+}
+
+static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
 {
        const u8 r0 = bpf2a64[BPF_REG_0];
        const u8 ptr = bpf2a64[TCCNT_PTR];
@@ -952,10 +995,13 @@ static void build_epilogue(struct jit_ctx *ctx)
 
        emit(A64_POP(A64_ZR, ptr, A64_SP), ctx);
 
+       if (was_classic)
+               build_bhb_mitigation(ctx);
+
        /* Restore FP/LR registers */
        emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
 
-       /* Set return value */
+       /* Move the return value from bpf:r0 (aka x7) to x0 */
        emit(A64_MOV(1, A64_R(0), r0), ctx);
 
        /* Authenticate lr */
@@ -1898,7 +1944,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
        }
 
        ctx.epilogue_offset = ctx.idx;
-       build_epilogue(&ctx);
+       build_epilogue(&ctx, was_classic);
        build_plt(&ctx);
 
        extable_align = __alignof__(struct exception_table_entry);
@@ -1961,7 +2007,7 @@ skip_init_ctx:
                goto out_free_hdr;
        }
 
-       build_epilogue(&ctx);
+       build_epilogue(&ctx, was_classic);
        build_plt(&ctx);
 
        /* Extra pass to validate JITed code. */