bpf: consistently use BPF token throughout BPF verifier logic
author: Andrii Nakryiko <andrii@kernel.org>
Thu, 30 Nov 2023 18:52:20 +0000 (10:52 -0800)
committer: Alexei Starovoitov <ast@kernel.org>
Wed, 6 Dec 2023 18:02:59 +0000 (10:02 -0800)
Remove remaining direct queries to perfmon_capable() and bpf_capable()
in BPF verifier logic and instead use BPF token (if available) to make
decisions about privileges.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20231130185229.2688956-9-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf.h
include/linux/filter.h
kernel/bpf/arraymap.c
kernel/bpf/core.c
kernel/bpf/verifier.c
net/core/filter.c

index 2a3ab4f3dd8cb573d37b566f72bd7160360b6ade..435abad3cc61e912a900bee816eabab85385c30e 100644 (file)
@@ -2200,24 +2200,24 @@ extern int sysctl_unprivileged_bpf_disabled;
 
 bool bpf_token_capable(const struct bpf_token *token, int cap);
 
-static inline bool bpf_allow_ptr_leaks(void)
+static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
 {
-       return perfmon_capable();
+       return bpf_token_capable(token, CAP_PERFMON);
 }
 
-static inline bool bpf_allow_uninit_stack(void)
+static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
 {
-       return perfmon_capable();
+       return bpf_token_capable(token, CAP_PERFMON);
 }
 
-static inline bool bpf_bypass_spec_v1(void)
+static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
 {
-       return cpu_mitigations_off() || perfmon_capable();
+       return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
 }
 
-static inline bool bpf_bypass_spec_v4(void)
+static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
 {
-       return cpu_mitigations_off() || perfmon_capable();
+       return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
 }
 
 int bpf_map_new_fd(struct bpf_map *map, int flags);
index a4953fafc8cb8c83a5d7a5be068988026bb0aa34..14354605ad269fc4b575ba0102bd2610982b268c 100644 (file)
@@ -1139,7 +1139,7 @@ static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
                return false;
        if (!bpf_jit_harden)
                return false;
-       if (bpf_jit_harden == 1 && bpf_capable())
+       if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF))
                return false;
 
        return true;
index 4a4a67956e21194071c9122dfe5046c54b0a3a64..8d365bda9a8bf1e5cf2b74efb2e09a4b18af94f7 100644 (file)
@@ -82,7 +82,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int numa_node = bpf_map_attr_numa_node(attr);
        u32 elem_size, index_mask, max_entries;
-       bool bypass_spec_v1 = bpf_bypass_spec_v1();
+       bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
        u64 array_size, mask64;
        struct bpf_array *array;
 
index 47085839af8d0ec1a8d94fc4a638f2153f9b5167..ced511f44174f9f0edaafa6970d424778c023e40 100644 (file)
@@ -675,7 +675,7 @@ static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 {
        if (!bpf_prog_kallsyms_candidate(fp) ||
-           !bpf_capable())
+           !bpf_token_capable(fp->aux->token, CAP_BPF))
                return;
 
        bpf_prog_ksym_set_addr(fp);
index e5ce530641ba0cfe6e719c0126b998010bee054b..45e85fb76d825f5ccd03012d8dc5b09ad1ff7d46 100644 (file)
@@ -20597,7 +20597,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
        env->prog = *prog;
        env->ops = bpf_verifier_ops[env->prog->type];
        env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
-       is_priv = bpf_capable();
+
+       env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token);
+       env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token);
+       env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token);
+       env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token);
+       env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF);
 
        bpf_get_btf_vmlinux();
 
@@ -20629,12 +20634,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
        if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
                env->strict_alignment = false;
 
-       env->allow_ptr_leaks = bpf_allow_ptr_leaks();
-       env->allow_uninit_stack = bpf_allow_uninit_stack();
-       env->bypass_spec_v1 = bpf_bypass_spec_v1();
-       env->bypass_spec_v4 = bpf_bypass_spec_v4();
-       env->bpf_capable = bpf_capable();
-
        if (is_priv)
                env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
        env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS;
index 0bf2a03d8203e522714d12d4be033028abc95327..adcfc2c25754893891b3b013d245a3d9fe55e756 100644 (file)
@@ -8559,7 +8559,7 @@ static bool cg_skb_is_valid_access(int off, int size,
                return false;
        case bpf_ctx_range(struct __sk_buff, data):
        case bpf_ctx_range(struct __sk_buff, data_end):
-               if (!bpf_capable())
+               if (!bpf_token_capable(prog->aux->token, CAP_BPF))
                        return false;
                break;
        }
@@ -8571,7 +8571,7 @@ static bool cg_skb_is_valid_access(int off, int size,
                case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
                        break;
                case bpf_ctx_range(struct __sk_buff, tstamp):
-                       if (!bpf_capable())
+                       if (!bpf_token_capable(prog->aux->token, CAP_BPF))
                                return false;
                        break;
                default: