bpf: Fix percpu address space issues
author		Uros Bizjak <ubizjak@gmail.com>
		Sun, 11 Aug 2024 16:13:33 +0000 (18:13 +0200)
committer	Alexei Starovoitov <ast@kernel.org>
		Thu, 22 Aug 2024 15:01:50 +0000 (08:01 -0700)
In arraymap.c:

In bpf_array_map_seq_start() and bpf_array_map_seq_next(), cast the
return values from the __percpu address space to the generic address
space via uintptr_t [1].

Correct the declaration of the pptr pointer in __bpf_array_map_seq_show()
to void __percpu * and cast the value from the generic address space to
the __percpu address space via uintptr_t [1].
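
As a minimal, hypothetical sketch of this round-trip (the function names
below are illustrative and not part of the patch), laundering the pointer
through uintptr_t drops or re-applies the address-space qualifier without
changing the pointer value; the helpers.c change further down uses the
same trick:

  /* Needs <linux/types.h> for uintptr_t and <linux/percpu-defs.h> for __percpu. */
  static void *demo_percpu_to_generic(int __percpu *p)
  {
          /* Drop the __percpu qualifier; no code is generated for this. */
          return (void *)(uintptr_t)p;
  }

  static int __percpu *demo_generic_to_percpu(void *v)
  {
          /* Re-apply the __percpu qualifier on the way back. */
          return (int __percpu *)(uintptr_t)v;
  }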

In hashtab.c:

Assign the return value from bpf_mem_cache_alloc() to a void pointer
and cast it to void __percpu ** (a generic pointer to a percpu void
pointer) before dereferencing.
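
The shape of that change can be sketched in isolation (the box below is
hypothetical, not the actual htab element layout): the allocation is
handled as a plain void *, and the __percpu qualifier is attached only at
the point where the stored percpu pointer is read back:

  /* Illustrative only; needs <linux/slab.h> and <linux/percpu.h>. */
  static void demo_boxed_percpu(void)
  {
          void *box = kmalloc(sizeof(void *), GFP_KERNEL);
          void __percpu *pptr = alloc_percpu(int);

          if (!box || !pptr)
                  goto out;

          *(void __percpu **)box = pptr;  /* store through a qualified slot */
          pptr = *(void __percpu **)box;  /* read back; address space preserved */
  out:
          free_percpu(pptr);
          kfree(box);
  }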

In memalloc.c:

Explicitly declare __percpu variables.

Cast obj to void __percpu **.
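
A brief sketch of the declaration style (the type and names below are
made up, not from the patch); the obj cast itself follows the same
void __percpu ** pattern as the hashtab sketch above:

  struct demo_cache { int unit_size; };            /* hypothetical type */

  static void demo_decl(void)
  {
          struct demo_cache *c;                     /* plain pointer */
          struct demo_cache __percpu *pc;           /* explicitly __percpu */

          pc = alloc_percpu(struct demo_cache);
          if (!pc)
                  return;
          c = get_cpu_ptr(pc);                      /* per-CPU slot, generic space */
          c->unit_size = 0;
          put_cpu_ptr(pc);
          free_percpu(pc);
  }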

In helpers.c:

Cast the pointer arguments in the BPF_CALL_1() and BPF_CALL_2() helpers
from the generic address space to the __percpu address space via
const uintptr_t [1].

Found by GCC's named address space checks.

There were no changes in the resulting object files.

[1] https://sparse.docs.kernel.org/en/latest/annotations.html#address-space-name

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Eduard Zingerman <eddyz87@gmail.com>
Cc: Song Liu <song@kernel.org>
Cc: Yonghong Song <yonghong.song@linux.dev>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Stanislav Fomichev <sdf@fomichev.me>
Cc: Hao Luo <haoluo@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20240811161414.56744-1-ubizjak@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/arraymap.c
kernel/bpf/hashtab.c
kernel/bpf/helpers.c
kernel/bpf/memalloc.c

index 188e3c2effb21c541b4e811cfb83b1bb5f723d45..a43e62e2a8bbe2f549061911477b172388b3329c 100644 (file)
@@ -600,7 +600,7 @@ static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
        array = container_of(map, struct bpf_array, map);
        index = info->index & array->index_mask;
        if (info->percpu_value_buf)
-              return array->pptrs[index];
+               return (void *)(uintptr_t)array->pptrs[index];
        return array_map_elem_ptr(array, index);
 }
 
@@ -619,7 +619,7 @@ static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        array = container_of(map, struct bpf_array, map);
        index = info->index & array->index_mask;
        if (info->percpu_value_buf)
-              return array->pptrs[index];
+               return (void *)(uintptr_t)array->pptrs[index];
        return array_map_elem_ptr(array, index);
 }
 
@@ -632,7 +632,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
        struct bpf_iter_meta meta;
        struct bpf_prog *prog;
        int off = 0, cpu = 0;
-       void __percpu **pptr;
+       void __percpu *pptr;
        u32 size;
 
        meta.seq = seq;
@@ -648,7 +648,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
                if (!info->percpu_value_buf) {
                        ctx.value = v;
                } else {
-                       pptr = v;
+                       pptr = (void __percpu *)(uintptr_t)v;
                        size = array->elem_size;
                        for_each_possible_cpu(cpu) {
                                copy_map_value_long(map, info->percpu_value_buf + off,
index be1f64c20125fe6367b6fa9f4900e0d202c7e7e0..45c7195b65bada4ad055864540d147b84df9dd80 100644 (file)
@@ -1049,14 +1049,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                        pptr = htab_elem_get_ptr(l_new, key_size);
                } else {
                        /* alloc_percpu zero-fills */
-                       pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
-                       if (!pptr) {
+                       void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
+
+                       if (!ptr) {
                                bpf_mem_cache_free(&htab->ma, l_new);
                                l_new = ERR_PTR(-ENOMEM);
                                goto dec_count;
                        }
-                       l_new->ptr_to_pptr = pptr;
-                       pptr = *(void **)pptr;
+                       l_new->ptr_to_pptr = ptr;
+                       pptr = *(void __percpu **)ptr;
                }
 
                pcpu_init_value(htab, pptr, value, onallcpus);
index 12e3aa40b18084f62632274328cc83a334d90abd..ccca6fe0367cea4070eb4763c2e56a2957ec661e 100644 (file)
@@ -715,7 +715,7 @@ BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
        if (cpu >= nr_cpu_ids)
                return (unsigned long)NULL;
 
-       return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
+       return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
 }
 
 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
@@ -728,7 +728,7 @@ const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
 
 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
 {
-       return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
+       return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
 }
 
 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
index dec892ded031e0a2b97f7384d5751e8fd7d71c8c..b3858a76e0b35891342810f9eb6cdcacf9690d9b 100644 (file)
@@ -138,8 +138,8 @@ static struct llist_node notrace *__llist_del_first(struct llist_head *head)
 static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
 {
        if (c->percpu_size) {
-               void **obj = kmalloc_node(c->percpu_size, flags, node);
-               void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
+               void __percpu **obj = kmalloc_node(c->percpu_size, flags, node);
+               void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
 
                if (!obj || !pptr) {
                        free_percpu(pptr);
@@ -253,7 +253,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
 static void free_one(void *obj, bool percpu)
 {
        if (percpu) {
-               free_percpu(((void **)obj)[1]);
+               free_percpu(((void __percpu **)obj)[1]);
                kfree(obj);
                return;
        }
@@ -509,8 +509,8 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
  */
 int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 {
-       struct bpf_mem_caches *cc, __percpu *pcc;
-       struct bpf_mem_cache *c, __percpu *pc;
+       struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
+       struct bpf_mem_cache *c; struct bpf_mem_cache __percpu *pc;
        struct obj_cgroup *objcg = NULL;
        int cpu, i, unit_size, percpu_size = 0;
 
@@ -591,7 +591,7 @@ int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg
 
 int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size)
 {
-       struct bpf_mem_caches *cc, __percpu *pcc;
+       struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
        int cpu, i, unit_size, percpu_size;
        struct obj_cgroup *objcg;
        struct bpf_mem_cache *c;