bpf: Re-enable unit_size checking for global per-cpu allocator
author Hou Tao <houtao1@huawei.com>
Fri, 20 Oct 2023 13:31:58 +0000 (21:31 +0800)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 20 Oct 2023 21:15:13 +0000 (14:15 -0700)
With pcpu_alloc_size() in place, check whether the size of the dynamic
per-cpu area matches unit_size.
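For reference, a minimal sketch (not part of this patch) of the
re-enabled check: for per-cpu caches the per-cpu pointer is saved in
the second word of each free object, so pcpu_alloc_size() can recover
the size of its dynamic per-cpu area. percpu_size_matches() is a
hypothetical helper name:

  /* Word 0 of a free object is the llist_node, word 1 holds the
   * per-cpu pointer saved by the per-cpu allocation path.
   */
  static bool percpu_size_matches(struct bpf_mem_cache *c,
                                  struct llist_node *first)
  {
          return pcpu_alloc_size(((void **)first)[1]) == c->unit_size;
  }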

Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20231020133202.4043247-4-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/memalloc.c

index 39ea316c55e79da160d3d2110a48bf0c9c66abb9..776bdf5ffd80bc4790f36cdc9a22d8c75672de87 100644
@@ -491,21 +491,17 @@ static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
        struct llist_node *first;
        unsigned int obj_size;
 
-       /* For per-cpu allocator, the size of free objects in free list doesn't
-        * match with unit_size and now there is no way to get the size of
-        * per-cpu pointer saved in free object, so just skip the checking.
-        */
-       if (c->percpu_size)
-               return 0;
-
        first = c->free_llist.first;
        if (!first)
                return 0;
 
-       obj_size = ksize(first);
+       if (c->percpu_size)
+               obj_size = pcpu_alloc_size(((void **)first)[1]);
+       else
+               obj_size = ksize(first);
        if (obj_size != c->unit_size) {
-               WARN_ONCE(1, "bpf_mem_cache[%u]: unexpected object size %u, expect %u\n",
-                         idx, obj_size, c->unit_size);
+               WARN_ONCE(1, "bpf_mem_cache[%u]: percpu %d, unexpected object size %u, expect %u\n",
+                         idx, c->percpu_size, obj_size, c->unit_size);
                return -EINVAL;
        }
        return 0;
@@ -973,6 +969,12 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
        return !ret ? NULL : ret + LLIST_NODE_SZ;
 }
 
+/* The alignment of the dynamic per-cpu area is 8, so c->unit_size and
+ * the actual size of the dynamic per-cpu area will always match and
+ * there is no need to adjust size_index for per-cpu allocations.
+ * However, for simplicity of implementation, use a unified size_index
+ * for both kmalloc and per-cpu allocations.
+ */
 static __init int bpf_mem_cache_adjust_size(void)
 {
        unsigned int size;
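
As a side note on the comment added above, here is a minimal sketch
(not from this patch) of the invariant it relies on: every bucket size
used by bpf_mem_alloc is a multiple of 8, so a dynamic per-cpu
allocation of unit_size bytes with 8-byte alignment should report
exactly unit_size via pcpu_alloc_size(). The helper name
percpu_unit_alloc_checked() is hypothetical; __alloc_percpu_gfp() and
pcpu_alloc_size() are the real interfaces:

  /* Sketch: allocate a dynamic per-cpu area of unit_size bytes with
   * 8-byte alignment and verify the reported size matches, which is
   * why no per-cpu size_index remapping is needed.
   */
  static void __percpu *percpu_unit_alloc_checked(unsigned int unit_size)
  {
          void __percpu *pptr = __alloc_percpu_gfp(unit_size, 8, GFP_KERNEL);

          if (pptr)
                  WARN_ON_ONCE(pcpu_alloc_size(pptr) != unit_size);
          return pptr;
  }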