bpf: Add objcg to bpf_mem_alloc
authorYonghong Song <yonghong.song@linux.dev>
Fri, 22 Dec 2023 03:17:39 +0000 (19:17 -0800)
committerAlexei Starovoitov <ast@kernel.org>
Thu, 4 Jan 2024 05:08:25 +0000 (21:08 -0800)
The objcg is a bpf_mem_alloc-level property, since all bpf_mem_cache's
within a bpf_mem_alloc share the same objcg. This patch makes that
property explicit by storing the objcg in struct bpf_mem_alloc. The
next patch will use it to save and restore the objcg for the percpu
unit allocator.
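
For illustration, here is a minimal user-space C sketch of the same
refactor pattern (not kernel code; objcg_ref, mem_alloc and NCPUS are
hypothetical stand-ins): a reference-counted property shared by every
per-CPU cache is taken once and held at the allocator level, so
teardown can drop the reference through the container instead of
reaching into an arbitrary per-CPU cache.

        #include <stdio.h>

        #define NCPUS 4

        struct objcg_ref {                 /* stand-in for struct obj_cgroup */
                int refcount;
        };

        struct cache {                     /* stand-in for struct bpf_mem_cache */
                struct objcg_ref *objcg;   /* same pointer on every "CPU" */
        };

        struct mem_alloc {                 /* stand-in for struct bpf_mem_alloc */
                struct cache caches[NCPUS];
                struct objcg_ref *objcg;   /* allocator-level copy, as in this patch */
        };

        static void mem_alloc_init(struct mem_alloc *ma, struct objcg_ref *objcg)
        {
                int cpu;

                objcg->refcount++;         /* one reference for the whole allocator */
                ma->objcg = objcg;         /* the property made explicit */
                for (cpu = 0; cpu < NCPUS; cpu++)
                        ma->caches[cpu].objcg = objcg;  /* all caches share it */
        }

        static void mem_alloc_destroy(struct mem_alloc *ma)
        {
                /* drop the reference once, via the allocator, instead of
                 * reaching into an arbitrary per-CPU cache
                 */
                if (ma->objcg)
                        ma->objcg->refcount--;
        }

        int main(void)
        {
                struct objcg_ref cg = { .refcount = 1 };
                struct mem_alloc ma;

                mem_alloc_init(&ma, &cg);
                printf("refcount after init: %d\n", cg.refcount);    /* 2 */
                mem_alloc_destroy(&ma);
                printf("refcount after destroy: %d\n", cg.refcount); /* 1 */
                return 0;
        }

This mirrors the two bpf_mem_alloc_destroy() hunks below, where
obj_cgroup_put() now takes ma->objcg rather than the per-CPU c->objcg.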

Acked-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20231222031739.1288590-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf_mem_alloc.h
kernel/bpf/memalloc.c

index bb1223b2130877d6fee6a2d58926647c354f6cb7..acef8c80859968789ed8972c5f2b1a5f95280fc7 100644 (file)
@@ -11,6 +11,7 @@ struct bpf_mem_caches;
 struct bpf_mem_alloc {
        struct bpf_mem_caches __percpu *caches;
        struct bpf_mem_cache __percpu *cache;
+       struct obj_cgroup *objcg;
        bool percpu;
        struct work_struct work;
 };
index 288ec4a967d08f5d62602ca12a9d82a2cc225f2b..4a21050f0359759b10939e79c41716ecec14b5e0 100644 (file)
@@ -523,6 +523,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
                if (memcg_bpf_enabled())
                        objcg = get_obj_cgroup_from_current();
 #endif
+               ma->objcg = objcg;
                for_each_possible_cpu(cpu) {
                        c = per_cpu_ptr(pc, cpu);
                        c->unit_size = unit_size;
@@ -542,6 +543,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 #ifdef CONFIG_MEMCG_KMEM
        objcg = get_obj_cgroup_from_current();
 #endif
+       ma->objcg = objcg;
        for_each_possible_cpu(cpu) {
                cc = per_cpu_ptr(pcc, cpu);
                for (i = 0; i < NUM_CACHES; i++) {
@@ -691,9 +693,8 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
                        rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
                        rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
                }
-               /* objcg is the same across cpus */
-               if (c->objcg)
-                       obj_cgroup_put(c->objcg);
+               if (ma->objcg)
+                       obj_cgroup_put(ma->objcg);
                destroy_mem_alloc(ma, rcu_in_progress);
        }
        if (ma->caches) {
@@ -709,8 +710,8 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
                                rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
                        }
                }
-               if (c->objcg)
-                       obj_cgroup_put(c->objcg);
+               if (ma->objcg)
+                       obj_cgroup_put(ma->objcg);
                destroy_mem_alloc(ma, rcu_in_progress);
        }
 }