bpf: Further refactor alloc_bulk().
author	Alexei Starovoitov <ast@kernel.org>
Thu, 6 Jul 2023 03:34:39 +0000 (20:34 -0700)
committer	Daniel Borkmann <daniel@iogearbox.net>
Wed, 12 Jul 2023 21:45:22 +0000 (23:45 +0200)
In certain scenarios alloc_bulk() might be taking free objects mainly from
the free_by_rcu_ttrace list. In such a case get_memcg() and set_active_memcg()
are redundant, but they still show up in the perf profile. Split the loop and
set memcg only when allocating from slab. There is no performance difference
from this patch alone, but it helps in combination with further patches.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-7-alexei.starovoitov@gmail.com
kernel/bpf/memalloc.c

index 052fc801fb9f44687eb5596412bb5df658385da5..0ee566a7719a90520825e8683377e4d9241d732f 100644
@@ -196,8 +196,6 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
        void *obj;
        int i;
 
-       memcg = get_memcg(c);
-       old_memcg = set_active_memcg(memcg);
        for (i = 0; i < cnt; i++) {
                /*
                 * free_by_rcu_ttrace is only manipulated by irq work refill_work().
@@ -212,16 +210,24 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
                 * numa node and it is not a guarantee.
                 */
                obj = __llist_del_first(&c->free_by_rcu_ttrace);
-               if (!obj) {
-                       /* Allocate, but don't deplete atomic reserves that typical
-                        * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
-                        * will allocate from the current numa node which is what we
-                        * want here.
-                        */
-                       obj = __alloc(c, node, GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT);
-                       if (!obj)
-                               break;
-               }
+               if (!obj)
+                       break;
+               add_obj_to_free_list(c, obj);
+       }
+       if (i >= cnt)
+               return;
+
+       memcg = get_memcg(c);
+       old_memcg = set_active_memcg(memcg);
+       for (; i < cnt; i++) {
+               /* Allocate, but don't deplete atomic reserves that typical
+                * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
+                * will allocate from the current numa node which is what we
+                * want here.
+                */
+               obj = __alloc(c, node, GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT);
+               if (!obj)
+                       break;
                add_obj_to_free_list(c, obj);
        }
        set_active_memcg(old_memcg);
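
For reference, below is a condensed sketch of how alloc_bulk() reads once this
hunk is applied. It is assembled only from the context and added lines above;
the declarations of memcg and old_memcg (struct mem_cgroup pointers, inferred
from set_active_memcg()'s signature) and the elided tail of the function are
assumptions, not part of this patch.

static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;	/* assumed declarations */
	void *obj;
	int i;

	/* First loop: refill from free_by_rcu_ttrace. No memcg switching
	 * is needed for objects taken from this list.
	 */
	for (i = 0; i < cnt; i++) {
		obj = __llist_del_first(&c->free_by_rcu_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		/* Entire request satisfied from free_by_rcu_ttrace;
		 * skip get_memcg()/set_active_memcg() entirely.
		 */
		return;

	/* Second loop: allocate the remainder from slab. Only now pay the
	 * cost of switching the active memcg.
	 */
	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (; i < cnt; i++) {
		obj = __alloc(c, node, GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	set_active_memcg(old_memcg);
	/* remainder of the function elided */
}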