// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/llist.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 *   CPU_0 buckets
 *   16 32 64 96 128 192 256 512 1024 2048 4096
 *   ...
 *   CPU_N buckets
 *   16 32 64 96 128 192 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from the cache of the current cpu with irqs disabled.
 * Freeing is always done into the bucket of the current cpu as well.
 * irq_work trims extra free elements from the buckets with kfree
 * and refills them with kmalloc, so the global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with extra 8 bytes that contain a
 * struct llist_node.
 */
#define LLIST_NODE_SZ sizeof(struct llist_node)
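/* A rough picture of one allocated unit (unit_size already includes the
 * 8 byte header below): the hidden llist_node links the object into a
 * per-cpu bucket while it is free; unit_alloc() returns the address right
 * after it and unit_free() subtracts LLIST_NODE_SZ to recover the node.
 *
 *   +-------------------+-------------------------------+
 *   | struct llist_node | payload (unit_size - 8 bytes) |
 *   +-------------------+-------------------------------+
 *   ^ object on a free list  ^ pointer handed to the caller
 */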
/* similar to kmalloc, but sizeof == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
static int bpf_mem_cache_idx(size_t size)
{
	if (!size || size > 4096)
		return -1;
	if (size <= 192)
		return size_index[(size - 1) / 8] - 1;
	return fls(size - 1) - 2;
}
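/* Worked examples of the mapping above, assuming the sizes[] table used in
 * bpf_mem_alloc_init() ({96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096}):
 *   size 200:  200 > 192, fls(199) - 2 == 8 - 2 == 6  -> 256 byte bucket
 *   size 4096: fls(4095) - 2 == 12 - 2 == 10          -> 4096 byte bucket
 *   size 5000: larger than 4096, so -1 (no bucket fits)
 */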
struct bpf_mem_cache {
	/* per-cpu list of free objects of size 'unit_size'.
	 * All accesses are done with interrupts disabled and 'active' counter
	 * protection with __llist_add() and __llist_del_first().
	 */
	struct llist_head free_llist;

	/* Operations on the free_list from unit_alloc/unit_free/bpf_mem_refill
	 * are sequenced by per-cpu 'active' counter. But unit_free() cannot
	 * fail. When 'active' is busy the unit_free() will add an object to
	 * free_llist_extra.
	 */
	struct llist_head free_llist_extra;

	struct irq_work refill_work;
	struct obj_cgroup *objcg;
	/* count of objects in free_llist */
	int free_cnt;
	int low_watermark, high_watermark, batch;

	struct llist_head free_by_rcu;
	struct llist_head waiting_for_gp;
	atomic_t call_rcu_in_progress;
struct bpf_mem_caches {
	struct bpf_mem_cache cache[NUM_CACHES];
static struct llist_node notrace *__llist_del_first(struct llist_head *head)
	struct llist_node *entry, *next;
static void *__alloc(struct bpf_mem_cache *c, int node)
	/* Allocate, but don't deplete atomic reserves that typical
	 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
	 * will allocate from the current numa node which is what we
	 * want here.
	 */
	gfp_t flags = GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT;

	if (c->percpu_size) {
		void **obj = kmalloc_node(c->percpu_size, flags, node);
		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

	return kmalloc_node(c->unit_size, flags, node);
static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
#ifdef CONFIG_MEMCG_KMEM
	return get_mem_cgroup_from_objcg(c->objcg);
#endif
	return root_mem_cgroup;
/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
	struct mem_cgroup *memcg = NULL, *old_memcg;

	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (i = 0; i < cnt; i++) {
		obj = __alloc(c, node);
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			/* In RT irq_work runs in a per-cpu kthread, so disable
			 * interrupts to avoid preemption and interrupts, and to
			 * reduce the chance of a bpf prog executing on this cpu
			 * while the 'active' counter is busy.
			 */
			local_irq_save(flags);
		/* alloc_bulk runs from irq_work which will not preempt a bpf
		 * program that does unit_alloc/unit_free since IRQs are
		 * disabled there. There is no race to increment the 'active'
		 * counter. It protects free_llist from corruption in case an
		 * NMI bpf prog preempted this loop.
		 */
		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
		__llist_add(obj, &c->free_llist);
		local_dec(&c->active);
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			local_irq_restore(flags);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
static void free_one(struct bpf_mem_cache *c, void *obj)
	if (c->percpu_size) {
		free_percpu(((void **)obj)[1]);
static void __free_rcu(struct rcu_head *head)
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
	struct llist_node *llnode = llist_del_all(&c->waiting_for_gp);
	struct llist_node *pos, *t;

	llist_for_each_safe(pos, t, llnode)
		free_one(c, pos);
	atomic_set(&c->call_rcu_in_progress, 0);
static void __free_rcu_tasks_trace(struct rcu_head *head)
	/* If RCU Tasks Trace grace period implies RCU grace period,
	 * there is no need to invoke call_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		__free_rcu(head);
	else
		call_rcu(head, __free_rcu);
static void enque_to_free(struct bpf_mem_cache *c, void *obj)
	struct llist_node *llnode = obj;

	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
	 * Nothing races to add to free_by_rcu list.
	 */
	__llist_add(llnode, &c->free_by_rcu);
static void do_call_rcu(struct bpf_mem_cache *c)
	struct llist_node *llnode, *t;

	if (atomic_xchg(&c->call_rcu_in_progress, 1))
		return;

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
		/* There is no concurrent __llist_add(waiting_for_gp) access.
		 * It doesn't race with llist_del_all either.
		 * But there could be two concurrent llist_del_all(waiting_for_gp):
		 * from __free_rcu() and from drain_mem_cache().
		 */
		__llist_add(llnode, &c->waiting_for_gp);
	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * If RCU Tasks Trace grace period implies RCU grace period, free
	 * these elements directly, else use call_rcu() to wait for normal
	 * progs to finish and finally do free_one() on each element.
	 */
	call_rcu_tasks_trace(&c->rcu, __free_rcu_tasks_trace);
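/* The resulting free path, summarizing the pieces above:
 *
 *   unit_free() -> free_llist / free_llist_extra (per-cpu buckets)
 *     -> free_bulk() from irq_work -> enque_to_free() -> free_by_rcu
 *       -> do_call_rcu(): free_by_rcu moves to waiting_for_gp
 *         -> call_rcu_tasks_trace() -> __free_rcu_tasks_trace()
 *           -> __free_rcu(), directly or via call_rcu() -> free_one()
 */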
static void free_bulk(struct bpf_mem_cache *c)
	struct llist_node *llnode, *t;

	do {
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			local_irq_save(flags);
		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
		llnode = __llist_del_first(&c->free_llist);
		local_dec(&c->active);
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			local_irq_restore(flags);
		enque_to_free(c, llnode);
	} while (cnt > (c->high_watermark + c->low_watermark) / 2);

	/* and drain free_llist_extra */
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		enque_to_free(c, llnode);
static void bpf_mem_refill(struct irq_work *work)
	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);

	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
	cnt = c->free_cnt;
	if (cnt < c->low_watermark)
		/* irq_work runs on this cpu and kmalloc will allocate
		 * from the current numa node which is what we want here.
		 */
		alloc_bulk(c, c->batch, NUMA_NO_NODE);
	else if (cnt > c->high_watermark)
		free_bulk(c);

static void notrace irq_work_raise(struct bpf_mem_cache *c)
	irq_work_queue(&c->refill_work);
/* For the typical bpf map case that uses bpf_mem_cache_alloc and a single bucket
 * the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes and
 * assuming (low_mark + high_mark) / 2 as an average number of elements per
 * bucket and all buckets are used the total amount of memory in freelists
 * on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~ 116 Kbyte using the heuristic below.
 * An initialized but unused bpf allocator (not a bpf map specific one) will
 * consume ~ 11 Kbyte per cpu.
 * Typical case will be between 11K and 116K, closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 */
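/* Where the numbers above come from, using the prefill_mem_cache() heuristic
 * below: (low_mark + high_mark) / 2 per bucket gives
 *   unit_size <= 256: (32 + 96) / 2 == 64 elements
 *   512: (16 + 48) / 2 == 32    1024: (8 + 24) / 2 == 16
 *   2048: (4 + 12) / 2 == 8     4096: (2 + 6) / 2 == 4
 * Fully used: 64 * (16 + 32 + 64 + 96 + 128 + 192 + 256) + 32 * 512 +
 * 16 * 1024 + 8 * 2048 + 4 * 4096 == 115712 bytes, i.e. the ~116 Kbyte above.
 * Prefill only (4 elements for the small buckets, 1 for the rest):
 * 4 * 784 + 512 + 1024 + 2048 + 4096 == 10816 bytes, i.e. the ~11 Kbyte above.
 */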
static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
	init_irq_work(&c->refill_work, bpf_mem_refill);
	if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	} else {
		/* When page_size == 4k, order-0 cache will have low_mark == 2
		 * and high_mark == 6 with batch alloc of 3 individual pages at
		 * a time.
		 * 8k allocs and above low == 1, high == 3, batch == 1.
		 */
		c->low_watermark = max(32 * 256 / c->unit_size, 1);
		c->high_watermark = max(96 * 256 / c->unit_size, 3);
	}
	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
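	/* Concrete values the heuristic above produces (unit_size -> low/high/batch):
	 *   <= 256: 32 / 96 / 48     512: 16 / 48 / 24     1024: 8 / 24 / 12
	 *   2048:    4 / 12 /  6    4096:  2 /  6 /  3
	 */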
	/* To avoid consuming memory assume that the 1st run of a bpf
	 * prog won't be doing more than 4 map_update_elem from an
	 * irq disabled region.
	 */
	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
/* When size != 0 create a bpf_mem_cache for each cpu.
 * This is the typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is the bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
	struct bpf_mem_caches *cc, __percpu *pcc;
	struct bpf_mem_cache *c, __percpu *pc;
	struct obj_cgroup *objcg = NULL;
	int cpu, i, unit_size, percpu_size = 0;

	pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);

	/* room for llist_node and per-cpu pointer */
	percpu_size = LLIST_NODE_SZ + sizeof(void *);

	size += LLIST_NODE_SZ; /* room for llist_node */

#ifdef CONFIG_MEMCG_KMEM
	objcg = get_obj_cgroup_from_current();
#endif
	for_each_possible_cpu(cpu) {
		c = per_cpu_ptr(pc, cpu);
		c->unit_size = unit_size;
		c->percpu_size = percpu_size;
		prefill_mem_cache(c, cpu);
	}

	/* size == 0 && percpu is an invalid combination */
	if (WARN_ON_ONCE(percpu))
		return -EINVAL;

	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);

#ifdef CONFIG_MEMCG_KMEM
	objcg = get_obj_cgroup_from_current();
#endif
	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			c->unit_size = sizes[i];
			prefill_mem_cache(c, cpu);
		}
	}
static void drain_mem_cache(struct bpf_mem_cache *c)
	struct llist_node *llnode, *t;

	/* No progs are using this bpf_mem_cache, but htab_map_free() called
	 * bpf_mem_cache_free() for all remaining elements and they can be in
	 * free_by_rcu or in waiting_for_gp lists, so drain those lists now.
	 *
	 * Except for waiting_for_gp list, there are no concurrent operations
	 * on these lists, so it is safe to use __llist_del_all().
	 */
	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
		free_one(c, llnode);
	llist_for_each_safe(llnode, t, llist_del_all(&c->waiting_for_gp))
		free_one(c, llnode);
	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist))
		free_one(c, llnode);
	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_llist_extra))
		free_one(c, llnode);
static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
	free_percpu(ma->cache);
	free_percpu(ma->caches);
static void free_mem_alloc(struct bpf_mem_alloc *ma)
	/* The waiting_for_gp lists were drained, but __free_rcu might still
	 * be executing. Wait for it now before freeing the per-cpu caches.
	 */
	rcu_barrier_tasks_trace();
	free_mem_alloc_no_barrier(ma);

static void free_mem_alloc_deferred(struct work_struct *work)
	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);
static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path. No callbacks are pending, hence no need to do
		 * rcu_barrier-s.
		 */
		free_mem_alloc_no_barrier(ma);
		return;
	}

	copy = kmalloc(sizeof(*ma), GFP_KERNEL);
	if (!copy) {
		/* Slow path with inline barrier-s */
		free_mem_alloc(ma);
		return;
	}

	/* Defer barriers into worker to let the rest of the map memory be freed */
	copy->cache = ma->cache;
	copy->caches = ma->caches;
	INIT_WORK(&copy->work, free_mem_alloc_deferred);
	queue_work(system_unbound_wq, &copy->work);
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i, rcu_in_progress;

	for_each_possible_cpu(cpu) {
		c = per_cpu_ptr(ma->cache, cpu);
		/*
		 * refill_work may be unfinished for a PREEMPT_RT kernel,
		 * in which irq work is invoked in a per-CPU RT thread.
		 * It is also possible for a kernel with
		 * arch_irq_work_has_interrupt() being false, where irq
		 * work is invoked in a timer interrupt. So wait for
		 * the completion of the irq work to ease the handling of
		 * concurrency.
		 */
		irq_work_sync(&c->refill_work);
		rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
	}
	/* objcg is the same across cpus */
	obj_cgroup_put(c->objcg);
	destroy_mem_alloc(ma, rcu_in_progress);

	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(ma->caches, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			irq_work_sync(&c->refill_work);
			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
		}
	}
	obj_cgroup_put(c->objcg);
	destroy_mem_alloc(ma, rcu_in_progress);
/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
	struct llist_node *llnode = NULL;

	/* Disable irqs to prevent the following race for majority of prog types:
	 * prog_A
	 *   bpf_mem_alloc
	 *      preemption or irq -> prog_B
	 *        bpf_mem_alloc
	 * but prog_B could be a perf_event NMI prog.
	 * Use per-cpu 'active' counter to order free_list access between
	 * unit_alloc/unit_free/bpf_mem_refill.
	 */
	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		llnode = __llist_del_first(&c->free_llist);
	local_dec(&c->active);
	local_irq_restore(flags);

	if (cnt < c->low_watermark)
		irq_work_raise(c);
/* Though the 'ptr' object could have been allocated on a different cpu,
 * add it to the free_llist of the current cpu.
 * Let the kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;

	BUILD_BUG_ON(LLIST_NODE_SZ > 8);

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		__llist_add(llnode, &c->free_llist);
	} else {
		/* unit_free() cannot fail. Therefore add an object to the atomic
		 * llist. free_bulk() will drain it. Though free_llist_extra is
		 * a per-cpu list we have to use atomic llist_add() here, since
		 * it can also be interrupted by a bpf NMI prog that does another
		 * unit_free() into the same free_llist_extra.
		 */
		llist_add(llnode, &c->free_llist_extra);
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	if (cnt > c->high_watermark)
		/* free a few objects from the current cpu into the global kmalloc pool */
		irq_work_raise(c);
/* Called from a BPF program or from the sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
	if (!size)
		return ZERO_SIZE_PTR;

	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);

	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
	return !ret ? NULL : ret + LLIST_NODE_SZ;

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));

	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
	ret = unit_alloc(this_cpu_ptr(ma->cache));
	return !ret ? NULL : ret + LLIST_NODE_SZ;

void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
	unit_free(this_cpu_ptr(ma->cache), ptr);
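/* A minimal usage sketch (hypothetical caller, not part of this file),
 * showing the two init modes described above bpf_mem_alloc_init().
 * Assumes migration is disabled around the alloc/free calls, as it is for
 * bpf programs and the bpf syscall.
 */
static int __maybe_unused bpf_mem_alloc_usage_sketch(void)
{
	struct bpf_mem_alloc fixed = {}, any = {};
	void *elem, *buf;
	int err;

	/* size != 0: one bucket per cpu, e.g. fixed-size hash map elements */
	err = bpf_mem_alloc_init(&fixed, 64, false);
	if (err)
		return err;
	elem = bpf_mem_cache_alloc(&fixed);	/* works in any context, incl. NMI */
	if (elem)
		bpf_mem_cache_free(&fixed, elem);	/* goes into this cpu's bucket */
	bpf_mem_alloc_destroy(&fixed);

	/* size == 0: 11 kmalloc-style buckets, e.g. for bpf_dynptr/bpf_kptr */
	err = bpf_mem_alloc_init(&any, 0, false);
	if (err)
		return err;
	buf = bpf_mem_alloc(&any, 200);	/* served from the 256 byte bucket */
	if (buf)
		bpf_mem_free(&any, buf);
	bpf_mem_alloc_destroy(&any);
	return 0;
}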