// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/mm.h>
#include <linux/llist.h>
#include <linux/bpf.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <asm/local.h>

/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 * CPU_0 buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 * ...
 * CPU_N buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from the cache of the current cpu with irqs disabled.
 * Freeing is always done into the bucket of the current cpu as well.
 * irq_work trims extra free elements from buckets with kfree
 * and refills them with kmalloc, so the global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with an extra 8 bytes that contain
 * struct llist_node.
 */
#define LLIST_NODE_SZ sizeof(struct llist_node)

/* similar to kmalloc, but sizeof == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	3,	/* 16 */
	4,	/* 24 */
	4,	/* 32 */
	5,	/* 40 */
	5,	/* 48 */
	5,	/* 56 */
	5,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	6,	/* 104 */
	6,	/* 112 */
	6,	/* 120 */
	6,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

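/* Map an allocation size to a bucket index: size_index[] covers sizes up to
 * 192 bytes, power-of-two rounding covers 256 through 4096. Returns -1 for
 * size 0 or anything larger than the biggest bucket.
 */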
static int bpf_mem_cache_idx(size_t size)
{
	if (!size || size > 4096)
		return -1;

	if (size <= 192)
		return size_index[(size - 1) / 8] - 1;

	return fls(size - 1) - 2;
}

#define NUM_CACHES 11

struct bpf_mem_cache {
	/* per-cpu list of free objects of size 'unit_size'.
	 * All accesses are done with interrupts disabled and 'active' counter
	 * protection with __llist_add() and __llist_del_first().
	 */
	struct llist_head free_llist;
	local_t active;

	/* Operations on the free_llist from unit_alloc/unit_free/bpf_mem_refill
	 * are sequenced by per-cpu 'active' counter. But unit_free() cannot
	 * fail. When 'active' is busy the unit_free() will add an object to
	 * free_llist_extra.
	 */
	struct llist_head free_llist_extra;

	struct irq_work refill_work;
	struct obj_cgroup *objcg;
	int unit_size;
	/* count of objects in free_llist */
	int free_cnt;
	int low_watermark, high_watermark, batch;
	int percpu_size;
	bool draining;
	struct bpf_mem_cache *tgt;

	/* list of objects to be freed after RCU GP */
	struct llist_head free_by_rcu;
	struct llist_node *free_by_rcu_tail;
	struct llist_head waiting_for_gp;
	struct llist_node *waiting_for_gp_tail;
	struct rcu_head rcu;
	atomic_t call_rcu_in_progress;
	struct llist_head free_llist_extra_rcu;

	/* list of objects to be freed after RCU tasks trace GP */
	struct llist_head free_by_rcu_ttrace;
	struct llist_head waiting_for_gp_ttrace;
	struct rcu_head rcu_ttrace;
	atomic_t call_rcu_ttrace_in_progress;
};

struct bpf_mem_caches {
	struct bpf_mem_cache cache[NUM_CACHES];
};

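/* Non-atomic variant of llist_del_first(). Callers rely on interrupts being
 * disabled and the per-cpu 'active' counter, so no cmpxchg is needed.
 */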
static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *next;

	entry = head->first;
	if (!entry)
		return NULL;
	next = entry->next;
	head->first = next;
	return entry;
}

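/* Allocate one unit for the cache. For per-cpu caches the unit is a small
 * kmalloc-ed array where slot 0 serves as the llist_node and slot 1 points
 * to the per-cpu area obtained from __alloc_percpu_gfp().
 */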
static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
{
	if (c->percpu_size) {
		void **obj = kmalloc_node(c->percpu_size, flags, node);
		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

		if (!obj || !pptr) {
			free_percpu(pptr);
			kfree(obj);
			return NULL;
		}
		obj[1] = pptr;
		return obj;
	}

	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
}

static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG_KMEM
	if (c->objcg)
		return get_mem_cgroup_from_objcg(c->objcg);
#endif

#ifdef CONFIG_MEMCG
	return root_mem_cgroup;
#else
	return NULL;
#endif
}

static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		/* In RT irq_work runs in a per-cpu kthread, so disable
		 * interrupts to avoid being preempted or interrupted and
		 * to reduce the chance of a bpf prog executing on this cpu
		 * when the active counter is busy.
		 */
		local_irq_save(*flags);
	/* alloc_bulk runs from irq_work which will not preempt a bpf
	 * program that does unit_alloc/unit_free since IRQs are
	 * disabled there. There is no race to increment 'active'
	 * counter. It protects free_llist from corruption in case NMI
	 * bpf prog preempted this loop.
	 */
	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
}

static void dec_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	local_dec(&c->active);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(*flags);
}

static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
{
	unsigned long flags;

	inc_active(c, &flags);
	__llist_add(obj, &c->free_llist);
	c->free_cnt++;
	dec_active(c, &flags);
}

/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	gfp_t gfp;
	void *obj;
	int i;

	gfp = __GFP_NOWARN | __GFP_ACCOUNT;
	gfp |= atomic ? GFP_NOWAIT : GFP_KERNEL;

	for (i = 0; i < cnt; i++) {
		/*
		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
		 * done only by one CPU == current CPU. Other CPUs might
		 * llist_add() and llist_del_all() in parallel.
		 */
		obj = llist_del_first(&c->free_by_rcu_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	for (; i < cnt; i++) {
		obj = llist_del_first(&c->waiting_for_gp_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (; i < cnt; i++) {
		/* Allocate, but don't deplete atomic reserves that typical
		 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
		 * will allocate from the current numa node which is what we
		 * want here.
		 */
		obj = __alloc(c, node, gfp);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}

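/* Return one object (and its per-cpu area, if any) to the kernel allocators.
 * free_all() walks an already detached llist and returns how many objects
 * were freed.
 */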
static void free_one(void *obj, bool percpu)
{
	if (percpu) {
		free_percpu(((void **)obj)[1]);
		kfree(obj);
		return;
	}

	kfree(obj);
}

static int free_all(struct llist_node *llnode, bool percpu)
{
	struct llist_node *pos, *t;
	int cnt = 0;

	llist_for_each_safe(pos, t, llnode) {
		free_one(pos, percpu);
		cnt++;
	}
	return cnt;
}

static void __free_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);

	free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
	atomic_set(&c->call_rcu_ttrace_in_progress, 0);
}

static void __free_rcu_tasks_trace(struct rcu_head *head)
{
	/* If RCU Tasks Trace grace period implies RCU grace period,
	 * there is no need to invoke call_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		__free_rcu(head);
	else
		call_rcu(head, __free_rcu);
}

static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
	struct llist_node *llnode = obj;

	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
	 * Nothing races to add to free_by_rcu_ttrace list.
	 */
	llist_add(llnode, &c->free_by_rcu_ttrace);
}

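/* Move everything from free_by_rcu_ttrace to waiting_for_gp_ttrace and start
 * a tasks-trace RCU callback to free it, unless a previous callback is still
 * in flight. When the cache is draining, free the objects right away instead.
 */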
static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
		if (unlikely(READ_ONCE(c->draining))) {
			llnode = llist_del_all(&c->free_by_rcu_ttrace);
			free_all(llnode, !!c->percpu_size);
		}
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
		llist_add(llnode, &c->waiting_for_gp_ttrace);

	if (unlikely(READ_ONCE(c->draining))) {
		__free_rcu(&c->rcu_ttrace);
		return;
	}

	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * If RCU Tasks Trace grace period implies RCU grace period, free
	 * these elements directly, else use call_rcu() to wait for normal
	 * progs to finish and finally do free_one() on each element.
	 */
	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
}

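/* Trim the per-cpu free_llist down to roughly the middle of the watermarks
 * and hand the trimmed objects to c->tgt for deferred freeing. Also drains
 * free_llist_extra.
 */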
static void free_bulk(struct bpf_mem_cache *c)
{
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode, *t;
	unsigned long flags;
	int cnt;

	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
	WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);

	do {
		inc_active(c, &flags);
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
		else
			cnt = 0;
		dec_active(c, &flags);
		if (llnode)
			enque_to_free(tgt, llnode);
	} while (cnt > (c->high_watermark + c->low_watermark) / 2);

	/* and drain free_llist_extra */
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		enque_to_free(tgt, llnode);
	do_call_rcu_ttrace(tgt);
}

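/* Regular RCU callback: objects on waiting_for_gp have survived one RCU
 * grace period, so splice them onto the target cache's free_by_rcu_ttrace
 * list and kick off the tasks-trace RCU stage.
 */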
static void __free_by_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode;

	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
	WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);

	llnode = llist_del_all(&c->waiting_for_gp);
	if (!llnode)
		goto out;

	llist_add_batch(llnode, c->waiting_for_gp_tail, &tgt->free_by_rcu_ttrace);

	/* Objects went through regular RCU GP. Send them to RCU tasks trace */
	do_call_rcu_ttrace(tgt);
out:
	atomic_set(&c->call_rcu_in_progress, 0);
}

static void check_free_by_rcu(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;
	unsigned long flags;

	/* drain free_llist_extra_rcu */
	if (unlikely(!llist_empty(&c->free_llist_extra_rcu))) {
		inc_active(c, &flags);
		llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu))
			if (__llist_add(llnode, &c->free_by_rcu))
				c->free_by_rcu_tail = llnode;
		dec_active(c, &flags);
	}

	if (llist_empty(&c->free_by_rcu))
		return;

	if (atomic_xchg(&c->call_rcu_in_progress, 1)) {
		/*
		 * Instead of kmalloc-ing new rcu_head and triggering 10k
		 * call_rcu() to hit rcutree.qhimark and force RCU to notice
		 * the overload just ask RCU to hurry up. There could be many
		 * objects in free_by_rcu list.
		 * This hint reduces memory consumption for an artificial
		 * benchmark from 2 Gbyte to 150 Mbyte.
		 */
		rcu_request_urgent_qs_task(current);
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));

	inc_active(c, &flags);
	WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu));
	c->waiting_for_gp_tail = c->free_by_rcu_tail;
	dec_active(c, &flags);

	if (unlikely(READ_ONCE(c->draining))) {
		free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
		atomic_set(&c->call_rcu_in_progress, 0);
	} else {
		call_rcu_hurry(&c->rcu, __free_by_rcu);
	}
}

static void bpf_mem_refill(struct irq_work *work)
{
	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
	int cnt;

	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
	cnt = c->free_cnt;
	if (cnt < c->low_watermark)
		/* irq_work runs on this cpu and kmalloc will allocate
		 * from the current numa node which is what we want here.
		 */
		alloc_bulk(c, c->batch, NUMA_NO_NODE, true);
	else if (cnt > c->high_watermark)
		free_bulk(c);

	check_free_by_rcu(c);
}

static void notrace irq_work_raise(struct bpf_mem_cache *c)
{
	irq_work_queue(&c->refill_work);
}

/* For the typical bpf map case that uses bpf_mem_cache_alloc and a single
 * bucket the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes,
 * assuming (low_mark + high_mark) / 2 as an average number of elements per
 * bucket and that all buckets are used, the total amount of memory in
 * freelists on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~116 Kbyte using the heuristic below.
 * An initialized but unused bpf allocator (not the bpf map specific one)
 * will consume ~11 Kbyte per cpu.
 * The typical case will be between 11K and 116K, closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 */
static void init_refill_work(struct bpf_mem_cache *c)
{
	init_irq_work(&c->refill_work, bpf_mem_refill);
	if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	} else {
		/* When page_size == 4k, order-0 cache will have low_mark == 2
		 * and high_mark == 6 with batch alloc of 3 individual pages at
		 * a time.
		 * 8k allocs and above low == 1, high == 3, batch == 1.
		 */
		c->low_watermark = max(32 * 256 / c->unit_size, 1);
		c->high_watermark = max(96 * 256 / c->unit_size, 3);
	}
	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
}

static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
	/* To avoid consuming memory assume that 1st run of bpf
	 * prog won't be doing more than 4 map_update_elem from
	 * irq disabled region
	 */
	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
}

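/* Sanity check after prefill: the object size reported by the underlying
 * allocator must match the bucket's unit_size, otherwise freeing through
 * bpf_mem_free() would pick a different bucket than the one that allocated
 * the object.
 */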
static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
{
	struct llist_node *first;
	unsigned int obj_size;

	first = c->free_llist.first;
	if (!first)
		return 0;

	if (c->percpu_size)
		obj_size = pcpu_alloc_size(((void **)first)[1]);
	else
		obj_size = ksize(first);
	if (obj_size != c->unit_size) {
		WARN_ONCE(1, "bpf_mem_cache[%u]: percpu %d, unexpected object size %u, expect %u\n",
			  idx, c->percpu_size, obj_size, c->unit_size);
		return -EINVAL;
	}
	return 0;
}

/* When size != 0 allocate a single bpf_mem_cache for each cpu.
 * This is the typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is the bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
	int cpu, i, err, unit_size, percpu_size = 0;
	struct bpf_mem_caches *cc, __percpu *pcc;
	struct bpf_mem_cache *c, __percpu *pc;
	struct obj_cgroup *objcg = NULL;

	/* room for llist_node and per-cpu pointer */
	if (percpu)
		percpu_size = LLIST_NODE_SZ + sizeof(void *);
	ma->percpu = percpu;

	if (size) {
		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
		if (!pc)
			return -ENOMEM;

		if (!percpu)
			size += LLIST_NODE_SZ; /* room for llist_node */
		unit_size = size;

#ifdef CONFIG_MEMCG_KMEM
		if (memcg_bpf_enabled())
			objcg = get_obj_cgroup_from_current();
#endif
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(pc, cpu);
			c->unit_size = unit_size;
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			c->tgt = c;
			init_refill_work(c);
			prefill_mem_cache(c, cpu);
		}
		ma->cache = pc;
		return 0;
	}

	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;
	err = 0;
#ifdef CONFIG_MEMCG_KMEM
	objcg = get_obj_cgroup_from_current();
#endif
	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			c = &cc->cache[i];
			c->unit_size = sizes[i];
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			c->tgt = c;

			init_refill_work(c);
			/* Another bpf_mem_cache will be used when allocating
			 * c->unit_size in bpf_mem_alloc(), so don't prefill
			 * this bpf_mem_cache because these free objects will
			 * never be used.
			 */
			if (i != bpf_mem_cache_idx(c->unit_size))
				continue;
			prefill_mem_cache(c, cpu);
			err = check_obj_size(c, i);
			if (err)
				goto out;
		}
	}

out:
	ma->caches = pcc;
	/* refill_work is either zeroed or initialized, so it is safe to
	 * call irq_work_sync().
	 */
	if (err)
		bpf_mem_alloc_destroy(ma);
	return err;
}

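/* Illustrative usage sketch added for this annotated listing; it is not part
 * of the original file. It shows the fixed-size (size != 0) path that a bpf
 * hash map would take. The function name and the __maybe_unused wrapper are
 * purely hypothetical; error handling is kept minimal for brevity.
 */
static int __maybe_unused bpf_mem_alloc_usage_sketch(void)
{
	struct bpf_mem_alloc ma = {};
	void *elem;
	int err;

	/* one bucket per cpu, sized for 64-byte elements, not per-cpu data */
	err = bpf_mem_alloc_init(&ma, 64, false);
	if (err)
		return err;

	/* safe in any context; may return NULL when the free_llist is empty */
	elem = bpf_mem_cache_alloc(&ma);
	if (elem)
		/* goes into the current cpu's free_llist; kfree happens later */
		bpf_mem_cache_free(&ma, elem);

	bpf_mem_alloc_destroy(&ma);
	return 0;
}
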
static void drain_mem_cache(struct bpf_mem_cache *c)
{
	bool percpu = !!c->percpu_size;

	/* No progs are using this bpf_mem_cache, but htab_map_free() called
	 * bpf_mem_cache_free() for all remaining elements and they can be in
	 * free_by_rcu_ttrace or in waiting_for_gp_ttrace lists, so drain those lists now.
	 *
	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
	 * on these lists, so it is safe to use __llist_del_all().
	 */
	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
	free_all(__llist_del_all(&c->free_llist), percpu);
	free_all(__llist_del_all(&c->free_llist_extra), percpu);
	free_all(__llist_del_all(&c->free_by_rcu), percpu);
	free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu);
	free_all(llist_del_all(&c->waiting_for_gp), percpu);
}

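/* All llists must be empty by the time the allocator is torn down; anything
 * still queued here would be leaked memory, so warn about it.
 */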
static void check_mem_cache(struct bpf_mem_cache *c)
{
	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu_ttrace));
	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	WARN_ON_ONCE(!llist_empty(&c->free_llist));
	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra));
	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu));
	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra_rcu));
	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
}

static void check_leaked_objs(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i;

	if (ma->cache) {
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			check_mem_cache(c);
		}
	}
	if (ma->caches) {
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				check_mem_cache(c);
			}
		}
	}
}

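/* Free the per-cpu caches themselves. Callers must guarantee that no RCU
 * callback can still touch them: either none were pending or the barriers
 * in free_mem_alloc() have already run.
 */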
static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
	check_leaked_objs(ma);
	free_percpu(ma->cache);
	free_percpu(ma->caches);
	ma->cache = NULL;
	ma->caches = NULL;
}

static void free_mem_alloc(struct bpf_mem_alloc *ma)
{
	/* waiting_for_gp[_ttrace] lists were drained, but RCU callbacks
	 * might still execute. Wait for them.
	 *
	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
	 * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
	 * so if call_rcu(head, __free_rcu) is skipped due to
	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
	 * using rcu_trace_implies_rcu_gp() as well.
	 */
	rcu_barrier(); /* wait for __free_by_rcu */
	rcu_barrier_tasks_trace(); /* wait for __free_rcu */
	if (!rcu_trace_implies_rcu_gp())
		rcu_barrier();
	free_mem_alloc_no_barrier(ma);
}

static void free_mem_alloc_deferred(struct work_struct *work)
{
	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);

	free_mem_alloc(ma);
	kfree(ma);
}

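/* Tear the allocator down. When RCU callbacks are still in flight, defer the
 * expensive rcu_barrier-s to a workqueue on a kmemdup-ed copy of 'ma' so the
 * caller does not have to wait for them.
 */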
static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
{
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path. No callbacks are pending, hence no need to do
		 * rcu_barrier-s.
		 */
		free_mem_alloc_no_barrier(ma);
		return;
	}

	copy = kmemdup(ma, sizeof(*ma), GFP_KERNEL);
	if (!copy) {
		/* Slow path with inline barrier-s */
		free_mem_alloc(ma);
		return;
	}

	/* Defer barriers into worker to let the rest of map memory be freed */
	memset(ma, 0, sizeof(*ma));
	INIT_WORK(&copy->work, free_mem_alloc_deferred);
	queue_work(system_unbound_wq, &copy->work);
}

void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i, rcu_in_progress;

	if (ma->cache) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			WRITE_ONCE(c->draining, true);
			irq_work_sync(&c->refill_work);
			drain_mem_cache(c);
			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
		}
		/* objcg is the same across cpus */
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
	if (ma->caches) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				WRITE_ONCE(c->draining, true);
				irq_work_sync(&c->refill_work);
				drain_mem_cache(c);
				rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
				rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
			}
		}
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
}

/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
{
	struct llist_node *llnode = NULL;
	unsigned long flags;
	int cnt = 0;

	/* Disable irqs to prevent the following race for majority of prog types:
	 * prog_A
	 *   bpf_mem_alloc
	 *      preemption or irq -> prog_B
	 *        bpf_mem_alloc
	 *
	 * but prog_B could be a perf_event NMI prog.
	 * Use per-cpu 'active' counter to order free_llist access between
	 * unit_alloc/unit_free/bpf_mem_refill.
	 */
	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		llnode = __llist_del_first(&c->free_llist);
		if (llnode) {
			cnt = --c->free_cnt;
			*(struct bpf_mem_cache **)llnode = c;
		}
	}
	local_dec(&c->active);

	WARN_ON(cnt < 0);

	if (cnt < c->low_watermark)
		irq_work_raise(c);
	/* Enable IRQ after the enqueue of irq work completes, so irq work
	 * will run after IRQ is enabled and free_llist may be refilled by
	 * irq work before another task preempts the current task.
	 */
	local_irq_restore(flags);

	return llnode;
}

/* Though 'ptr' object could have been allocated on a different cpu
 * add it to the free_llist of the current cpu.
 * Let kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;
	int cnt = 0;

	BUILD_BUG_ON(LLIST_NODE_SZ > 8);

	/*
	 * Remember the bpf_mem_cache that allocated this object.
	 * The hint is not accurate.
	 */
	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		__llist_add(llnode, &c->free_llist);
		cnt = ++c->free_cnt;
	} else {
		/* unit_free() cannot fail. Therefore add an object to atomic
		 * llist. free_bulk() will drain it. Though free_llist_extra is
		 * a per-cpu list we have to use atomic llist_add here, since
		 * it also can be interrupted by bpf nmi prog that does another
		 * unit_free() into the same free_llist_extra.
		 */
		llist_add(llnode, &c->free_llist_extra);
	}
	local_dec(&c->active);

	if (cnt > c->high_watermark)
		/* free a few objects from current cpu into global kmalloc pool */
		irq_work_raise(c);
	/* Enable IRQ after irq_work_raise() completes, otherwise when the current
	 * task is preempted by a task which does unit_alloc(), unit_alloc() may
	 * return NULL unexpectedly because irq work is already pending but cannot
	 * be triggered and free_llist cannot be refilled in time.
	 */
	local_irq_restore(flags);
}

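/* Like unit_free(), but queue the object on free_by_rcu: it is not reused
 * until a regular RCU grace period has elapsed, and it is only handed back
 * to the kernel allocator after an additional RCU tasks trace grace period.
 */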
static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;

	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		if (__llist_add(llnode, &c->free_by_rcu))
			c->free_by_rcu_tail = llnode;
	} else {
		llist_add(llnode, &c->free_llist_extra_rcu);
	}
	local_dec(&c->active);

	if (!atomic_read(&c->call_rcu_in_progress))
		irq_work_raise(c);
	local_irq_restore(flags);
}

/* Called from BPF program or from sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
{
	int idx;
	void *ret;

	if (!size)
		return ZERO_SIZE_PTR;

	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
	if (idx < 0)
		return NULL;

	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

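/* Recover the bucket index for an object about to be freed, using the size
 * reported by the underlying allocator (pcpu_alloc_size() or ksize()).
 */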
static notrace int bpf_mem_free_idx(void *ptr, bool percpu)
{
	size_t size;

	if (percpu)
		size = pcpu_alloc_size(*((void **)ptr));
	else
		size = ksize(ptr - LLIST_NODE_SZ);
	return bpf_mem_cache_idx(size);
}

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_free_idx(ptr, ma->percpu);
	if (idx < 0)
		return;

	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_free_idx(ptr, ma->percpu);
	if (idx < 0)
		return;

	unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
{
	void *ret;

	ret = unit_alloc(this_cpu_ptr(ma->cache));
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free(this_cpu_ptr(ma->cache), ptr);
}

void notrace bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free_rcu(this_cpu_ptr(ma->cache), ptr);
}

/* Directly does a kfree() without putting 'ptr' back to the free_llist
 * for reuse and without waiting for a rcu_tasks_trace gp.
 * The caller must first go through the rcu_tasks_trace gp for 'ptr'
 * before calling bpf_mem_cache_raw_free().
 * It could be used when the rcu_tasks_trace callback does not have
 * a hold on the original bpf_mem_alloc object that allocated the
 * 'ptr'. This should only be used in the uncommon code path.
 * Otherwise, the bpf_mem_alloc's free_llist cannot be refilled
 * and may affect performance.
 */
void bpf_mem_cache_raw_free(void *ptr)
{
	if (!ptr)
		return;

	kfree(ptr - LLIST_NODE_SZ);
}

/* When flags == GFP_KERNEL, it signals that the caller will not cause
 * deadlock when using kmalloc. bpf_mem_cache_alloc_flags() will use
 * kmalloc if the free_llist is empty.
 */
void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
{
	struct bpf_mem_cache *c;
	void *ret;

	c = this_cpu_ptr(ma->cache);

	ret = unit_alloc(c);
	if (!ret && flags == GFP_KERNEL) {
		struct mem_cgroup *memcg, *old_memcg;

		memcg = get_memcg(c);
		old_memcg = set_active_memcg(memcg);
		ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
		set_active_memcg(old_memcg);
		mem_cgroup_put(memcg);
	}

	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

/* The alignment of dynamic per-cpu area is 8, so c->unit_size and the
 * actual size of dynamic per-cpu area will always match and there is
 * no need to adjust size_index for per-cpu allocation. However, for the
 * simplicity of the implementation, use a unified size_index for both
 * kmalloc and per-cpu allocation.
 */
static __init int bpf_mem_cache_adjust_size(void)
{
	unsigned int size;

	/* Adjust the indexes in size_index[] according to the object_size
	 * of the underlying slab cache, so bpf_mem_alloc() will select a
	 * bpf_mem_cache with unit_size equal to the object_size of
	 * the underlying slab cache.
	 *
	 * The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
	 * 256-bytes, so only do adjustment for [8-bytes, 192-bytes].
	 */
	for (size = 192; size >= 8; size -= 8) {
		unsigned int kmalloc_size, index;

		kmalloc_size = kmalloc_size_roundup(size);
		if (kmalloc_size == size)
			continue;

		if (kmalloc_size <= 192)
			index = size_index[(kmalloc_size - 1) / 8];
		else
			index = fls(kmalloc_size - 1) - 1;
		/* Only overwrite if necessary */
		if (size_index[(size - 1) / 8] != index)
			size_index[(size - 1) / 8] = index;
	}

	return 0;
}
subsys_initcall(bpf_mem_cache_adjust_size);