bpf: Use bpf_mem_cache_alloc/free for bpf_local_storage
kernel/bpf/bpf_local_storage.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

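/* A bucket is chosen by hashing the selem pointer itself, so a given
 * selem always maps back to the same bucket for both link and unlink.
 */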
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}

static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}

static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

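/* Allocate a new selem.  On success the value is zero-initialized (and
 * copied from @value if one is given) and, when @charge_mem is set, the
 * owner has been charged for smap->elem_size.  On failure NULL is
 * returned and any charge taken here has been undone.
 */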
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem, gfp_t gfp_flags)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	if (smap->bpf_ma) {
		migrate_disable();
		selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
		migrate_enable();
		if (selem)
			/* Keep the original bpf_map_kzalloc behavior
			 * from before the switch to bpf_mem_cache_alloc.
			 *
			 * No need to use zero_map_value.  bpf_selem_free()
			 * only does bpf_mem_cache_free when no other bpf
			 * prog is still using the selem.
			 */
			memset(SDATA(selem)->data, 0, smap->map.value_size);
	} else {
		selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
					gfp_flags | __GFP_NOWARN);
	}

	if (selem) {
		if (value)
			copy_map_value(&smap->map, SDATA(selem)->data, value);
		/* No need to call check_and_init_map_value as memory is zero init */
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	/* If RCU Tasks Trace grace period implies RCU grace period, do
	 * kfree(), else do kfree_rcu().
	 */
	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(local_storage);
	else
		kfree_rcu(local_storage, rcu);
}

static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	bpf_mem_cache_raw_free(local_storage);
}

static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_local_storage_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_local_storage_free_rcu);
}

/* Handle bpf_ma == false */
static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
				     bool vanilla_rcu)
{
	if (vanilla_rcu)
		kfree_rcu(local_storage, rcu);
	else
		call_rcu_tasks_trace(&local_storage->rcu,
				     __bpf_local_storage_free_trace_rcu);
}

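/* Free a local_storage.  When @reuse_now is false, the memory must stay
 * around until both sleepable (RCU tasks trace) and non-sleepable (RCU)
 * readers are done, so the free goes through a tasks-trace grace period
 * and, if needed, a regular RCU grace period after that.  When
 * @reuse_now is true, the bpf_mem_alloc cache may reuse the memory
 * immediately.
 */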
static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_map *smap,
				   bool bpf_ma, bool reuse_now)
{
	if (!bpf_ma) {
		__bpf_local_storage_free(local_storage, reuse_now);
		return;
	}

	if (!reuse_now) {
		call_rcu_tasks_trace(&local_storage->rcu,
				     bpf_local_storage_free_trace_rcu);
		return;
	}

	if (smap) {
		migrate_disable();
		bpf_mem_cache_free(&smap->storage_ma, local_storage);
		migrate_enable();
	} else {
		/* smap could be NULL if the selem that triggered
		 * this 'local_storage' creation had been long gone.
		 * In this case, directly do call_rcu().
		 */
		call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
	}
}

/* rcu tasks trace callback for bpf_ma == false */
static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	if (rcu_trace_implies_rcu_gp())
		kfree(selem);
	else
		kfree_rcu(selem, rcu);
}

/* Handle bpf_ma == false */
static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
			     bool vanilla_rcu)
{
	if (vanilla_rcu)
		kfree_rcu(selem, rcu);
	else
		call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
}

static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	bpf_mem_cache_raw_free(selem);
}

static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_selem_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_selem_free_rcu);
}

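/* Free a selem.  Special fields in the value (e.g. kptrs) are released
 * first, then the selem is freed under the same reuse_now rules as the
 * local_storage above: deferred through RCU grace periods when other
 * bpf progs may still hold a reference, reused immediately otherwise.
 */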
void bpf_selem_free(struct bpf_local_storage_elem *selem,
		    struct bpf_local_storage_map *smap,
		    bool reuse_now)
{
	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);

	if (!smap->bpf_ma) {
		__bpf_selem_free(selem, reuse_now);
		return;
	}

	if (!reuse_now) {
		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
	} else {
		/* Instead of using the vanilla call_rcu(),
		 * bpf_mem_cache_free will be able to reuse selem
		 * immediately.
		 */
		migrate_disable();
		bpf_mem_cache_free(&smap->selem_ma, selem);
		migrate_enable();
	}
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
					    struct bpf_local_storage_elem *selem,
					    bool uncharge_mem, bool reuse_now)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now.  local_storage->lock is
		 * still held and raw_spin_unlock_irqrestore(&local_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after the raw_spin_unlock_irqrestore(&local_storage->lock).
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	bpf_selem_free(selem, smap, reuse_now);

	if (rcu_access_pointer(local_storage->smap) == smap)
		RCU_INIT_POINTER(local_storage->smap, NULL);

	return free_local_storage;
}

static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
				 struct bpf_local_storage_map *storage_smap,
				 struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *selem_smap;

	/* local_storage->smap may be NULL.  If it is, get the bpf_ma
	 * from any selem in the local_storage->list.  The bpf_ma of all
	 * local_storage and selem should have the same value
	 * for the same map type.
	 *
	 * If the local_storage->list is already empty, the caller will not
	 * care about the bpf_ma value either, because the caller is not
	 * responsible for freeing the local_storage.
	 */

	if (storage_smap)
		return storage_smap->bpf_ma;

	if (!selem) {
		struct hlist_node *n;

		n = rcu_dereference_check(hlist_first_rcu(&local_storage->list),
					  bpf_rcu_lock_held());
		if (!n)
			return false;

		selem = hlist_entry(n, struct bpf_local_storage_elem, snode);
	}
	selem_smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());

	return selem_smap->bpf_ma;
}

static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
				     bool reuse_now)
{
	struct bpf_local_storage_map *storage_smap;
	struct bpf_local_storage *local_storage;
	bool bpf_ma, free_local_storage = false;
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage_lockless(selem)))
		/* selem has already been unlinked from its local_storage */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	storage_smap = rcu_dereference_check(local_storage->smap,
					     bpf_rcu_lock_held());
	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, selem);

	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, reuse_now);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_local_storage)
		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, reuse_now);
}

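/* No lock is taken here (hence the _nolock suffix): the caller either
 * holds local_storage->lock or has exclusive access to the storage,
 * e.g. because it has not been published to the owner yet.
 */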
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map_lockless(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
{
	/* Always unlink from map before unlinking from local_storage
	 * because selem will be freed after successfully unlinked from
	 * the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	bpf_selem_unlink_storage(selem, reuse_now);
}

/* If cacheit_lockit is false, this lookup function is lockless */
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				  rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* spinlock is needed to avoid racing with the
		 * parallel delete.  Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}

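/* A minimal sketch of a typical caller, loosely based on the sk storage
 * map type (names abbreviated for illustration; see bpf_sk_storage.c
 * for the real code):
 *
 *	sk_storage = rcu_dereference_check(sk->sk_bpf_storage,
 *					   bpf_rcu_lock_held());
 *	if (!sk_storage)
 *		return NULL;
 *	return bpf_local_storage_lookup(sk_storage,
 *					(struct bpf_local_storage_map *)map,
 *					cacheit_lockit);
 */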
static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

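/* Allocate the per-owner bpf_local_storage, link @first_selem into it,
 * and publish it to the owner with a cmpxchg.  Returns -EAGAIN if a
 * parallel update published a storage for this owner first.
 */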
int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	if (smap->bpf_ma) {
		migrate_disable();
		storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
		migrate_enable();
	} else {
		storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
					  gfp_flags | __GFP_NOWARN);
	}

	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	RCU_INIT_POINTER(storage->smap, smap);
	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless of the
	 * running context (bh, irq, etc.).
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock.  Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	bpf_local_storage_free(storage, smap, smap->bpf_ma, true);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

/* sk cannot be going away because it is linking new elem
 * to sk->sk_bpf_storage (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *selem = NULL;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
		return ERR_PTR(-EINVAL);

	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
		if (err) {
			bpf_selem_free(selem, smap, true);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do an inline update
		 * such that it can avoid taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	if (gfp_flags == GFP_KERNEL) {
		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);
	}

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away.  It has just been checked before, so very
		 * unlikely.  Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	if (gfp_flags != GFP_KERNEL) {
		/* local_storage->lock is held.  Hence, we are sure
		 * we can unlink and uncharge the old_sdata successfully
		 * later.  So, instead of charging the new selem now
		 * and then uncharging the old selem later (which may cause
		 * a potential but unnecessary charge failure), avoid taking
		 * a charge at all here (the "!old_sdata" check) and the
		 * old_sdata will not be uncharged later during
		 * bpf_selem_unlink_storage_nolock().
		 */
		selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
		if (!selem) {
			err = -ENOMEM;
			goto unlock_err;
		}
	}

	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						false, false);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	if (selem) {
		mem_uncharge(smap, owner, smap->elem_size);
		bpf_selem_free(selem, smap, true);
	}
	return ERR_PTR(err);
}

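/* Pick the cache slot with the lowest usage count so that maps are
 * spread across the BPF_LOCAL_STORAGE_CACHE_SIZE cache slots as evenly
 * as possible.  A slot with a zero count is free and taken right away.
 */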
static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}

static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
					     u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}

int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}

int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

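/* Called when the owner itself is being destroyed (e.g. from
 * bpf_sk_storage_free() during __sk_destruct()): unlink and free every
 * selem still attached to this local_storage, then free the storage.
 */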
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
{
	struct bpf_local_storage_map *storage_smap;
	struct bpf_local_storage_elem *selem;
	bool bpf_ma, free_storage = false;
	struct hlist_node *n;
	unsigned long flags;

	storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
	bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, NULL);

	/* Neither the bpf_prog nor the bpf_map's syscall
	 * could be modifying the local_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the local_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * local_storage.
		 */
		bpf_selem_unlink_map(selem);
		/* If the local_storage list has only one element,
		 * bpf_selem_unlink_storage_nolock() will return true.
		 * Otherwise, it will return false.  This loop intends
		 * to remove all local storage, so the last iteration
		 * will set free_storage to true.
		 */
		free_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, false, true);
	}
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_storage)
		bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true);
}

u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
	u64 usage = sizeof(*smap);

	/* The dynamically allocated selems are not counted currently. */
	usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
	return usage;
}

/* When bpf_ma == true, the bpf_mem_alloc is used to allocate and free memory.
 * A deadlock free allocator is useful for storage whose owner PTR_TO_BTF_ID
 * the bpf prog can easily get hold of in any context, e.g. via
 * bpf_get_current_task_btf.  The task and cgroup storage fall into this case.
 * The bpf_mem_alloc reuses memory immediately.  To be reuse-immediate safe,
 * the owner destruction code path needs to go through an RCU grace period
 * before calling bpf_local_storage_destroy().
 *
 * When bpf_ma == false, the kmalloc and kfree are used.
 */
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
			    struct bpf_local_storage_cache *cache,
			    bool bpf_ma)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;
	int err;

	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
					 nbuckets, GFP_USER | __GFP_NOWARN);
	if (!smap->buckets) {
		err = -ENOMEM;
		goto free_smap;
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size = offsetof(struct bpf_local_storage_elem,
				   sdata.data[attr->value_size]);

	smap->bpf_ma = bpf_ma;
	if (bpf_ma) {
		err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
		if (err)
			goto free_smap;

		err = bpf_mem_alloc_init(&smap->storage_ma, sizeof(struct bpf_local_storage), false);
		if (err) {
			bpf_mem_alloc_destroy(&smap->selem_ma);
			goto free_smap;
		}
	}

	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
	return &smap->map;

free_smap:
	kvfree(smap->buckets);
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}

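/* Free the map: unlink every selem still sitting in the map's buckets
 * (which also removes it from its owner's local_storage), then wait for
 * the RCU grace periods needed before the smap itself can go away.
 */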
void bpf_local_storage_map_free(struct bpf_map *map,
				struct bpf_local_storage_cache *cache,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_map_bucket *b;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	unsigned int i;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone.  Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding.  New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now.  No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or when the storage is freed e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem, true);
			if (busy_counter) {
				this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

	if (smap->bpf_ma) {
		bpf_mem_alloc_destroy(&smap->selem_ma);
		bpf_mem_alloc_destroy(&smap->storage_ma);
	}
	kvfree(smap->buckets);
	bpf_map_area_free(smap);
}