kernel/bpf/bpf_local_storage.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

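/* Pick the map bucket for a selem by hashing the selem pointer itself;
 * the bucket's lock then serializes linking/unlinking selems on its list.
 */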
static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
              struct bpf_local_storage_elem *selem)
{
        return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

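/* Charge/uncharge the owner for an allocation of the given size.  The
 * accounting is owner-specific, so it is delegated to the map's
 * map_local_storage_(un)charge ops when they are provided.
 */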
static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
        struct bpf_map *map = &smap->map;

        if (!map->ops->map_local_storage_charge)
                return 0;

        return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
                         u32 size)
{
        struct bpf_map *map = &smap->map;

        if (map->ops->map_local_storage_uncharge)
                map->ops->map_local_storage_uncharge(smap, owner, size);
}

static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
        struct bpf_map *map = &smap->map;

        return map->ops->map_owner_storage_ptr(owner);
}

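/* A selem sits on two RCU lists: local_storage->list via snode and the
 * map bucket's list via map_node.  "Linked" is tested by whether the
 * corresponding hlist node is hashed; the _lockless variants are for the
 * racy pre-checks done before taking the relevant lock.
 */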
static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
        return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
        return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
        return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
        return !hlist_unhashed(&selem->map_node);
}

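/* Allocate a zero-initialized selem, optionally charging the owner for
 * smap->elem_size first.  On allocation failure the charge is reverted,
 * so the caller never has to uncharge.
 */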
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
                void *value, bool charge_mem, gfp_t gfp_flags)
{
        struct bpf_local_storage_elem *selem;

        if (charge_mem && mem_charge(smap, owner, smap->elem_size))
                return NULL;

        selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
                                gfp_flags | __GFP_NOWARN);
        if (selem) {
                if (value)
                        copy_map_value(&smap->map, SDATA(selem)->data, value);
                /* No need to call check_and_init_map_value as memory is zero init */
                return selem;
        }

        if (charge_mem)
                mem_uncharge(smap, owner, smap->elem_size);

        return NULL;
}

void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
        struct bpf_local_storage *local_storage;

        /* If RCU Tasks Trace grace period implies RCU grace period, do
         * kfree(), else do kfree_rcu().
         */
        local_storage = container_of(rcu, struct bpf_local_storage, rcu);
        if (rcu_trace_implies_rcu_gp())
                kfree(local_storage);
        else
                kfree_rcu(local_storage, rcu);
}

static void bpf_selem_free_fields_rcu(struct rcu_head *rcu)
{
        struct bpf_local_storage_elem *selem;
        struct bpf_local_storage_map *smap;

        selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
        /* protected by the rcu_barrier*() */
        smap = rcu_dereference_protected(SDATA(selem)->smap, true);
        bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
        kfree(selem);
}

static void bpf_selem_free_fields_trace_rcu(struct rcu_head *rcu)
{
        /* Free directly if Tasks Trace RCU GP also implies RCU GP */
        if (rcu_trace_implies_rcu_gp())
                bpf_selem_free_fields_rcu(rcu);
        else
                call_rcu(rcu, bpf_selem_free_fields_rcu);
}

static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
        struct bpf_local_storage_elem *selem;

        selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
        if (rcu_trace_implies_rcu_gp())
                kfree(selem);
        else
                kfree_rcu(selem, rcu);
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
                                            struct bpf_local_storage_elem *selem,
                                            bool uncharge_mem, bool use_trace_rcu)
{
        struct bpf_local_storage_map *smap;
        bool free_local_storage;
        struct btf_record *rec;
        void *owner;

        smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
        owner = local_storage->owner;

        /* All uncharging on the owner must be done first.
         * The owner may be freed once the last selem is unlinked
         * from local_storage.
         */
        if (uncharge_mem)
                mem_uncharge(smap, owner, smap->elem_size);

        free_local_storage = hlist_is_singular_node(&selem->snode,
                                                    &local_storage->list);
        if (free_local_storage) {
                mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
                local_storage->owner = NULL;

                /* After this RCU_INIT, owner may be freed and cannot be used */
                RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

                /* local_storage is not freed now.  local_storage->lock is
                 * still held and raw_spin_unlock_irqrestore(&local_storage->lock)
                 * will be done by the caller.
                 *
                 * Although the unlock will be done under
                 * rcu_read_lock(), it is more intuitive to
                 * read if the freeing of the storage is done
                 * after the raw_spin_unlock_irqrestore(&local_storage->lock).
                 *
                 * Hence, a "bool free_local_storage" is returned
                 * to the caller, which then frees the storage after
                 * all the RCU grace periods have expired.
                 */
        }
        hlist_del_init_rcu(&selem->snode);
        if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
            SDATA(selem))
                RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

        /* A different RCU callback is chosen whenever we need to free
         * additional fields in selem data before freeing selem.
         * bpf_local_storage_map_free only executes rcu_barrier to wait for RCU
         * callbacks when it has special fields, hence we can only conditionally
         * dereference smap, as by this time the map might have already been
         * freed without waiting for our call_rcu callback if it did not have
         * any special fields.
         */
        rec = smap->map.record;
        if (use_trace_rcu) {
                if (!IS_ERR_OR_NULL(rec))
                        call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_fields_trace_rcu);
                else
                        call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
        } else {
                if (!IS_ERR_OR_NULL(rec))
                        call_rcu(&selem->rcu, bpf_selem_free_fields_rcu);
                else
                        kfree_rcu(selem, rcu);
        }

        return free_local_storage;
}

static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
                                       bool use_trace_rcu)
{
        struct bpf_local_storage *local_storage;
        bool free_local_storage = false;
        unsigned long flags;

        if (unlikely(!selem_linked_to_storage_lockless(selem)))
                /* selem has already been unlinked from its local_storage */
                return;

        local_storage = rcu_dereference_check(selem->local_storage,
                                              bpf_rcu_lock_held());
        raw_spin_lock_irqsave(&local_storage->lock, flags);
        if (likely(selem_linked_to_storage(selem)))
                free_local_storage = bpf_selem_unlink_storage_nolock(
                        local_storage, selem, true, use_trace_rcu);
        raw_spin_unlock_irqrestore(&local_storage->lock, flags);

        if (free_local_storage) {
                if (use_trace_rcu)
                        call_rcu_tasks_trace(&local_storage->rcu,
                                             bpf_local_storage_free_rcu);
                else
                        kfree_rcu(local_storage, rcu);
        }
}

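/* Link selem into local_storage->list.  The _nolock suffix means the
 * caller must already hold local_storage->lock or otherwise have
 * exclusive access (e.g. before the storage is published to the owner).
 */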
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
                                   struct bpf_local_storage_elem *selem)
{
        RCU_INIT_POINTER(selem->local_storage, local_storage);
        hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
        struct bpf_local_storage_map *smap;
        struct bpf_local_storage_map_bucket *b;
        unsigned long flags;

        if (unlikely(!selem_linked_to_map_lockless(selem)))
                /* selem has already been unlinked from smap */
                return;

        smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
        b = select_bucket(smap, selem);
        raw_spin_lock_irqsave(&b->lock, flags);
        if (likely(selem_linked_to_map(selem)))
                hlist_del_init_rcu(&selem->map_node);
        raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_link_map(struct bpf_local_storage_map *smap,
                        struct bpf_local_storage_elem *selem)
{
        struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
        unsigned long flags;

        raw_spin_lock_irqsave(&b->lock, flags);
        RCU_INIT_POINTER(SDATA(selem)->smap, smap);
        hlist_add_head_rcu(&selem->map_node, &b->list);
        raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
{
        /* Always unlink from map before unlinking from local_storage
         * because selem will be freed after successfully unlinked from
         * the local_storage.
         */
        bpf_selem_unlink_map(selem);
        __bpf_selem_unlink_storage(selem, use_trace_rcu);
}

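/* Each smap is assigned a cache_idx slot at map creation.  The most
 * recently looked-up sdata of that map can be published to
 * local_storage->cache[cache_idx], letting the common one-map case hit
 * the fast path below instead of walking local_storage->list.
 */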
/* If cacheit_lockit is false, this lookup function is lockless */
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
                         struct bpf_local_storage_map *smap,
                         bool cacheit_lockit)
{
        struct bpf_local_storage_data *sdata;
        struct bpf_local_storage_elem *selem;

        /* Fast path (cache hit) */
        sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
                                      bpf_rcu_lock_held());
        if (sdata && rcu_access_pointer(sdata->smap) == smap)
                return sdata;

        /* Slow path (cache miss) */
        hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
                                 rcu_read_lock_trace_held())
                if (rcu_access_pointer(SDATA(selem)->smap) == smap)
                        break;

        if (!selem)
                return NULL;

        sdata = SDATA(selem);
        if (cacheit_lockit) {
                unsigned long flags;

                /* spinlock is needed to avoid racing with the
                 * parallel delete.  Otherwise, publishing an already
                 * deleted sdata to the cache will become a use-after-free
                 * problem in the next bpf_local_storage_lookup().
                 */
                raw_spin_lock_irqsave(&local_storage->lock, flags);
                if (selem_linked_to_storage(selem))
                        rcu_assign_pointer(local_storage->cache[smap->cache_idx],
                                           sdata);
                raw_spin_unlock_irqrestore(&local_storage->lock, flags);
        }

        return sdata;
}

static int check_flags(const struct bpf_local_storage_data *old_sdata,
                       u64 map_flags)
{
        if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
                /* elem already exists */
                return -EEXIST;

        if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
                /* elem doesn't exist, cannot update it */
                return -ENOENT;

        return 0;
}

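/* Create the owner's very first bpf_local_storage with first_selem already
 * linked, then publish it to the owner's storage pointer with cmpxchg.
 * Returns -EAGAIN if another storage was published concurrently.
 */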
int bpf_local_storage_alloc(void *owner,
                            struct bpf_local_storage_map *smap,
                            struct bpf_local_storage_elem *first_selem,
                            gfp_t gfp_flags)
{
        struct bpf_local_storage *prev_storage, *storage;
        struct bpf_local_storage **owner_storage_ptr;
        int err;

        err = mem_charge(smap, owner, sizeof(*storage));
        if (err)
                return err;

        storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
                                  gfp_flags | __GFP_NOWARN);
        if (!storage) {
                err = -ENOMEM;
                goto uncharge;
        }

        INIT_HLIST_HEAD(&storage->list);
        raw_spin_lock_init(&storage->lock);
        storage->owner = owner;

        bpf_selem_link_storage_nolock(storage, first_selem);
        bpf_selem_link_map(smap, first_selem);

        owner_storage_ptr =
                (struct bpf_local_storage **)owner_storage(smap, owner);
        /* Publish storage to the owner.
         * Instead of using any lock of the kernel object (i.e. owner),
         * cmpxchg will work with any kernel object regardless of what
         * the running context is, bh, irq...etc.
         *
         * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
         * is protected by the storage->lock.  Hence, when freeing
         * the owner->storage, the storage->lock must be held before
         * setting owner->storage ptr to NULL.
         */
        prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
        if (unlikely(prev_storage)) {
                bpf_selem_unlink_map(first_selem);
                err = -EAGAIN;
                goto uncharge;

                /* Note that even though first_selem was linked to smap's
                 * bucket->list, first_selem can be freed immediately
                 * (instead of kfree_rcu) because
                 * bpf_local_storage_map_free() does a
                 * synchronize_rcu_mult (waiting for both sleepable and
                 * normal programs) before walking the bucket->list.
                 * Hence, no one is accessing selem from the
                 * bucket->list under rcu_read_lock().
                 */
        }

        return 0;

uncharge:
        kfree(storage);
        mem_uncharge(smap, owner, sizeof(*storage));
        return err;
}

/* sk cannot be going away because it is linking new elem
 * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
                         void *value, u64 map_flags, gfp_t gfp_flags)
{
        struct bpf_local_storage_data *old_sdata = NULL;
        struct bpf_local_storage_elem *selem = NULL;
        struct bpf_local_storage *local_storage;
        unsigned long flags;
        int err;

        /* BPF_EXIST and BPF_NOEXIST cannot be both set */
        if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
            /* BPF_F_LOCK can only be used in a value with spin_lock */
            unlikely((map_flags & BPF_F_LOCK) &&
                     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
                return ERR_PTR(-EINVAL);

        if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
                return ERR_PTR(-EINVAL);

        local_storage = rcu_dereference_check(*owner_storage(smap, owner),
                                              bpf_rcu_lock_held());
        if (!local_storage || hlist_empty(&local_storage->list)) {
                /* Very first elem for the owner */
                err = check_flags(NULL, map_flags);
                if (err)
                        return ERR_PTR(err);

                selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
                if (!selem)
                        return ERR_PTR(-ENOMEM);

                err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
                if (err) {
                        kfree(selem);
                        mem_uncharge(smap, owner, smap->elem_size);
                        return ERR_PTR(err);
                }

                return SDATA(selem);
        }

        if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
                /* Hoping to find an old_sdata to do inline update
                 * such that it can avoid taking the local_storage->lock
                 * and changing the lists.
                 */
                old_sdata =
                        bpf_local_storage_lookup(local_storage, smap, false);
                err = check_flags(old_sdata, map_flags);
                if (err)
                        return ERR_PTR(err);
                if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
                        copy_map_value_locked(&smap->map, old_sdata->data,
                                              value, false);
                        return old_sdata;
                }
        }

        if (gfp_flags == GFP_KERNEL) {
                selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
                if (!selem)
                        return ERR_PTR(-ENOMEM);
        }

        raw_spin_lock_irqsave(&local_storage->lock, flags);

        /* Recheck local_storage->list under local_storage->lock */
        if (unlikely(hlist_empty(&local_storage->list))) {
                /* A parallel del is happening and local_storage is going
                 * away.  It has just been checked before, so very
                 * unlikely.  Return instead of retry to keep things
                 * simple.
                 */
                err = -EAGAIN;
                goto unlock_err;
        }

        old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
        err = check_flags(old_sdata, map_flags);
        if (err)
                goto unlock_err;

        if (old_sdata && (map_flags & BPF_F_LOCK)) {
                copy_map_value_locked(&smap->map, old_sdata->data, value,
                                      false);
                selem = SELEM(old_sdata);
                goto unlock;
        }

        if (gfp_flags != GFP_KERNEL) {
                /* local_storage->lock is held.  Hence, we are sure
                 * we can unlink and uncharge the old_sdata successfully
                 * later.  Hence, instead of charging the new selem now
                 * and then uncharging the old selem later (which may cause
                 * a potential but unnecessary charge failure), avoid taking
                 * a charge at all here (the "!old_sdata" check) and the
                 * old_sdata will not be uncharged later during
                 * bpf_selem_unlink_storage_nolock().
                 */
                selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
                if (!selem) {
                        err = -ENOMEM;
                        goto unlock_err;
                }
        }

        /* First, link the new selem to the map */
        bpf_selem_link_map(smap, selem);

        /* Second, link (and publish) the new selem to local_storage */
        bpf_selem_link_storage_nolock(local_storage, selem);

        /* Third, remove old selem, SELEM(old_sdata) */
        if (old_sdata) {
                bpf_selem_unlink_map(SELEM(old_sdata));
                bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
                                                false, true);
        }

unlock:
        raw_spin_unlock_irqrestore(&local_storage->lock, flags);
        return SDATA(selem);

unlock_err:
        raw_spin_unlock_irqrestore(&local_storage->lock, flags);
        if (selem) {
                mem_uncharge(smap, owner, smap->elem_size);
                kfree(selem);
        }
        return ERR_PTR(err);
}

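/* Hand out the least-used slot of the shared cache array, so that maps in
 * heavy use are more likely to keep distinct cache_idx values.
 */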
static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
        u64 min_usage = U64_MAX;
        u16 i, res = 0;

        spin_lock(&cache->idx_lock);

        for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
                if (cache->idx_usage_counts[i] < min_usage) {
                        min_usage = cache->idx_usage_counts[i];
                        res = i;

                        /* Found a free cache_idx */
                        if (!min_usage)
                                break;
                }
        }
        cache->idx_usage_counts[res]++;

        spin_unlock(&cache->idx_lock);

        return res;
}

static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
                                             u16 idx)
{
        spin_lock(&cache->idx_lock);
        cache->idx_usage_counts[idx]--;
        spin_unlock(&cache->idx_lock);
}

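/* Validate map attributes: local storage requires BPF_F_NO_PREALLOC, a
 * 4-byte integer key, an unset max_entries, and BTF type ids so the
 * value can be dumped to userspace.
 */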
int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
        if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
            !(attr->map_flags & BPF_F_NO_PREALLOC) ||
            attr->max_entries ||
            attr->key_size != sizeof(int) || !attr->value_size ||
            /* Enforce BTF for userspace sk dumping */
            !attr->btf_key_type_id || !attr->btf_value_type_id)
                return -EINVAL;

        if (!bpf_capable())
                return -EPERM;

        if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
                return -E2BIG;

        return 0;
}

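/* Allocate the smap and size its bucket array to roughly the number of
 * possible CPUs (rounded up to a power of two, minimum 2) to keep bucket
 * lock contention low.
 */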
static struct bpf_local_storage_map *__bpf_local_storage_map_alloc(union bpf_attr *attr)
{
        struct bpf_local_storage_map *smap;
        unsigned int i;
        u32 nbuckets;

        smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
        if (!smap)
                return ERR_PTR(-ENOMEM);
        bpf_map_init_from_attr(&smap->map, attr);

        nbuckets = roundup_pow_of_two(num_possible_cpus());
        /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
        nbuckets = max_t(u32, 2, nbuckets);
        smap->bucket_log = ilog2(nbuckets);

        smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
                                         nbuckets, GFP_USER | __GFP_NOWARN);
        if (!smap->buckets) {
                bpf_map_area_free(smap);
                return ERR_PTR(-ENOMEM);
        }

        for (i = 0; i < nbuckets; i++) {
                INIT_HLIST_HEAD(&smap->buckets[i].list);
                raw_spin_lock_init(&smap->buckets[i].lock);
        }

        smap->elem_size = offsetof(struct bpf_local_storage_elem,
                                   sdata.data[attr->value_size]);

        return smap;
}

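/* The map key must be described in BTF as a plain 32-bit integer with no
 * bitfield offset; anything else is rejected.
 */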
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
                                    const struct btf *btf,
                                    const struct btf_type *key_type,
                                    const struct btf_type *value_type)
{
        u32 int_data;

        if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
                return -EINVAL;

        int_data = *(u32 *)(key_type + 1);
        if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
                return -EINVAL;

        return 0;
}

bool bpf_local_storage_unlink_nolock(struct bpf_local_storage *local_storage)
{
        struct bpf_local_storage_elem *selem;
        bool free_storage = false;
        struct hlist_node *n;

        /* Neither the bpf_prog nor the bpf_map's syscall
         * could be modifying the local_storage->list now.
         * Thus, no elem can be added to or deleted from the
         * local_storage->list by the bpf_prog or by the bpf_map's syscall.
         *
         * It is racing with bpf_local_storage_map_free() alone
         * when unlinking elem from the local_storage->list and
         * the map's bucket->list.
         */
        hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
                /* Always unlink from map before unlinking from
                 * local_storage.
                 */
                bpf_selem_unlink_map(selem);
                /* If the local_storage list has only one element, then
                 * bpf_selem_unlink_storage_nolock() will return true.
                 * Otherwise, it will return false.  The loop intends to
                 * remove all of the local storage, so the last iteration
                 * will set free_storage to true.
                 */
                free_storage = bpf_selem_unlink_storage_nolock(
                        local_storage, selem, false, false);
        }

        return free_storage;
}

u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
{
        struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
        u64 usage = sizeof(*smap);

        /* The dynamically allocated selems are not counted currently. */
        usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
        return usage;
}

struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
                            struct bpf_local_storage_cache *cache)
{
        struct bpf_local_storage_map *smap;

        smap = __bpf_local_storage_map_alloc(attr);
        if (IS_ERR(smap))
                return ERR_CAST(smap);

        smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
        return &smap->map;
}

void bpf_local_storage_map_free(struct bpf_map *map,
                                struct bpf_local_storage_cache *cache,
                                int __percpu *busy_counter)
{
        struct bpf_local_storage_map_bucket *b;
        struct bpf_local_storage_elem *selem;
        struct bpf_local_storage_map *smap;
        unsigned int i;

        smap = (struct bpf_local_storage_map *)map;
        bpf_local_storage_cache_idx_free(cache, smap->cache_idx);

        /* Note that this map might be concurrently cloned from
         * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
         * RCU read section to finish before proceeding. New RCU
         * read sections should be prevented via bpf_map_inc_not_zero.
         */
        synchronize_rcu();

        /* bpf prog and the userspace can no longer access this map
         * now.  No new selem (of this map) can be added
         * to the owner->storage or to the map bucket's list.
         *
         * The elem of this map can be cleaned up here
         * or when the storage is freed e.g.
         * by bpf_sk_storage_free() during __sk_destruct().
         */
        for (i = 0; i < (1U << smap->bucket_log); i++) {
                b = &smap->buckets[i];

                rcu_read_lock();
                /* No one is adding to b->list now */
                while ((selem = hlist_entry_safe(
                                rcu_dereference_raw(hlist_first_rcu(&b->list)),
                                struct bpf_local_storage_elem, map_node))) {
                        if (busy_counter) {
                                migrate_disable();
                                this_cpu_inc(*busy_counter);
                        }
                        bpf_selem_unlink(selem, false);
                        if (busy_counter) {
                                this_cpu_dec(*busy_counter);
                                migrate_enable();
                        }
                        cond_resched_rcu();
                }
                rcu_read_unlock();
        }

        /* While freeing the storage we may still need to access the map.
         *
         * e.g. when bpf_sk_storage_free() has unlinked selem from the map
         * which then made the above while((selem = ...)) loop
         * exit immediately.
         *
         * However, while freeing the storage one still needs to access the
         * smap->elem_size to do the uncharging in
         * bpf_selem_unlink_storage_nolock().
         *
         * Hence, wait another rcu grace period for the storage to be freed.
         */
        synchronize_rcu();

        /* Only the freeing of smap is delayed; the buckets are not needed anymore */
        kvfree(smap->buckets);

        /* When local storage has special fields, the callbacks
         * bpf_selem_free_fields_rcu and bpf_selem_free_fields_trace_rcu will
         * keep using the map's BTF record, so we need to execute an RCU
         * barrier to wait for them, as the record will be freed right after
         * our map_free callback.
         */
        if (!IS_ERR_OR_NULL(smap->map.record)) {
                rcu_barrier_tasks_trace();
                /* We cannot skip rcu_barrier() when rcu_trace_implies_rcu_gp()
                 * is true, because while call_rcu invocation is skipped in that
                 * case in bpf_selem_free_fields_trace_rcu (and all local
                 * storage maps pass use_trace_rcu = true), there can be
                 * call_rcu callbacks based on use_trace_rcu = false in the
                 * while ((selem = ...)) loop above or when owner's free path
                 * calls bpf_local_storage_unlink_nolock.
                 */
                rcu_barrier();
        }
        bpf_map_area_free(smap);
}