// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/btf.h>

static atomic_t cache_idx;

#define SK_STORAGE_CREATE_FLAG_MASK \
	(BPF_F_NO_PREALLOC | BPF_F_CLONE)

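/* A bucket holds a list of elems that hash to it; the bucket lock
 * serializes link/unlink of elems on the map side.
 */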
struct bucket {
	struct hlist_head list;
	raw_spinlock_t lock;
};

/* The map is not the primary owner of a bpf_sk_storage_elem.
 * Instead, the sk->sk_bpf_storage is.
 *
 * The map (bpf_sk_storage_map) is for two purposes
 * 1. Define the size of the "sk local storage".  It is
 *    the map's value_size.
 *
 * 2. Maintain a list to keep track of all elems such
 *    that they can be cleaned up during the map destruction.
 *
 * When a bpf local storage is being looked up for a
 * particular sk, the "bpf_map" pointer is actually used
 * as the "key" to search in the list of elem in
 * sk->sk_bpf_storage.
 *
 * Hence, consider sk->sk_bpf_storage as a mini-map
 * with the "bpf_map" pointer as the searching key.
 */
struct bpf_sk_storage_map {
	struct bpf_map map;
	/* Lookup elem does not require accessing the map.
	 *
	 * Updating/Deleting requires a bucket lock to
	 * link/unlink the elem from the map.  Multiple
	 * buckets are used to reduce lock contention.
	 */
	struct bucket *buckets;
	u32 bucket_log;
	u16 elem_size;
	u16 cache_idx;
};

struct bpf_sk_storage_data {
	/* smap is used as the searching key when looking up
	 * from sk->sk_bpf_storage.
	 *
	 * Put it in the same cacheline as the data to minimize
	 * the number of cacheline accesses during the cache-hit case.
	 */
	struct bpf_sk_storage_map __rcu *smap;
	u8 data[0] __aligned(8);
};

/* Linked to bpf_sk_storage and bpf_sk_storage_map */
struct bpf_sk_storage_elem {
	struct hlist_node map_node;	/* Linked to bpf_sk_storage_map */
	struct hlist_node snode;	/* Linked to bpf_sk_storage */
	struct bpf_sk_storage __rcu *sk_storage;
	struct rcu_head rcu;
	/* The data is stored in another cacheline to minimize
	 * the number of cacheline accesses during a cache hit.
	 */
	struct bpf_sk_storage_data sdata ____cacheline_aligned;
};

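/* SELEM() maps an sdata back to its containing elem;
 * SDATA() returns the data area of an elem.
 */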
#define SELEM(_SDATA) container_of((_SDATA), struct bpf_sk_storage_elem, sdata)
#define SDATA(_SELEM) (&(_SELEM)->sdata)
#define BPF_SK_STORAGE_CACHE_SIZE	16

struct bpf_sk_storage {
	struct bpf_sk_storage_data __rcu *cache[BPF_SK_STORAGE_CACHE_SIZE];
	struct hlist_head list;	/* List of bpf_sk_storage_elem */
	struct sock *sk;	/* The sk that owns the above "list" of
				 * bpf_sk_storage_elem.
				 */
	struct rcu_head rcu;
	raw_spinlock_t lock;	/* Protect adding/removing from the "list" */
};

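/* Pick the map bucket for an elem by hashing the elem pointer */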
static struct bucket *select_bucket(struct bpf_sk_storage_map *smap,
				    struct bpf_sk_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

static int omem_charge(struct sock *sk, unsigned int size)
{
	/* same check as in sock_kmalloc() */
	if (size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

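/* An elem is "linked" once it is hashed into the sk_storage's list
 * (snode) or into a map bucket's list (map_node).
 */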
static bool selem_linked_to_sk(const struct bpf_sk_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map(const struct bpf_sk_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}

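/* Allocate a zeroed elem and copy in the initial value (if any).
 * When charge_omem is true, the elem size is charged to
 * sk->sk_omem_alloc and uncharged again on allocation failure.
 */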
static struct bpf_sk_storage_elem *selem_alloc(struct bpf_sk_storage_map *smap,
					       struct sock *sk, void *value,
					       bool charge_omem)
{
	struct bpf_sk_storage_elem *selem;

	if (charge_omem && omem_charge(sk, smap->elem_size))
		return NULL;

	selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN);
	if (selem) {
		if (value)
			memcpy(SDATA(selem)->data, value, smap->map.value_size);
		return selem;
	}

	if (charge_omem)
		atomic_sub(smap->elem_size, &sk->sk_omem_alloc);

	return NULL;
}

/* sk_storage->lock must be held and selem->sk_storage == sk_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool __selem_unlink_sk(struct bpf_sk_storage *sk_storage,
			      struct bpf_sk_storage_elem *selem,
			      bool uncharge_omem)
{
	struct bpf_sk_storage_map *smap;
	bool free_sk_storage;
	struct sock *sk;

	smap = rcu_dereference(SDATA(selem)->smap);
	sk = sk_storage->sk;

	/* All uncharging on sk->sk_omem_alloc must be done first.
	 * sk may be freed once the last selem is unlinked from sk_storage.
	 */
	if (uncharge_omem)
		atomic_sub(smap->elem_size, &sk->sk_omem_alloc);

	free_sk_storage = hlist_is_singular_node(&selem->snode,
						 &sk_storage->list);
	if (free_sk_storage) {
		atomic_sub(sizeof(struct bpf_sk_storage), &sk->sk_omem_alloc);
		sk_storage->sk = NULL;
		/* After this RCU_INIT, sk may be freed and cannot be used */
		RCU_INIT_POINTER(sk->sk_bpf_storage, NULL);

		/* sk_storage is not freed now.  sk_storage->lock is
		 * still held and raw_spin_unlock_bh(&sk_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if kfree_rcu(sk_storage, rcu) is done
		 * after the raw_spin_unlock_bh(&sk_storage->lock).
		 *
		 * Hence, a "bool free_sk_storage" is returned
		 * to the caller which then calls the kfree_rcu()
		 * after unlocking.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(sk_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(sk_storage->cache[smap->cache_idx], NULL);

	kfree_rcu(selem, rcu);

	return free_sk_storage;
}

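/* Unlink an elem from its sk_storage under sk_storage->lock.  If this
 * drops the last elem, the sk_storage itself is freed via kfree_rcu().
 */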
static void selem_unlink_sk(struct bpf_sk_storage_elem *selem)
{
	struct bpf_sk_storage *sk_storage;
	bool free_sk_storage = false;

	if (unlikely(!selem_linked_to_sk(selem)))
		/* selem has already been unlinked from sk */
		return;

	sk_storage = rcu_dereference(selem->sk_storage);
	raw_spin_lock_bh(&sk_storage->lock);
	if (likely(selem_linked_to_sk(selem)))
		free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
	raw_spin_unlock_bh(&sk_storage->lock);

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}

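/* Link an elem into the sk_storage's list.  The caller must ensure no
 * concurrent access to the list, either by holding sk_storage->lock or
 * because the sk_storage is not yet visible to anyone else.
 */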
static void __selem_link_sk(struct bpf_sk_storage *sk_storage,
			    struct bpf_sk_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->sk_storage, sk_storage);
	hlist_add_head(&selem->snode, &sk_storage->list);
}

static void selem_unlink_map(struct bpf_sk_storage_elem *selem)
{
	struct bpf_sk_storage_map *smap;
	struct bucket *b;

	if (unlikely(!selem_linked_to_map(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference(SDATA(selem)->smap);
	b = select_bucket(smap, selem);
	raw_spin_lock_bh(&b->lock);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_bh(&b->lock);
}

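/* Record the owning map in sdata->smap and link the elem into the
 * map's bucket list under the bucket lock.
 */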
static void selem_link_map(struct bpf_sk_storage_map *smap,
			   struct bpf_sk_storage_elem *selem)
{
	struct bucket *b = select_bucket(smap, selem);

	raw_spin_lock_bh(&b->lock);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_bh(&b->lock);
}

static void selem_unlink(struct bpf_sk_storage_elem *selem)
{
	/* Always unlink from map before unlinking from sk_storage
	 * because selem will be freed after successfully unlinked from
	 * the sk_storage.
	 */
	selem_unlink_map(selem);
	selem_unlink_sk(selem);
}

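/* Look up the sdata of @smap for this sk_storage: try the per-sk cache
 * slot first, then walk the elem list.  When cacheit_lockit is true,
 * a hit found on the slow path is published to the cache slot under
 * sk_storage->lock.
 */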
static struct bpf_sk_storage_data *
__sk_storage_lookup(struct bpf_sk_storage *sk_storage,
		    struct bpf_sk_storage_map *smap,
		    bool cacheit_lockit)
{
	struct bpf_sk_storage_data *sdata;
	struct bpf_sk_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference(sk_storage->cache[smap->cache_idx]);
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode)
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		/* spinlock is needed to avoid racing with the
		 * parallel delete.  Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next __sk_storage_lookup().
		 */
		raw_spin_lock_bh(&sk_storage->lock);
		if (selem_linked_to_sk(selem))
			rcu_assign_pointer(sk_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_bh(&sk_storage->lock);
	}

	return sdata;
}

static struct bpf_sk_storage_data *
sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_sk_storage *sk_storage;
	struct bpf_sk_storage_map *smap;

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_sk_storage_map *)map;
	return __sk_storage_lookup(sk_storage, smap, cacheit_lockit);
}

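/* Check the update flags (BPF_NOEXIST/BPF_EXIST) against whether an
 * old elem was found.
 */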
static int check_flags(const struct bpf_sk_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}

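/* Allocate and charge an sk_storage for @sk and publish it to
 * sk->sk_bpf_storage with first_selem already linked.
 */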
static int sk_storage_alloc(struct sock *sk,
			    struct bpf_sk_storage_map *smap,
			    struct bpf_sk_storage_elem *first_selem)
{
	struct bpf_sk_storage *prev_sk_storage, *sk_storage;
	int err;

	err = omem_charge(sk, sizeof(*sk_storage));
	if (err)
		return err;

	sk_storage = kzalloc(sizeof(*sk_storage), GFP_ATOMIC | __GFP_NOWARN);
	if (!sk_storage) {
		err = -ENOMEM;
		goto uncharge;
	}
	INIT_HLIST_HEAD(&sk_storage->list);
	raw_spin_lock_init(&sk_storage->lock);
	sk_storage->sk = sk;

	__selem_link_sk(sk_storage, first_selem);
	selem_link_map(smap, first_selem);
	/* Publish sk_storage to sk.  sk->sk_lock cannot be acquired.
	 * Hence, an atomic op is used to set sk->sk_bpf_storage
	 * from NULL to the newly allocated sk_storage ptr.
	 *
	 * From now on, the sk->sk_bpf_storage pointer is protected
	 * by the sk_storage->lock.  Hence, when freeing
	 * the sk->sk_bpf_storage, the sk_storage->lock must
	 * be held before setting sk->sk_bpf_storage to NULL.
	 */
	prev_sk_storage = cmpxchg((struct bpf_sk_storage **)&sk->sk_bpf_storage,
				  NULL, sk_storage);
	if (unlikely(prev_sk_storage)) {
		selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_sk_storage_map_free() does a
		 * synchronize_rcu() before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	kfree(sk_storage);
	atomic_sub(sizeof(*sk_storage), &sk->sk_omem_alloc);
	return err;
}

/* The caller must ensure sk is not going away while it is linking a
 * new elem to sk->sk_bpf_storage (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, the new elem will become a leak (and cause other memory
 * issues during map destruction).
 */
static struct bpf_sk_storage_data *sk_storage_update(struct sock *sk,
						     struct bpf_map *map,
						     void *value,
						     u64 map_flags)
{
	struct bpf_sk_storage_data *old_sdata = NULL;
	struct bpf_sk_storage_elem *selem;
	struct bpf_sk_storage *sk_storage;
	struct bpf_sk_storage_map *smap;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
		return ERR_PTR(-EINVAL);

	smap = (struct bpf_sk_storage_map *)map;
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		/* Very first elem for this sk */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = selem_alloc(smap, sk, value, true);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = sk_storage_alloc(sk, smap, selem);
		if (err) {
			kfree(selem);
			atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do an inline update
		 * such that it can avoid taking the sk_storage->lock
		 * and changing the lists.
		 */
		old_sdata = __sk_storage_lookup(sk_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_sk(SELEM(old_sdata))) {
			copy_map_value_locked(map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	raw_spin_lock_bh(&sk_storage->lock);

	/* Recheck sk_storage->list under sk_storage->lock */
	if (unlikely(hlist_empty(&sk_storage->list))) {
		/* A parallel del is happening and sk_storage is going
		 * away.  It has just been checked before, so very
		 * unlikely.  Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = __sk_storage_lookup(sk_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(map, old_sdata->data, value, false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	/* sk_storage->lock is held.  Hence, we are sure
	 * we can unlink and uncharge the old_sdata successfully
	 * later.  Hence, instead of charging the new selem now
	 * and then uncharging the old selem later (which may cause
	 * a potential but unnecessary charge failure), avoid taking
	 * a charge at all here (the "!old_sdata" check) and the
	 * old_sdata will not be uncharged later during __selem_unlink_sk().
	 */
	selem = selem_alloc(smap, sk, value, !old_sdata);
	if (!selem) {
		err = -ENOMEM;
		goto unlock_err;
	}

	/* First, link the new selem to the map */
	selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to sk_storage */
	__selem_link_sk(sk_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		selem_unlink_map(SELEM(old_sdata));
		__selem_unlink_sk(sk_storage, SELEM(old_sdata), false);
	}

unlock:
	raw_spin_unlock_bh(&sk_storage->lock);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_bh(&sk_storage->lock);
	return ERR_PTR(err);
}

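/* Delete the elem of @map from @sk, if one exists */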
static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
{
	struct bpf_sk_storage_data *sdata;

	sdata = sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	selem_unlink(SELEM(sdata));

	return 0;
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_sk_storage_elem *selem;
	struct bpf_sk_storage *sk_storage;
	bool free_sk_storage = false;
	struct hlist_node *n;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf-map's syscall
	 * could be modifying the sk_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
	 *
	 * It is racing with bpf_sk_storage_map_free() alone
	 * when unlinking elem from the sk_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_bh(&sk_storage->lock);
	hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * sk_storage.
		 */
		selem_unlink_map(selem);
		free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
	}
	raw_spin_unlock_bh(&sk_storage->lock);
	rcu_read_unlock();

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}

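/* Map destruction: unlink every elem that is still linked to this map.
 * Elems may concurrently disappear from the buckets via
 * bpf_sk_storage_free() when their sockets are destructed.
 */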
static void bpf_sk_storage_map_free(struct bpf_map *map)
{
	struct bpf_sk_storage_elem *selem;
	struct bpf_sk_storage_map *smap;
	struct bucket *b;
	unsigned int i;

	smap = (struct bpf_sk_storage_map *)map;

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now.  No new selem (of this map) can be added
	 * to the sk->sk_bpf_storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(&b->list)),
						 struct bpf_sk_storage_elem,
						 map_node))) {
			selem_unlink(selem);
		}
		rcu_read_unlock();
	}

	/* bpf_sk_storage_free() may still need to access the map.
	 * e.g. bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, the bpf_sk_storage_free() still needs to access
	 * the smap->elem_size to do the uncharging in
	 * __selem_unlink_sk().
	 *
	 * Hence, wait another rcu grace period for the
	 * bpf_sk_storage_free() to finish.
	 */
	synchronize_rcu();

	kvfree(smap->buckets);
	kfree(map);
}

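/* Sanity check the map attributes at map-create time */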
static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~SK_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (attr->value_size >= KMALLOC_MAX_SIZE -
	    MAX_BPF_STACK - sizeof(struct bpf_sk_storage_elem) ||
	    /* U16_MAX is much more than enough for sk local storage
	     * considering a tcp_sock is ~2k.
	     */
	    attr->value_size > U16_MAX - sizeof(struct bpf_sk_storage_elem))
		return -E2BIG;

	return 0;
}

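/* Create the map: size the bucket array by the number of possible CPUs
 * and pick a cache slot index that lookups of this map will use.
 */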
static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_sk_storage_map *smap;
	unsigned int i;
	u32 nbuckets;
	u64 cost;
	int ret;

	smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
	nbuckets = 1U << smap->bucket_log;
	cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);

	ret = bpf_map_charge_init(&smap->map.memory, cost);
	if (ret < 0) {
		kfree(smap);
		return ERR_PTR(ret);
	}

	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
				 GFP_USER | __GFP_NOWARN);
	if (!smap->buckets) {
		bpf_map_charge_finish(&smap->map.memory);
		kfree(smap);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
	smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) %
		BPF_SK_STORAGE_CACHE_SIZE;

	return &smap->map;
}

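/* sk_storage maps cannot be iterated over, so get_next_key is not
 * supported.
 */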
static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

static int bpf_sk_storage_map_check_btf(const struct bpf_map *map,
					const struct btf *btf,
					const struct btf_type *key_type,
					const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

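/* bpf(2) syscall path: the map key is a socket fd owned by the calling
 * task.  The fd is resolved to a socket, the storage op is applied to
 * sock->sk, and the socket reference is dropped again.
 */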
static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_sk_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags)
{
	struct bpf_sk_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = sk_storage_update(sock->sk, map, value, map_flags);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = sk_storage_delete(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}

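/* Duplicate an elem for a child socket, copying the parent elem's
 * value (under the bpf_spin_lock if the value has one).
 */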
static struct bpf_sk_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_sk_storage_map *smap,
			  struct bpf_sk_storage_elem *selem)
{
	struct bpf_sk_storage_elem *copy_selem;

	copy_selem = selem_alloc(smap, newsk, NULL, true);
	if (!copy_selem)
		return NULL;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

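/* Called when @sk is cloned into @newsk: copy every elem whose map was
 * created with BPF_F_CLONE over to the child socket.
 */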
int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_sk_storage *new_sk_storage = NULL;
	struct bpf_sk_storage *sk_storage;
	struct bpf_sk_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_sk_storage_elem *copy_selem;
		struct bpf_sk_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners adding new element
		 * here can race with cleanup in bpf_sk_storage_map_free.
		 * Try to grab map refcnt to make sure that it's still
		 * alive and prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map, false);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			selem_link_map(smap, copy_selem);
			__selem_link_sk(new_sk_storage, copy_selem);
		} else {
			ret = sk_storage_alloc(newsk, smap, copy_selem);
			if (ret) {
				kfree(copy_selem);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage = rcu_dereference(copy_selem->sk_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock();

	/* In case of an error, don't free anything explicitly here, the
	 * caller is responsible for calling bpf_sk_storage_free.
	 */

	return ret;
}

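/* bpf_sk_storage_get() helper: return a pointer to the sdata of @map
 * for @sk.  With BPF_SK_STORAGE_GET_F_CREATE, a new elem is created
 * (initialized from @value if it is non-NULL) when none exists yet.
 */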
BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags)
{
	struct bpf_sk_storage_data *sdata;

	if (flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add new elem to a going away sk.
	     * Otherwise, the new elem may become a leak
	     * (and also other memory issues during map
	     *  destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = sk_storage_update(sk, map, value, BPF_NOEXIST);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = sk_storage_delete(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}

const struct bpf_map_ops sk_storage_map_ops = {
	.map_alloc_check = bpf_sk_storage_map_alloc_check,
	.map_alloc = bpf_sk_storage_map_alloc,
	.map_free = bpf_sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_sk_storage_map_check_btf,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_SOCKET,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func		= bpf_sk_storage_delete,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_SOCKET,
};