// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 */
#include <linux/types.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
12 DEFINE_BPF_STORAGE_CACHE(cgroup_cache);
14 static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);
16 static void bpf_cgrp_storage_lock(void)
19 this_cpu_inc(bpf_cgrp_storage_busy);
22 static void bpf_cgrp_storage_unlock(void)
24 this_cpu_dec(bpf_cgrp_storage_busy);
28 static bool bpf_cgrp_storage_trylock(void)
31 if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
32 this_cpu_dec(bpf_cgrp_storage_busy);
39 static struct bpf_local_storage __rcu **cgroup_storage_ptr(void *owner)
41 struct cgroup *cg = owner;
43 return &cg->bpf_cgrp_storage;
46 void bpf_cgrp_storage_free(struct cgroup *cgroup)
48 struct bpf_local_storage *local_storage;
49 bool free_cgroup_storage = false;
53 local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
59 bpf_cgrp_storage_lock();
60 raw_spin_lock_irqsave(&local_storage->lock, flags);
61 free_cgroup_storage = bpf_local_storage_unlink_nolock(local_storage);
62 raw_spin_unlock_irqrestore(&local_storage->lock, flags);
63 bpf_cgrp_storage_unlock();
66 if (free_cgroup_storage)
67 kfree_rcu(local_storage, rcu);
70 static struct bpf_local_storage_data *
71 cgroup_storage_lookup(struct cgroup *cgroup, struct bpf_map *map, bool cacheit_lockit)
73 struct bpf_local_storage *cgroup_storage;
74 struct bpf_local_storage_map *smap;
76 cgroup_storage = rcu_dereference_check(cgroup->bpf_cgrp_storage,
81 smap = (struct bpf_local_storage_map *)map;
82 return bpf_local_storage_lookup(cgroup_storage, smap, cacheit_lockit);
85 static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
87 struct bpf_local_storage_data *sdata;
88 struct cgroup *cgroup;
92 cgroup = cgroup_get_from_fd(fd);
94 return ERR_CAST(cgroup);
96 bpf_cgrp_storage_lock();
97 sdata = cgroup_storage_lookup(cgroup, map, true);
98 bpf_cgrp_storage_unlock();
100 return sdata ? sdata->data : NULL;
103 static int bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
104 void *value, u64 map_flags)
106 struct bpf_local_storage_data *sdata;
107 struct cgroup *cgroup;
111 cgroup = cgroup_get_from_fd(fd);
113 return PTR_ERR(cgroup);
115 bpf_cgrp_storage_lock();
116 sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
117 value, map_flags, GFP_ATOMIC);
118 bpf_cgrp_storage_unlock();
120 return PTR_ERR_OR_ZERO(sdata);
123 static int cgroup_storage_delete(struct cgroup *cgroup, struct bpf_map *map)
125 struct bpf_local_storage_data *sdata;
127 sdata = cgroup_storage_lookup(cgroup, map, false);
131 bpf_selem_unlink(SELEM(sdata), true);
/*
 * Syscall-side delete: @key is a cgroup fd. Returns 0, -ENOENT when no
 * element exists, or -errno for a bad fd.
 */
static int bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct cgroup *cgroup;
	int err, fd;

	fd = *(int *)key;
	cgroup = cgroup_get_from_fd(fd);
	if (IS_ERR(cgroup))
		return PTR_ERR(cgroup);

	bpf_cgrp_storage_lock();
	err = cgroup_storage_delete(cgroup, map);
	bpf_cgrp_storage_unlock();
	cgroup_put(cgroup);
	return err;
}
152 static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
157 static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
159 return bpf_local_storage_map_alloc(attr, &cgroup_cache);
162 static void cgroup_storage_map_free(struct bpf_map *map)
164 bpf_local_storage_map_free(map, &cgroup_cache, NULL);
167 /* *gfp_flags* is a hidden argument provided by the verifier */
168 BPF_CALL_5(bpf_cgrp_storage_get, struct bpf_map *, map, struct cgroup *, cgroup,
169 void *, value, u64, flags, gfp_t, gfp_flags)
171 struct bpf_local_storage_data *sdata;
173 WARN_ON_ONCE(!bpf_rcu_lock_held());
174 if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
175 return (unsigned long)NULL;
178 return (unsigned long)NULL;
180 if (!bpf_cgrp_storage_trylock())
181 return (unsigned long)NULL;
183 sdata = cgroup_storage_lookup(cgroup, map, true);
187 /* only allocate new storage, when the cgroup is refcounted */
188 if (!percpu_ref_is_dying(&cgroup->self.refcnt) &&
189 (flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
190 sdata = bpf_local_storage_update(cgroup, (struct bpf_local_storage_map *)map,
191 value, BPF_NOEXIST, gfp_flags);
194 bpf_cgrp_storage_unlock();
195 return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL : (unsigned long)sdata->data;
198 BPF_CALL_2(bpf_cgrp_storage_delete, struct bpf_map *, map, struct cgroup *, cgroup)
202 WARN_ON_ONCE(!bpf_rcu_lock_held());
206 if (!bpf_cgrp_storage_trylock())
209 ret = cgroup_storage_delete(cgroup, map);
210 bpf_cgrp_storage_unlock();
214 BTF_ID_LIST_SINGLE(cgroup_storage_map_btf_ids, struct, bpf_local_storage_map)
215 const struct bpf_map_ops cgrp_storage_map_ops = {
216 .map_meta_equal = bpf_map_meta_equal,
217 .map_alloc_check = bpf_local_storage_map_alloc_check,
218 .map_alloc = cgroup_storage_map_alloc,
219 .map_free = cgroup_storage_map_free,
220 .map_get_next_key = notsupp_get_next_key,
221 .map_lookup_elem = bpf_cgrp_storage_lookup_elem,
222 .map_update_elem = bpf_cgrp_storage_update_elem,
223 .map_delete_elem = bpf_cgrp_storage_delete_elem,
224 .map_check_btf = bpf_local_storage_map_check_btf,
225 .map_btf_id = &cgroup_storage_map_btf_ids[0],
226 .map_owner_storage_ptr = cgroup_storage_ptr,
229 const struct bpf_func_proto bpf_cgrp_storage_get_proto = {
230 .func = bpf_cgrp_storage_get,
232 .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
233 .arg1_type = ARG_CONST_MAP_PTR,
234 .arg2_type = ARG_PTR_TO_BTF_ID,
235 .arg2_btf_id = &bpf_cgroup_btf_id[0],
236 .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
237 .arg4_type = ARG_ANYTHING,
240 const struct bpf_func_proto bpf_cgrp_storage_delete_proto = {
241 .func = bpf_cgrp_storage_delete,
243 .ret_type = RET_INTEGER,
244 .arg1_type = ARG_CONST_MAP_PTR,
245 .arg2_type = ARG_PTR_TO_BTF_ID,
246 .arg2_btf_id = &bpf_cgroup_btf_id[0],