//SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <uapi/linux/btf.h>
#ifdef CONFIG_CGROUP_BPF

DEFINE_PER_CPU(struct bpf_cgroup_storage_info,
	       bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

#include "../cgroup/cgroup-internal.h"

#define LOCAL_STORAGE_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
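
/* Runtime state for one cgroup storage map: storages are kept in an rb-tree
 * (ordered by bpf_cgroup_storage_key_cmp()) and on a list used for
 * iteration; both are protected by 'lock'.
 */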
struct bpf_cgroup_storage_map {
	struct bpf_map map;

	spinlock_t lock;
	struct rb_root root;
	struct list_head list;
};
static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
{
	return container_of(map, struct bpf_cgroup_storage_map, map);
}
static bool attach_type_isolated(const struct bpf_map *map)
{
	return map->key_size == sizeof(struct bpf_cgroup_storage_key);
}
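
/* Three-way comparison of two map keys. For maps with attach-type-isolated
 * keys the full (cgroup_inode_id, attach_type) pair is compared, otherwise
 * only the cgroup inode id.
 */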
static int bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map *map,
				      const void *_key1, const void *_key2)
{
	if (attach_type_isolated(&map->map)) {
		const struct bpf_cgroup_storage_key *key1 = _key1;
		const struct bpf_cgroup_storage_key *key2 = _key2;

		if (key1->cgroup_inode_id < key2->cgroup_inode_id)
			return -1;
		else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
			return 1;
		else if (key1->attach_type < key2->attach_type)
			return -1;
		else if (key1->attach_type > key2->attach_type)
			return 1;
	} else {
		const __u64 *cgroup_inode_id1 = _key1;
		const __u64 *cgroup_inode_id2 = _key2;

		if (*cgroup_inode_id1 < *cgroup_inode_id2)
			return -1;
		else if (*cgroup_inode_id1 > *cgroup_inode_id2)
			return 1;
	}

	return 0;
}
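
/* Find the storage matching 'key' in the map's rb-tree. 'locked' indicates
 * that the caller already holds map->lock; otherwise it is taken here.
 */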
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked)
{
	struct rb_root *root = &map->root;
	struct rb_node *node;

	if (!locked)
		spin_lock_bh(&map->lock);

	node = root->rb_node;
	while (node) {
		struct bpf_cgroup_storage *storage;

		storage = container_of(node, struct bpf_cgroup_storage, node);

		switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) {
		case -1:
			node = node->rb_left;
			break;
		case 1:
			node = node->rb_right;
			break;
		default:
			if (!locked)
				spin_unlock_bh(&map->lock);
			return storage;
		}
	}

	if (!locked)
		spin_unlock_bh(&map->lock);

	return NULL;
}
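
/* Insert 'storage' into the map's rb-tree. Returns -EEXIST if an entry with
 * an equal key already exists. The caller must hold map->lock.
 */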
static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
				 struct bpf_cgroup_storage *storage)
{
	struct rb_root *root = &map->root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct bpf_cgroup_storage *this;

		this = container_of(*new, struct bpf_cgroup_storage, node);

		parent = *new;
		switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) {
		case -1:
			new = &((*new)->rb_left);
			break;
		case 1:
			new = &((*new)->rb_right);
			break;
		default:
			return -EEXIST;
		}
	}

	rb_link_node(&storage->node, parent, new);
	rb_insert_color(&storage->node, root);

	return 0;
}
static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;

	storage = cgroup_storage_lookup(map, key, false);
	if (!storage)
		return NULL;

	return &READ_ONCE(storage->buf)->data[0];
}
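
/* Syscall-side update of a shared (non-percpu) storage element. With
 * BPF_F_LOCK the value is copied under the embedded spin lock; otherwise a
 * new buffer is swapped in with xchg() and the old one freed after an RCU
 * grace period.
 */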
static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
				      void *value, u64 flags)
{
	struct bpf_cgroup_storage *storage;
	struct bpf_storage_buffer *new;

	if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST)))
		return -EINVAL;

	if (unlikely((flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
					key, false);
	if (!storage)
		return -ENOENT;

	if (flags & BPF_F_LOCK) {
		copy_map_value_locked(map, storage->buf->data, value, false);
		return 0;
	}

	new = bpf_map_kmalloc_node(map, sizeof(struct bpf_storage_buffer) +
				   map->value_size,
				   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
				   map->numa_node);
	if (!new)
		return -ENOMEM;

	memcpy(&new->data[0], value, map->value_size);
	check_and_init_map_lock(map, new->data);

	new = xchg(&storage->buf, new);
	kfree_rcu(new, rcu);

	return 0;
}
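
/* Syscall-side read of a per-cpu element: each CPU's buffer is copied
 * back-to-back into the user-supplied value area, rounded up to 8 bytes
 * per CPU.
 */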
int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
				   void *value)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(storage->percpu_buf, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
				     void *value, u64 map_flags)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
		return -EINVAL;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
				value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
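
/* Iterate the map: return the key following 'key' in list order, or the
 * first key when 'key' is NULL. Returns -ENOENT when the map is empty or
 * 'key' is not found.
 */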
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key,
				       void *_next_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;

	spin_lock_bh(&map->lock);

	if (list_empty(&map->list))
		goto enoent;

	if (key) {
		storage = cgroup_storage_lookup(map, key, true);
		if (!storage)
			goto enoent;

		storage = list_next_entry(storage, list_map);
		if (!storage)
			goto enoent;
	} else {
		storage = list_first_entry(&map->list,
					   struct bpf_cgroup_storage, list_map);
	}

	spin_unlock_bh(&map->lock);

	if (attach_type_isolated(&map->map)) {
		struct bpf_cgroup_storage_key *next = _next_key;
		*next = storage->key;
	} else {
		__u64 *next = _next_key;
		*next = storage->key.cgroup_inode_id;
	}
	return 0;

enoent:
	spin_unlock_bh(&map->lock);
	return -ENOENT;
}
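
/* Validate the attributes from BPF_MAP_CREATE and allocate an empty map.
 * The key must be either a struct bpf_cgroup_storage_key or a bare cgroup
 * id (__u64); max_entries must be 0 because entries are created only when
 * programs are attached to cgroups.
 */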
static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_cgroup_storage_map *map;

	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&
	    attr->key_size != sizeof(__u64))
		return ERR_PTR(-EINVAL);

	if (attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return ERR_PTR(-EINVAL);

	if (attr->max_entries)
		/* max_entries is not used and enforced to be 0 */
		return ERR_PTR(-EINVAL);

	map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map),
			   __GFP_ZERO | GFP_USER | __GFP_ACCOUNT, numa_node);
	if (!map)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&map->map, attr);

	spin_lock_init(&map->lock);
	map->root = RB_ROOT;
	INIT_LIST_HEAD(&map->list);

	return &map->map;
}
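
/* Map teardown: unlink and free every storage still associated with this
 * map. cgroup_mutex is held so this cannot race with cgroup attach/detach
 * paths that link or unlink storages.
 */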
static void cgroup_storage_map_free(struct bpf_map *_map)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct list_head *storages = &map->list;
	struct bpf_cgroup_storage *storage, *stmp;

	mutex_lock(&cgroup_mutex);

	list_for_each_entry_safe(storage, stmp, storages, list_map) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	mutex_unlock(&cgroup_mutex);

	WARN_ON(!RB_EMPTY_ROOT(&map->root));
	WARN_ON(!list_empty(&map->list));

	kfree(map);
}
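
/* Individual elements cannot be deleted from user space; a storage's
 * lifetime follows program attachment to a cgroup, so deletion is rejected.
 */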
static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}
static int cgroup_storage_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	if (attach_type_isolated(map)) {
		struct btf_member *m;
		u32 offset, size;

		/* Key is expected to be of struct bpf_cgroup_storage_key type,
		 * which is:
		 * struct bpf_cgroup_storage_key {
		 *	__u64	cgroup_inode_id;
		 *	__u32	attach_type;
		 * };
		 */

		/*
		 * Key_type must be a structure with two fields.
		 */
		if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
		    BTF_INFO_VLEN(key_type->info) != 2)
			return -EINVAL;

		/*
		 * The first field must be a 64 bit integer at 0 offset.
		 */
		m = (struct btf_member *)(key_type + 1);
		size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id);
		if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
			return -EINVAL;

		/*
		 * The second field must be a 32 bit integer at 64 bit offset.
		 */
		m++;
		offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
		size = sizeof_field(struct bpf_cgroup_storage_key, attach_type);
		if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
			return -EINVAL;
	} else {
		u32 int_data;

		/*
		 * Key is expected to be u64, which stores the cgroup_inode_id
		 */
		if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
			return -EINVAL;

		int_data = *(u32 *)(key_type + 1);
		if (BTF_INT_BITS(int_data) != 64 || BTF_INT_OFFSET(int_data))
			return -EINVAL;
	}

	return 0;
}
static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	int cpu;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
	if (!storage) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		seq_puts(m, ": ");
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  &READ_ONCE(storage->buf)->data[0], m);
		seq_puts(m, "\n");
	} else {
		seq_puts(m, ": {\n");
		for_each_possible_cpu(cpu) {
			seq_printf(m, "\tcpu%d: ", cpu);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  per_cpu_ptr(storage->percpu_buf, cpu),
					  m);
			seq_puts(m, "\n");
		}
		seq_puts(m, "}\n");
	}
	rcu_read_unlock();
}
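
/* Map operations backing BPF_MAP_TYPE_CGROUP_STORAGE and
 * BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE.
 */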
static int cgroup_storage_map_btf_id;
const struct bpf_map_ops cgroup_storage_map_ops = {
	.map_alloc = cgroup_storage_map_alloc,
	.map_free = cgroup_storage_map_free,
	.map_get_next_key = cgroup_storage_get_next_key,
	.map_lookup_elem = cgroup_storage_lookup_elem,
	.map_update_elem = cgroup_storage_update_elem,
	.map_delete_elem = cgroup_storage_delete_elem,
	.map_check_btf = cgroup_storage_check_btf,
	.map_seq_show_elem = cgroup_storage_seq_show_elem,
	.map_btf_name = "bpf_cgroup_storage_map",
	.map_btf_id = &cgroup_storage_map_btf_id,
};
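
/* Remember which cgroup storage map a program uses for a given storage
 * type. A program may reference at most one map per type; a second,
 * different map of the same type is rejected with -EBUSY.
 */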
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);

	if (aux->cgroup_storage[stype] &&
	    aux->cgroup_storage[stype] != _map)
		return -EBUSY;

	aux->cgroup_storage[stype] = _map;
	return 0;
}
static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
{
	size_t size;

	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
		size = sizeof(struct bpf_storage_buffer) + map->value_size;
		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
				  PAGE_SIZE) >> PAGE_SHIFT;
	} else {
		size = map->value_size;
		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
				  PAGE_SIZE) >> PAGE_SHIFT;
	}

	return size;
}
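
/* Allocate a storage buffer for the map the program uses for 'stype'.
 * Returns NULL when the program uses no such map, or ERR_PTR(-ENOMEM) on
 * allocation failure.
 */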
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype)
{
	const gfp_t gfp = __GFP_ZERO | GFP_USER;
	struct bpf_cgroup_storage *storage;
	struct bpf_map *map;
	size_t size;
	u32 pages;

	map = prog->aux->cgroup_storage[stype];
	if (!map)
		return NULL;

	size = bpf_cgroup_storage_calculate_size(map, &pages);

	storage = bpf_map_kmalloc_node(map, sizeof(struct bpf_cgroup_storage),
				       gfp, map->numa_node);
	if (!storage)
		goto enomem;

	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		storage->buf = bpf_map_kmalloc_node(map, size, gfp,
						    map->numa_node);
		if (!storage->buf)
			goto enomem;
		check_and_init_map_lock(map, storage->buf->data);
	} else {
		storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp);
		if (!storage->percpu_buf)
			goto enomem;
	}

	storage->map = (struct bpf_cgroup_storage_map *)map;

	return storage;

enomem:
	kfree(storage);
	return ERR_PTR(-ENOMEM);
}
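
/* RCU callbacks that free a storage once readers are done with it; one
 * variant for the shared buffer, one for the per-cpu buffer.
 */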
static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	kfree(storage->buf);
	kfree(storage);
}
static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	free_percpu(storage->percpu_buf);
	kfree(storage);
}
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_map *map;

	if (!storage)
		return;

	map = &storage->map->map;
	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED)
		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
	else
		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}
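
/* Bind a storage to a (cgroup, attach_type) pair and make it visible in its
 * map's rb-tree, the map's list and the cgroup's storage list.
 */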
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type)
{
	struct bpf_cgroup_storage_map *map;

	if (!storage)
		return;

	storage->key.attach_type = type;
	storage->key.cgroup_inode_id = cgroup_id(cgroup);

	map = storage->map;

	spin_lock_bh(&map->lock);
	WARN_ON(cgroup_storage_insert(map, storage));
	list_add(&storage->list_map, &map->list);
	list_add(&storage->list_cg, &cgroup->bpf.storages);
	spin_unlock_bh(&map->lock);
}
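
/* Reverse of bpf_cgroup_storage_link(): drop the storage from the rb-tree
 * and from both lists. Safe to call with a NULL storage.
 */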
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
{
	struct bpf_cgroup_storage_map *map;
	struct rb_root *root;

	if (!storage)
		return;

	map = storage->map;

	spin_lock_bh(&map->lock);
	root = &map->root;
	rb_erase(&storage->node, root);

	list_del(&storage->list_map);
	list_del(&storage->list_cg);

	spin_unlock_bh(&map->lock);
}

#endif