/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
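
/* A bpf array map's values live either inline after struct bpf_array
 * (plain arrays) or in one percpu allocation per element (percpu arrays);
 * the two helpers below manage the latter.
 */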

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}

/* Called from syscall */
static int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
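
/* For unprivileged users the array is sized up to a power of two and every
 * lookup masks the index with index_mask, so even speculative execution
 * cannot read past the allocation (Spectre v1 hardening).
 */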

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool unpriv = !capable(CAP_SYS_ADMIN);
	u64 cost, array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (unpriv) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) max_entries * sizeof(void *);
	else
		array_size += (u64) max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);
	if (percpu) {
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
		if (cost >= U32_MAX - PAGE_SIZE)
			return ERR_PTR(-ENOMEM);
	}
	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size, numa_node);
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.unpriv_array = unpriv;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->map.pages = cost;
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}
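
/* Lookup is a constant-time offset computation into the flat value area;
 * out-of-range indices yield NULL.
 */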

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}
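
/* Syscall-side lookup of a percpu array: gather the element's value from
 * every possible CPU into one flat user buffer of num_possible_cpus() *
 * round_up(value_size, 8) bytes.
 */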

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	else
		memcpy(array->value +
		       array->elem_size * (index & array->index_mask),
		       value, map->value_size);
	return 0;
}
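
/* Syscall-side update of a percpu array: the caller provides one
 * round_up(value_size, 8) sized chunk per possible CPU, mirroring the
 * layout that bpf_percpu_array_copy() produces.
 */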

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};
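
/* A minimal userspace sketch of creating a map served by these ops via
 * the bpf(2) syscall; the numeric sizes are illustrative only, and
 * key_size must be sizeof(u32) per array_map_alloc_check():
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */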

const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};
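
/* The fd-array flavours below (prog array, perf event array, cgroup array,
 * array of maps) store kernel object pointers that are resolved from file
 * descriptors at update time; element lookup from a program goes through
 * type-specific helpers rather than a direct value pointer.
 */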

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}
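
/* A program stored in a prog array must be runtime-compatible with the
 * programs already referencing the map; bpf_prog_array_compatible()
 * enforces this before the slot is populated.
 */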

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};
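
/* Each occupied perf event array slot owns a bpf_event_entry that pins the
 * perf event's struct file; the reference is dropped only after an RCU
 * grace period so concurrent program lookups stay safe.
 */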

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}
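
/* Events that cannot be read locally via perf_event_read_local() are
 * refused with -EOPNOTSUPP when stored into the array.
 */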

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif
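
/* Array of maps: each value is a pointer to an inner bpf map. A meta map
 * built from attr->inner_map_fd records the attributes that every inner
 * map inserted later must match.
 */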

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}
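
/* Inline version of array_of_map_lookup_elem(): the same masked index
 * arithmetic as array_map_gen_lookup() plus one extra load to fetch the
 * inner map pointer, yielding NULL for empty slots.
 */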

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (map->unpriv_array) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
};