/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com */
#include <uapi/linux/bpf.h>
#include <uapi/linux/filter.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/stddef.h>
#include <linux/bpfptr.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/static_call.h>
#include <linux/memcontrol.h>
#include <linux/cfi.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct bpf_func_state;
struct user_namespace;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
extern bool bpf_global_ma_set;
typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
typedef unsigned int (*bpf_func_t)(const void *,
				   const struct bpf_insn *);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};
/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
				const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	long (*map_delete_elem)(struct bpf_map *map, void *key);
	long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	long (*map_pop_elem)(struct bpf_map *map, void *value);
	long (*map_peek_elem)(struct bpf_map *map, void *value);
	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	/* If need_defer is true, the implementation should guarantee that
	 * the to-be-put element is still alive before the bpf program, which
	 * may manipulate it, exits.
	 */
	void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);
	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);
	unsigned long (*map_get_unmapped_area)(struct file *filep, unsigned long addr,
					       unsigned long len, unsigned long pgoff,
					       unsigned long flags);
	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted to an outer map.
	 *
	 * Some properties of the inner map have been used during
	 * verification.  When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the map being inserted has the same
	 * properties that the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);
	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	long (*map_for_each_callback)(struct bpf_map *map,
				      bpf_callback_t callback_fn,
				      void *callback_ctx, u64 flags);

	u64 (*map_mem_usage)(const struct bpf_map *map);

	/* BTF id of struct allocated by map_alloc */
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
/* Support at most 11 fields in a BTF type */
enum btf_field_type {
	BPF_SPIN_LOCK  = (1 << 0),
	BPF_TIMER      = (1 << 1),
	BPF_KPTR_UNREF = (1 << 2),
	BPF_KPTR_REF   = (1 << 3),
	BPF_KPTR_PERCPU = (1 << 4),
	BPF_KPTR       = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
	BPF_LIST_HEAD  = (1 << 5),
	BPF_LIST_NODE  = (1 << 6),
	BPF_RB_ROOT    = (1 << 7),
	BPF_RB_NODE    = (1 << 8),
	BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
	BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
	BPF_REFCOUNT   = (1 << 9),
	BPF_WORKQUEUE  = (1 << 10),
};
typedef void (*btf_dtor_kfunc_t)(void *);

struct btf_field_kptr {
	struct module *module;
	/* dtor used if btf_is_kernel(btf), otherwise the type is
	 * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
	 */
	btf_dtor_kfunc_t dtor;
};

struct btf_field_graph_root {
	struct btf_record *value_rec;
};

	enum btf_field_type type;
	struct btf_field_kptr kptr;
	struct btf_field_graph_root graph_root;

	struct btf_field fields[];
/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
struct bpf_rb_node_kern {
	struct rb_node rb_node;
	void *owner;
} __attribute__((aligned(8)));

/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
struct bpf_list_node_kern {
	struct list_head list_head;
	void *owner;
} __attribute__((aligned(8)));
struct bpf_map {
	const struct bpf_map_ops *ops;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	u64 map_extra; /* any per-map-type extra fields */
	struct btf_record *record;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup *objcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	struct mutex freeze_mutex;

	/* rcu is used before freeing and work is only used during freeing */
	struct work_struct work;
	/* 'Ownership' of program-containing map is claimed by the first program
	 * that is going to use this map or by the first program whose FD is
	 * stored in the map, to make sure that all callers and callees have the
	 * same prog type, JITed flag and xdp_has_frags flag.
	 */
	enum bpf_prog_type type;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	bool free_after_mult_rcu_gp;
	bool free_after_rcu_gp;
	atomic64_t sleepable_refcnt;
	s64 __percpu *elem_count;
};
static inline const char *btf_field_type_name(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return "bpf_spin_lock";
	case BPF_KPTR_PERCPU:
		return "percpu_kptr";
	case BPF_LIST_HEAD:
		return "bpf_list_head";
	case BPF_LIST_NODE:
		return "bpf_list_node";
	case BPF_RB_ROOT:
		return "bpf_rb_root";
	case BPF_RB_NODE:
		return "bpf_rb_node";
	case BPF_REFCOUNT:
		return "bpf_refcount";
	default:
		WARN_ON_ONCE(1);
		return "unknown";
	}
}
static inline u32 btf_field_type_size(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return sizeof(struct bpf_spin_lock);
	case BPF_TIMER:
		return sizeof(struct bpf_timer);
	case BPF_WORKQUEUE:
		return sizeof(struct bpf_wq);
	case BPF_KPTR_PERCPU:
		return sizeof(u64);
	case BPF_LIST_HEAD:
		return sizeof(struct bpf_list_head);
	case BPF_LIST_NODE:
		return sizeof(struct bpf_list_node);
	case BPF_RB_ROOT:
		return sizeof(struct bpf_rb_root);
	case BPF_RB_NODE:
		return sizeof(struct bpf_rb_node);
	case BPF_REFCOUNT:
		return sizeof(struct bpf_refcount);
	}
}
static inline u32 btf_field_type_align(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return __alignof__(struct bpf_spin_lock);
	case BPF_TIMER:
		return __alignof__(struct bpf_timer);
	case BPF_WORKQUEUE:
		return __alignof__(struct bpf_wq);
	case BPF_KPTR_PERCPU:
		return __alignof__(u64);
	case BPF_LIST_HEAD:
		return __alignof__(struct bpf_list_head);
	case BPF_LIST_NODE:
		return __alignof__(struct bpf_list_node);
	case BPF_RB_ROOT:
		return __alignof__(struct bpf_rb_root);
	case BPF_RB_NODE:
		return __alignof__(struct bpf_rb_node);
	case BPF_REFCOUNT:
		return __alignof__(struct bpf_refcount);
	}
}
static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
{
	memset(addr, 0, field->size);

	switch (field->type) {
	case BPF_REFCOUNT:
		refcount_set((refcount_t *)addr, 1);
		break;
	case BPF_RB_NODE:
		RB_CLEAR_NODE((struct rb_node *)addr);
		break;
	case BPF_LIST_HEAD:
	case BPF_LIST_NODE:
		INIT_LIST_HEAD((struct list_head *)addr);
		break;
	case BPF_RB_ROOT:
		/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
	case BPF_KPTR_PERCPU:
		break;
	}
}
static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type)
{
	if (IS_ERR_OR_NULL(rec))
		return false;
	return rec->field_mask & type;
}

static inline void bpf_obj_init(const struct btf_record *rec, void *obj)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++)
		bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset);
}
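/* A minimal usage sketch: a map implementation can test the parsed BTF
 * record for special fields and (re)initialize all of them in a freshly
 * returned value (see check_and_init_map_value() right below for the
 * in-tree wrapper around bpf_obj_init()):
 *
 *	if (btf_record_has_field(map->record, BPF_SPIN_LOCK))
 *		// value contains a bpf_spin_lock at its recorded offset
 *	bpf_obj_init(map->record, value);	// init every special field
 */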
/* 'dst' must be a temporary buffer and should not point to memory that is being
 * used in parallel by a bpf program or bpf syscall, otherwise the access from
 * the bpf program or bpf syscall may be corrupted by the reinitialization,
 * leading to weird problems.  Even if 'dst' is newly allocated from the bpf
 * memory allocator, it is still possible for 'dst' to be used in parallel by a
 * bpf program or bpf syscall.
 */
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_init(map->record, dst);
}
/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only.  No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		data_race(*ldst++ = *lsrc++);
}
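/* A minimal usage sketch: the syscall-side lookup path copies per-cpu map
 * values with bpf_long_memcpy() so that concurrently-updated 8-byte counters
 * are read whole (still best-effort, per the comment above; the variable
 * names here are illustrative):
 *
 *	// dst and src are 8-byte aligned, size is rounded up to 8
 *	bpf_long_memcpy(dst, per_cpu_ptr(pptr, cpu), round_up(size, 8));
 */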
/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
static inline void bpf_obj_memcpy(struct btf_record *rec,
				  void *dst, void *src, u32 size,
				  bool long_memcpy)
{
	u32 curr_off = 0;
	int i;

	if (IS_ERR_OR_NULL(rec)) {
		if (long_memcpy)
			bpf_long_memcpy(dst, src, round_up(size, 8));
		else
			memcpy(dst, src, size);
		return;
	}

	for (i = 0; i < rec->cnt; i++) {
		u32 next_off = rec->fields[i].offset;
		u32 sz = next_off - curr_off;

		memcpy(dst + curr_off, src + curr_off, sz);
		curr_off += rec->fields[i].size + sz;
	}
	memcpy(dst + curr_off, src + curr_off, size - curr_off);
}
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
}

static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
}

static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
{
	u32 curr_off = 0;
	int i;

	if (IS_ERR_OR_NULL(rec)) {
		memset(dst, 0, size);
		return;
	}

	for (i = 0; i < rec->cnt; i++) {
		u32 next_off = rec->fields[i].offset;
		u32 sz = next_off - curr_off;

		memset(dst + curr_off, 0, sz);
		curr_off += rec->fields[i].size + sz;
	}
	memset(dst + curr_off, 0, size - curr_off);
}

static inline void zero_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_memzero(map->record, dst, map->value_size);
}
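/* Usage sketch: map update paths use these wrappers so that special fields
 * (spin locks, timers, kptrs) are skipped rather than blindly overwritten,
 * e.g.:
 *
 *	copy_map_value(map, dst_val, src_val);	// special fields untouched
 *	zero_map_value(map, dst_val);		// clears only plain bytes
 *
 * (Illustrative pairing; actual call sites differ per map type.)
 */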
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
void bpf_wq_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
			struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
		      struct bpf_spin_lock *spin_lock);
u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena);
u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}
static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;
/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or a memory region may be read-only. We classify types into two categories:
 * base types and extended types. Extended types are base types combined with
 * a type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_type.
 */
#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied on bpf_arg, it indicates the arg is
	 * compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),
	/* MEM points to BPF ring buffer reservation. */
	MEM_RINGBUF		= BIT(2 + BPF_BASE_TYPE_BITS),

	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	/* Indicates that the argument will be released. */
	OBJ_RELEASE		= BIT(5 + BPF_BASE_TYPE_BITS),

	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
	 * unreferenced and referenced kptr loaded from map value using a load
	 * instruction, so that they can only be dereferenced but not escape the
	 * BPF program into the kernel (i.e. cannot be passed as arguments to
	 * kfunc or bpf helpers).
	 */
	PTR_UNTRUSTED		= BIT(6 + BPF_BASE_TYPE_BITS),

	MEM_UNINIT		= BIT(7 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to memory local to the bpf program. */
	DYNPTR_TYPE_LOCAL	= BIT(8 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to a kernel-produced ringbuf record. */
	DYNPTR_TYPE_RINGBUF	= BIT(9 + BPF_BASE_TYPE_BITS),

	/* Size is known at compile time. */
	MEM_FIXED_SIZE		= BIT(10 + BPF_BASE_TYPE_BITS),

	/* MEM is of an allocated object of type in program BTF. This is used to
	 * tag PTR_TO_BTF_ID allocated using bpf_obj_new.
	 */
	MEM_ALLOC		= BIT(11 + BPF_BASE_TYPE_BITS),
	/* PTR was passed from the kernel in a trusted context, and may be
	 * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
	 * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
	 * PTR_UNTRUSTED refers to a kptr that was read directly from a map
	 * without invoking bpf_kptr_xchg(). What we really need to know is
	 * whether a pointer is safe to pass to a kfunc or BPF helper function.
	 * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF
	 * helpers, they do not cover all possible instances of unsafe
	 * pointers. For example, a pointer that was obtained from walking a
	 * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the
	 * fact that it may be NULL, invalid, etc. This is due to backwards
	 * compatibility requirements, as this was the behavior that was first
	 * introduced when kptrs were added. The behavior is now considered
	 * deprecated, and PTR_UNTRUSTED will eventually be removed.
	 *
	 * PTR_TRUSTED, on the other hand, is a pointer that the kernel
	 * guarantees to be valid and safe to pass to kfuncs and BPF helpers.
	 * For example, pointers passed to tracepoint arguments are considered
	 * PTR_TRUSTED, as are pointers that are passed to struct_ops
	 * callbacks. As alluded to above, pointers that are obtained from
	 * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a
	 * struct task_struct *task is PTR_TRUSTED, then accessing
	 * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored
	 * in a BPF register. Similarly, pointers passed to certain program
	 * types such as kretprobes are not guaranteed to be valid, as they may
	 * for example contain an object that was recently freed.
	 */
	PTR_TRUSTED		= BIT(12 + BPF_BASE_TYPE_BITS),

	/* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
	MEM_RCU			= BIT(13 + BPF_BASE_TYPE_BITS),

	/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
	 * Currently only valid for linked-list and rbtree nodes. If the nodes
	 * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
	 */
	NON_OWN_REF		= BIT(14 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to sk_buff */
	DYNPTR_TYPE_SKB		= BIT(15 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to xdp_buff */
	DYNPTR_TYPE_XDP		= BIT(16 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_FLAG_MAX,
	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,
};
#define DYNPTR_TYPE_FLAG_MASK	(DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
				 | DYNPTR_TYPE_XDP)

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
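/* A type word is a base type in the low BPF_BASE_TYPE_BITS bits plus flag
 * bits above it.  A minimal sketch of how such a word decomposes (the
 * kernel's own base_type()/type_flag() helpers live in
 * include/linux/bpf_verifier.h; the names below are illustrative):
 *
 *	static inline u32 demo_base_type(u32 type)
 *	{
 *		return type & (BPF_BASE_TYPE_LIMIT - 1);
 *	}
 *
 *	static inline u32 demo_type_flag(u32 type)
 *	{
 *		return type & ~(BPF_BASE_TYPE_LIMIT - 1);
 *	}
 *
 * e.g. demo_base_type(ARG_PTR_TO_MAP_VALUE_OR_NULL) == ARG_PTR_TO_MAP_VALUE
 * and demo_type_flag(ARG_PTR_TO_MAP_VALUE_OR_NULL) == PTR_MAYBE_NULL.
 */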
/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */

	/* Used to prototype bpf_memcmp() and other functions that access data
	 * on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_RINGBUF_MEM,	/* pointer to dynamically reserved ringbuf memory */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK,	/* pointer to stack */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	ARG_PTR_TO_KPTR,	/* pointer to referenced kptr */
	ARG_PTR_TO_DYNPTR,	/* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
	__BPF_ARG_TYPE_MAX,
	/* Extended arg_types. */
	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
	ARG_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
	/* pointer to memory does not need to be initialized, helper function must fill
	 * all bytes or clear them in error case.
	 */
	ARG_PTR_TO_UNINIT_MEM		= MEM_UNINIT | ARG_PTR_TO_MEM,
	/* Pointer to valid memory of size known at compile time. */
	ARG_PTR_TO_FIXED_SIZE_MEM	= MEM_FIXED_SIZE | ARG_PTR_TO_MEM,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_ARG_TYPE_LIMIT		= BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
	RET_PTR_TO_MEM,			/* returns a pointer to memory */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
	__BPF_RET_TYPE_MAX,

	/* Extended ret_types. */
	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
	RET_PTR_TO_RINGBUF_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM,
	RET_PTR_TO_DYNPTR_MEM_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MEM,
	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
	RET_PTR_TO_BTF_ID_TRUSTED	= PTR_TRUSTED	 | RET_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_RET_TYPE_LIMIT		= BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	bool might_sleep;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
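/* A minimal example of a helper prototype wired up through this structure,
 * modeled on the map-lookup helper in kernel/bpf/helpers.c (an illustrative
 * sketch, not something this header defines):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 *
 * The verifier uses ret_type/argN_type to type-check each BPF_CALL site.
 */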
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	CONST_PTR_TO_DYNPTR,	 /* reg points to a const struct bpf_dynptr */
	__BPF_REG_TYPE_MAX,
	/* Extended reg_types. */
	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_REG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

static inline bool bpf_is_ldimm64(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}
struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_reg_state;
struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct bpf_reg_state *reg,
				 int off, int size);
};
struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	struct list_head offloads;
enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

/* The argument is a structure. */
#define BTF_FMODEL_STRUCT_ARG		BIT(0)

/* The argument is signed. */
#define BTF_FMODEL_SIGNED_ARG		BIT(1)

struct btf_func_model {
	u8 ret_size;
	u8 ret_flags;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
	u8 arg_flags[MAX_BPF_FUNC_ARGS];
};
/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * other progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Get original function from stack instead of from provided direct address.
 * Makes sense for trampolines with fexit or fmod_ret programs.
 */
#define BPF_TRAMP_F_ORIG_STACK		BIT(5)

/* This trampoline is on a function with another ftrace_ops with IPMODIFY,
 * e.g., a live patch. This flag is set and cleared by ftrace callbacks.
 */
#define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)

/* Indicate that current trampoline is in a tail call context. Then, it has to
 * cache and restore tail_call_cnt to avoid an infinite tail call loop.
 */
#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)

/*
 * Indicate the trampoline should be suitable to receive indirect calls;
 * without this indirectly calling the generated code can result in #UD/#CP,
 * depending on the CFI options.
 *
 * Used by bpf_struct_ops.
 *
 * Incompatible with FENTRY usage, overloads @func_addr argument.
 */
#define BPF_TRAMP_F_INDIRECT		BIT(8)
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.
 */
enum {
#if defined(__s390x__)
	BPF_MAX_TRAMP_LINKS = 27,
#else
	BPF_MAX_TRAMP_LINKS = 38,
#endif
};

struct bpf_tramp_links {
	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
	int nr_links;
};

struct bpf_tramp_run_ctx;
/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *func_addr);
void *arch_alloc_bpf_trampoline(unsigned int size);
void arch_free_bpf_trampoline(void *image, unsigned int size);
int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *func_addr);
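/* A minimal sketch of how these arch hooks fit together, mirroring the flow
 * in kernel/bpf/trampoline.c (error handling elided; local variable names
 * are illustrative):
 *
 *	int size = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
 *	void *image = arch_alloc_bpf_trampoline(size);
 *
 *	arch_prepare_bpf_trampoline(im, image, image + size,
 *				    m, flags, tlinks, func_addr);
 *	arch_protect_bpf_trampoline(image, size);	// make it RO+X
 *	...
 *	arch_free_bpf_trampoline(image, size);
 */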
u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
				      struct bpf_tramp_run_ctx *run_ctx);
typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
				      struct bpf_tramp_run_ctx *run_ctx);
bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);

struct bpf_ksym {
	unsigned long start;
	unsigned long end;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
	bool prog;
};
enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	struct rcu_head rcu;
	struct work_struct work;
};
struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	struct ftrace_ops *fops;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	struct btf_func_model model;
	bool ftrace_managed;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
};
struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	struct module *tgt_mod;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	struct bpf_ksym ksym;
#ifdef CONFIG_HAVE_STATIC_CALL
	struct static_call_key *sc_key;
	void *sc_tramp;
#endif
};
#define __bpfcall __nocfi

static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	bpf_func_t bpf_func)
{
	return bpf_func(ctx, insnsi);
}
/* the implementation of the opaque uapi struct bpf_dynptr */
struct bpf_dynptr_kern {
	void *data;
	/* Size represents the number of usable bytes of dynptr data.
	 * If for example the offset is at 4 for a local dynptr whose data is
	 * of type u64, the number of usable bytes is 4.
	 *
	 * The upper 8 bits are reserved. It is as follows:
	 * Bits 0 - 23 = size
	 * Bits 24 - 30 = dynptr type
	 * Bit 31 = whether dynptr is read-only
	 */
	u32 size;
	u32 offset;
} __packed;
enum bpf_dynptr_type {
	BPF_DYNPTR_TYPE_INVALID,
	/* Points to memory that is local to the bpf program */
	BPF_DYNPTR_TYPE_LOCAL,
	/* Underlying data is a ringbuf record */
	BPF_DYNPTR_TYPE_RINGBUF,
	/* Underlying data is a sk_buff */
	BPF_DYNPTR_TYPE_SKB,
	/* Underlying data is a xdp_buff */
	BPF_DYNPTR_TYPE_XDP,
};

int bpf_dynptr_check_size(u32 size);
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
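/* Given the bit layout documented in bpf_dynptr_kern above, the packed size
 * word can be decoded with plain masks and shifts.  A minimal sketch with
 * illustrative names (the kernel's own constants live in
 * kernel/bpf/helpers.c and may differ; the values below just restate the
 * layout comment: bits 0-23 size, 24-30 type, 31 read-only):
 *
 *	#define DEMO_DYNPTR_SIZE_MASK	0xFFFFFF
 *	#define DEMO_DYNPTR_TYPE_SHIFT	24
 *	#define DEMO_DYNPTR_RDONLY_BIT	BIT(31)
 *
 *	static inline u32 demo_dynptr_size(const struct bpf_dynptr_kern *p)
 *	{
 *		return p->size & DEMO_DYNPTR_SIZE_MASK;
 *	}
 *
 *	static inline enum bpf_dynptr_type
 *	demo_dynptr_type(const struct bpf_dynptr_kern *p)
 *	{
 *		return (p->size & ~DEMO_DYNPTR_RDONLY_BIT) >>
 *		       DEMO_DYNPTR_TYPE_SHIFT;
 *	}
 */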
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
/*
 * When the architecture supports STATIC_CALL, replace the bpf_dispatcher_fn
 * indirection with a direct call to the bpf program. If the architecture does
 * not have STATIC_CALL, avoid a double-indirection.
 */
#ifdef CONFIG_HAVE_STATIC_CALL

#define __BPF_DISPATCHER_SC_INIT(_name)				\
	.sc_key = &STATIC_CALL_KEY(_name),			\
	.sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),

#define __BPF_DISPATCHER_SC(name)				\
	DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)

#define __BPF_DISPATCHER_CALL(name)				\
	static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)

#define __BPF_DISPATCHER_UPDATE(_d, _new)			\
	__static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))

#else
#define __BPF_DISPATCHER_SC_INIT(name)
#define __BPF_DISPATCHER_SC(name)
#define __BPF_DISPATCHER_CALL(name)	bpf_func(ctx, insnsi)
#define __BPF_DISPATCHER_UPDATE(_d, _new)
#endif
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.ksym = {						\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
	__BPF_DISPATCHER_SC_INIT(_name##_call)			\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	__BPF_DISPATCHER_SC(name);					\
	noinline __bpfcall unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func)					\
	{								\
		return __BPF_DISPATCHER_CALL(name);			\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);

#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func);					\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
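/* A minimal sketch of the dispatcher pattern, modeled on the XDP dispatcher
 * in net/core/filter.c (the "my_dispatch" name is illustrative):
 *
 *	DEFINE_BPF_DISPATCHER(my_dispatch)	// in exactly one .c file
 *
 *	// Run a program through the dispatcher trampoline:
 *	u32 ret = __BPF_PROG_RUN(prog, ctx,
 *				 BPF_DISPATCHER_FUNC(my_dispatch));
 *
 *	// Re-patch the dispatcher when the attached program changes:
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_dispatch),
 *				   old_prog, new_prog);
 */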
/* Called only from JIT-enabled code, so there's no need for stubs. */
void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return NULL;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
#endif
struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	struct bpf_map *map;
	bool tailcall_target_stable;
};
/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;
struct bpf_prog_aux {
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool dev_bound; /* Program is bound to the netdev. */
	bool offload_requested; /* Program is bound and offloaded to the netdev. */
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool attach_tracing_prog; /* true if tracing another tracing program */
	bool func_proto_unreliable;
	bool tail_call_reachable;
	bool exception_boundary;
	struct bpf_arena *arena;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
#ifdef CONFIG_FINEIBT
	struct bpf_ksym ksym_prefix;
#endif
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	int cgroup_atype; /* enum cgroup_bpf_attach_type */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
	u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64);
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_token *token;
	struct bpf_prog_offload *offload;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	u16			jited:1,	/* Is our filter JIT'ed? */
				jit_requested:1,/* archs need to JIT the prog */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1,	/* Do we need dst entry? */
				blinding_requested:1, /* needs constant blinding */
				blinded:1,	/* Was blinded */
				is_func:1,	/* program is a bpf function */
				kprobe_override:1, /* Do we override a kprobe? */
				has_callchain_buf:1, /* callchain buffer allocated? */
				enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
				call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
				call_get_func_ip:1, /* Do we call get_func_ip() */
				tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
				sleepable:1;	/* BPF program is sleepable */
	enum bpf_prog_type	type;		/* Type of BPF program */
	enum bpf_attach_type	expected_attach_type; /* For some prog types */
	u32			len;		/* Number of filter blocks */
	u32			jited_len;	/* Size of jited insns in bytes */
	u8			tag[BPF_TAG_SIZE];
	struct bpf_prog_stats __percpu *stats;
	int __percpu		*active;
	unsigned int		(*bpf_func)(const void *ctx,
					    const struct bpf_insn *insn);
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	/* Instructions for interpreter */
	union {
		DECLARE_FLEX_ARRAY(struct sock_filter, insns);
		DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
	};
};
struct bpf_array_aux {
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	/* rcu is used before freeing, work can be used to schedule that
	 * RCU-based freeing before that, so they never overlap
	 */
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};
struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	/* deallocate link resources callback, called without RCU grace period
	 * waiting
	 */
	void (*dealloc)(struct bpf_link *link);
	/* deallocate link resources callback, called after RCU grace period;
	 * if underlying BPF program is sleepable we go through tasks trace
	 * RCU GP and then "classic" RCU GP
	 */
	void (*dealloc_deferred)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
	int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
			  struct bpf_map *old_map);
};
struct bpf_tramp_link {
	struct bpf_link link;
	struct hlist_node tramp_hlist;
	u64 cookie;
};

struct bpf_shim_tramp_link {
	struct bpf_tramp_link link;
	struct bpf_trampoline *trampoline;
};

struct bpf_tracing_link {
	struct bpf_tramp_link link;
	enum bpf_attach_type attach_type;
	struct bpf_trampoline *trampoline;
	struct bpf_prog *tgt_prog;
};

struct bpf_raw_tp_link {
	struct bpf_link link;
	struct bpf_raw_event_map *btp;
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};
struct bpf_mount_opts {
	/* BPF token-related delegation options */
	u64 delegate_cmds;
	u64 delegate_maps;
	u64 delegate_progs;
	u64 delegate_attachs;
};

struct bpf_token {
	struct work_struct work;
	atomic64_t refcnt;
	struct user_namespace *userns;
	u64 allowed_cmds;
	u64 allowed_maps;
	u64 allowed_progs;
	u64 allowed_attachs;
#ifdef CONFIG_SECURITY
	void *security;
#endif
};

struct bpf_struct_ops_value;
#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
/**
 * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
 *			   define a BPF_MAP_TYPE_STRUCT_OPS map type composed
 *			   of BPF_PROG_TYPE_STRUCT_OPS progs.
 * @verifier_ops: A structure of callbacks that are invoked by the verifier
 *		  when determining whether the struct_ops progs in the
 *		  struct_ops map are valid.
 * @init: A callback that is invoked a single time, and before any other
 *	  callback, to initialize the structure. A nonzero return value means
 *	  the subsystem could not be initialized.
 * @check_member: When defined, a callback invoked by the verifier to allow
 *		  the subsystem to determine if an entry in the struct_ops map
 *		  is valid. A nonzero return value means that the map is
 *		  invalid and should be rejected by the verifier.
 * @init_member: A callback that is invoked for each member of the struct_ops
 *		 map to allow the subsystem to initialize the member. A nonzero
 *		 value means the member could not be initialized. This callback
 *		 is exclusive with the @type, @type_id, @value_type, and
 *		 @value_id members.
 * @reg: A callback that is invoked when the struct_ops map has been
 *	 initialized and is being attached to. Zero means the struct_ops map
 *	 has been successfully registered and is live. A nonzero return value
 *	 means the struct_ops map could not be registered.
 * @unreg: A callback that is invoked when the struct_ops map should be
 *	   unregistered.
 * @update: A callback that is invoked when the live struct_ops map is being
 *	    updated to contain new values. This callback is only invoked when
 *	    the struct_ops map is loaded with BPF_F_LINK. If not defined, it
 *	    is assumed that the struct_ops map cannot be updated.
 * @validate: A callback that is invoked after all of the members have been
 *	      initialized. This callback should perform static checks on the
 *	      map, meaning that it should either fail or succeed
 *	      deterministically. A struct_ops map that has been validated may
 *	      not necessarily succeed in being registered if the call to @reg
 *	      fails. For example, a valid struct_ops map may be loaded, but
 *	      then fail to be registered due to there being another active
 *	      struct_ops map on the system in the subsystem already. For this
 *	      reason, if this callback is not defined, the check is skipped as
 *	      the struct_ops map will have final verification performed in
 *	      @reg.
 * @value_type: Value type.
 * @name: The name of the struct bpf_struct_ops object.
 * @func_models: Func models
 * @type_id: BTF type id.
 * @value_id: BTF value id.
 */
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member,
			    const struct bpf_prog *prog);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	int (*update)(void *kdata, void *old_kdata);
	int (*validate)(void *kdata);
	struct module *owner;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
};
/* Every member of a struct_ops type has an instance even if a member is not
 * an operator (function pointer). The "info" field will be assigned to
 * prog->aux->ctx_arg_info of BPF struct_ops programs to provide the
 * argument information required by the verifier to verify the program.
 *
 * btf_ctx_access() will look up prog->aux->ctx_arg_info to find the
 * corresponding entry for a given argument.
 */
struct bpf_struct_ops_arg_info {
	struct bpf_ctx_arg_aux *info;
	u32 cnt;
};
struct bpf_struct_ops_desc {
	struct bpf_struct_ops *st_ops;

	const struct btf_type *type;
	const struct btf_type *value_type;
	u32 type_id;
	u32 value_id;

	/* Collection of argument information for each member */
	struct bpf_struct_ops_arg_info *arg_info;
};

enum bpf_struct_ops_state {
	BPF_STRUCT_OPS_STATE_INIT,
	BPF_STRUCT_OPS_STATE_INUSE,
	BPF_STRUCT_OPS_STATE_TOBEFREE,
	BPF_STRUCT_OPS_STATE_READY,
};

struct bpf_struct_ops_common_value {
	refcount_t refcnt;
	enum bpf_struct_ops_state state;
};
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
/* This macro helps developers register a struct_ops type and generate the
 * type information correctly. Developers should use this macro to register
 * a struct_ops type instead of calling __register_bpf_struct_ops() directly.
 */
#define register_bpf_struct_ops(st_ops, type)				\
({									\
	struct bpf_struct_ops_##type {					\
		struct bpf_struct_ops_common_value common;		\
		struct type data ____cacheline_aligned_in_smp;		\
	};								\
	BTF_TYPE_EMIT(struct bpf_struct_ops_##type);			\
	__register_bpf_struct_ops(st_ops);				\
})
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
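/* A minimal sketch of a subsystem wiring up a struct_ops type, modeled on
 * the tcp_congestion_ops registration in net/ipv4/bpf_tcp_ca.c (the
 * "my_ops" names are illustrative):
 *
 *	static struct bpf_struct_ops bpf_my_ops = {
 *		.verifier_ops	= &bpf_my_ops_verifier_ops,
 *		.init		= bpf_my_ops_init,
 *		.init_member	= bpf_my_ops_init_member,
 *		.reg		= bpf_my_ops_reg,
 *		.unreg		= bpf_my_ops_unreg,
 *		.name		= "my_ops",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init my_ops_init(void)
 *	{
 *		return register_bpf_struct_ops(&bpf_my_ops, my_ops);
 *	}
 */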
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *stub_func,
				      void **image, u32 *image_off,
				      bool allow_alloc);
void bpf_struct_ops_image_free(void *image);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
int bpf_struct_ops_link_create(union bpf_attr *attr);
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
	int val;
};

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
	int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
			     struct btf *btf,
			     struct bpf_verifier_log *log);
void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map);
void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc);
#else
#define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; })
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EINVAL;
}
static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}
static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
}
static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
{
}
#endif
#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
                                    int cgroup_atype);
void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
                                                  int cgroup_atype)
{
        return -EOPNOTSUPP;
}
static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
{
}
#endif
struct bpf_array {
        struct bpf_map map;
        u32 elem_size;
        u32 index_mask;
        struct bpf_array_aux *aux;
        union {
                DECLARE_FLEX_ARRAY(char, value) __aligned(8);
                DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8);
                DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8);
        };
};
#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33
/* Maximum number of loops for bpf_loop and bpf_iter_num.
 * It's enum to expose it (and thus make it discoverable) through BTF.
 */
enum {
        BPF_MAX_LOOPS = 8 * 1024 * 1024,
};
#define BPF_F_ACCESS_MASK       (BPF_F_RDONLY |         \
                                 BPF_F_RDONLY_PROG |    \
                                 BPF_F_WRONLY |         \
                                 BPF_F_WRONLY_PROG)
#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)
/* Maximum number of user-producer ring buffer samples that can be drained in
 * a call to bpf_user_ringbuf_drain().
 */
#define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)
static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
        u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

        /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
         * not possible.
         */
        if (access_flags & BPF_F_RDONLY_PROG)
                return BPF_MAP_CAN_READ;
        else if (access_flags & BPF_F_WRONLY_PROG)
                return BPF_MAP_CAN_WRITE;
        else
                return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}
static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
        return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
               (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
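/* Illustrative sketch, not from the original source: a map created by
 * userspace with BPF_F_RDONLY_PROG in map_flags is reported by
 * bpf_map_flags_to_cap() as BPF_MAP_CAN_READ only, while map creation
 * paths reject the meaningless combination of both program-side flags:
 *
 *	if (!bpf_map_flags_access_ok(attr->map_flags))
 *		return -EINVAL;
 */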
struct bpf_event_entry {
        struct perf_event *event;
        struct file *perf_file;
        struct file *map_file;
        struct rcu_head rcu;
};
static inline bool map_type_contains_progs(struct bpf_map *map)
{
        return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
               map->map_type == BPF_MAP_TYPE_DEVMAP ||
               map->map_type == BPF_MAP_TYPE_CPUMAP;
}
bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
                                        unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
                                        const struct bpf_insn *src,
                                        struct bpf_insn *dst,
                                        struct bpf_prog *prog,
                                        u32 *target_size);
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
                     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
        struct bpf_prog *prog;
        union {
                struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
                u64 bpf_cookie;
        };
};

struct bpf_prog_array {
        struct rcu_head rcu;
        struct bpf_prog_array_item items[];
};
struct bpf_empty_prog_array {
        struct bpf_prog_array hdr;
        struct bpf_prog *null_prog;
};
/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a BPF program attached, one global 'bpf_empty_prog_array' is used instead.
 * It will not be modified by the caller of bpf_prog_array_alloc() (since the
 * caller requested prog_cnt == 0); the returned pointer should still be
 * 'freed' with bpf_prog_array_free().
 */
extern struct bpf_empty_prog_array bpf_empty_prog_array;
struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
/* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
                                __u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
                                struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
                             struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
                             u32 *prog_ids, u32 request_cnt,
                             u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
                        struct bpf_prog *exclude_prog,
                        struct bpf_prog *include_prog,
                        u64 bpf_cookie,
                        struct bpf_prog_array **new_array);
struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
        struct bpf_run_ctx run_ctx;
        const struct bpf_prog_array_item *prog_item;
        int retval;
};

struct bpf_trace_run_ctx {
        struct bpf_run_ctx run_ctx;
        u64 bpf_cookie;
        bool is_uprobe;
};

struct bpf_tramp_run_ctx {
        struct bpf_run_ctx run_ctx;
        u64 bpf_cookie;
        struct bpf_run_ctx *saved_run_ctx;
};
static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
        struct bpf_run_ctx *old_ctx = NULL;

#ifdef CONFIG_BPF_SYSCALL
        old_ctx = current->bpf_ctx;
        current->bpf_ctx = new_ctx;
#endif
        return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
        current->bpf_ctx = old_ctx;
#endif
}
/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE    (1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN                          (1 << 0)

typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
static __always_inline u32
bpf_prog_run_array(const struct bpf_prog_array *array,
                   const void *ctx, bpf_prog_run_fn run_prog)
{
        const struct bpf_prog_array_item *item;
        const struct bpf_prog *prog;
        struct bpf_run_ctx *old_run_ctx;
        struct bpf_trace_run_ctx run_ctx;
        u32 ret = 1;

        RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");

        if (unlikely(!array))
                return ret;

        run_ctx.is_uprobe = false;

        migrate_disable();
        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
        item = &array->items[0];
        while ((prog = READ_ONCE(item->prog))) {
                run_ctx.bpf_cookie = item->bpf_cookie;
                ret &= run_prog(prog, ctx);
                item++;
        }
        bpf_reset_run_ctx(old_run_ctx);
        migrate_enable();
        return ret;
}
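/* Illustrative sketch, not from the original source: a tracepoint-style call
 * site with a hypothetical RCU-protected 'ev->prog_array' would run it under
 * the plain RCU read lock that bpf_prog_run_array() asserts:
 *
 *	rcu_read_lock();
 *	ret = bpf_prog_run_array(rcu_dereference(ev->prog_array),
 *				 ctx, bpf_prog_run);
 *	rcu_read_unlock();
 */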
/* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
 *
 * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
 * overall. As a result, we must use the bpf_prog_array_free_sleepable
 * in order to use the tasks_trace rcu grace period.
 *
 * When a non-sleepable program is inside the array, we take the rcu read
 * section and disable preemption for that program alone, so it can access
 * rcu-protected dynamically sized maps.
 */
static __always_inline u32
bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
                          const void *ctx, bpf_prog_run_fn run_prog)
{
        const struct bpf_prog_array_item *item;
        const struct bpf_prog *prog;
        const struct bpf_prog_array *array;
        struct bpf_run_ctx *old_run_ctx;
        struct bpf_trace_run_ctx run_ctx;
        u32 ret = 1;

        might_fault();

        rcu_read_lock_trace();
        migrate_disable();

        run_ctx.is_uprobe = true;

        array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
        if (unlikely(!array))
                goto out;
        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
        item = &array->items[0];
        while ((prog = READ_ONCE(item->prog))) {
                if (!prog->sleepable)
                        rcu_read_lock();

                run_ctx.bpf_cookie = item->bpf_cookie;
                ret &= run_prog(prog, ctx);

                if (!prog->sleepable)
                        rcu_read_unlock();

                item++;
        }
        bpf_reset_run_ctx(old_run_ctx);
out:
        migrate_enable();
        rcu_read_unlock_trace();
        return ret;
}
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;
/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 */
static inline void bpf_disable_instrumentation(void)
{
        migrate_disable();
        this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
        this_cpu_dec(bpf_prog_active);
        migrate_enable();
}
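/* Illustrative sketch, not from the original source: a map implementation
 * holding a hypothetical bucket lock 'b->lock' would bracket its critical
 * section like this, so a kprobe/perf program firing inside it cannot
 * re-enter a map operation and deadlock:
 *
 *	bpf_disable_instrumentation();
 *	raw_spin_lock_irqsave(&b->lock, flags);
 *	... update the bucket ...
 *	raw_spin_unlock_irqrestore(&b->lock, flags);
 *	bpf_enable_instrumentation();
 */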
extern const struct super_operations bpf_super_ops;
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
        extern const struct bpf_prog_ops _name ## _prog_ops; \
        extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
        extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
                                       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog);
void bpf_map_free_id(struct bpf_map *map);

struct btf_field *btf_record_find(const struct btf_record *rec,
                                  u32 offset, u32 field_mask);
void btf_record_free(struct btf_record *rec);
void bpf_map_free_record(struct bpf_map *map);
struct btf_record *btf_record_dup(const struct btf_record *rec);
bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr);
int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
                             const union bpf_attr *attr,
                             union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid,
                        unsigned long nr_pages, struct page **page_array);
#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
                           int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
                       gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
                                    size_t align, gfp_t flags);
#else
static inline void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
                     int node)
{
        return kmalloc_node(size, flags, node);
}

static inline void *
bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
        return kzalloc(size, flags);
}

static inline void *
bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
{
        return kvcalloc(n, size, flags);
}

static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
                     gfp_t flags)
{
        return __alloc_percpu_gfp(size, align, flags);
}
#endif
static inline int
bpf_map_init_elem_count(struct bpf_map *map)
{
        size_t size = sizeof(*map->elem_count), align = size;
        gfp_t flags = GFP_USER | __GFP_NOWARN;

        map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
        if (!map->elem_count)
                return -ENOMEM;

        return 0;
}

static inline void
bpf_map_free_elem_count(struct bpf_map *map)
{
        free_percpu(map->elem_count);
}

static inline void bpf_map_inc_elem_count(struct bpf_map *map)
{
        this_cpu_inc(*map->elem_count);
}

static inline void bpf_map_dec_elem_count(struct bpf_map *map)
{
        this_cpu_dec(*map->elem_count);
}
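/* Illustrative sketch, not from the original source: a map type that opts
 * into element counting allocates the per-cpu counter at creation and
 * adjusts it on its update/delete paths:
 *
 *	err = bpf_map_init_elem_count(map);	// during map_alloc
 *	...
 *	bpf_map_inc_elem_count(map);		// element inserted
 *	bpf_map_dec_elem_count(map);		// element removed
 *	bpf_map_free_elem_count(map);		// during map_free
 */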
extern int sysctl_unprivileged_bpf_disabled;

bool bpf_token_capable(const struct bpf_token *token, int cap);

static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
{
        return bpf_token_capable(token, CAP_PERFMON);
}

static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
{
        return bpf_token_capable(token, CAP_PERFMON);
}

static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
{
        return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
}

static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
{
        return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
}
int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
                   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
struct bpf_link *bpf_link_get_curr_or_next(u32 *id);

void bpf_token_inc(struct bpf_token *token);
void bpf_token_put(struct bpf_token *token);
int bpf_token_create(union bpf_attr *attr);
struct bpf_token *bpf_token_get_from_fd(u32 ufd);

bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
bool bpf_token_allow_prog_type(const struct bpf_token *token,
                               enum bpf_prog_type prog_type,
                               enum bpf_attach_type attach_type);

int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir,
                            umode_t mode);
#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...)                   \
        extern int bpf_iter_ ## target(args);                   \
        int __init bpf_iter_ ## target(args) { return 0; }
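/* Illustrative sketch, not from the original source: an iterator target
 * declares its BPF-visible entry point with this macro; a task-iterator
 * style declaration would look like:
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 *
 * The dummy __init definition only exists so BTF records the signature;
 * it is never actually called.
 */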
/*
 * The task type of iterators.
 *
 * For BPF task iterators, they can be parameterized to visit only a
 * subset of tasks.
 *
 * BPF_TASK_ITER_ALL (default)
 *      Iterate over resources of every task.
 *
 * BPF_TASK_ITER_TID
 *      Iterate over resources of a task/tid.
 *
 * BPF_TASK_ITER_TGID
 *      Iterate over resources of every task of a process / task group.
 */
enum bpf_iter_task_type {
        BPF_TASK_ITER_ALL = 0,
        BPF_TASK_ITER_TID,
        BPF_TASK_ITER_TGID,
};
struct bpf_iter_aux_info {
        /* for map_elem iter */
        struct bpf_map *map;

        /* for cgroup iter */
        struct {
                struct cgroup *start; /* starting cgroup */
                enum bpf_cgroup_iter_order order;
        } cgroup;
        struct {
                enum bpf_iter_task_type type;
                u32 pid;
        } task;
};
typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
                                        union bpf_iter_link_info *linfo,
                                        struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
                                        struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
                                         struct bpf_link_info *info);
typedef const struct bpf_func_proto *
(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
                             const struct bpf_prog *prog);
enum bpf_iter_feature {
        BPF_ITER_RESCHED = BIT(0),
};
#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
        const char *target;
        bpf_iter_attach_target_t attach_target;
        bpf_iter_detach_target_t detach_target;
        bpf_iter_show_fdinfo_t show_fdinfo;
        bpf_iter_fill_link_info_t fill_link_info;
        bpf_iter_get_func_proto_t get_func_proto;
        u32 ctx_arg_info_size;
        u32 feature;
        struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
        const struct bpf_iter_seq_info *seq_info;
};
struct bpf_iter_meta {
        __bpf_md_ptr(struct seq_file *, seq);
        u64 session_id;
        u64 seq_num;
};
struct bpf_iter__bpf_map_elem {
        __bpf_md_ptr(struct bpf_iter_meta *, meta);
        __bpf_md_ptr(struct bpf_map *, map);
        __bpf_md_ptr(void *, key);
        __bpf_md_ptr(void *, value);
};
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
                              struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
                                struct bpf_link_info *info);

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
                                   struct bpf_func_state *caller,
                                   struct bpf_func_state *callee);
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
                           u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
                            u64 flags);
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
                                void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
                             size_t actual_size);

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size);
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
#endif

struct btf *bpf_get_btf_vmlinux(void);
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
                    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
                    struct net_device *dev_rx);
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
                          struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
                             struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
                           struct bpf_prog *xdp_prog, struct bpf_map *map,
                           bool exclude_ingress);

void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
                    struct net_device *dev_rx);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
                             struct sk_buff *skb);
/* Return the map's NUMA node as specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
        return (attr->map_flags & BPF_F_NUMA_NODE) ?
                attr->numa_node : NUMA_NO_NODE;
}
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                     const union bpf_attr *kattr,
                                     union bpf_attr __user *uattr);
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
                             const union bpf_attr *kattr,
                             union bpf_attr __user *uattr);
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
                                const union bpf_attr *kattr,
                                union bpf_attr __user *uattr);
int bpf_prog_test_run_nf(struct bpf_prog *prog,
                         const union bpf_attr *kattr,
                         union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
                    const struct bpf_prog *prog,
                    struct bpf_insn_access_aux *info);
static inline bool bpf_tracing_ctx_access(int off, int size,
                                          enum bpf_access_type type)
{
        if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
                return false;
        if (type != BPF_READ)
                return false;
        if (off % size != 0)
                return false;
        return true;
}
static inline bool bpf_tracing_btf_ctx_access(int off, int size,
                                              enum bpf_access_type type,
                                              const struct bpf_prog *prog,
                                              struct bpf_insn_access_aux *info)
{
        if (!bpf_tracing_ctx_access(off, size, type))
                return false;
        return btf_ctx_access(off, size, type, prog, info);
}
int btf_struct_access(struct bpf_verifier_log *log,
                      const struct bpf_reg_state *reg,
                      int off, int size, enum bpf_access_type atype,
                      u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
                          const struct btf *btf, u32 id, int off,
                          const struct btf *need_btf, u32 need_type_id,
                          bool strict);
int btf_distill_func_proto(struct bpf_verifier_log *log,
                           struct btf *btf,
                           const struct btf_type *func_proto,
                           const char *func_name,
                           struct btf_func_model *m);
struct bpf_reg_state;
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
                         struct btf *btf, const struct btf_type *t);
const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
                                    int comp_idx, const char *tag_key);
int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
                           int comp_idx, const char *tag_key, int last_id);
struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);

const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id,
                                                 const struct bpf_prog *prog);
void bpf_task_storage_free(struct task_struct *task);
void bpf_cgrp_storage_free(struct cgroup *cgroup);
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
                         const struct bpf_insn *insn);
int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
                       u16 btf_fd_idx, u8 **func_addr);
struct bpf_core_ctx {
        struct bpf_verifier_log *log;
        const struct btf *btf;
};
bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
                                const struct bpf_reg_state *reg,
                                const char *field_name, u32 btf_id, const char *suffix);

bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
                               const struct btf *reg_btf, u32 reg_id,
                               const struct btf *arg_btf, u32 arg_id);

int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
                   int relo_idx, void *insn);
static inline bool unprivileged_ebpf_enabled(void)
{
        return !sysctl_unprivileged_bpf_disabled;
}
/* Not all BPF program types have a bpf_ctx.
 * For the program types that have initialized the bpf_ctx,
 * this function can be used to decide whether a kernel function
 * is being called by a BPF program.
 */
static inline bool has_current_bpf_ctx(void)
{
        return !!current->bpf_ctx;
}
void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);

void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
                     enum bpf_dynptr_type type, u32 offset, u32 size);
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);

bool dev_check_flush(void);
bool cpu_map_check_flush(void);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
                                                     enum bpf_prog_type type,
                                                     bool attach_drv)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
        return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
                                 const struct bpf_link_ops *ops,
                                 struct bpf_prog *prog)
{
}

static inline int bpf_link_prime(struct bpf_link *link,
                                 struct bpf_link_primer *primer)
{
        return -EOPNOTSUPP;
}

static inline int bpf_link_settle(struct bpf_link_primer *primer)
{
        return -EOPNOTSUPP;
}

static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
{
}

static inline void bpf_link_inc(struct bpf_link *link)
{
}

static inline void bpf_link_put(struct bpf_link *link)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
        return -EOPNOTSUPP;
}
static inline bool bpf_token_capable(const struct bpf_token *token, int cap)
{
        return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
}

static inline void bpf_token_inc(struct bpf_token *token)
{
}

static inline void bpf_token_put(struct bpf_token *token)
{
}

static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
{
        return ERR_PTR(-EOPNOTSUPP);
}
static inline void __dev_flush(void)
{
}

struct xdp_frame;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
                    struct net_device *dev_rx)
{
        return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
                    struct net_device *dev_rx)
{
        return 0;
}

static inline
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
                          struct bpf_map *map, bool exclude_ingress)
{
        return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
                                           struct sk_buff *skb,
                                           struct bpf_prog *xdp_prog)
{
        return 0;
}

static inline
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
                           struct bpf_prog *xdp_prog, struct bpf_map *map,
                           bool exclude_ingress)
{
        return 0;
}
static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
                                  struct xdp_frame *xdpf,
                                  struct net_device *dev_rx)
{
        return 0;
}

static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
                                           struct sk_buff *skb)
{
        return -EOPNOTSUPP;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
                                                      enum bpf_prog_type type)
{
        return ERR_PTR(-EOPNOTSUPP);
}
static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
                                        const union bpf_attr *kattr,
                                        union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
                                        const union bpf_attr *kattr,
                                        union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                                            const union bpf_attr *kattr,
                                            union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                                   const union bpf_attr *kattr,
                                                   union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}

static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
                                              const union bpf_attr *kattr,
                                              union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}
static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
        return ERR_PTR(-ENOTSUPP);
}

static inline int btf_struct_access(struct bpf_verifier_log *log,
                                    const struct bpf_reg_state *reg,
                                    int off, int size, enum bpf_access_type atype,
                                    u32 *next_btf_id, enum bpf_type_flag *flag,
                                    const char **field_name)
{
        return -EACCES;
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        return NULL;
}
static inline void bpf_task_storage_free(struct task_struct *task)
{
}

static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{
        return false;
}

static inline const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
                         const struct bpf_insn *insn)
{
        return NULL;
}

static inline int
bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
                   u16 btf_fd_idx, u8 **func_addr)
{
        return -ENOTSUPP;
}
static inline bool unprivileged_ebpf_enabled(void)
{
        return false;
}

static inline bool has_current_bpf_ctx(void)
{
        return false;
}

static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
{
}

static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
{
}

static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
                                   enum bpf_dynptr_type type, u32 offset, u32 size)
{
}

static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
{
}

static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
}
#endif /* CONFIG_BPF_SYSCALL */
static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
        int ret = -EFAULT;

        if (IS_ENABLED(CONFIG_BPF_EVENTS))
                ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                memset(dst, 0, size);
        return ret;
}
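/* Illustrative sketch, not from the original source: callers can rely on the
 * zero-fill-on-failure contract above, so the destination buffer is never
 * left partially initialized:
 *
 *	u64 val;
 *	int err;
 *
 *	err = bpf_probe_read_kernel_common(&val, sizeof(val), unsafe_ptr);
 *	if (err < 0)
 *		return err;	// 'val' was zero-filled on failure
 */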
void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
                          struct btf_mod_pair *used_btfs, u32 len);

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
                                                 enum bpf_prog_type type)
{
        return bpf_prog_get_type_dev(ufd, type, false);
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
                          struct bpf_map **used_maps, u32 len);

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_dev_bound_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
                               struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
                                void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
                                 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
                                    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
                                       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
void unpriv_ebpf_notify(int new_state);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
                              struct bpf_prog_aux *prog_aux);
void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
void bpf_dev_bound_netdev_unregister(struct net_device *dev);
static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
        return aux->dev_bound;
}

static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
{
        return aux->offload_requested;
}

bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs);

static inline bool bpf_map_is_offloaded(struct bpf_map *map)
{
        return unlikely(map->ops == &bpf_map_offload_ops);
}
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr);

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
int sock_map_bpf_prog_query(const union bpf_attr *attr,
                            union bpf_attr __user *uattr);
int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog);

void sock_map_unhash(struct sock *sk);
void sock_map_destroy(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
                                            struct bpf_prog_aux *prog_aux)
{
        return -ENOTSUPP;
}

static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
                                                u32 func_id)
{
        return NULL;
}

static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
                                          union bpf_attr *attr)
{
        return -EOPNOTSUPP;
}

static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
                                             struct bpf_prog *old_prog)
{
        return -EOPNOTSUPP;
}

static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{
}

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
        return false;
}

static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux)
{
        return false;
}

static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
        return false;
}

static inline bool bpf_map_is_offloaded(struct bpf_map *map)
{
        return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
        return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}

static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
{
        return 0;
}

static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
                                            const union bpf_attr *kattr,
                                            union bpf_attr __user *uattr)
{
        return -ENOTSUPP;
}
#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
                                       struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
                                       enum bpf_prog_type ptype)
{
        return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
                                           u64 flags)
{
        return -EOPNOTSUPP;
}

static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
                                          union bpf_attr __user *uattr)
{
        return -EINVAL;
}

static inline int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
{
        return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
static __always_inline void
bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
{
        const struct bpf_prog_array_item *item;
        struct bpf_prog *prog;

        if (unlikely(!array))
                return;

        item = &array->items[0];
        while ((prog = READ_ONCE(item->prog))) {
                bpf_prog_inc_misses_counter(prog);
                item++;
        }
}
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
                                       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
                                       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
                                                     void *key, void *value)
{
        return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
                                                     void *key, void *value,
                                                     u64 map_flags)
{
        return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;
extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
extern const struct bpf_func_proto bpf_set_retval_proto;
extern const struct bpf_func_proto bpf_get_retval_proto;
extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
extern const struct bpf_func_proto bpf_cgrp_storage_get_proto;
extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
  enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
                                     enum bpf_access_type type,
                                     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
                              struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                                const struct bpf_insn *si,
                                struct bpf_insn *insn_buf,
                                struct bpf_prog *prog,
                                u32 *target_size);
int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
                               struct bpf_dynptr_kern *ptr);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
                                                   enum bpf_access_type type,
                                                   struct bpf_insn_access_aux *info)
{
        return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
                                            enum bpf_access_type type,
                                            struct bpf_insn_access_aux *info)
{
        return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
                                              const struct bpf_insn *si,
                                              struct bpf_insn *insn_buf,
                                              struct bpf_prog *prog,
                                              u32 *target_size)
{
        return 0;
}
static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
                                             struct bpf_dynptr_kern *ptr)
{
        return -EOPNOTSUPP;
}
#endif /* CONFIG_NET */

#ifdef CONFIG_INET
struct sk_reuseport_kern {
        struct sk_buff *skb;
        struct sock *sk;
        struct sock *selected_sk;
        struct sock *migrating_sk;
        void *data_end;
        u32 hash;
        u32 reuseport_id;
        bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
                                  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
                                    const struct bpf_insn *si,
                                    struct bpf_insn *insn_buf,
                                    struct bpf_prog *prog,
                                    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
                                  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
                                    const struct bpf_insn *si,
                                    struct bpf_insn *insn_buf,
                                    struct bpf_prog *prog,
                                    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
                                                enum bpf_access_type type,
                                                struct bpf_insn_access_aux *info)
{
        return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
                                                  const struct bpf_insn *si,
                                                  struct bpf_insn *insn_buf,
                                                  struct bpf_prog *prog,
                                                  u32 *target_size)
{
        return 0;
}

static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
                                                enum bpf_access_type type,
                                                struct bpf_insn_access_aux *info)
{
        return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
                                                  const struct bpf_insn *si,
                                                  struct bpf_insn *insn_buf,
                                                  struct bpf_prog *prog,
                                                  u32 *target_size)
{
        return 0;
}
#endif /* CONFIG_INET */
enum bpf_text_poke_type {
        BPF_MOD_CALL,
        BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
                       void *addr1, void *addr2);
void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
                               struct bpf_prog *new, struct bpf_prog *old);

void *bpf_arch_text_copy(void *dst, void *src, size_t len);
int bpf_arch_text_invalidate(void *dst, size_t len);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
#define MAX_BPRINTF_VARARGS             12
#define MAX_BPRINTF_BUF                 1024

struct bpf_bprintf_data {
        u32 *bin_args;
        char *buf;
        bool get_bin_args;
        bool get_buf;
};

int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
                        u32 num_args, struct bpf_bprintf_data *data);
void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
#ifdef CONFIG_BPF_LSM
void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
void bpf_cgroup_atype_put(int cgroup_atype);
#else
static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
#endif /* CONFIG_BPF_LSM */
#ifdef CONFIG_KEYS
struct bpf_key {
        struct key *key;
        bool has_ref;
};
#endif /* CONFIG_KEYS */
static inline bool type_is_alloc(u32 type)
{
        return type & MEM_ALLOC;
}
static inline gfp_t bpf_memcg_flags(gfp_t flags)
{
        if (memcg_bpf_enabled())
                return flags | __GFP_ACCOUNT;
        return flags;
}
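/* Illustrative sketch, not from the original source: allocation sites that
 * should be charged to the memcg of the map's creator combine this helper
 * with the usual GFP flags:
 *
 *	ptr = kzalloc(size, bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
 */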
static inline bool bpf_is_subprog(const struct bpf_prog *prog)
{
        return prog->aux->func_idx != 0;
}
#endif /* _LINUX_BPF_H */