/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>
#include <uapi/linux/filter.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/stddef.h>
#include <linux/bpfptr.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
typedef unsigned int (*bpf_func_t)(const void *,
				   const struct bpf_insn *);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);
	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map. It is a runtime check to ensure
	 * an inner map can be inserted to an outer map.
	 *
	 * Some properties of the inner map have already been used at
	 * verification time. When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the inserting map has the same
	 * properties that the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map,
				     bpf_callback_t callback_fn,
				     void *callback_ctx, u64 flags);

	/* BTF id of struct allocated by map_alloc */
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
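
/* Illustrative sketch (not part of this header): a simple map type typically
 * wires up only the allocation/teardown and element callbacks above and
 * leaves the rest NULL. The example_* identifiers below are made up for the
 * example; see kernel/bpf/arraymap.c or kernel/bpf/hashtab.c for real
 * implementations.
 *
 *	static int example_map_alloc_check(union bpf_attr *attr)
 *	{
 *		return attr->key_size == 4 ? 0 : -EINVAL;
 *	}
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_meta_equal		= bpf_map_meta_equal,
 *		.map_alloc_check	= example_map_alloc_check,
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_get_next_key	= example_map_get_next_key,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *		.map_update_elem	= example_map_update_elem,
 *		.map_delete_elem	= example_map_delete_elem,
 *		.map_btf_id		= &example_map_btf_id,
 *	};
 */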

enum {
	/* Support at most 8 pointers in a BPF map value */
	BPF_MAP_VALUE_OFF_MAX = 8,
	BPF_MAP_OFF_ARR_MAX   = BPF_MAP_VALUE_OFF_MAX +
				1 + /* for bpf_spin_lock */
				1,  /* for bpf_timer */
};

enum bpf_kptr_type {
	BPF_KPTR_UNREF,
	BPF_KPTR_REF,
};

struct bpf_map_value_off_desc {
	u32 offset;
	enum bpf_kptr_type type;
	struct {
		struct btf *btf;
		struct module *module;
		btf_dtor_kfunc_t dtor;
		u32 btf_id;
	} kptr;
};

struct bpf_map_value_off {
	u32 nr_off;
	struct bpf_map_value_off_desc off[];
};

struct bpf_map_off_arr {
	u32 cnt;
	u32 field_off[BPF_MAP_OFF_ARR_MAX];
	u8 field_sz[BPF_MAP_OFF_ARR_MAX];
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u64 map_extra; /* any per-map-type extra fields */
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	struct bpf_map_value_off *kptr_off_tab;
	int timer_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup *objcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	struct bpf_map_off_arr *off_arr;
	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	atomic64_t writecnt;
	/* 'Ownership' of program-containing map is claimed by the first program
	 * that is going to use this map or by the first program whose FD is
	 * stored in the map, to make sure that all callers and callees have the
	 * same prog type, JITed flag and xdp_has_frags flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
		bool xdp_has_frags;
	} owner;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline bool map_value_has_timer(const struct bpf_map *map)
{
	return map->timer_off >= 0;
}

static inline bool map_value_has_kptrs(const struct bpf_map *map)
{
	return !IS_ERR_OR_NULL(map->kptr_off_tab);
}

static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	if (unlikely(map_value_has_spin_lock(map)))
		memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
	if (unlikely(map_value_has_timer(map)))
		memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
	if (unlikely(map_value_has_kptrs(map))) {
		struct bpf_map_value_off *tab = map->kptr_off_tab;
		int i;

		for (i = 0; i < tab->nr_off; i++)
			*(u64 *)(dst + tab->off[i].offset) = 0;
	}
}

/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	u32 curr_off = 0;
	int i;

	if (likely(!map->off_arr)) {
		memcpy(dst, src, map->value_size);
		return;
	}

	for (i = 0; i < map->off_arr->cnt; i++) {
		u32 next_off = map->off_arr->field_off[i];

		memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
		curr_off += map->off_arr->field_sz[i];
	}
	memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or a memory is read-only. We classify types into two categories: base types
 * and extended types. Extended types are base types combined with a type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_types.
 */
#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied on bpf_arg, it indicates the arg is
	 * compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),

	/* MEM was "allocated" from a different helper, and cannot be mixed
	 * with regular non-MEM_ALLOC'ed MEM types.
	 */
	MEM_ALLOC		= BIT(2 + BPF_BASE_TYPE_BITS),

	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	/* Indicates that the argument will be released. */
	OBJ_RELEASE		= BIT(5 + BPF_BASE_TYPE_BITS),

	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
	 * unreferenced and referenced kptr loaded from map value using a load
	 * instruction, so that they can only be dereferenced but not escape the
	 * BPF program into the kernel (i.e. cannot be passed as arguments to
	 * kfunc or bpf helpers).
	 */
	PTR_UNTRUSTED		= BIT(6 + BPF_BASE_TYPE_BITS),

	MEM_UNINIT		= BIT(7 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to memory local to the bpf program. */
	DYNPTR_TYPE_LOCAL	= BIT(8 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to a ringbuf record. */
	DYNPTR_TYPE_RINGBUF	= BIT(9 + BPF_BASE_TYPE_BITS),

	/* Size is known at compile time. */
	MEM_FIXED_SIZE		= BIT(10 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_FLAG_MAX,
	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,
};

#define DYNPTR_TYPE_FLAG_MASK	(DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF)

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))

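/* Illustrative sketch: an extended type is a base type OR'ed with one or more
 * bpf_type_flag bits, so it can be decomposed with plain masking. The mask
 * below mirrors the base_type()/type_flag() helpers used by the verifier and
 * is shown here only to make the bit layout concrete:
 *
 *	#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
 *
 *	// e.g. ARG_PTR_TO_MAP_VALUE_OR_NULL == PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE
 *	u32 base  = type & BPF_BASE_TYPE_MASK;	// -> ARG_PTR_TO_MAP_VALUE
 *	u32 flags = type & ~BPF_BASE_TYPE_MASK;	// -> PTR_MAYBE_NULL
 */
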
/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */

	/* Used to prototype bpf_memcmp() and other functions that access data
	 * on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK,	/* pointer to stack */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	ARG_PTR_TO_KPTR,	/* pointer to referenced kptr */
	ARG_PTR_TO_DYNPTR,	/* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
	__BPF_ARG_TYPE_MAX,

	/* Extended arg_types. */
	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
	ARG_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
	ARG_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
	/* pointer to memory does not need to be initialized, helper function must fill
	 * all bytes or clear them in error case.
	 */
	ARG_PTR_TO_UNINIT_MEM		= MEM_UNINIT | ARG_PTR_TO_MEM,
	/* Pointer to valid memory of size known at compile time. */
	ARG_PTR_TO_FIXED_SIZE_MEM	= MEM_FIXED_SIZE | ARG_PTR_TO_MEM,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_ARG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
	RET_PTR_TO_ALLOC_MEM,		/* returns a pointer to dynamically allocated memory */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
	__BPF_RET_TYPE_MAX,

	/* Extended ret_types. */
	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
	RET_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
	RET_PTR_TO_DYNPTR_MEM_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_ALLOC_MEM,
	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_RET_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
		struct {
			size_t arg1_size;
			size_t arg2_size;
			size_t arg3_size;
			size_t arg4_size;
			size_t arg5_size;
		};
		size_t arg_size[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
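
/* Illustrative sketch: a helper pairs a BPF_CALL_n() implementation with a
 * bpf_func_proto describing its signature to the verifier. This roughly
 * mirrors how kernel/bpf/helpers.c defines bpf_map_lookup_elem_proto
 * (trimmed to the relevant fields):
 *
 *	BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
 *	{
 *		return (unsigned long) map->ops->map_lookup_elem(map, key);
 *	}
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */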

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	__BPF_REG_TYPE_MAX,

	/* Extended reg_types. */
	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_REG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
	       insn->src_reg == BPF_PSEUDO_FUNC;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id, enum bpf_type_flag *flag);
};
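
/* Illustrative sketch: every program type provides a bpf_verifier_ops so the
 * verifier can query helper availability and validate context accesses. The
 * example_* identifiers below are made up; see net/core/filter.c for real
 * instances such as sk_filter_verifier_ops:
 *
 *	static bool example_is_valid_access(int off, int size,
 *					    enum bpf_access_type type,
 *					    const struct bpf_prog *prog,
 *					    struct bpf_insn_access_aux *info)
 *	{
 *		return type == BPF_READ && size == sizeof(__u32) &&
 *		       off >= 0 && off < sizeof(struct example_ctx);
 *	}
 *
 *	const struct bpf_verifier_ops example_verifier_ops = {
 *		.get_func_proto		= example_func_proto,
 *		.is_valid_access	= example_is_valid_access,
 *	};
 */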

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.
 */
#define BPF_MAX_TRAMP_LINKS 38

struct bpf_tramp_links {
	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
	int nr_links;
};

struct bpf_tramp_run_ctx;

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling the original function
 *    fexit = a set of programs to run after the original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling the original function
 *      fexit = a set of programs to run after the original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
				       struct bpf_tramp_run_ctx *run_ctx);
u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
					struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
					struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
	unsigned long start;
	unsigned long end;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
	bool prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	bpf_func_t bpf_func)
{
	return bpf_func(ctx, insnsi);
}

#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func)					\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func);					\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
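
/* Illustrative usage sketch: a subsystem declares one dispatcher in a header
 * and defines it in exactly one .c file, then dispatches through it; XDP is
 * wired up roughly this way (the "example" name is made up, see
 * include/linux/filter.h and net/core/filter.c for the real XDP dispatcher):
 *
 *	DECLARE_BPF_DISPATCHER(example)		// in a header
 *	DEFINE_BPF_DISPATCHER(example)		// in one translation unit
 *
 *	// run a program through the dispatcher's trampoline
 *	ret = __bpf_prog_run(prog, ctx, BPF_DISPATCHER_FUNC(example));
 *
 *	// re-patch the dispatcher image when the attached program changes
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(example), old_prog, new_prog);
 */
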
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	bool xdp_has_frags;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	u32 verified_insns;
	int cgroup_atype; /* enum cgroup_bpf_attach_type */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	u16			jited:1,	/* Is our filter JIT'ed? */
				jit_requested:1,/* archs need to JIT the prog */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1,	/* Do we need dst entry? */
				blinding_requested:1, /* needs constant blinding */
				blinded:1,	/* Was blinded */
				is_func:1,	/* program is a bpf function */
				kprobe_override:1, /* Do we override a kprobe? */
				has_callchain_buf:1, /* callchain buffer allocated? */
				enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
				call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
				call_get_func_ip:1, /* Do we call get_func_ip() */
				tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
	enum bpf_prog_type	type;		/* Type of BPF program */
	enum bpf_attach_type	expected_attach_type; /* For some prog types */
	u32			len;		/* Number of filter blocks */
	u32			jited_len;	/* Size of jited insns in bytes */
	u8			tag[BPF_TAG_SIZE];
	struct bpf_prog_stats __percpu *stats;
	int __percpu		*active;
	unsigned int		(*bpf_func)(const void *ctx,
					    const struct bpf_insn *insn);
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	/* Instructions for interpreter */
	union {
		DECLARE_FLEX_ARRAY(struct sock_filter, insns);
		DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
	};
};

struct bpf_array_aux {
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_tramp_link {
	struct bpf_link link;
	struct hlist_node tramp_hlist;
	u64 cookie;
};

struct bpf_shim_tramp_link {
	struct bpf_tramp_link link;
	struct bpf_trampoline *trampoline;
};

struct bpf_tracing_link {
	struct bpf_tramp_link link;
	enum bpf_attach_type attach_type;
	struct bpf_trampoline *trampoline;
	struct bpf_prog *tgt_prog;
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};
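
/* Illustrative sketch: a subsystem exposes one of its ops structures to BPF
 * by filling in a bpf_struct_ops descriptor. TCP congestion control does
 * roughly the following in net/ipv4/bpf_tcp_ca.c (member list abbreviated):
 *
 *	struct bpf_struct_ops bpf_tcp_congestion_ops = {
 *		.verifier_ops	= &bpf_tcp_ca_verifier_ops,
 *		.init		= bpf_tcp_ca_init,
 *		.check_member	= bpf_tcp_ca_check_member,
 *		.init_member	= bpf_tcp_ca_init_member,
 *		.reg		= bpf_tcp_ca_reg,
 *		.unreg		= bpf_tcp_ca_unreg,
 *		.name		= "tcp_congestion_ops",
 *	};
 */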

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}

#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
	int val;
};

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
				    int cgroup_atype);
void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
						  int cgroup_atype)
{
	return -EOPNOTSUPP;
}
static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
{
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33

/* Maximum number of loops for bpf_loop */
#define BPF_MAX_LOOPS	BIT(23)

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

static inline bool map_type_contains_progs(struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
	       map->map_type == BPF_MAP_TYPE_DEVMAP ||
	       map->map_type == BPF_MAP_TYPE_CPUMAP;
}

bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	union {
		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
		u64 bpf_cookie;
	};
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_empty_prog_array {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
};

/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a bpf program attached, use one global 'bpf_empty_prog_array'. It will
 * not be modified by the caller of bpf_prog_array_alloc() (since the caller
 * requested prog_cnt == 0), and that pointer should be 'freed' by
 * bpf_prog_array_free().
 */
1411extern struct bpf_empty_prog_array bpf_empty_prog_array;
1412
d29ab6e1 1413struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
54e9c9d4 1414void bpf_prog_array_free(struct bpf_prog_array *progs);
8c7dcb84
DK
1415/* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
1416void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
54e9c9d4 1417int bpf_prog_array_length(struct bpf_prog_array *progs);
0d01da6a 1418bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
54e9c9d4 1419int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
468e2f64 1420 __u32 __user *prog_ids, u32 cnt);
324bda9e 1421
54e9c9d4 1422void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
e87c6bc3 1423 struct bpf_prog *old_prog);
ce3aa9cc
JS
1424int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
1425int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
1426 struct bpf_prog *prog);
54e9c9d4 1427int bpf_prog_array_copy_info(struct bpf_prog_array *array,
3a38bb98
YS
1428 u32 *prog_ids, u32 request_cnt,
1429 u32 *prog_cnt);
54e9c9d4 1430int bpf_prog_array_copy(struct bpf_prog_array *old_array,
e87c6bc3
YS
1431 struct bpf_prog *exclude_prog,
1432 struct bpf_prog *include_prog,
82e6b1ee 1433 u64 bpf_cookie,
e87c6bc3
YS
1434 struct bpf_prog_array **new_array);
1435
c7603cfa
AN
1436struct bpf_run_ctx {};
1437
1438struct bpf_cg_run_ctx {
1439 struct bpf_run_ctx run_ctx;
7d08c2c9 1440 const struct bpf_prog_array_item *prog_item;
c4dcfdd4 1441 int retval;
c7603cfa
AN
1442};
1443
82e6b1ee
AN
1444struct bpf_trace_run_ctx {
1445 struct bpf_run_ctx run_ctx;
1446 u64 bpf_cookie;
1447};
1448
e384c7b7
KFL
1449struct bpf_tramp_run_ctx {
1450 struct bpf_run_ctx run_ctx;
1451 u64 bpf_cookie;
1452 struct bpf_run_ctx *saved_run_ctx;
1453};
1454
7d08c2c9
AN
1455static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
1456{
1457 struct bpf_run_ctx *old_ctx = NULL;
1458
1459#ifdef CONFIG_BPF_SYSCALL
1460 old_ctx = current->bpf_ctx;
1461 current->bpf_ctx = new_ctx;
1462#endif
1463 return old_ctx;
1464}
1465
1466static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
1467{
1468#ifdef CONFIG_BPF_SYSCALL
1469 current->bpf_ctx = old_ctx;
1470#endif
1471}
1472
77241217
SF
1473/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
1474#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
1475/* BPF program asks to set CN on the packet. */
1476#define BPF_RET_SET_CN (1 << 0)
1477
7d08c2c9
AN
1478typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
1479
7d08c2c9 1480static __always_inline u32
055eb955 1481bpf_prog_run_array(const struct bpf_prog_array *array,
7d08c2c9
AN
1482 const void *ctx, bpf_prog_run_fn run_prog)
1483{
1484 const struct bpf_prog_array_item *item;
1485 const struct bpf_prog *prog;
82e6b1ee
AN
1486 struct bpf_run_ctx *old_run_ctx;
1487 struct bpf_trace_run_ctx run_ctx;
7d08c2c9
AN
1488 u32 ret = 1;
1489
055eb955
SF
1490 RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
1491
7d08c2c9 1492 if (unlikely(!array))
055eb955
SF
1493 return ret;
1494
1495 migrate_disable();
82e6b1ee 1496 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
7d08c2c9
AN
1497 item = &array->items[0];
1498 while ((prog = READ_ONCE(item->prog))) {
82e6b1ee 1499 run_ctx.bpf_cookie = item->bpf_cookie;
7d08c2c9
AN
1500 ret &= run_prog(prog, ctx);
1501 item++;
1502 }
82e6b1ee 1503 bpf_reset_run_ctx(old_run_ctx);
7d08c2c9
AN
1504 migrate_enable();
1505 return ret;
1506}
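A caller-side sketch: the RCU_LOCKDEP_WARN() above expects the RCU read lock to be held by the caller, which also makes the rcu_dereference() of the array pointer legal. my_prog_array and my_ctx are hypothetical names:

static struct bpf_prog_array __rcu *my_prog_array;	/* hypothetical attach point */

static u32 my_run_attached_progs(void *my_ctx)
{
	u32 ret;

	rcu_read_lock();
	ret = bpf_prog_run_array(rcu_dereference(my_prog_array), my_ctx, bpf_prog_run);
	rcu_read_unlock();

	/* bitwise AND of all program return values; 1 when the array is empty or missing */
	return ret;
}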
324bda9e 1507
8c7dcb84
DK
1508/* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
1509 *
1510 * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
1511 * overall. As a result, the array must be freed with bpf_prog_array_free_sleepable()
1512 * so that the free is deferred by a tasks_trace rcu grace period.
1513 *
1514 * When a non-sleepable program is inside the array, we take the rcu read
1515 * section and disable preemption for that program alone, so it can access
1516 * rcu-protected dynamically sized maps.
1517 */
1518static __always_inline u32
1519bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
1520 const void *ctx, bpf_prog_run_fn run_prog)
1521{
1522 const struct bpf_prog_array_item *item;
1523 const struct bpf_prog *prog;
1524 const struct bpf_prog_array *array;
1525 struct bpf_run_ctx *old_run_ctx;
1526 struct bpf_trace_run_ctx run_ctx;
1527 u32 ret = 1;
1528
1529 might_fault();
1530
1531 rcu_read_lock_trace();
1532 migrate_disable();
1533
1534 array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
1535 if (unlikely(!array))
1536 goto out;
1537 old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
1538 item = &array->items[0];
1539 while ((prog = READ_ONCE(item->prog))) {
1540 if (!prog->aux->sleepable)
1541 rcu_read_lock();
1542
1543 run_ctx.bpf_cookie = item->bpf_cookie;
1544 ret &= run_prog(prog, ctx);
1545 item++;
1546
1547 if (!prog->aux->sleepable)
1548 rcu_read_unlock();
1549 }
1550 bpf_reset_run_ctx(old_run_ctx);
1551out:
1552 migrate_enable();
1553 rcu_read_unlock_trace();
1554 return ret;
1555}
1556
89aa0758 1557#ifdef CONFIG_BPF_SYSCALL
b121d1e7 1558DECLARE_PER_CPU(int, bpf_prog_active);
d46edd67 1559extern struct mutex bpf_stats_enabled_mutex;
b121d1e7 1560
c518cfa0
TG
1561/*
1562 * Block execution of BPF programs attached to instrumentation (perf,
1563 * kprobes, tracepoints) to prevent deadlocks on map operations, because any of
1564 * these events can fire inside a region that already holds a map bucket lock
1565 * and would deadlock on it.
c518cfa0
TG
1566 */
1567static inline void bpf_disable_instrumentation(void)
1568{
1569 migrate_disable();
79364031 1570 this_cpu_inc(bpf_prog_active);
c518cfa0
TG
1571}
1572
1573static inline void bpf_enable_instrumentation(void)
1574{
79364031 1575 this_cpu_dec(bpf_prog_active);
c518cfa0
TG
1576 migrate_enable();
1577}
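A hedged sketch of the intended pairing, modeled on how the syscall-side map update path guards against recursing into tracing programs; the wrapper name is illustrative:

static int my_map_update_guarded(struct bpf_map *map, void *key, void *value,
				 u64 flags)
{
	int err;

	bpf_disable_instrumentation();	/* tracing progs on this CPU become no-ops */
	err = map->ops->map_update_elem(map, key, value, flags);
	bpf_enable_instrumentation();

	return err;
}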
1578
f66e448c
CF
1579extern const struct file_operations bpf_map_fops;
1580extern const struct file_operations bpf_prog_fops;
367ec3e4 1581extern const struct file_operations bpf_iter_fops;
f66e448c 1582
91cc1a99 1583#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
7de16e3a
JK
1584 extern const struct bpf_prog_ops _name ## _prog_ops; \
1585 extern const struct bpf_verifier_ops _name ## _verifier_ops;
40077e0c
JB
1586#define BPF_MAP_TYPE(_id, _ops) \
1587 extern const struct bpf_map_ops _ops;
f2e10bff 1588#define BPF_LINK_TYPE(_id, _name)
be9370a7
JB
1589#include <linux/bpf_types.h>
1590#undef BPF_PROG_TYPE
40077e0c 1591#undef BPF_MAP_TYPE
f2e10bff 1592#undef BPF_LINK_TYPE
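These are x-macros: the header first expands them into extern declarations, then #undefs them so each consumer can redefine them and include <linux/bpf_types.h> again to generate its own table. A hedged sketch of such a consumer (the array name is illustrative; the verifier builds a very similar table):

static const struct bpf_verifier_ops * const my_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = &_name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};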
0fc174de 1593
ab3f0063 1594extern const struct bpf_prog_ops bpf_offload_prog_ops;
4f9218aa
JK
1595extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
1596extern const struct bpf_verifier_ops xdp_analyzer_ops;
1597
0fc174de 1598struct bpf_prog *bpf_prog_get(u32 ufd);
248f346f 1599struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
288b3de5 1600 bool attach_drv);
85192dbf 1601void bpf_prog_add(struct bpf_prog *prog, int i);
c540594f 1602void bpf_prog_sub(struct bpf_prog *prog, int i);
85192dbf 1603void bpf_prog_inc(struct bpf_prog *prog);
a6f6df69 1604struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
61e021f3
DB
1605void bpf_prog_put(struct bpf_prog *prog);
1606
ad8ad79f 1607void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
a3884572 1608void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
ad8ad79f 1609
61df10c7
KKD
1610struct bpf_map_value_off_desc *bpf_map_kptr_off_contains(struct bpf_map *map, u32 offset);
1611void bpf_map_free_kptr_off_tab(struct bpf_map *map);
1612struct bpf_map_value_off *bpf_map_copy_kptr_off_tab(const struct bpf_map *map);
1613bool bpf_map_equal_kptr_off_tab(const struct bpf_map *map_a, const struct bpf_map *map_b);
14a324f6 1614void bpf_map_free_kptrs(struct bpf_map *map, void *map_value);
61df10c7 1615
1ed4d924 1616struct bpf_map *bpf_map_get(u32 ufd);
c9da161c 1617struct bpf_map *bpf_map_get_with_uref(u32 ufd);
c2101297 1618struct bpf_map *__bpf_map_get(struct fd f);
1e0bd5a0
AN
1619void bpf_map_inc(struct bpf_map *map);
1620void bpf_map_inc_with_uref(struct bpf_map *map);
1621struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
c9da161c 1622void bpf_map_put_with_uref(struct bpf_map *map);
61e021f3 1623void bpf_map_put(struct bpf_map *map);
196e8ca7
DB
1624void *bpf_map_area_alloc(u64 size, int numa_node);
1625void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
d407bd25 1626void bpf_map_area_free(void *base);
353050be 1627bool bpf_map_write_active(const struct bpf_map *map);
bd475643 1628void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
cb4d03ab
BV
1629int generic_map_lookup_batch(struct bpf_map *map,
1630 const union bpf_attr *attr,
aa2e93b8
BV
1631 union bpf_attr __user *uattr);
1632int generic_map_update_batch(struct bpf_map *map,
1633 const union bpf_attr *attr,
1634 union bpf_attr __user *uattr);
1635int generic_map_delete_batch(struct bpf_map *map,
1636 const union bpf_attr *attr,
cb4d03ab 1637 union bpf_attr __user *uattr);
6086d29d 1638struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
a228a64f 1639struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
61e021f3 1640
48edc1f7
RG
1641#ifdef CONFIG_MEMCG_KMEM
1642void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
1643 int node);
1644void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
1645void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
1646 size_t align, gfp_t flags);
1647#else
1648static inline void *
1649bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
1650 int node)
1651{
1652 return kmalloc_node(size, flags, node);
1653}
1654
1655static inline void *
1656bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
1657{
1658 return kzalloc(size, flags);
1659}
1660
1661static inline void __percpu *
1662bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
1663 gfp_t flags)
1664{
1665 return __alloc_percpu_gfp(size, align, flags);
1666}
1667#endif
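With CONFIG_MEMCG_KMEM the wrappers above charge allocations to the memory cgroup the map was created in; without it they fall back to the plain allocators shown in the #else branch. A minimal sketch of a map implementation using them (struct and function names are illustrative):

struct my_elem {
	struct hlist_node node;
	char data[];
};

static struct my_elem *my_elem_alloc(const struct bpf_map *map, u32 value_size)
{
	/* charged to the map's memcg when CONFIG_MEMCG_KMEM is enabled */
	return bpf_map_kzalloc(map, sizeof(struct my_elem) + value_size,
			       GFP_ATOMIC | __GFP_NOWARN);
}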
1668
1be7f75d
AS
1669extern int sysctl_unprivileged_bpf_disabled;
1670
2c78ee89
AS
1671static inline bool bpf_allow_ptr_leaks(void)
1672{
1673 return perfmon_capable();
1674}
1675
01f810ac
AM
1676static inline bool bpf_allow_uninit_stack(void)
1677{
1678 return perfmon_capable();
1679}
1680
41c48f3a
AI
1681static inline bool bpf_allow_ptr_to_map_access(void)
1682{
1683 return perfmon_capable();
1684}
1685
2c78ee89
AS
1686static inline bool bpf_bypass_spec_v1(void)
1687{
1688 return perfmon_capable();
1689}
1690
1691static inline bool bpf_bypass_spec_v4(void)
1692{
1693 return perfmon_capable();
1694}
1695
6e71b04a 1696int bpf_map_new_fd(struct bpf_map *map, int flags);
b2197755
DB
1697int bpf_prog_new_fd(struct bpf_prog *prog);
1698
f2e10bff 1699void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
a3b80e10
AN
1700 const struct bpf_link_ops *ops, struct bpf_prog *prog);
1701int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
1702int bpf_link_settle(struct bpf_link_primer *primer);
1703void bpf_link_cleanup(struct bpf_link_primer *primer);
70ed506c
AN
1704void bpf_link_inc(struct bpf_link *link);
1705void bpf_link_put(struct bpf_link *link);
1706int bpf_link_new_fd(struct bpf_link *link);
babf3164 1707struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
70ed506c 1708struct bpf_link *bpf_link_get_from_fd(u32 ufd);
9f883612 1709struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
70ed506c 1710
b2197755 1711int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
6e71b04a 1712int bpf_obj_get_user(const char __user *pathname, int flags);
b2197755 1713
21aef70e 1714#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
e5158d98 1715#define DEFINE_BPF_ITER_FUNC(target, args...) \
21aef70e
YS
1716 extern int bpf_iter_ ## target(args); \
1717 int __init bpf_iter_ ## target(args) { return 0; }
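The macro emits a dummy __init function whose BTF prototype describes the context arguments an iterator program of that target receives. As a hedged example, the task iterator declares its target roughly like this:

/* roughly how the 'task' iterator target is declared (sketch, not copied verbatim) */
DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta, struct task_struct *task)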
15d83c4d 1718
f9c79272 1719struct bpf_iter_aux_info {
a5cbe05a 1720 struct bpf_map *map;
f9c79272
YS
1721};
1722
5e7b3020
YS
1723typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
1724 union bpf_iter_link_info *linfo,
1725 struct bpf_iter_aux_info *aux);
1726typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
6b0a249a
YS
1727typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
1728 struct seq_file *seq);
1729typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
1730 struct bpf_link_info *info);
3cee6fb8
MKL
1731typedef const struct bpf_func_proto *
1732(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
1733 const struct bpf_prog *prog);
a5cbe05a 1734
cf83b2d2
YS
1735enum bpf_iter_feature {
1736 BPF_ITER_RESCHED = BIT(0),
1737};
1738
3c32cc1b 1739#define BPF_ITER_CTX_ARG_MAX 2
ae24345d
YS
1740struct bpf_iter_reg {
1741 const char *target;
5e7b3020
YS
1742 bpf_iter_attach_target_t attach_target;
1743 bpf_iter_detach_target_t detach_target;
6b0a249a
YS
1744 bpf_iter_show_fdinfo_t show_fdinfo;
1745 bpf_iter_fill_link_info_t fill_link_info;
3cee6fb8 1746 bpf_iter_get_func_proto_t get_func_proto;
3c32cc1b 1747 u32 ctx_arg_info_size;
cf83b2d2 1748 u32 feature;
3c32cc1b 1749 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
14fc6bd6 1750 const struct bpf_iter_seq_info *seq_info;
ae24345d
YS
1751};
1752
e5158d98
YS
1753struct bpf_iter_meta {
1754 __bpf_md_ptr(struct seq_file *, seq);
1755 u64 session_id;
1756 u64 seq_num;
1757};
1758
a5cbe05a
YS
1759struct bpf_iter__bpf_map_elem {
1760 __bpf_md_ptr(struct bpf_iter_meta *, meta);
1761 __bpf_md_ptr(struct bpf_map *, map);
1762 __bpf_md_ptr(void *, key);
1763 __bpf_md_ptr(void *, value);
1764};
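This is the context a BPF program attached to the map-element iterator sees. A hedged BPF-side sketch in the style of the kernel selftests, assuming a map with u32 keys and u64 values (the key/value types are an assumption of this example):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/bpf_map_elem")
int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	__u32 *key = ctx->key;
	__u64 *val = ctx->value;

	/* key and value are NULL in the final call after the last element */
	if (!key || !val)
		return 0;

	BPF_SEQ_PRINTF(seq, "key %u value %llu\n", *key, *val);
	return 0;
}

The iterator link is created against a specific map fd (via union bpf_iter_link_info), and reading the resulting iterator fd produces one formatted line per element.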
1765
15172a46 1766int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
ab2ee4fc 1767void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
15d83c4d 1768bool bpf_iter_prog_supported(struct bpf_prog *prog);
3cee6fb8
MKL
1769const struct bpf_func_proto *
1770bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
af2ac3e1 1771int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
ac51d99b 1772int bpf_iter_new_fd(struct bpf_link *link);
367ec3e4 1773bool bpf_link_is_iter(struct bpf_link *link);
e5158d98
YS
1774struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
1775int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
b76f2226
YS
1776void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
1777 struct seq_file *seq);
1778int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
1779 struct bpf_link_info *info);
ae24345d 1780
314ee05e
YS
1781int map_set_for_each_callback_args(struct bpf_verifier_env *env,
1782 struct bpf_func_state *caller,
1783 struct bpf_func_state *callee);
1784
15a07b33
AS
1785int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
1786int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
1787int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
1788 u64 flags);
1789int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
1790 u64 flags);
d056a788 1791
557c0c6e 1792int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
15a07b33 1793
d056a788
DB
1794int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
1795 void *key, void *value, u64 map_flags);
14dc6f04 1796int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
bcc6b1b7
MKL
1797int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
1798 void *key, void *value, u64 map_flags);
14dc6f04 1799int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
d056a788 1800
6e71b04a 1801int bpf_get_file_flag(int flags);
af2ac3e1 1802int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
dcab51f1 1803 size_t actual_size);
6e71b04a 1804
15a07b33
AS
1805/* memcpy that is used with 8-byte aligned pointers and a multiple-of-8 size,
1806 * forced to use 'long' read/writes to try to atomically copy long counters.
1807 * Best-effort only. No barriers here, since it _will_ race with concurrent
1808 * updates from BPF programs. Called from bpf syscall and mostly used with
1809 * size 8 or 16 bytes, so ask compiler to inline it.
1810 */
1811static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
1812{
1813 const long *lsrc = src;
1814 long *ldst = dst;
1815
1816 size /= sizeof(long);
1817 while (size--)
1818 *ldst++ = *lsrc++;
1819}
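For example, copying a small counter struct relies on it so each 8-byte half is read and written with a single long access (the struct name is illustrative):

struct my_counters {
	u64 packets;
	u64 bytes;
};

static void my_copy_counters(struct my_counters *dst, const struct my_counters *src)
{
	/* 16 bytes, 8-byte aligned: copied as two long reads/writes, best effort only */
	bpf_long_memcpy(dst, src, sizeof(*dst));
}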
1820
61e021f3 1821/* verify correctness of eBPF program */
af2ac3e1 1822int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
a643bff7
AN
1823
1824#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01 1825void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
a643bff7 1826#endif
46f55cff 1827
76654e67
AM
1828struct btf *bpf_get_btf_vmlinux(void);
1829
46f55cff 1830/* Map specifics */
d53ad5d8 1831struct xdp_frame;
6d5fc195 1832struct sk_buff;
e6a4750f
BT
1833struct bpf_dtab_netdev;
1834struct bpf_cpu_map_entry;
67f29e07 1835
1d233886 1836void __dev_flush(void);
d53ad5d8 1837int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
1d233886 1838 struct net_device *dev_rx);
d53ad5d8 1839int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
38edddb8 1840 struct net_device *dev_rx);
d53ad5d8 1841int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
e624d4ed 1842 struct bpf_map *map, bool exclude_ingress);
6d5fc195
TM
1843int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
1844 struct bpf_prog *xdp_prog);
e624d4ed
HL
1845int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
1846 struct bpf_prog *xdp_prog, struct bpf_map *map,
1847 bool exclude_ingress);
46f55cff 1848
cdfafe98 1849void __cpu_map_flush(void);
d53ad5d8 1850int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
9c270af3 1851 struct net_device *dev_rx);
11941f8a
KKD
1852int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
1853 struct sk_buff *skb);
9c270af3 1854
96eabe7a
MKL
1855/* Return the map's NUMA node as specified by userspace */
1856static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
1857{
1858 return (attr->map_flags & BPF_F_NUMA_NODE) ?
1859 attr->numa_node : NUMA_NO_NODE;
1860}
1861
040ee692 1862struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
5dc4c4b7 1863int array_map_alloc_check(union bpf_attr *attr);
040ee692 1864
c695865c
SF
1865int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1866 union bpf_attr __user *uattr);
1867int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
1868 union bpf_attr __user *uattr);
da00d2f1
KS
1869int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1870 const union bpf_attr *kattr,
1871 union bpf_attr __user *uattr);
c695865c
SF
1872int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1873 const union bpf_attr *kattr,
1874 union bpf_attr __user *uattr);
1b4d60ec
SL
1875int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
1876 const union bpf_attr *kattr,
1877 union bpf_attr __user *uattr);
7c32e8f8
LB
1878int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
1879 const union bpf_attr *kattr,
1880 union bpf_attr __user *uattr);
9e15db66
AS
1881bool btf_ctx_access(int off, int size, enum bpf_access_type type,
1882 const struct bpf_prog *prog,
1883 struct bpf_insn_access_aux *info);
35346ab6
HT
1884
1885static inline bool bpf_tracing_ctx_access(int off, int size,
1886 enum bpf_access_type type)
1887{
1888 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1889 return false;
1890 if (type != BPF_READ)
1891 return false;
1892 if (off % size != 0)
1893 return false;
1894 return true;
1895}
1896
1897static inline bool bpf_tracing_btf_ctx_access(int off, int size,
1898 enum bpf_access_type type,
1899 const struct bpf_prog *prog,
1900 struct bpf_insn_access_aux *info)
1901{
1902 if (!bpf_tracing_ctx_access(off, size, type))
1903 return false;
1904 return btf_ctx_access(off, size, type, prog, info);
1905}
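These two helpers bundle the bounds, alignment, and read-only checks shared by tracing-style program types; a hedged sketch of a program type's .is_valid_access callback built on them:

static bool my_tracing_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	/* range/read-only/alignment checks, then BTF-based argument resolution */
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}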
1906
22dc4a0f 1907int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
9e15db66
AS
1908 const struct btf_type *t, int off, int size,
1909 enum bpf_access_type atype,
c6f1bfe8 1910 u32 *next_btf_id, enum bpf_type_flag *flag);
faaf4a79 1911bool btf_struct_ids_match(struct bpf_verifier_log *log,
22dc4a0f 1912 const struct btf *btf, u32 id, int off,
2ab3b380
KKD
1913 const struct btf *need_btf, u32 need_type_id,
1914 bool strict);
9e15db66 1915
fec56f58
AS
1916int btf_distill_func_proto(struct bpf_verifier_log *log,
1917 struct btf *btf,
1918 const struct btf_type *func_proto,
1919 const char *func_name,
1920 struct btf_func_model *m);
1921
51c39bb1 1922struct bpf_reg_state;
34747c41
MKL
1923int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
1924 struct bpf_reg_state *regs);
e6ac2450
MKL
1925int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
1926 const struct btf *btf, u32 func_id,
1927 struct bpf_reg_state *regs);
51c39bb1
AS
1928int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
1929 struct bpf_reg_state *reg);
efc68158 1930int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
be8704ff 1931 struct btf *btf, const struct btf_type *t);
8c1b6e69 1932
7e6897f9 1933struct bpf_prog *bpf_prog_by_id(u32 id);
005142b8 1934struct bpf_link *bpf_link_by_id(u32 id);
7e6897f9 1935
6890896b 1936const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
a10787e6 1937void bpf_task_storage_free(struct task_struct *task);
e6ac2450
MKL
1938bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
1939const struct btf_func_model *
1940bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
1941 const struct bpf_insn *insn);
fbd94c7a
AS
1942struct bpf_core_ctx {
1943 struct bpf_verifier_log *log;
1944 const struct btf *btf;
1945};
1946
1947int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
1948 int relo_idx, void *insn);
1949
44a3918c
JP
1950static inline bool unprivileged_ebpf_enabled(void)
1951{
1952 return !sysctl_unprivileged_bpf_disabled;
1953}
1954
9c270af3 1955#else /* !CONFIG_BPF_SYSCALL */
0fc174de
DB
1956static inline struct bpf_prog *bpf_prog_get(u32 ufd)
1957{
1958 return ERR_PTR(-EOPNOTSUPP);
1959}
1960
248f346f
JK
1961static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
1962 enum bpf_prog_type type,
288b3de5 1963 bool attach_drv)
248f346f
JK
1964{
1965 return ERR_PTR(-EOPNOTSUPP);
1966}
1967
85192dbf 1968static inline void bpf_prog_add(struct bpf_prog *prog, int i)
cc2e0b3f 1969{
cc2e0b3f 1970}
113214be 1971
c540594f
DB
1972static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
1973{
1974}
1975
0fc174de
DB
1976static inline void bpf_prog_put(struct bpf_prog *prog)
1977{
1978}
6d67942d 1979
85192dbf 1980static inline void bpf_prog_inc(struct bpf_prog *prog)
aa6a5f3c 1981{
aa6a5f3c 1982}
5ccb071e 1983
a6f6df69
JF
1984static inline struct bpf_prog *__must_check
1985bpf_prog_inc_not_zero(struct bpf_prog *prog)
1986{
1987 return ERR_PTR(-EOPNOTSUPP);
1988}
1989
6cc7d1e8
AN
1990static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
1991 const struct bpf_link_ops *ops,
1992 struct bpf_prog *prog)
1993{
1994}
1995
1996static inline int bpf_link_prime(struct bpf_link *link,
1997 struct bpf_link_primer *primer)
1998{
1999 return -EOPNOTSUPP;
2000}
2001
2002static inline int bpf_link_settle(struct bpf_link_primer *primer)
2003{
2004 return -EOPNOTSUPP;
2005}
2006
2007static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
2008{
2009}
2010
2011static inline void bpf_link_inc(struct bpf_link *link)
2012{
2013}
2014
2015static inline void bpf_link_put(struct bpf_link *link)
2016{
2017}
2018
6e71b04a 2019static inline int bpf_obj_get_user(const char __user *pathname, int flags)
98589a09
SL
2020{
2021 return -EOPNOTSUPP;
2022}
2023
1d233886 2024static inline void __dev_flush(void)
46f55cff
JF
2025{
2026}
9c270af3 2027
d53ad5d8 2028struct xdp_frame;
67f29e07 2029struct bpf_dtab_netdev;
e6a4750f 2030struct bpf_cpu_map_entry;
67f29e07 2031
1d233886 2032static inline
d53ad5d8 2033int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
1d233886
THJ
2034 struct net_device *dev_rx)
2035{
2036 return 0;
2037}
2038
67f29e07 2039static inline
d53ad5d8 2040int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
38edddb8 2041 struct net_device *dev_rx)
67f29e07
JDB
2042{
2043 return 0;
2044}
2045
e624d4ed 2046static inline
d53ad5d8 2047int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
e624d4ed
HL
2048 struct bpf_map *map, bool exclude_ingress)
2049{
2050 return 0;
2051}
2052
6d5fc195
TM
2053struct sk_buff;
2054
2055static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
2056 struct sk_buff *skb,
2057 struct bpf_prog *xdp_prog)
2058{
2059 return 0;
2060}
2061
e624d4ed
HL
2062static inline
2063int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
2064 struct bpf_prog *xdp_prog, struct bpf_map *map,
2065 bool exclude_ingress)
2066{
2067 return 0;
2068}
2069
cdfafe98 2070static inline void __cpu_map_flush(void)
9c270af3
JDB
2071{
2072}
2073
9c270af3 2074static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
d53ad5d8 2075 struct xdp_frame *xdpf,
9c270af3
JDB
2076 struct net_device *dev_rx)
2077{
2078 return 0;
2079}
040ee692 2080
11941f8a
KKD
2081static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
2082 struct sk_buff *skb)
2083{
2084 return -EOPNOTSUPP;
2085}
2086
040ee692
AV
2087static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
2088 enum bpf_prog_type type)
2089{
2090 return ERR_PTR(-EOPNOTSUPP);
2091}
c695865c
SF
2092
2093static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
2094 const union bpf_attr *kattr,
2095 union bpf_attr __user *uattr)
2096{
2097 return -ENOTSUPP;
2098}
2099
2100static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
2101 const union bpf_attr *kattr,
2102 union bpf_attr __user *uattr)
2103{
2104 return -ENOTSUPP;
2105}
2106
da00d2f1
KS
2107static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
2108 const union bpf_attr *kattr,
2109 union bpf_attr __user *uattr)
2110{
2111 return -ENOTSUPP;
2112}
2113
c695865c
SF
2114static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
2115 const union bpf_attr *kattr,
2116 union bpf_attr __user *uattr)
2117{
2118 return -ENOTSUPP;
2119}
6332be04 2120
7c32e8f8
LB
2121static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
2122 const union bpf_attr *kattr,
2123 union bpf_attr __user *uattr)
2124{
2125 return -ENOTSUPP;
2126}
2127
6332be04
DB
2128static inline void bpf_map_put(struct bpf_map *map)
2129{
2130}
7e6897f9
BT
2131
2132static inline struct bpf_prog *bpf_prog_by_id(u32 id)
2133{
2134 return ERR_PTR(-ENOTSUPP);
2135}
6890896b
SF
2136
2137static inline const struct bpf_func_proto *
2138bpf_base_func_proto(enum bpf_func_id func_id)
2139{
2140 return NULL;
2141}
a10787e6
SL
2142
2143static inline void bpf_task_storage_free(struct task_struct *task)
2144{
2145}
e6ac2450
MKL
2146
2147static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2148{
2149 return false;
2150}
2151
2152static inline const struct btf_func_model *
2153bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2154 const struct bpf_insn *insn)
2155{
2156 return NULL;
2157}
44a3918c
JP
2158
2159static inline bool unprivileged_ebpf_enabled(void)
2160{
2161 return false;
2162}
2163
61e021f3 2164#endif /* CONFIG_BPF_SYSCALL */
09756af4 2165
541c3bad
AN
2166void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2167 struct btf_mod_pair *used_btfs, u32 len);
2168
479321e9
JK
2169static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
2170 enum bpf_prog_type type)
2171{
2172 return bpf_prog_get_type_dev(ufd, type, false);
2173}
2174
936f8946
AN
2175void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2176 struct bpf_map **used_maps, u32 len);
2177
040ee692
AV
2178bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
2179
ab3f0063
JK
2180int bpf_prog_offload_compile(struct bpf_prog *prog);
2181void bpf_prog_offload_destroy(struct bpf_prog *prog);
675fc275
JK
2182int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
2183 struct bpf_prog *prog);
ab3f0063 2184
52775b33
JK
2185int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
2186
a3884572
JK
2187int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
2188int bpf_map_offload_update_elem(struct bpf_map *map,
2189 void *key, void *value, u64 flags);
2190int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
2191int bpf_map_offload_get_next_key(struct bpf_map *map,
2192 void *key, void *next_key);
2193
09728266 2194bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
a3884572 2195
1385d755 2196struct bpf_offload_dev *
dd27c2e3 2197bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
602144c2 2198void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
dd27c2e3 2199void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
602144c2
JK
2200int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
2201 struct net_device *netdev);
2202void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
2203 struct net_device *netdev);
fd4f227d 2204bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
9fd7c555 2205
2147c438
JP
2206void unpriv_ebpf_notify(int new_state);
2207
ab3f0063
JK
2208#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
2209int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
2210
0d830032 2211static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
ab3f0063 2212{
9a18eedb 2213 return aux->offload_requested;
ab3f0063 2214}
a3884572
JK
2215
2216static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
2217{
2218 return unlikely(map->ops == &bpf_map_offload_ops);
2219}
2220
2221struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
2222void bpf_map_offload_map_free(struct bpf_map *map);
79a7f8bd
AS
2223int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2224 const union bpf_attr *kattr,
2225 union bpf_attr __user *uattr);
17edea21
CW
2226
2227int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
2228int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
2229int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
748cd572
DZ
2230int sock_map_bpf_prog_query(const union bpf_attr *attr,
2231 union bpf_attr __user *uattr);
2232
17edea21 2233void sock_map_unhash(struct sock *sk);
d8616ee2 2234void sock_map_destroy(struct sock *sk);
17edea21 2235void sock_map_close(struct sock *sk, long timeout);
ab3f0063
JK
2236#else
2237static inline int bpf_prog_offload_init(struct bpf_prog *prog,
2238 union bpf_attr *attr)
2239{
2240 return -EOPNOTSUPP;
2241}
2242
2243static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
2244{
2245 return false;
2246}
a3884572
JK
2247
2248static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
2249{
2250 return false;
2251}
2252
2253static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
2254{
2255 return ERR_PTR(-EOPNOTSUPP);
2256}
2257
2258static inline void bpf_map_offload_map_free(struct bpf_map *map)
2259{
2260}
79a7f8bd
AS
2261
2262static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2263 const union bpf_attr *kattr,
2264 union bpf_attr __user *uattr)
2265{
2266 return -ENOTSUPP;
2267}
fdb5c453 2268
88759609 2269#ifdef CONFIG_BPF_SYSCALL
604326b4
DB
2270static inline int sock_map_get_from_fd(const union bpf_attr *attr,
2271 struct bpf_prog *prog)
fdb5c453
SY
2272{
2273 return -EINVAL;
2274}
bb0de313
LB
2275
2276static inline int sock_map_prog_detach(const union bpf_attr *attr,
2277 enum bpf_prog_type ptype)
2278{
2279 return -EOPNOTSUPP;
2280}
13b79d3f
LB
2281
2282static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
2283 u64 flags)
2284{
2285 return -EOPNOTSUPP;
2286}
748cd572
DZ
2287
2288static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
2289 union bpf_attr __user *uattr)
2290{
2291 return -EINVAL;
2292}
17edea21
CW
2293#endif /* CONFIG_BPF_SYSCALL */
2294#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
5dc4c4b7 2295
17edea21
CW
2296#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
2297void bpf_sk_reuseport_detach(struct sock *sk);
2298int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
2299 void *value);
2300int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
2301 void *value, u64 map_flags);
2302#else
2303static inline void bpf_sk_reuseport_detach(struct sock *sk)
2304{
2305}
5dc4c4b7 2306
17edea21 2307#ifdef CONFIG_BPF_SYSCALL
5dc4c4b7
MKL
2308static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
2309 void *key, void *value)
2310{
2311 return -EOPNOTSUPP;
2312}
2313
2314static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
2315 void *key, void *value,
2316 u64 map_flags)
2317{
2318 return -EOPNOTSUPP;
2319}
2320#endif /* CONFIG_BPF_SYSCALL */
2321#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
2322
d0003ec0 2323/* verifier prototypes for helper functions called from eBPF programs */
a2c83fff
DB
2324extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
2325extern const struct bpf_func_proto bpf_map_update_elem_proto;
2326extern const struct bpf_func_proto bpf_map_delete_elem_proto;
f1a2e44a
MV
2327extern const struct bpf_func_proto bpf_map_push_elem_proto;
2328extern const struct bpf_func_proto bpf_map_pop_elem_proto;
2329extern const struct bpf_func_proto bpf_map_peek_elem_proto;
07343110 2330extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
d0003ec0 2331
03e69b50 2332extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
c04167ce 2333extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
2d0e30c3 2334extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
04fd61ab 2335extern const struct bpf_func_proto bpf_tail_call_proto;
17ca8cbf 2336extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
71d19214 2337extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
ffeedafb
AS
2338extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
2339extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
2340extern const struct bpf_func_proto bpf_get_current_comm_proto;
d5a3b1f6 2341extern const struct bpf_func_proto bpf_get_stackid_proto;
c195651e 2342extern const struct bpf_func_proto bpf_get_stack_proto;
fa28dcb8 2343extern const struct bpf_func_proto bpf_get_task_stack_proto;
7b04d6d6
SL
2344extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
2345extern const struct bpf_func_proto bpf_get_stack_proto_pe;
174a79ff 2346extern const struct bpf_func_proto bpf_sock_map_update_proto;
81110384 2347extern const struct bpf_func_proto bpf_sock_hash_update_proto;
bf6fa2c8 2348extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
0f09abd1 2349extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
604326b4
DB
2350extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
2351extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
2352extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
2353extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
d83525ca
AS
2354extern const struct bpf_func_proto bpf_spin_lock_proto;
2355extern const struct bpf_func_proto bpf_spin_unlock_proto;
cd339431 2356extern const struct bpf_func_proto bpf_get_local_storage_proto;
d7a4cb9b
AI
2357extern const struct bpf_func_proto bpf_strtol_proto;
2358extern const struct bpf_func_proto bpf_strtoul_proto;
0d01da6a 2359extern const struct bpf_func_proto bpf_tcp_sock_proto;
5576b991 2360extern const struct bpf_func_proto bpf_jiffies64_proto;
b4490c5c 2361extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
0456ea17 2362extern const struct bpf_func_proto bpf_event_output_data_proto;
457f4436
AN
2363extern const struct bpf_func_proto bpf_ringbuf_output_proto;
2364extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
2365extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
2366extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
2367extern const struct bpf_func_proto bpf_ringbuf_query_proto;
bc34dee6
JK
2368extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
2369extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
2370extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
af7ec138 2371extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
478cfbdf
YS
2372extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
2373extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
2374extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
0d4fad3e 2375extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
9eeb3aa3 2376extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
3bc253c2 2377extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
07be4c4a 2378extern const struct bpf_func_proto bpf_copy_from_user_proto;
c4d0bfb4 2379extern const struct bpf_func_proto bpf_snprintf_btf_proto;
7b15523a 2380extern const struct bpf_func_proto bpf_snprintf_proto;
eaa6bcb7 2381extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
63d9b80d 2382extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
d0551261 2383extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
b60da495 2384extern const struct bpf_func_proto bpf_sock_from_file_proto;
c5dbb89f 2385extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
a10787e6
SL
2386extern const struct bpf_func_proto bpf_task_storage_get_proto;
2387extern const struct bpf_func_proto bpf_task_storage_delete_proto;
69c087ba 2388extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
3d78417b 2389extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
3cee6fb8
MKL
2390extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
2391extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
9113d7e4
SF
2392extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
2393extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
7c7e3d31 2394extern const struct bpf_func_proto bpf_find_vma_proto;
e6f2dd0f 2395extern const struct bpf_func_proto bpf_loop_proto;
376040e4 2396extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
69fd337a
SF
2397extern const struct bpf_func_proto bpf_set_retval_proto;
2398extern const struct bpf_func_proto bpf_get_retval_proto;
cd339431 2399
958a3f2d
JO
2400const struct bpf_func_proto *tracing_prog_func_proto(
2401 enum bpf_func_id func_id, const struct bpf_prog *prog);
2402
3ad00405
DB
2403/* Shared helpers among cBPF and eBPF. */
2404void bpf_user_rnd_init_once(void);
2405u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
6890896b 2406u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
3ad00405 2407
c64b7983 2408#if defined(CONFIG_NET)
46f8bc92
MKL
2409bool bpf_sock_common_is_valid_access(int off, int size,
2410 enum bpf_access_type type,
2411 struct bpf_insn_access_aux *info);
c64b7983
JS
2412bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2413 struct bpf_insn_access_aux *info);
2414u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
2415 const struct bpf_insn *si,
2416 struct bpf_insn *insn_buf,
2417 struct bpf_prog *prog,
2418 u32 *target_size);
2419#else
46f8bc92
MKL
2420static inline bool bpf_sock_common_is_valid_access(int off, int size,
2421 enum bpf_access_type type,
2422 struct bpf_insn_access_aux *info)
2423{
2424 return false;
2425}
c64b7983
JS
2426static inline bool bpf_sock_is_valid_access(int off, int size,
2427 enum bpf_access_type type,
2428 struct bpf_insn_access_aux *info)
2429{
2430 return false;
2431}
2432static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
2433 const struct bpf_insn *si,
2434 struct bpf_insn *insn_buf,
2435 struct bpf_prog *prog,
2436 u32 *target_size)
2437{
2438 return 0;
2439}
2440#endif
2441
655a51e5 2442#ifdef CONFIG_INET
91cc1a99
AS
2443struct sk_reuseport_kern {
2444 struct sk_buff *skb;
2445 struct sock *sk;
2446 struct sock *selected_sk;
d5e4ddae 2447 struct sock *migrating_sk;
91cc1a99
AS
2448 void *data_end;
2449 u32 hash;
2450 u32 reuseport_id;
2451 bool bind_inany;
2452};
655a51e5
MKL
2453bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2454 struct bpf_insn_access_aux *info);
2455
2456u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
2457 const struct bpf_insn *si,
2458 struct bpf_insn *insn_buf,
2459 struct bpf_prog *prog,
2460 u32 *target_size);
7f94208c
Y
2461
2462bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2463 struct bpf_insn_access_aux *info);
2464
2465u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
2466 const struct bpf_insn *si,
2467 struct bpf_insn *insn_buf,
2468 struct bpf_prog *prog,
2469 u32 *target_size);
655a51e5
MKL
2470#else
2471static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
2472 enum bpf_access_type type,
2473 struct bpf_insn_access_aux *info)
2474{
2475 return false;
2476}
2477
2478static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
2479 const struct bpf_insn *si,
2480 struct bpf_insn *insn_buf,
2481 struct bpf_prog *prog,
2482 u32 *target_size)
2483{
2484 return 0;
2485}
7f94208c
Y
2486static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
2487 enum bpf_access_type type,
2488 struct bpf_insn_access_aux *info)
2489{
2490 return false;
2491}
2492
2493static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
2494 const struct bpf_insn *si,
2495 struct bpf_insn *insn_buf,
2496 struct bpf_prog *prog,
2497 u32 *target_size)
2498{
2499 return 0;
2500}
655a51e5
MKL
2501#endif /* CONFIG_INET */
2502
5964b200 2503enum bpf_text_poke_type {
b553a6ec
DB
2504 BPF_MOD_CALL,
2505 BPF_MOD_JUMP,
5964b200 2506};
4b3da77b 2507
5964b200
AS
2508int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2509 void *addr1, void *addr2);
2510
ebc1415d 2511void *bpf_arch_text_copy(void *dst, void *src, size_t len);
fe736565 2512int bpf_arch_text_invalidate(void *dst, size_t len);
ebc1415d 2513
eae2e83e 2514struct btf_id_set;
2af30f11 2515bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
eae2e83e 2516
335ff499
DM
2517#define MAX_BPRINTF_VARARGS 12
2518
48cac3f4
FR
2519int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
2520 u32 **bin_buf, u32 num_args);
2521void bpf_bprintf_cleanup(void);
d9c9e4db 2522
97e03f52
JK
2523/* the implementation of the opaque uapi struct bpf_dynptr */
2524struct bpf_dynptr_kern {
2525 void *data;
2526 /* Size represents the number of usable bytes of dynptr data.
2527 * If for example the offset is at 4 for a local dynptr whose data is
2528 * of type u64, the number of usable bytes is 4.
2529 *
2530 * The upper 8 bits are reserved for metadata. The layout is as follows:
2531 * Bits 0 - 23 = size
2532 * Bits 24 - 30 = dynptr type
2533 * Bit 31 = whether dynptr is read-only
2534 */
2535 u32 size;
2536 u32 offset;
2537} __aligned(8);
2538
2539enum bpf_dynptr_type {
2540 BPF_DYNPTR_TYPE_INVALID,
2541 /* Points to memory that is local to the bpf program */
2542 BPF_DYNPTR_TYPE_LOCAL,
bc34dee6
JK
2543 /* Underlying data is a ringbuf record */
2544 BPF_DYNPTR_TYPE_RINGBUF,
97e03f52
JK
2545};
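Given the bit layout documented in bpf_dynptr_kern above, the kernel-internal accessors amount to mask-and-shift operations on the packed size word. A hedged sketch (the mask names are illustrative; the real helpers live elsewhere in the kernel):

#define MY_DYNPTR_SIZE_MASK	0x00ffffffU			/* bits 0-23: usable size */
#define MY_DYNPTR_TYPE_SHIFT	24
#define MY_DYNPTR_TYPE_MASK	(0x7fU << MY_DYNPTR_TYPE_SHIFT)	/* bits 24-30: dynptr type */
#define MY_DYNPTR_RDONLY_BIT	(1U << 31)			/* bit 31: read-only flag */

static inline u32 my_dynptr_get_size(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & MY_DYNPTR_SIZE_MASK;
}

static inline enum bpf_dynptr_type my_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
{
	return (enum bpf_dynptr_type)((ptr->size & MY_DYNPTR_TYPE_MASK) >> MY_DYNPTR_TYPE_SHIFT);
}

static inline bool my_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
{
	return ptr->size & MY_DYNPTR_RDONLY_BIT;
}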
2546
bc34dee6
JK
2547void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
2548 enum bpf_dynptr_type type, u32 offset, u32 size);
2549void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
2550int bpf_dynptr_check_size(u32 size);
2551
c0e19f2c
SF
2552#ifdef CONFIG_BPF_LSM
2553void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
2554void bpf_cgroup_atype_put(int cgroup_atype);
2555#else
2556static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
2557static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
2558#endif /* CONFIG_BPF_LSM */
2559
99c55f7d 2560#endif /* _LINUX_BPF_H */