bpf, x86: Generate trampolines from bpf_tramp_links
include/linux/bpf.h
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>
#include <linux/btf.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map have already been used
	 * at verification time. When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the map being inserted has the
	 * same properties that the verifier relied on earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map,
				     bpf_callback_t callback_fn,
				     void *callback_ctx, u64 flags);

	/* BTF id of struct allocated by map_alloc */
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
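
/* Editor's note: an illustrative sketch (not from this header) of how a map
 * type wires up the ops above, loosely modeled on the array map in
 * kernel/bpf/arraymap.c; the my_map_* callbacks are hypothetical:
 *
 *	static int my_map_btf_id;
 *	const struct bpf_map_ops my_map_ops = {
 *		.map_meta_equal		= bpf_map_meta_equal,
 *		.map_alloc_check	= my_map_alloc_check,
 *		.map_alloc		= my_map_alloc,
 *		.map_free		= my_map_free,
 *		.map_get_next_key	= my_map_get_next_key,
 *		.map_lookup_elem	= my_map_lookup_elem,
 *		.map_update_elem	= my_map_update_elem,
 *		.map_delete_elem	= my_map_delete_elem,
 *		.map_btf_id		= &my_map_btf_id,
 *	};
 */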

enum {
	/* Support at most 8 pointers in a BPF map value */
	BPF_MAP_VALUE_OFF_MAX = 8,
	BPF_MAP_OFF_ARR_MAX   = BPF_MAP_VALUE_OFF_MAX +
				1 + /* for bpf_spin_lock */
				1,  /* for bpf_timer */
};

enum bpf_kptr_type {
	BPF_KPTR_UNREF,
	BPF_KPTR_REF,
};

struct bpf_map_value_off_desc {
	u32 offset;
	enum bpf_kptr_type type;
	struct {
		struct btf *btf;
		struct module *module;
		btf_dtor_kfunc_t dtor;
		u32 btf_id;
	} kptr;
};

struct bpf_map_value_off {
	u32 nr_off;
	struct bpf_map_value_off_desc off[];
};

struct bpf_map_off_arr {
	u32 cnt;
	u32 field_off[BPF_MAP_OFF_ARR_MAX];
	u8 field_sz[BPF_MAP_OFF_ARR_MAX];
};

struct bpf_map {
	/* The first two cachelines with read-mostly members, some of which
	 * are also accessed in the fast path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u64 map_extra; /* any per-map-type extra fields */
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	struct bpf_map_value_off *kptr_off_tab;
	int timer_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	struct bpf_map_off_arr *off_arr;
	/* The 3rd and 4th cachelines with misc members to avoid false
	 * sharing, particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	atomic64_t writecnt;
	/* 'Ownership' of a program-containing map is claimed by the first
	 * program that is going to use this map or by the first program
	 * whose FD is stored in the map, to make sure that all callers and
	 * callees have the same prog type, JITed flag and xdp_has_frags flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
		bool xdp_has_frags;
	} owner;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline bool map_value_has_timer(const struct bpf_map *map)
{
	return map->timer_off >= 0;
}

static inline bool map_value_has_kptrs(const struct bpf_map *map)
{
	return !IS_ERR_OR_NULL(map->kptr_off_tab);
}

static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	if (unlikely(map_value_has_spin_lock(map)))
		memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
	if (unlikely(map_value_has_timer(map)))
		memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
	if (unlikely(map_value_has_kptrs(map))) {
		struct bpf_map_value_off *tab = map->kptr_off_tab;
		int i;

		for (i = 0; i < tab->nr_off; i++)
			*(u64 *)(dst + tab->off[i].offset) = 0;
	}
}

/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	u32 curr_off = 0;
	int i;

	if (likely(!map->off_arr)) {
		memcpy(dst, src, map->value_size);
		return;
	}

	for (i = 0; i < map->off_arr->cnt; i++) {
		u32 next_off = map->off_arr->field_off[i];

		memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
		curr_off += map->off_arr->field_sz[i];
	}
	memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
}
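
/* Editor's note: a worked example for copy_map_value() above, assuming a
 * hypothetical map value layout like:
 *
 *	struct my_value {
 *		u64 counter;		(offset 0, copied)
 *		struct bpf_spin_lock l;	(skipped)
 *		u64 stats;		(copied)
 *	};
 *
 * map->off_arr would then carry one entry with the lock's offset and size,
 * so the loop copies [0, lock_off) and [lock_off + lock_sz, value_size)
 * while leaving the lock bytes in dst untouched.
 */
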
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or a memory region may be read-only. We classify types into two categories:
 * base types and extended types. Extended types are base types combined with
 * a type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_types.
 */
#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied to a bpf_arg, it indicates the arg
	 * is compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),

	/* MEM was "allocated" from a different helper, and cannot be mixed
	 * with regular non-MEM_ALLOC'ed MEM types.
	 */
	MEM_ALLOC		= BIT(2 + BPF_BASE_TYPE_BITS),

	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	/* Indicates that the argument will be released. */
	OBJ_RELEASE		= BIT(5 + BPF_BASE_TYPE_BITS),

	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
	 * unreferenced and referenced kptrs loaded from a map value using a
	 * load instruction, so that they can only be dereferenced but cannot
	 * escape the BPF program into the kernel (i.e. cannot be passed as
	 * arguments to kfuncs or bpf helpers).
	 */
	PTR_UNTRUSTED		= BIT(6 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_LAST_FLAG	= PTR_UNTRUSTED,
};

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
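
/* Editor's note: a short sketch of how these bits compose. An extended type
 * is a base type OR'ed with one or more flags, e.g.
 *
 *	PTR_TO_MAP_VALUE | PTR_MAYBE_NULL == PTR_TO_MAP_VALUE_OR_NULL
 *
 * and it decomposes again by masking off the low BPF_BASE_TYPE_BITS bits;
 * the base_type()/type_flag() helpers that do this live, to the editor's
 * knowledge, in include/linux/bpf_verifier.h:
 *
 *	base_type(PTR_TO_MAP_VALUE_OR_NULL) == PTR_TO_MAP_VALUE
 *	type_flag(PTR_TO_MAP_VALUE_OR_NULL) == PTR_MAYBE_NULL
 */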

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK,	/* pointer to stack */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	ARG_PTR_TO_KPTR,	/* pointer to referenced kptr */
	__BPF_ARG_TYPE_MAX,

	/* Extended arg_types. */
	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
	ARG_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
	ARG_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_ARG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
	RET_PTR_TO_ALLOC_MEM,		/* returns a pointer to dynamically allocated memory */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
	__BPF_RET_TYPE_MAX,

	/* Extended ret_types. */
	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
	RET_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_RET_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
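
/* Editor's note: for illustration, each helper exposes a static instance of
 * the struct above. The map-lookup helper's proto in kernel/bpf/helpers.c
 * looks roughly like this (a sketch, not guaranteed verbatim):
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */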

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	__BPF_REG_TYPE_MAX,

	/* Extended reg_types. */
	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_REG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
	       insn->src_reg == BPF_PSEUDO_FUNC;
}
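
/* Editor's note: a "pseudo func" is the ld_imm64 form used for callback
 * arguments (e.g. the callback passed to bpf_for_each_map_elem): a
 * BPF_LD | BPF_IMM | BPF_DW insn whose src_reg is BPF_PSEUDO_FUNC and whose
 * imm, to the editor's understanding, identifies the target subprog rather
 * than a literal 64-bit constant.
 */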

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id, enum bpf_type_flag *flag);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_LINKS 38

struct bpf_tramp_links {
	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
	int nr_links;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
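
/* Editor's note: the control flow a generated trampoline implements for one
 * fentry/fexit pair, sketched as pseudo-C from the declarations above and
 * the BPF_TRAMP_F_* comments (not taken from arch code):
 *
 *	trampoline(args...)
 *	{
 *		__bpf_tramp_enter(im);
 *		start = __bpf_prog_enter(fentry_prog);
 *		if (start)
 *			fentry_prog->bpf_func(ctx);
 *		__bpf_prog_exit(fentry_prog, start);
 *		if (flags & BPF_TRAMP_F_CALL_ORIG)
 *			ret = orig_call(args...);
 *		(same enter/run/exit sequence for each fexit program)
 *		__bpf_tramp_exit(im);
 *	}
 */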

struct bpf_ksym {
	unsigned long		 start;
	unsigned long		 end;
	char			 name[KSYM_NAME_LEN];
	struct list_head	 lnode;
	struct latch_tree_node	 tnode;
	bool			 prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}

#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
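
/* Editor's note: typical wiring, as a sketch. The XDP dispatcher is defined
 * with these macros (DECLARE_BPF_DISPATCHER(xdp) in a header and
 * DEFINE_BPF_DISPATCHER(xdp) in net/core/filter.c, to the editor's
 * knowledge), after which switching the attached program boils down to:
 *
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
 */
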
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	bool xdp_has_frags;
	bool use_bpf_prog_pack;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	u32 verified_insns;
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array_aux {
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_tramp_link {
	struct bpf_link link;
	struct hlist_node tramp_hlist;
};

struct bpf_tracing_link {
	struct bpf_tramp_link link;
	enum bpf_attach_type attach_type;
	struct bpf_trampoline *trampoline;
	struct bpf_prog *tgt_prog;
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}

#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
	int val;
};

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
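
/* Editor's note: a small truth table for the two helpers above, derived
 * directly from the code for quick reference:
 *
 *	map_flags (prog side)			result
 *	-------------------------------------	------------------------------
 *	none					CAN_READ | CAN_WRITE, valid
 *	BPF_F_RDONLY_PROG			CAN_READ, valid
 *	BPF_F_WRONLY_PROG			CAN_WRITE, valid
 *	BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG	rejected by bpf_map_flags_access_ok()
 */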

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

static inline bool map_type_contains_progs(struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
	       map->map_type == BPF_MAP_TYPE_DEVMAP ||
	       map->map_type == BPF_MAP_TYPE_CPUMAP;
}

bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	union {
		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
		u64 bpf_cookie;
	};
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_empty_prog_array {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
};

/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a bpf program attached, use one global 'bpf_empty_prog_array'.
 * It will not be modified by the caller of bpf_prog_array_alloc()
 * (since the caller requested prog_cnt == 0), and that pointer should be
 * 'freed' by bpf_prog_array_free().
 */
extern struct bpf_empty_prog_array bpf_empty_prog_array;

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
	struct bpf_run_ctx run_ctx;
	const struct bpf_prog_array_item *prog_item;
	int retval;
};

struct bpf_trace_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 bpf_cookie;
};

static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
	struct bpf_run_ctx *old_ctx = NULL;

#ifdef CONFIG_BPF_SYSCALL
	old_ctx = current->bpf_ctx;
	current->bpf_ctx = new_ctx;
#endif
	return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
	current->bpf_ctx = old_ctx;
#endif
}

/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE	(1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN				(1 << 0)

typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);

static __always_inline u32
bpf_prog_run_array(const struct bpf_prog_array *array,
		   const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_trace_run_ctx run_ctx;
	u32 ret = 1;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");

	if (unlikely(!array))
		return ret;

	migrate_disable();
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	item = &array->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.bpf_cookie = item->bpf_cookie;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	migrate_enable();
	return ret;
}
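
/* Editor's note: a minimal caller-side sketch, assuming a prog array pointer
 * published via RCU ('my_prog_array' is a hypothetical example, not a symbol
 * from this header); this matches the "Typical usage" comment further up:
 *
 *	u32 ret;
 *
 *	rcu_read_lock();
 *	ret = bpf_prog_run_array(rcu_dereference(my_prog_array),
 *				 ctx, bpf_prog_run);
 *	rcu_read_unlock();
 */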

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
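
/* Editor's note: usage sketch. The syscall-side map update path brackets the
 * actual update roughly like this (paraphrased, not verbatim kernel code):
 *
 *	bpf_disable_instrumentation();
 *	err = bpf_map_update_value(map, f, key, value, attr->flags);
 *	bpf_enable_instrumentation();
 */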
1428
f66e448c
CF
1429extern const struct file_operations bpf_map_fops;
1430extern const struct file_operations bpf_prog_fops;
367ec3e4 1431extern const struct file_operations bpf_iter_fops;
f66e448c 1432
91cc1a99 1433#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
7de16e3a
JK
1434 extern const struct bpf_prog_ops _name ## _prog_ops; \
1435 extern const struct bpf_verifier_ops _name ## _verifier_ops;
40077e0c
JB
1436#define BPF_MAP_TYPE(_id, _ops) \
1437 extern const struct bpf_map_ops _ops;
f2e10bff 1438#define BPF_LINK_TYPE(_id, _name)
be9370a7
JB
1439#include <linux/bpf_types.h>
1440#undef BPF_PROG_TYPE
40077e0c 1441#undef BPF_MAP_TYPE
f2e10bff 1442#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map_value_off_desc *bpf_map_kptr_off_contains(struct bpf_map *map, u32 offset);
void bpf_map_free_kptr_off_tab(struct bpf_map *map);
struct bpf_map_value_off *bpf_map_copy_kptr_off_tab(const struct bpf_map *map);
bool bpf_map_equal_kptr_off_tab(const struct bpf_map *map_a, const struct bpf_map *map_b);
void bpf_map_free_kptrs(struct bpf_map *map, void *map_value);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
int generic_map_update_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags);
#else
static inline void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
		     int node)
{
	return kmalloc_node(size, flags, node);
}

static inline void *
bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	return kzalloc(size, flags);
}

static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
		     gfp_t flags)
{
	return __alloc_percpu_gfp(size, align, flags);
}
#endif
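
/*
 * Map implementations are expected to allocate through these wrappers
 * rather than plain kmalloc()/kzalloc() so the memory is charged to the
 * map's memcg, e.g. (illustrative; 'htab' stands in for a map
 * implementation's private struct):
 *
 *	elem = bpf_map_kzalloc(&htab->map, size, GFP_ATOMIC | __GFP_NOWARN);
 */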

extern int sysctl_unprivileged_bpf_disabled;

static inline bool bpf_allow_ptr_leaks(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_uninit_stack(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_ptr_to_map_access(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
	return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
struct bpf_link *bpf_link_get_curr_or_next(u32 *id);

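/*
 * Typical link-creation flow (sketch; 'do_attach' is a placeholder for
 * the type-specific attach step): prime the link to reserve an fd,
 * attach, then settle to expose the fd, or clean up so it never escapes:
 *
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	err = do_attach(link);
 *	if (err) {
 *		bpf_link_cleanup(&link_primer);
 *		return err;
 *	}
 *	return bpf_link_settle(&link_primer);
 */
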
int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...)			\
	extern int bpf_iter_ ## target(args);			\
	int __init bpf_iter_ ## target(args) { return 0; }

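/*
 * Iterator targets declare their program signature with the macro above;
 * the task iterator, for instance, is declared elsewhere in the tree as:
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 */
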
struct bpf_iter_aux_info {
	struct bpf_map *map;
};

typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
					union bpf_iter_link_info *linfo,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
					struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
					 struct bpf_link_info *info);
typedef const struct bpf_func_proto *
(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
			     const struct bpf_prog *prog);

enum bpf_iter_feature {
	BPF_ITER_RESCHED	= BIT(0),
};

#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
	const char *target;
	bpf_iter_attach_target_t attach_target;
	bpf_iter_detach_target_t detach_target;
	bpf_iter_show_fdinfo_t show_fdinfo;
	bpf_iter_fill_link_info_t fill_link_info;
	bpf_iter_get_func_proto_t get_func_proto;
	u32 ctx_arg_info_size;
	u32 feature;
	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
	const struct bpf_iter_seq_info *seq_info;
};

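/*
 * A minimal registration sketch for a hypothetical "foo" target (all
 * foo_* names are placeholders, not kernel symbols):
 *
 *	static const struct bpf_iter_reg foo_reg_info = {
 *		.target		= "foo",
 *		.attach_target	= foo_iter_attach,
 *		.detach_target	= foo_iter_detach,
 *		.feature	= BPF_ITER_RESCHED,
 *		.seq_info	= &foo_seq_info,
 *	};
 *
 * passed to bpf_iter_reg_target(&foo_reg_info) at init time.
 */
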
struct bpf_iter_meta {
	__bpf_md_ptr(struct seq_file *, seq);
	u64 session_id;
	u64 seq_num;
};

struct bpf_iter__bpf_map_elem {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(void *, value);
};

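/*
 * On the BPF program side this is the context type for map-element
 * iterators; a libbpf-style sketch (program and section names are
 * illustrative):
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		struct seq_file *seq = ctx->meta->seq;
 *
 *		if (ctx->key && ctx->value)
 *			BPF_SEQ_PRINTF(seq, "elem seen at seq_num %llu\n",
 *				       ctx->meta->seq_num);
 *		return 0;
 *	}
 */
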
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
const struct bpf_func_proto *
bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
			      struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
				struct bpf_link_info *info);

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers and a size that is a
 * multiple of sizeof(long), forced to use 'long' read/writes to try to
 * atomically copy long counters. Best-effort only. No barriers here,
 * since it _will_ race with concurrent updates from BPF programs. Called
 * from bpf syscall and mostly used with size 8 or 16 bytes, so ask
 * compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
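
/*
 * Illustrative use (not a specific call site): snapshotting a 16-byte,
 * 8-byte aligned counter pair out of map storage without tearing the
 * individual longs:
 *
 *	struct { long packets; long bytes; } snap;
 *
 *	bpf_long_memcpy(&snap, value, sizeof(snap));
 */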

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
#endif

struct btf *bpf_get_btf_vmlinux(void);

/* Map specifics */
struct xdp_frame;
struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx);
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress);

void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
		    struct net_device *dev_rx);
int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb);

/* Return the map's NUMA node as specified by userspace, or NUMA_NO_NODE */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

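/*
 * Illustrative use in a map_alloc implementation (sketch): honour
 * BPF_F_NUMA_NODE when carving out the map area:
 *
 *	int numa_node = bpf_map_attr_numa_node(attr);
 *	void *area = bpf_map_area_alloc(size, numa_node);
 */
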
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr);
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
				const union bpf_attr *kattr,
				union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);

static inline bool bpf_tracing_ctx_access(int off, int size,
					  enum bpf_access_type type)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

static inline bool bpf_tracing_btf_ctx_access(int off, int size,
					      enum bpf_access_type type,
					      const struct bpf_prog *prog,
					      struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_ctx_access(off, size, type))
		return false;
	return btf_ctx_access(off, size, type, prog, info);
}

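/*
 * BTF-typed tracing program types typically delegate their verifier
 * .is_valid_access callback to the helper above, e.g. (sketch; 'foo_'
 * names are placeholders):
 *
 *	static bool foo_is_valid_access(int off, int size,
 *					enum bpf_access_type type,
 *					const struct bpf_prog *prog,
 *					struct bpf_insn_access_aux *info)
 *	{
 *		return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
 *	}
 */
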
int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id, enum bpf_type_flag *flag);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
			  const struct btf *btf, u32 id, int off,
			  const struct btf *need_btf, u32 need_type_id,
			  bool strict);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
				struct bpf_reg_state *regs);
int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
			      const struct btf *btf, u32 func_id,
			      struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);

const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
void bpf_task_storage_free(struct task_struct *task);
bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn);
struct bpf_core_ctx {
	struct bpf_verifier_log *log;
	const struct btf *btf;
};

int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
		   int relo_idx, void *insn);

static inline bool unprivileged_ebpf_enabled(void)
{
	return !sysctl_unprivileged_bpf_disabled;
}

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
				 const struct bpf_link_ops *ops,
				 struct bpf_prog *prog)
{
}

static inline int bpf_link_prime(struct bpf_link *link,
				 struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline int bpf_link_settle(struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
{
}

static inline void bpf_link_inc(struct bpf_link *link)
{
}

static inline void bpf_link_put(struct bpf_link *link)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline void __dev_flush(void)
{
}

struct xdp_frame;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	return 0;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_frame *xdpf,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
					   struct sk_buff *skb)
{
	return -EOPNOTSUPP;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
					      const union bpf_attr *kattr,
					      union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	return NULL;
}

static inline void bpf_task_storage_free(struct task_struct *task)
{
}

static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{
	return false;
}

static inline const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn)
{
	return NULL;
}

static inline bool unprivileged_ebpf_enabled(void)
{
	return false;
}

#endif /* CONFIG_BPF_SYSCALL */

void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
			  struct btf_mod_pair *used_btfs, u32 len);

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

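/*
 * Driver-side lifecycle sketch ('foo_offload_ops' is a placeholder):
 * create one offload device per driver instance, attach each netdev,
 * and tear down in reverse order on removal:
 *
 *	bdev = bpf_offload_dev_create(&foo_offload_ops, priv);
 *	err = bpf_offload_dev_netdev_register(bdev, netdev);
 *	...
 *	bpf_offload_dev_netdev_unregister(bdev, netdev);
 *	bpf_offload_dev_destroy(bdev);
 */
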
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
int sock_map_bpf_prog_query(const union bpf_attr *attr,
			    union bpf_attr __user *uattr);

void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}

static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
				       enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
					   u64 flags)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
					  union bpf_attr __user *uattr)
{
	return -EINVAL;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_strncmp_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;

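/*
 * A program type's .get_func_proto verifier callback typically hands
 * these out, falling back to the base set (sketch; 'foo_func_proto' is
 * a placeholder and the switch contents vary per program type):
 *
 *	static const struct bpf_func_proto *
 *	foo_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */
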
const struct bpf_func_proto *tracing_prog_func_proto(
  enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	struct sock *migrating_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

void *bpf_arch_text_copy(void *dst, void *src, size_t len);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

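/*
 * Usually paired with the BTF_SET_* macros from <linux/btf_ids.h>
 * ('allowed_funcs', 'foo' and 'bar' are placeholders):
 *
 *	BTF_SET_START(allowed_funcs)
 *	BTF_ID(func, foo)
 *	BTF_ID(func, bar)
 *	BTF_SET_END(allowed_funcs)
 *
 * after which membership is tested with
 * btf_id_set_contains(&allowed_funcs, btf_id).
 */
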
#define MAX_BPRINTF_VARARGS		12

int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 **bin_buf, u32 num_args);
void bpf_bprintf_cleanup(void);

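/*
 * Callers pair these around a single formatting operation (sketch
 * modeled on the seq_file-based printing helpers; error handling
 * abbreviated):
 *
 *	u32 *bin_args;
 *	int err;
 *
 *	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args,
 *				  &bin_args, num_args);
 *	if (err < 0)
 *		return err;
 *	seq_bprintf(seq, fmt, bin_args);
 *	bpf_bprintf_cleanup();
 */
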
#endif /* _LINUX_BPF_H */