/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map have already been used at
	 * verification time.  When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the map being inserted has the
	 * same properties that the verifier relied on earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map,
				     bpf_callback_t callback_fn,
				     void *callback_ctx, u64 flags);

	/* BTF name and id of struct allocated by map_alloc */
	const char * const map_btf_name;
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u64 map_extra; /* any per-map-type extra fields */
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	int timer_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 14 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	atomic64_t writecnt;
	/* 'Ownership' of a program-containing map is claimed by the first program
	 * that is going to use this map or by the first program whose FD is
	 * stored in the map, to make sure that all callers and callees have the
	 * same prog type, JITed flag and xdp_has_frags flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
		bool xdp_has_frags;
	} owner;
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline bool map_value_has_timer(const struct bpf_map *map)
{
	return map->timer_off >= 0;
}

static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	if (unlikely(map_value_has_spin_lock(map)))
		memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
	if (unlikely(map_value_has_timer(map)))
		memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
}

/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;

	if (unlikely(map_value_has_spin_lock(map))) {
		s_off = map->spin_lock_off;
		s_sz = sizeof(struct bpf_spin_lock);
	}
	if (unlikely(map_value_has_timer(map))) {
		t_off = map->timer_off;
		t_sz = sizeof(struct bpf_timer);
	}

	if (unlikely(s_sz || t_sz)) {
		if (s_off < t_off || !s_sz) {
			swap(s_off, t_off);
			swap(s_sz, t_sz);
		}
		memcpy(dst, src, t_off);
		memcpy(dst + t_off + t_sz,
		       src + t_off + t_sz,
		       s_off - t_off - t_sz);
		memcpy(dst + s_off + s_sz,
		       src + s_off + s_sz,
		       map->value_size - s_off - s_sz);
	} else {
		memcpy(dst, src, map->value_size);
	}
}
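
/* Worked example (illustrative numbers only, not taken from any particular
 * map): with a 64-byte value, a bpf_timer at offset 8 (16 bytes) and a
 * bpf_spin_lock at offset 40 (4 bytes), copy_map_value() above copies the
 * three ranges [0, 8), [24, 40) and [44, 64), skipping the timer and the lock.
 */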

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or point to read-only memory. We classify types into two categories: base
 * types and extended types. Extended types are base types combined with a
 * type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_types.
 */
#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied to a bpf_arg, it indicates the arg is
	 * compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),

	/* MEM was "allocated" from a different helper, and cannot be mixed
	 * with regular non-MEM_ALLOC'ed MEM types.
	 */
	MEM_ALLOC		= BIT(2 + BPF_BASE_TYPE_BITS),

	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_LAST_FLAG	= MEM_PERCPU,
};

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))

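/* A minimal sketch of how an extended type decomposes (the EXAMPLE_* mask
 * name is made up for illustration; the in-tree helpers that do this live
 * alongside the verifier code):
 *
 *	#define EXAMPLE_BASE_TYPE_MASK	(BPF_BASE_TYPE_LIMIT - 1)
 *
 *	u32 ext   = ARG_PTR_TO_MAP_VALUE_OR_NULL;
 *	u32 base  = ext & EXAMPLE_BASE_TYPE_MASK;	// ARG_PTR_TO_MAP_VALUE
 *	u32 flags = ext & ~EXAMPLE_BASE_TYPE_MASK;	// PTR_MAYBE_NULL
 */
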
/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory that does not need to be initialized;
				 * the helper function must fill all bytes or clear
				 * them in the error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK,	/* pointer to stack */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	__BPF_ARG_TYPE_MAX,

	/* Extended arg_types. */
	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
	ARG_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_ARG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
	RET_PTR_TO_ALLOC_MEM,		/* returns a pointer to dynamically allocated memory */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
	__BPF_RET_TYPE_MAX,

	/* Extended ret_types. */
	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
	RET_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_RET_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};

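/* For illustration only, a helper prototype is typically described roughly
 * like the sketch below ("example_lookup" is a made-up name, not an in-tree
 * helper):
 *
 *	BPF_CALL_2(example_lookup, struct bpf_map *, map, void *, key)
 *	{
 *		return (unsigned long)map->ops->map_lookup_elem(map, key);
 *	}
 *
 *	static const struct bpf_func_proto example_lookup_proto = {
 *		.func		= example_lookup,
 *		.gpl_only	= false,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */
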
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	__BPF_REG_TYPE_MAX,

	/* Extended reg_types. */
	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_REG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
	       insn->src_reg == BPF_PSEUDO_FUNC;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id, enum bpf_type_flag *flag);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 38

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these functions are called from the generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
	unsigned long start;
	unsigned long end;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
	bool prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
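/* Usage sketch ("example" below is an illustrative dispatcher name, not one
 * defined in the tree):
 *
 *	DEFINE_BPF_DISPATCHER(example)
 *
 *	// run a program through the dispatcher trampoline:
 *	ret = BPF_DISPATCHER_FUNC(example)(ctx, prog->insnsi, prog->bpf_func);
 *
 *	// re-patch the dispatcher when the attached program changes:
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(example), old_prog, new_prog);
 */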
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	bool xdp_has_frags;
	bool use_bpf_prog_pack;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	u32 verified_insns;
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace.  linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo.  It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array_aux {
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
				      struct bpf_prog *prog,
				      const struct btf_func_model *model,
				      void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}

#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
	int val;
};

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

static inline bool map_type_contains_progs(struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
	       map->map_type == BPF_MAP_TYPE_DEVMAP ||
	       map->map_type == BPF_MAP_TYPE_CPUMAP;
}

bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	union {
		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
		u64 bpf_cookie;
	};
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_empty_prog_array {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
};

/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a bpf program attached, use the single global 'bpf_empty_prog_array'.
 * It will not be modified by the caller of bpf_prog_array_alloc()
 * (since the caller requested prog_cnt == 0); that pointer should still be
 * 'freed' with bpf_prog_array_free().
 */
extern struct bpf_empty_prog_array bpf_empty_prog_array;

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
	struct bpf_run_ctx run_ctx;
	const struct bpf_prog_array_item *prog_item;
	int retval;
};

struct bpf_trace_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 bpf_cookie;
};

static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
	struct bpf_run_ctx *old_ctx = NULL;

#ifdef CONFIG_BPF_SYSCALL
	old_ctx = current->bpf_ctx;
	current->bpf_ctx = new_ctx;
#endif
	return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
	current->bpf_ctx = old_ctx;
#endif
}

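/* Typical save/run/restore pattern (sketch; it mirrors what the
 * BPF_PROG_RUN_ARRAY* helpers below do):
 *
 *	struct bpf_cg_run_ctx run_ctx;
 *	struct bpf_run_ctx *old_run_ctx;
 *
 *	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 *	// ... run programs; helpers may reach run_ctx via current->bpf_ctx ...
 *	bpf_reset_run_ctx(old_run_ctx);
 */
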
/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE	(1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN				(1 << 0)

typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);

static __always_inline int
BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
			    const void *ctx, bpf_prog_run_fn run_prog,
			    int retval, u32 *ret_flags)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 func_ret;

	run_ctx.retval = retval;
	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		func_ret = run_prog(prog, ctx);
		if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval))
			run_ctx.retval = -EPERM;
		*(ret_flags) |= (func_ret >> 1);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return run_ctx.retval;
}

static __always_inline int
BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
		      const void *ctx, bpf_prog_run_fn run_prog,
		      int retval)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;

	run_ctx.retval = retval;
	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval))
			run_ctx.retval = -EPERM;
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return run_ctx.retval;
}

static __always_inline u32
BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
		   const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_trace_run_ctx run_ctx;
	u32 ret = 1;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	if (unlikely(!array))
		goto out;
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	item = &array->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.bpf_cookie = item->bpf_cookie;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
out:
	rcu_read_unlock();
	migrate_enable();
	return ret;
}

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order
 * bit indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr().
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -err              skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		u32 _flags = 0;				\
		bool _cn;				\
		u32 _ret;				\
		_ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, 0, &_flags); \
		_cn = _flags & BPF_RET_SET_CN;		\
		if (_ret && !IS_ERR_VALUE((long)_ret))	\
			_ret = -EFAULT;			\
		if (!_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : _ret);		\
		_ret;					\
	})

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	this_cpu_dec(bpf_prog_active);
	migrate_enable();
}

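/* Typical pattern around map-internal updates (sketch):
 *
 *	bpf_disable_instrumentation();
 *	// ... touch state that instrumentation programs could recurse into ...
 *	bpf_enable_instrumentation();
 */
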
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int  generic_map_lookup_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_update_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_delete_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

48edc1f7
RG
1525#ifdef CONFIG_MEMCG_KMEM
1526void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
1527 int node);
1528void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
1529void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
1530 size_t align, gfp_t flags);
1531#else
1532static inline void *
1533bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
1534 int node)
1535{
1536 return kmalloc_node(size, flags, node);
1537}
1538
1539static inline void *
1540bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
1541{
1542 return kzalloc(size, flags);
1543}
1544
1545static inline void __percpu *
1546bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
1547 gfp_t flags)
1548{
1549 return __alloc_percpu_gfp(size, align, flags);
1550}
1551#endif
1552
1be7f75d
AS
1553extern int sysctl_unprivileged_bpf_disabled;
1554
2c78ee89
AS
1555static inline bool bpf_allow_ptr_leaks(void)
1556{
1557 return perfmon_capable();
1558}
1559
01f810ac
AM
1560static inline bool bpf_allow_uninit_stack(void)
1561{
1562 return perfmon_capable();
1563}
1564
41c48f3a
AI
1565static inline bool bpf_allow_ptr_to_map_access(void)
1566{
1567 return perfmon_capable();
1568}
1569
2c78ee89
AS
1570static inline bool bpf_bypass_spec_v1(void)
1571{
1572 return perfmon_capable();
1573}
1574
1575static inline bool bpf_bypass_spec_v4(void)
1576{
1577 return perfmon_capable();
1578}
1579
6e71b04a 1580int bpf_map_new_fd(struct bpf_map *map, int flags);
b2197755
DB
1581int bpf_prog_new_fd(struct bpf_prog *prog);
1582
f2e10bff 1583void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
a3b80e10
AN
1584 const struct bpf_link_ops *ops, struct bpf_prog *prog);
1585int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
1586int bpf_link_settle(struct bpf_link_primer *primer);
1587void bpf_link_cleanup(struct bpf_link_primer *primer);
70ed506c
AN
1588void bpf_link_inc(struct bpf_link *link);
1589void bpf_link_put(struct bpf_link *link);
1590int bpf_link_new_fd(struct bpf_link *link);
babf3164 1591struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
70ed506c
AN
1592struct bpf_link *bpf_link_get_from_fd(u32 ufd);
1593
b2197755 1594int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
6e71b04a 1595int bpf_obj_get_user(const char __user *pathname, int flags);
b2197755 1596
21aef70e 1597#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
e5158d98 1598#define DEFINE_BPF_ITER_FUNC(target, args...) \
21aef70e
YS
1599 extern int bpf_iter_ ## target(args); \
1600 int __init bpf_iter_ ## target(args) { return 0; }
15d83c4d 1601
f9c79272 1602struct bpf_iter_aux_info {
a5cbe05a 1603 struct bpf_map *map;
f9c79272
YS
1604};
1605
5e7b3020
YS
1606typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
1607 union bpf_iter_link_info *linfo,
1608 struct bpf_iter_aux_info *aux);
1609typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
6b0a249a
YS
1610typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
1611 struct seq_file *seq);
1612typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
1613 struct bpf_link_info *info);
3cee6fb8
MKL
1614typedef const struct bpf_func_proto *
1615(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
1616 const struct bpf_prog *prog);
a5cbe05a 1617
cf83b2d2
YS
1618enum bpf_iter_feature {
1619 BPF_ITER_RESCHED = BIT(0),
1620};
1621
3c32cc1b 1622#define BPF_ITER_CTX_ARG_MAX 2
ae24345d
YS
1623struct bpf_iter_reg {
1624 const char *target;
5e7b3020
YS
1625 bpf_iter_attach_target_t attach_target;
1626 bpf_iter_detach_target_t detach_target;
6b0a249a
YS
1627 bpf_iter_show_fdinfo_t show_fdinfo;
1628 bpf_iter_fill_link_info_t fill_link_info;
3cee6fb8 1629 bpf_iter_get_func_proto_t get_func_proto;
3c32cc1b 1630 u32 ctx_arg_info_size;
cf83b2d2 1631 u32 feature;
3c32cc1b 1632 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
14fc6bd6 1633 const struct bpf_iter_seq_info *seq_info;
ae24345d
YS
1634};
1635
e5158d98
YS
1636struct bpf_iter_meta {
1637 __bpf_md_ptr(struct seq_file *, seq);
1638 u64 session_id;
1639 u64 seq_num;
1640};
1641
a5cbe05a
YS
1642struct bpf_iter__bpf_map_elem {
1643 __bpf_md_ptr(struct bpf_iter_meta *, meta);
1644 __bpf_md_ptr(struct bpf_map *, map);
1645 __bpf_md_ptr(void *, key);
1646 __bpf_md_ptr(void *, value);
1647};
1648
15172a46 1649int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
ab2ee4fc 1650void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
15d83c4d 1651bool bpf_iter_prog_supported(struct bpf_prog *prog);
3cee6fb8
MKL
1652const struct bpf_func_proto *
1653bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
af2ac3e1 1654int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
ac51d99b 1655int bpf_iter_new_fd(struct bpf_link *link);
367ec3e4 1656bool bpf_link_is_iter(struct bpf_link *link);
e5158d98
YS
1657struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
1658int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
b76f2226
YS
1659void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
1660 struct seq_file *seq);
1661int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
1662 struct bpf_link_info *info);
ae24345d 1663
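/*
 * Hedged sketch (assumption, modeled on in-tree iterator targets): the
 * minimal pieces a new "foo" iterator target would provide before calling
 * bpf_iter_reg_target(). "foo", foo_seq_info and example_foo_iter_reg are
 * hypothetical names; a real target would also fill in seq_ops, ctx_arg_info
 * and the attach/detach callbacks as needed.
 */
DEFINE_BPF_ITER_FUNC(foo, struct bpf_iter_meta *meta)

static const struct bpf_iter_seq_info foo_seq_info;	/* hypothetical */

static const struct bpf_iter_reg example_foo_iter_reg = {
	.target			= "foo",
	.feature		= BPF_ITER_RESCHED,
	.ctx_arg_info_size	= 0,
	.seq_info		= &foo_seq_info,
};
/* registration: bpf_iter_reg_target(&example_foo_iter_reg); */
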
314ee05e
YS
1664int map_set_for_each_callback_args(struct bpf_verifier_env *env,
1665 struct bpf_func_state *caller,
1666 struct bpf_func_state *callee);
1667
15a07b33
AS
1668int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
1669int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
1670int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
1671 u64 flags);
1672int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
1673 u64 flags);
d056a788 1674
557c0c6e 1675int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
15a07b33 1676
d056a788
DB
1677int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
1678 void *key, void *value, u64 map_flags);
14dc6f04 1679int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
bcc6b1b7
MKL
1680int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
1681 void *key, void *value, u64 map_flags);
14dc6f04 1682int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
d056a788 1683
6e71b04a 1684int bpf_get_file_flag(int flags);
af2ac3e1 1685int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
dcab51f1 1686 size_t actual_size);
6e71b04a 1687
15a07b33
AS
 1688/* memcpy that is used with 8-byte aligned pointers and a size that is a
 1689 * multiple of 8, forced to use 'long' read/writes to try to atomically copy
 1690 * long counters. Best-effort only. No barriers here, since it _will_ race
 1691 * with concurrent updates from BPF programs. Called from the bpf syscall and
 1692 * mostly used with size 8 or 16 bytes, so ask the compiler to inline it.
1693 */
1694static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
1695{
1696 const long *lsrc = src;
1697 long *ldst = dst;
1698
1699 size /= sizeof(long);
1700 while (size--)
1701 *ldst++ = *lsrc++;
1702}
1703
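/*
 * Hedged usage sketch (assumption): copying a single 8-byte, 8-byte-aligned
 * counter with bpf_long_memcpy() so it is moved as one long-sized access.
 * example_copy_counter() is a hypothetical helper name.
 */
static inline void example_copy_counter(u64 *dst, const u64 *src)
{
	bpf_long_memcpy(dst, src, sizeof(u64));
}
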
61e021f3 1704/* verify correctness of eBPF program */
af2ac3e1 1705int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
a643bff7
AN
1706
1707#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01 1708void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
a643bff7 1709#endif
46f55cff 1710
76654e67
AM
1711struct btf *bpf_get_btf_vmlinux(void);
1712
46f55cff 1713/* Map specifics */
d53ad5d8 1714struct xdp_frame;
6d5fc195 1715struct sk_buff;
e6a4750f
BT
1716struct bpf_dtab_netdev;
1717struct bpf_cpu_map_entry;
67f29e07 1718
1d233886 1719void __dev_flush(void);
d53ad5d8 1720int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
1d233886 1721 struct net_device *dev_rx);
d53ad5d8 1722int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
38edddb8 1723 struct net_device *dev_rx);
d53ad5d8 1724int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
e624d4ed 1725 struct bpf_map *map, bool exclude_ingress);
6d5fc195
TM
1726int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
1727 struct bpf_prog *xdp_prog);
e624d4ed
HL
1728int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
1729 struct bpf_prog *xdp_prog, struct bpf_map *map,
1730 bool exclude_ingress);
46f55cff 1731
cdfafe98 1732void __cpu_map_flush(void);
d53ad5d8 1733int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
9c270af3 1734 struct net_device *dev_rx);
11941f8a
KKD
1735int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
1736 struct sk_buff *skb);
9c270af3 1737
96eabe7a
MKL
 1738/* Return the map's NUMA node as specified by userspace */
1739static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
1740{
1741 return (attr->map_flags & BPF_F_NUMA_NODE) ?
1742 attr->numa_node : NUMA_NO_NODE;
1743}
1744
040ee692 1745struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
5dc4c4b7 1746int array_map_alloc_check(union bpf_attr *attr);
040ee692 1747
c695865c
SF
1748int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1749 union bpf_attr __user *uattr);
1750int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
1751 union bpf_attr __user *uattr);
da00d2f1
KS
1752int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1753 const union bpf_attr *kattr,
1754 union bpf_attr __user *uattr);
c695865c
SF
1755int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1756 const union bpf_attr *kattr,
1757 union bpf_attr __user *uattr);
1b4d60ec
SL
1758int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
1759 const union bpf_attr *kattr,
1760 union bpf_attr __user *uattr);
7c32e8f8
LB
1761int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
1762 const union bpf_attr *kattr,
1763 union bpf_attr __user *uattr);
9e15db66
AS
1764bool btf_ctx_access(int off, int size, enum bpf_access_type type,
1765 const struct bpf_prog *prog,
1766 struct bpf_insn_access_aux *info);
35346ab6
HT
1767
1768static inline bool bpf_tracing_ctx_access(int off, int size,
1769 enum bpf_access_type type)
1770{
1771 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1772 return false;
1773 if (type != BPF_READ)
1774 return false;
1775 if (off % size != 0)
1776 return false;
1777 return true;
1778}
1779
1780static inline bool bpf_tracing_btf_ctx_access(int off, int size,
1781 enum bpf_access_type type,
1782 const struct bpf_prog *prog,
1783 struct bpf_insn_access_aux *info)
1784{
1785 if (!bpf_tracing_ctx_access(off, size, type))
1786 return false;
1787 return btf_ctx_access(off, size, type, prog, info);
1788}
1789
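/*
 * Hedged sketch (assumption, mirroring how BTF-aware tracing program types
 * typically wire this up): an is_valid_access() verifier callback that simply
 * defers to bpf_tracing_btf_ctx_access(). example_is_valid_access() is a
 * hypothetical name.
 */
static inline bool example_is_valid_access(int off, int size,
					   enum bpf_access_type type,
					   const struct bpf_prog *prog,
					   struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}
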
22dc4a0f 1790int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
9e15db66
AS
1791 const struct btf_type *t, int off, int size,
1792 enum bpf_access_type atype,
c6f1bfe8 1793 u32 *next_btf_id, enum bpf_type_flag *flag);
faaf4a79 1794bool btf_struct_ids_match(struct bpf_verifier_log *log,
22dc4a0f
AN
1795 const struct btf *btf, u32 id, int off,
1796 const struct btf *need_btf, u32 need_type_id);
9e15db66 1797
fec56f58
AS
1798int btf_distill_func_proto(struct bpf_verifier_log *log,
1799 struct btf *btf,
1800 const struct btf_type *func_proto,
1801 const char *func_name,
1802 struct btf_func_model *m);
1803
51c39bb1 1804struct bpf_reg_state;
34747c41
MKL
1805int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
1806 struct bpf_reg_state *regs);
e6ac2450
MKL
1807int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
1808 const struct btf *btf, u32 func_id,
1809 struct bpf_reg_state *regs);
51c39bb1
AS
1810int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
1811 struct bpf_reg_state *reg);
efc68158 1812int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
be8704ff 1813 struct btf *btf, const struct btf_type *t);
8c1b6e69 1814
7e6897f9 1815struct bpf_prog *bpf_prog_by_id(u32 id);
005142b8 1816struct bpf_link *bpf_link_by_id(u32 id);
7e6897f9 1817
6890896b 1818const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
a10787e6 1819void bpf_task_storage_free(struct task_struct *task);
e6ac2450
MKL
1820bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
1821const struct btf_func_model *
1822bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
1823 const struct bpf_insn *insn);
fbd94c7a
AS
1824struct bpf_core_ctx {
1825 struct bpf_verifier_log *log;
1826 const struct btf *btf;
1827};
1828
1829int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
1830 int relo_idx, void *insn);
1831
44a3918c
JP
1832static inline bool unprivileged_ebpf_enabled(void)
1833{
1834 return !sysctl_unprivileged_bpf_disabled;
1835}
1836
9c270af3 1837#else /* !CONFIG_BPF_SYSCALL */
0fc174de
DB
1838static inline struct bpf_prog *bpf_prog_get(u32 ufd)
1839{
1840 return ERR_PTR(-EOPNOTSUPP);
1841}
1842
248f346f
JK
1843static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
1844 enum bpf_prog_type type,
288b3de5 1845 bool attach_drv)
248f346f
JK
1846{
1847 return ERR_PTR(-EOPNOTSUPP);
1848}
1849
85192dbf 1850static inline void bpf_prog_add(struct bpf_prog *prog, int i)
cc2e0b3f 1851{
cc2e0b3f 1852}
113214be 1853
c540594f
DB
1854static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
1855{
1856}
1857
0fc174de
DB
1858static inline void bpf_prog_put(struct bpf_prog *prog)
1859{
1860}
6d67942d 1861
85192dbf 1862static inline void bpf_prog_inc(struct bpf_prog *prog)
aa6a5f3c 1863{
aa6a5f3c 1864}
5ccb071e 1865
a6f6df69
JF
1866static inline struct bpf_prog *__must_check
1867bpf_prog_inc_not_zero(struct bpf_prog *prog)
1868{
1869 return ERR_PTR(-EOPNOTSUPP);
1870}
1871
6cc7d1e8
AN
1872static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
1873 const struct bpf_link_ops *ops,
1874 struct bpf_prog *prog)
1875{
1876}
1877
1878static inline int bpf_link_prime(struct bpf_link *link,
1879 struct bpf_link_primer *primer)
1880{
1881 return -EOPNOTSUPP;
1882}
1883
1884static inline int bpf_link_settle(struct bpf_link_primer *primer)
1885{
1886 return -EOPNOTSUPP;
1887}
1888
1889static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
1890{
1891}
1892
1893static inline void bpf_link_inc(struct bpf_link *link)
1894{
1895}
1896
1897static inline void bpf_link_put(struct bpf_link *link)
1898{
1899}
1900
6e71b04a 1901static inline int bpf_obj_get_user(const char __user *pathname, int flags)
98589a09
SL
1902{
1903 return -EOPNOTSUPP;
1904}
1905
1d233886 1906static inline void __dev_flush(void)
46f55cff
JF
1907{
1908}
9c270af3 1909
d53ad5d8 1910struct xdp_frame;
67f29e07 1911struct bpf_dtab_netdev;
e6a4750f 1912struct bpf_cpu_map_entry;
67f29e07 1913
1d233886 1914static inline
d53ad5d8 1915int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
1d233886
THJ
1916 struct net_device *dev_rx)
1917{
1918 return 0;
1919}
1920
67f29e07 1921static inline
d53ad5d8 1922int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
38edddb8 1923 struct net_device *dev_rx)
67f29e07
JDB
1924{
1925 return 0;
1926}
1927
e624d4ed 1928static inline
d53ad5d8 1929int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
e624d4ed
HL
1930 struct bpf_map *map, bool exclude_ingress)
1931{
1932 return 0;
1933}
1934
6d5fc195
TM
1935struct sk_buff;
1936
1937static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
1938 struct sk_buff *skb,
1939 struct bpf_prog *xdp_prog)
1940{
1941 return 0;
1942}
1943
e624d4ed
HL
1944static inline
1945int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
1946 struct bpf_prog *xdp_prog, struct bpf_map *map,
1947 bool exclude_ingress)
1948{
1949 return 0;
1950}
1951
cdfafe98 1952static inline void __cpu_map_flush(void)
9c270af3
JDB
1953{
1954}
1955
9c270af3 1956static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
d53ad5d8 1957 struct xdp_frame *xdpf,
9c270af3
JDB
1958 struct net_device *dev_rx)
1959{
1960 return 0;
1961}
040ee692 1962
11941f8a
KKD
1963static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
1964 struct sk_buff *skb)
1965{
1966 return -EOPNOTSUPP;
1967}
1968
040ee692
AV
1969static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
1970 enum bpf_prog_type type)
1971{
1972 return ERR_PTR(-EOPNOTSUPP);
1973}
c695865c
SF
1974
1975static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
1976 const union bpf_attr *kattr,
1977 union bpf_attr __user *uattr)
1978{
1979 return -ENOTSUPP;
1980}
1981
1982static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
1983 const union bpf_attr *kattr,
1984 union bpf_attr __user *uattr)
1985{
1986 return -ENOTSUPP;
1987}
1988
da00d2f1
KS
1989static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1990 const union bpf_attr *kattr,
1991 union bpf_attr __user *uattr)
1992{
1993 return -ENOTSUPP;
1994}
1995
c695865c
SF
1996static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1997 const union bpf_attr *kattr,
1998 union bpf_attr __user *uattr)
1999{
2000 return -ENOTSUPP;
2001}
6332be04 2002
7c32e8f8
LB
2003static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
2004 const union bpf_attr *kattr,
2005 union bpf_attr __user *uattr)
2006{
2007 return -ENOTSUPP;
2008}
2009
6332be04
DB
2010static inline void bpf_map_put(struct bpf_map *map)
2011{
2012}
7e6897f9
BT
2013
2014static inline struct bpf_prog *bpf_prog_by_id(u32 id)
2015{
2016 return ERR_PTR(-ENOTSUPP);
2017}
6890896b
SF
2018
2019static inline const struct bpf_func_proto *
2020bpf_base_func_proto(enum bpf_func_id func_id)
2021{
2022 return NULL;
2023}
a10787e6
SL
2024
2025static inline void bpf_task_storage_free(struct task_struct *task)
2026{
2027}
e6ac2450
MKL
2028
2029static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2030{
2031 return false;
2032}
2033
2034static inline const struct btf_func_model *
2035bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2036 const struct bpf_insn *insn)
2037{
2038 return NULL;
2039}
44a3918c
JP
2040
2041static inline bool unprivileged_ebpf_enabled(void)
2042{
2043 return false;
2044}
2045
61e021f3 2046#endif /* CONFIG_BPF_SYSCALL */
09756af4 2047
541c3bad
AN
2048void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2049 struct btf_mod_pair *used_btfs, u32 len);
2050
479321e9
JK
2051static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
2052 enum bpf_prog_type type)
2053{
2054 return bpf_prog_get_type_dev(ufd, type, false);
2055}
2056
936f8946
AN
2057void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2058 struct bpf_map **used_maps, u32 len);
2059
040ee692
AV
2060bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
2061
ab3f0063
JK
2062int bpf_prog_offload_compile(struct bpf_prog *prog);
2063void bpf_prog_offload_destroy(struct bpf_prog *prog);
675fc275
JK
2064int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
2065 struct bpf_prog *prog);
ab3f0063 2066
52775b33
JK
2067int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
2068
a3884572
JK
2069int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
2070int bpf_map_offload_update_elem(struct bpf_map *map,
2071 void *key, void *value, u64 flags);
2072int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
2073int bpf_map_offload_get_next_key(struct bpf_map *map,
2074 void *key, void *next_key);
2075
09728266 2076bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
a3884572 2077
1385d755 2078struct bpf_offload_dev *
dd27c2e3 2079bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
602144c2 2080void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
dd27c2e3 2081void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
602144c2
JK
2082int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
2083 struct net_device *netdev);
2084void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
2085 struct net_device *netdev);
fd4f227d 2086bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
9fd7c555 2087
ab3f0063
JK
2088#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
2089int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
2090
0d830032 2091static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
ab3f0063 2092{
9a18eedb 2093 return aux->offload_requested;
ab3f0063 2094}
a3884572
JK
2095
2096static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
2097{
2098 return unlikely(map->ops == &bpf_map_offload_ops);
2099}
2100
2101struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
2102void bpf_map_offload_map_free(struct bpf_map *map);
79a7f8bd
AS
2103int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2104 const union bpf_attr *kattr,
2105 union bpf_attr __user *uattr);
17edea21
CW
2106
2107int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
2108int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
2109int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
748cd572
DZ
2110int sock_map_bpf_prog_query(const union bpf_attr *attr,
2111 union bpf_attr __user *uattr);
2112
17edea21
CW
2113void sock_map_unhash(struct sock *sk);
2114void sock_map_close(struct sock *sk, long timeout);
ab3f0063
JK
2115#else
2116static inline int bpf_prog_offload_init(struct bpf_prog *prog,
2117 union bpf_attr *attr)
2118{
2119 return -EOPNOTSUPP;
2120}
2121
2122static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
2123{
2124 return false;
2125}
a3884572
JK
2126
2127static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
2128{
2129 return false;
2130}
2131
2132static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
2133{
2134 return ERR_PTR(-EOPNOTSUPP);
2135}
2136
2137static inline void bpf_map_offload_map_free(struct bpf_map *map)
2138{
2139}
79a7f8bd
AS
2140
2141static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2142 const union bpf_attr *kattr,
2143 union bpf_attr __user *uattr)
2144{
2145 return -ENOTSUPP;
2146}
fdb5c453 2147
88759609 2148#ifdef CONFIG_BPF_SYSCALL
604326b4
DB
2149static inline int sock_map_get_from_fd(const union bpf_attr *attr,
2150 struct bpf_prog *prog)
fdb5c453
SY
2151{
2152 return -EINVAL;
2153}
bb0de313
LB
2154
2155static inline int sock_map_prog_detach(const union bpf_attr *attr,
2156 enum bpf_prog_type ptype)
2157{
2158 return -EOPNOTSUPP;
2159}
13b79d3f
LB
2160
2161static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
2162 u64 flags)
2163{
2164 return -EOPNOTSUPP;
2165}
748cd572
DZ
2166
2167static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
2168 union bpf_attr __user *uattr)
2169{
2170 return -EINVAL;
2171}
17edea21
CW
2172#endif /* CONFIG_BPF_SYSCALL */
2173#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
5dc4c4b7 2174
17edea21
CW
2175#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
2176void bpf_sk_reuseport_detach(struct sock *sk);
2177int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
2178 void *value);
2179int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
2180 void *value, u64 map_flags);
2181#else
2182static inline void bpf_sk_reuseport_detach(struct sock *sk)
2183{
2184}
5dc4c4b7 2185
17edea21 2186#ifdef CONFIG_BPF_SYSCALL
5dc4c4b7
MKL
2187static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
2188 void *key, void *value)
2189{
2190 return -EOPNOTSUPP;
2191}
2192
2193static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
2194 void *key, void *value,
2195 u64 map_flags)
2196{
2197 return -EOPNOTSUPP;
2198}
2199#endif /* CONFIG_BPF_SYSCALL */
2200#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
2201
d0003ec0 2202/* verifier prototypes for helper functions called from eBPF programs */
a2c83fff
DB
2203extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
2204extern const struct bpf_func_proto bpf_map_update_elem_proto;
2205extern const struct bpf_func_proto bpf_map_delete_elem_proto;
f1a2e44a
MV
2206extern const struct bpf_func_proto bpf_map_push_elem_proto;
2207extern const struct bpf_func_proto bpf_map_pop_elem_proto;
2208extern const struct bpf_func_proto bpf_map_peek_elem_proto;
d0003ec0 2209
03e69b50 2210extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
c04167ce 2211extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
2d0e30c3 2212extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
04fd61ab 2213extern const struct bpf_func_proto bpf_tail_call_proto;
17ca8cbf 2214extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
71d19214 2215extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
ffeedafb
AS
2216extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
2217extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
2218extern const struct bpf_func_proto bpf_get_current_comm_proto;
d5a3b1f6 2219extern const struct bpf_func_proto bpf_get_stackid_proto;
c195651e 2220extern const struct bpf_func_proto bpf_get_stack_proto;
fa28dcb8 2221extern const struct bpf_func_proto bpf_get_task_stack_proto;
7b04d6d6
SL
2222extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
2223extern const struct bpf_func_proto bpf_get_stack_proto_pe;
174a79ff 2224extern const struct bpf_func_proto bpf_sock_map_update_proto;
81110384 2225extern const struct bpf_func_proto bpf_sock_hash_update_proto;
bf6fa2c8 2226extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
0f09abd1 2227extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
604326b4
DB
2228extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
2229extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
2230extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
2231extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
d83525ca
AS
2232extern const struct bpf_func_proto bpf_spin_lock_proto;
2233extern const struct bpf_func_proto bpf_spin_unlock_proto;
cd339431 2234extern const struct bpf_func_proto bpf_get_local_storage_proto;
d7a4cb9b
AI
2235extern const struct bpf_func_proto bpf_strtol_proto;
2236extern const struct bpf_func_proto bpf_strtoul_proto;
0d01da6a 2237extern const struct bpf_func_proto bpf_tcp_sock_proto;
5576b991 2238extern const struct bpf_func_proto bpf_jiffies64_proto;
b4490c5c 2239extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
0456ea17 2240extern const struct bpf_func_proto bpf_event_output_data_proto;
457f4436
AN
2241extern const struct bpf_func_proto bpf_ringbuf_output_proto;
2242extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
2243extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
2244extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
2245extern const struct bpf_func_proto bpf_ringbuf_query_proto;
af7ec138 2246extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
478cfbdf
YS
2247extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
2248extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
2249extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
0d4fad3e 2250extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
9eeb3aa3 2251extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
07be4c4a 2252extern const struct bpf_func_proto bpf_copy_from_user_proto;
c4d0bfb4 2253extern const struct bpf_func_proto bpf_snprintf_btf_proto;
7b15523a 2254extern const struct bpf_func_proto bpf_snprintf_proto;
eaa6bcb7 2255extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
63d9b80d 2256extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
d0551261 2257extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
b60da495 2258extern const struct bpf_func_proto bpf_sock_from_file_proto;
c5dbb89f 2259extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
a10787e6
SL
2260extern const struct bpf_func_proto bpf_task_storage_get_proto;
2261extern const struct bpf_func_proto bpf_task_storage_delete_proto;
69c087ba 2262extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
3d78417b 2263extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
3cee6fb8
MKL
2264extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
2265extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
d6aef08a 2266extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
7c7e3d31 2267extern const struct bpf_func_proto bpf_find_vma_proto;
e6f2dd0f 2268extern const struct bpf_func_proto bpf_loop_proto;
c5fb1993 2269extern const struct bpf_func_proto bpf_strncmp_proto;
376040e4 2270extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
cd339431 2271
958a3f2d
JO
2272const struct bpf_func_proto *tracing_prog_func_proto(
2273 enum bpf_func_id func_id, const struct bpf_prog *prog);
2274
3ad00405
DB
2275/* Shared helpers among cBPF and eBPF. */
2276void bpf_user_rnd_init_once(void);
2277u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
6890896b 2278u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
3ad00405 2279
c64b7983 2280#if defined(CONFIG_NET)
46f8bc92
MKL
2281bool bpf_sock_common_is_valid_access(int off, int size,
2282 enum bpf_access_type type,
2283 struct bpf_insn_access_aux *info);
c64b7983
JS
2284bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2285 struct bpf_insn_access_aux *info);
2286u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
2287 const struct bpf_insn *si,
2288 struct bpf_insn *insn_buf,
2289 struct bpf_prog *prog,
2290 u32 *target_size);
2291#else
46f8bc92
MKL
2292static inline bool bpf_sock_common_is_valid_access(int off, int size,
2293 enum bpf_access_type type,
2294 struct bpf_insn_access_aux *info)
2295{
2296 return false;
2297}
c64b7983
JS
2298static inline bool bpf_sock_is_valid_access(int off, int size,
2299 enum bpf_access_type type,
2300 struct bpf_insn_access_aux *info)
2301{
2302 return false;
2303}
2304static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
2305 const struct bpf_insn *si,
2306 struct bpf_insn *insn_buf,
2307 struct bpf_prog *prog,
2308 u32 *target_size)
2309{
2310 return 0;
2311}
2312#endif
2313
655a51e5 2314#ifdef CONFIG_INET
91cc1a99
AS
2315struct sk_reuseport_kern {
2316 struct sk_buff *skb;
2317 struct sock *sk;
2318 struct sock *selected_sk;
d5e4ddae 2319 struct sock *migrating_sk;
91cc1a99
AS
2320 void *data_end;
2321 u32 hash;
2322 u32 reuseport_id;
2323 bool bind_inany;
2324};
655a51e5
MKL
2325bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2326 struct bpf_insn_access_aux *info);
2327
2328u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
2329 const struct bpf_insn *si,
2330 struct bpf_insn *insn_buf,
2331 struct bpf_prog *prog,
2332 u32 *target_size);
7f94208c
Y
2333
2334bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2335 struct bpf_insn_access_aux *info);
2336
2337u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
2338 const struct bpf_insn *si,
2339 struct bpf_insn *insn_buf,
2340 struct bpf_prog *prog,
2341 u32 *target_size);
655a51e5
MKL
2342#else
2343static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
2344 enum bpf_access_type type,
2345 struct bpf_insn_access_aux *info)
2346{
2347 return false;
2348}
2349
2350static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
2351 const struct bpf_insn *si,
2352 struct bpf_insn *insn_buf,
2353 struct bpf_prog *prog,
2354 u32 *target_size)
2355{
2356 return 0;
2357}
7f94208c
Y
2358static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
2359 enum bpf_access_type type,
2360 struct bpf_insn_access_aux *info)
2361{
2362 return false;
2363}
2364
2365static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
2366 const struct bpf_insn *si,
2367 struct bpf_insn *insn_buf,
2368 struct bpf_prog *prog,
2369 u32 *target_size)
2370{
2371 return 0;
2372}
655a51e5
MKL
2373#endif /* CONFIG_INET */
2374
5964b200 2375enum bpf_text_poke_type {
b553a6ec
DB
2376 BPF_MOD_CALL,
2377 BPF_MOD_JUMP,
5964b200 2378};
4b3da77b 2379
5964b200
AS
2380int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2381 void *addr1, void *addr2);
2382
ebc1415d
SL
2383void *bpf_arch_text_copy(void *dst, void *src, size_t len);
2384
eae2e83e 2385struct btf_id_set;
2af30f11 2386bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
eae2e83e 2387
335ff499
DM
2388#define MAX_BPRINTF_VARARGS 12
2389
48cac3f4
FR
2390int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
2391 u32 **bin_buf, u32 num_args);
2392void bpf_bprintf_cleanup(void);
d9c9e4db 2393
99c55f7d 2394#endif /* _LINUX_BPF_H */