/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>
#include <linux/btf.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);
	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map have been used during
	 * verification.  When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the map being inserted has
	 * the same properties that the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map,
				     bpf_callback_t callback_fn,
				     void *callback_ctx, u64 flags);

	/* BTF id of struct allocated by map_alloc */
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
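
/*
 * Illustrative sketch (not part of this header): a minimal map type only
 * needs a handful of these callbacks wired up.  All example_* names below
 * are hypothetical; real implementations live in kernel/bpf/.
 *
 *	static struct bpf_map *example_map_alloc(union bpf_attr *attr);
 *	static void example_map_free(struct bpf_map *map);
 *	static void *example_map_lookup_elem(struct bpf_map *map, void *key);
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_meta_equal		= bpf_map_meta_equal,
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *	};
 */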

enum {
	/* Support at most 8 pointers in a BPF map value */
	BPF_MAP_VALUE_OFF_MAX = 8,
	BPF_MAP_OFF_ARR_MAX   = BPF_MAP_VALUE_OFF_MAX +
				1 + /* for bpf_spin_lock */
				1,  /* for bpf_timer */
};

enum bpf_kptr_type {
	BPF_KPTR_UNREF,
	BPF_KPTR_REF,
};

struct bpf_map_value_off_desc {
	u32 offset;
	enum bpf_kptr_type type;
	struct {
		struct btf *btf;
		struct module *module;
		btf_dtor_kfunc_t dtor;
		u32 btf_id;
	} kptr;
};

struct bpf_map_value_off {
	u32 nr_off;
	struct bpf_map_value_off_desc off[];
};

struct bpf_map_off_arr {
	u32 cnt;
	u32 field_off[BPF_MAP_OFF_ARR_MAX];
	u8 field_sz[BPF_MAP_OFF_ARR_MAX];
};

struct bpf_map {
	/* The first two cachelines hold read-mostly members, some of
	 * which are also accessed on the fast path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u64 map_extra; /* any per-map-type extra fields */
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	struct bpf_map_value_off *kptr_off_tab;
	int timer_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	struct bpf_map_off_arr *off_arr;
	/* The 3rd and 4th cachelines hold misc members, placed here to
	 * avoid false sharing, particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	atomic64_t writecnt;
	/* 'Ownership' of a program-containing map is claimed by the first
	 * program that is going to use this map or by the first program
	 * whose FD is stored in the map, to make sure that all callers
	 * and callees have the same prog type, JITed flag and
	 * xdp_has_frags flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
		bool xdp_has_frags;
	} owner;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline bool map_value_has_timer(const struct bpf_map *map)
{
	return map->timer_off >= 0;
}

static inline bool map_value_has_kptrs(const struct bpf_map *map)
{
	return !IS_ERR_OR_NULL(map->kptr_off_tab);
}

static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	if (unlikely(map_value_has_spin_lock(map)))
		memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
	if (unlikely(map_value_has_timer(map)))
		memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
	if (unlikely(map_value_has_kptrs(map))) {
		struct bpf_map_value_off *tab = map->kptr_off_tab;
		int i;

		for (i = 0; i < tab->nr_off; i++)
			*(u64 *)(dst + tab->off[i].offset) = 0;
	}
}

/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	u32 curr_off = 0;
	int i;

	if (likely(!map->off_arr)) {
		memcpy(dst, src, map->value_size);
		return;
	}

	for (i = 0; i < map->off_arr->cnt; i++) {
		u32 next_off = map->off_arr->field_off[i];

		memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
		/* Advance past the special field: "curr_off += field_sz[i]"
		 * would resume copying inside the field we just skipped.
		 */
		curr_off = next_off + map->off_arr->field_sz[i];
	}
	memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
}
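
/*
 * Worked example (hypothetical layout): for a 48-byte value with a
 * struct bpf_timer at offset 16, off_arr holds cnt == 1,
 * field_off[0] == 16 and field_sz[0] == sizeof(struct bpf_timer).
 * copy_map_value() then copies bytes [0, 16), skips over the timer,
 * and resumes copying at offset 16 + sizeof(struct bpf_timer).
 */
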
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or a memory region may be read-only. We classify types into two categories:
 * base types and extended types. Extended types are base types combined with
 * a type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_types.
 */
#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied to bpf_arg, it indicates the arg is
	 * compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),

	/* MEM was "allocated" from a different helper, and cannot be mixed
	 * with regular non-MEM_ALLOC'ed MEM types.
	 */
	MEM_ALLOC		= BIT(2 + BPF_BASE_TYPE_BITS),

	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	/* Indicates that the argument will be released. */
	OBJ_RELEASE		= BIT(5 + BPF_BASE_TYPE_BITS),

	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
	 * unreferenced and referenced kptrs loaded from a map value using a
	 * load instruction, so that they can only be dereferenced but not
	 * escape the BPF program into the kernel (i.e. cannot be passed as
	 * arguments to kfuncs or BPF helpers).
	 */
	PTR_UNTRUSTED		= BIT(6 + BPF_BASE_TYPE_BITS),

	MEM_UNINIT		= BIT(7 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_FLAG_MAX,
	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,
};

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
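
/*
 * Illustrative sketch: an extended type is a base type OR'ed with flags,
 * so the low BPF_BASE_TYPE_BITS bits select the base type and the higher
 * bits carry the modifiers.  The verifier headers provide a base_type()
 * helper for this; shown here only for the example:
 *
 *	u32 t = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE;
 *
 *	(t & (BPF_BASE_TYPE_LIMIT - 1)) == PTR_TO_MAP_VALUE;	// base type
 *	(t & PTR_MAYBE_NULL) != 0;	// extended: reg needs a NULL check
 */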

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */

	/* Used to prototype bpf_memcmp() and other functions that access data
	 * on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK,	/* pointer to stack */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	ARG_PTR_TO_KPTR,	/* pointer to referenced kptr */
	__BPF_ARG_TYPE_MAX,

	/* Extended arg_types. */
	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
	ARG_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM,
	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
	ARG_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
	/* pointer to memory does not need to be initialized, helper function must fill
	 * all bytes or clear them in error case.
	 */
	ARG_PTR_TO_UNINIT_MEM		= MEM_UNINIT | ARG_PTR_TO_MEM,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_ARG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
	RET_PTR_TO_ALLOC_MEM,		/* returns a pointer to dynamically allocated memory */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
	__BPF_RET_TYPE_MAX,

	/* Extended ret_types. */
	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
	RET_PTR_TO_ALLOC_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_RET_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
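
/*
 * Illustrative sketch, modeled on the in-tree bpf_map_lookup_elem proto
 * (see kernel/bpf/helpers.c for the authoritative version): the proto
 * tells the verifier how to type-check each call site of the helper.
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */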

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	__BPF_REG_TYPE_MAX,

	/* Extended reg_types. */
	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_REG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
	       insn->src_reg == BPF_PSEUDO_FUNC;
}
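
/*
 * Illustrative sketch: a "pseudo func" is the ld_imm64 instruction the
 * verifier uses to carry a pointer to a BPF subprog (e.g. the callback
 * passed to bpf_for_each_map_elem()).  A matching instruction would look
 * roughly like this (the imm value below is a made-up insn offset):
 *
 *	struct bpf_insn insn = {
 *		.code    = BPF_LD | BPF_IMM | BPF_DW,
 *		.src_reg = BPF_PSEUDO_FUNC,
 *		.imm     = 5,
 *	};
 *	bpf_pseudo_func(&insn);	// true
 */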

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id, enum bpf_type_flag *flag);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

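/*
 * Illustrative sketch: callers combine these flags to describe the
 * trampoline they need; e.g. the kprobe+kretprobe-equivalent case from
 * the use-case comment below would be roughly:
 *
 *	u32 flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 */
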
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_LINKS 38

struct bpf_tramp_links {
	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
	int nr_links;
};

struct bpf_tramp_run_ctx;

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
				       struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
	unsigned long		 start;
	unsigned long		 end;
	char			 name[KSYM_NAME_LEN];
	struct list_head	 lnode;
	struct latch_tree_node	 tnode;
	bool			 prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is a BPF_PROG_TYPE_EXT program that extends another
	 * BPF program by replacing one of its functions. func.addr is the
	 * address of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}

#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
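
/*
 * Illustrative usage sketch (the XDP dispatcher in net/core is the
 * in-tree user of this pattern; names below follow that usage):
 *
 *	DEFINE_BPF_DISPATCHER(xdp)	// emits bpf_dispatcher_xdp_func()
 *
 *	// run a prog through the dispatcher's direct-call trampoline:
 *	ret = BPF_DISPATCHER_FUNC(xdp)(ctx, prog->insnsi, prog->bpf_func);
 *
 *	// retarget the dispatcher when the attached program changes:
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev, next);
 */
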
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	bool xdp_has_frags;
	bool use_bpf_prog_pack;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	u32 verified_insns;
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array_aux {
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_tramp_link {
	struct bpf_link link;
	struct hlist_node tramp_hlist;
	u64 cookie;
};

struct bpf_tracing_link {
	struct bpf_tramp_link link;
	enum bpf_attach_type attach_type;
	struct bpf_trampoline *trampoline;
	struct bpf_prog *tgt_prog;
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}

#ifdef CONFIG_NET
/* Define it here to avoid the use of forward declaration */
struct bpf_dummy_ops_state {
	int val;
};

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
		(BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
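
/*
 * Worked example: a map created with BPF_F_RDONLY_PROG maps to
 * BPF_MAP_CAN_READ only, so the verifier rejects programs that write to
 * it; with neither prog-side flag set, both capabilities are granted.
 *
 *	bpf_map_flags_access_ok(BPF_F_RDONLY_PROG);			// true
 *	bpf_map_flags_access_ok(BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);	// false
 */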

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

static inline bool map_type_contains_progs(struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
	       map->map_type == BPF_MAP_TYPE_DEVMAP ||
	       map->map_type == BPF_MAP_TYPE_CPUMAP;
}

bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	union {
		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
		u64 bpf_cookie;
	};
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_empty_prog_array {
	struct bpf_prog_array hdr;
	struct bpf_prog *null_prog;
};

/* To avoid allocating an empty bpf_prog_array for cgroups that don't have
 * a BPF program attached, use one global 'bpf_empty_prog_array'.  It will
 * not be modified.  The caller of bpf_prog_array_alloc() gets this pointer
 * back when it requests prog_cnt == 0, and that pointer should still be
 * 'freed' with bpf_prog_array_free().
 */
extern struct bpf_empty_prog_array bpf_empty_prog_array;

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
	struct bpf_run_ctx run_ctx;
	const struct bpf_prog_array_item *prog_item;
	int retval;
};

struct bpf_trace_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 bpf_cookie;
};

struct bpf_tramp_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 bpf_cookie;
	struct bpf_run_ctx *saved_run_ctx;
};

static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
	struct bpf_run_ctx *old_ctx = NULL;

#ifdef CONFIG_BPF_SYSCALL
	old_ctx = current->bpf_ctx;
	current->bpf_ctx = new_ctx;
#endif
	return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
	current->bpf_ctx = old_ctx;
#endif
}

/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE	(1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN				(1 << 0)

typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);

static __always_inline u32
bpf_prog_run_array(const struct bpf_prog_array *array,
		   const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_trace_run_ctx run_ctx;
	u32 ret = 1;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");

	if (unlikely(!array))
		return ret;

	migrate_disable();
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	item = &array->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.bpf_cookie = item->bpf_cookie;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	migrate_enable();
	return ret;
}
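
/*
 * Illustrative usage sketch (the tracing attach points do roughly this;
 * 'event' is a placeholder for whatever owns the array): run every
 * program in the array under RCU and AND the return values together.
 *
 *	u32 ret;
 *
 *	rcu_read_lock();
 *	ret = bpf_prog_run_array(rcu_dereference(event->prog_array),
 *				 ctx, bpf_prog_run);
 *	rcu_read_unlock();
 */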

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
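
/*
 * Illustrative usage sketch: syscall-side map operations that can race
 * with tracing programs bracket the critical section like this (generic
 * sketch; see the map update path in kernel/bpf/syscall.c):
 *
 *	bpf_disable_instrumentation();
 *	err = map->ops->map_update_elem(map, key, value, flags);
 *	bpf_enable_instrumentation();
 */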

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

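/*
 * Illustrative sketch: with the definitions above, an entry in
 * <linux/bpf_types.h> such as
 *
 *	BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
 *
 * expands to:
 *
 *	extern const struct bpf_map_ops array_map_ops;
 */
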
extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map_value_off_desc *bpf_map_kptr_off_contains(struct bpf_map *map, u32 offset);
void bpf_map_free_kptr_off_tab(struct bpf_map *map);
struct bpf_map_value_off *bpf_map_copy_kptr_off_tab(const struct bpf_map *map);
bool bpf_map_equal_kptr_off_tab(const struct bpf_map *map_a, const struct bpf_map *map_b);
void bpf_map_free_kptrs(struct bpf_map *map, void *map_value);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int  generic_map_lookup_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_update_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_delete_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

48edc1f7
RG
1504#ifdef CONFIG_MEMCG_KMEM
1505void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
1506 int node);
1507void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
1508void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
1509 size_t align, gfp_t flags);
1510#else
1511static inline void *
1512bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
1513 int node)
1514{
1515 return kmalloc_node(size, flags, node);
1516}
1517
1518static inline void *
1519bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
1520{
1521 return kzalloc(size, flags);
1522}
1523
1524static inline void __percpu *
1525bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
1526 gfp_t flags)
1527{
1528 return __alloc_percpu_gfp(size, align, flags);
1529}
1530#endif
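
/* Hedged sketch: map implementations allocate through these wrappers so
 * memory is charged to the memcg captured at map creation time, e.g. a
 * hashtab-style update path:
 *
 *	l_new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
 *				     GFP_NOWAIT | __GFP_NOWARN,
 *				     htab->map.numa_node);
 *	if (!l_new)
 *		return ERR_PTR(-ENOMEM);
 *
 * htab and elem_size are illustrative names for the map's private state.
 */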
1531
1be7f75d
AS
1532extern int sysctl_unprivileged_bpf_disabled;
1533
2c78ee89
AS
1534static inline bool bpf_allow_ptr_leaks(void)
1535{
1536 return perfmon_capable();
1537}
1538
01f810ac
AM
1539static inline bool bpf_allow_uninit_stack(void)
1540{
1541 return perfmon_capable();
1542}
1543
41c48f3a
AI
1544static inline bool bpf_allow_ptr_to_map_access(void)
1545{
1546 return perfmon_capable();
1547}
1548
2c78ee89
AS
1549static inline bool bpf_bypass_spec_v1(void)
1550{
1551 return perfmon_capable();
1552}
1553
1554static inline bool bpf_bypass_spec_v4(void)
1555{
1556 return perfmon_capable();
1557}
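
/* Hedged note: the verifier samples these capability checks once at load
 * time, roughly:
 *
 *	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
 *	env->bypass_spec_v1 = bpf_bypass_spec_v1();
 *	env->bypass_spec_v4 = bpf_bypass_spec_v4();
 *
 * so CAP_PERFMON matters when the program is loaded, not when it runs.
 */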
1558
6e71b04a 1559int bpf_map_new_fd(struct bpf_map *map, int flags);
b2197755
DB
1560int bpf_prog_new_fd(struct bpf_prog *prog);
1561
f2e10bff 1562void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
a3b80e10
AN
1563 const struct bpf_link_ops *ops, struct bpf_prog *prog);
1564int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
1565int bpf_link_settle(struct bpf_link_primer *primer);
1566void bpf_link_cleanup(struct bpf_link_primer *primer);
70ed506c
AN
1567void bpf_link_inc(struct bpf_link *link);
1568void bpf_link_put(struct bpf_link *link);
1569int bpf_link_new_fd(struct bpf_link *link);
babf3164 1570struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
70ed506c 1571struct bpf_link *bpf_link_get_from_fd(u32 ufd);
9f883612 1572struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
70ed506c 1573
b2197755 1574int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
6e71b04a 1575int bpf_obj_get_user(const char __user *pathname, int flags);
b2197755 1576
21aef70e 1577#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
e5158d98 1578#define DEFINE_BPF_ITER_FUNC(target, args...) \
21aef70e
YS
1579 extern int bpf_iter_ ## target(args); \
1580 int __init bpf_iter_ ## target(args) { return 0; }
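
/* Hedged example: the in-tree task iterator declares its entry point as
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 *
 * The empty __init body is never meant to run; the symbol exists so that
 * BTF carries the iterator's context-argument types for the verifier.
 */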
15d83c4d 1581
f9c79272 1582struct bpf_iter_aux_info {
a5cbe05a 1583 struct bpf_map *map;
f9c79272
YS
1584};
1585
5e7b3020
YS
1586typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
1587 union bpf_iter_link_info *linfo,
1588 struct bpf_iter_aux_info *aux);
1589typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
6b0a249a
YS
1590typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
1591 struct seq_file *seq);
1592typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
1593 struct bpf_link_info *info);
3cee6fb8
MKL
1594typedef const struct bpf_func_proto *
1595(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
1596 const struct bpf_prog *prog);
a5cbe05a 1597
cf83b2d2
YS
1598enum bpf_iter_feature {
1599 BPF_ITER_RESCHED = BIT(0),
1600};
1601
3c32cc1b 1602#define BPF_ITER_CTX_ARG_MAX 2
ae24345d
YS
1603struct bpf_iter_reg {
1604 const char *target;
5e7b3020
YS
1605 bpf_iter_attach_target_t attach_target;
1606 bpf_iter_detach_target_t detach_target;
6b0a249a
YS
1607 bpf_iter_show_fdinfo_t show_fdinfo;
1608 bpf_iter_fill_link_info_t fill_link_info;
3cee6fb8 1609 bpf_iter_get_func_proto_t get_func_proto;
3c32cc1b 1610 u32 ctx_arg_info_size;
cf83b2d2 1611 u32 feature;
3c32cc1b 1612 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
14fc6bd6 1613 const struct bpf_iter_seq_info *seq_info;
ae24345d
YS
1614};
1615
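/* Hedged sketch of a target registration (modeled on in-tree targets;
 * the names are illustrative):
 *
 *	static const struct bpf_iter_reg task_reg_info = {
 *		.target			= "task",
 *		.feature		= BPF_ITER_RESCHED,
 *		.ctx_arg_info_size	= 1,
 *		.ctx_arg_info		= {
 *			{ offsetof(struct bpf_iter__task, task),
 *			  PTR_TO_BTF_ID_OR_NULL },
 *		},
 *		.seq_info		= &task_seq_info,
 *	};
 *
 *	return bpf_iter_reg_target(&task_reg_info);
 */
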
e5158d98
YS
1616struct bpf_iter_meta {
1617 __bpf_md_ptr(struct seq_file *, seq);
1618 u64 session_id;
1619 u64 seq_num;
1620};
1621
a5cbe05a
YS
1622struct bpf_iter__bpf_map_elem {
1623 __bpf_md_ptr(struct bpf_iter_meta *, meta);
1624 __bpf_md_ptr(struct bpf_map *, map);
1625 __bpf_md_ptr(void *, key);
1626 __bpf_md_ptr(void *, value);
1627};
1628
15172a46 1629int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
ab2ee4fc 1630void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
15d83c4d 1631bool bpf_iter_prog_supported(struct bpf_prog *prog);
3cee6fb8
MKL
1632const struct bpf_func_proto *
1633bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
af2ac3e1 1634int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
ac51d99b 1635int bpf_iter_new_fd(struct bpf_link *link);
367ec3e4 1636bool bpf_link_is_iter(struct bpf_link *link);
e5158d98
YS
1637struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
1638int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
b76f2226
YS
1639void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
1640 struct seq_file *seq);
1641int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
1642 struct bpf_link_info *info);
ae24345d 1643
314ee05e
YS
1644int map_set_for_each_callback_args(struct bpf_verifier_env *env,
1645 struct bpf_func_state *caller,
1646 struct bpf_func_state *callee);
1647
15a07b33
AS
1648int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
1649int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
1650int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
1651 u64 flags);
1652int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
1653 u64 flags);
d056a788 1654
557c0c6e 1655int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
15a07b33 1656
d056a788
DB
1657int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
1658 void *key, void *value, u64 map_flags);
14dc6f04 1659int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
bcc6b1b7
MKL
1660int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
1661 void *key, void *value, u64 map_flags);
14dc6f04 1662int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
d056a788 1663
6e71b04a 1664int bpf_get_file_flag(int flags);
af2ac3e1 1665int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
dcab51f1 1666 size_t actual_size);
6e71b04a 1667
15a07b33
AS
1668/* memcpy that is used with 8-byte aligned pointers and a size that is a
1669 * multiple of 8, forced to use 'long' reads/writes to try to atomically
1670 * copy long counters. Best-effort only: no barriers here, since it _will_
1671 * race with concurrent updates from BPF programs. Called from the bpf
1672 * syscall, mostly with size 8 or 16 bytes, so ask the compiler to inline it.
1673 */
1674static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
1675{
1676 const long *lsrc = src;
1677 long *ldst = dst;
1678
1679 size /= sizeof(long);
1680 while (size--)
1681 *ldst++ = *lsrc++;
1682}
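
/* Hedged usage sketch: the per-cpu map copy paths use this to snapshot one
 * value per possible CPU into the user-visible buffer, e.g.:
 *
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 *
 * where size is the value size rounded up to 8 bytes.
 */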
1683
61e021f3 1684/* verify the correctness of an eBPF program */
af2ac3e1 1685int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
a643bff7
AN
1686
1687#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01 1688void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
a643bff7 1689#endif
46f55cff 1690
76654e67
AM
1691struct btf *bpf_get_btf_vmlinux(void);
1692
46f55cff 1693/* Map specifics */
d53ad5d8 1694struct xdp_frame;
6d5fc195 1695struct sk_buff;
e6a4750f
BT
1696struct bpf_dtab_netdev;
1697struct bpf_cpu_map_entry;
67f29e07 1698
1d233886 1699void __dev_flush(void);
d53ad5d8 1700int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
1d233886 1701 struct net_device *dev_rx);
d53ad5d8 1702int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
38edddb8 1703 struct net_device *dev_rx);
d53ad5d8 1704int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
e624d4ed 1705 struct bpf_map *map, bool exclude_ingress);
6d5fc195
TM
1706int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
1707 struct bpf_prog *xdp_prog);
e624d4ed
HL
1708int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
1709 struct bpf_prog *xdp_prog, struct bpf_map *map,
1710 bool exclude_ingress);
46f55cff 1711
cdfafe98 1712void __cpu_map_flush(void);
d53ad5d8 1713int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
9c270af3 1714 struct net_device *dev_rx);
11941f8a
KKD
1715int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
1716 struct sk_buff *skb);
9c270af3 1717
96eabe7a
MKL
1718/* Return the map's NUMA node as specified by userspace, or NUMA_NO_NODE */
1719static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
1720{
1721 return (attr->map_flags & BPF_F_NUMA_NODE) ?
1722 attr->numa_node : NUMA_NO_NODE;
1723}
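
/* Hedged example: map_alloc() implementations feed this straight into the
 * area allocator (array and array_size are illustrative):
 *
 *	int numa_node = bpf_map_attr_numa_node(attr);
 *
 *	array = bpf_map_area_alloc(array_size, numa_node);
 */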
1724
040ee692 1725struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
5dc4c4b7 1726int array_map_alloc_check(union bpf_attr *attr);
040ee692 1727
c695865c
SF
1728int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1729 union bpf_attr __user *uattr);
1730int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
1731 union bpf_attr __user *uattr);
da00d2f1
KS
1732int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1733 const union bpf_attr *kattr,
1734 union bpf_attr __user *uattr);
c695865c
SF
1735int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1736 const union bpf_attr *kattr,
1737 union bpf_attr __user *uattr);
1b4d60ec
SL
1738int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
1739 const union bpf_attr *kattr,
1740 union bpf_attr __user *uattr);
7c32e8f8
LB
1741int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
1742 const union bpf_attr *kattr,
1743 union bpf_attr __user *uattr);
9e15db66
AS
1744bool btf_ctx_access(int off, int size, enum bpf_access_type type,
1745 const struct bpf_prog *prog,
1746 struct bpf_insn_access_aux *info);
35346ab6
HT
1747
1748static inline bool bpf_tracing_ctx_access(int off, int size,
1749 enum bpf_access_type type)
1750{
1751 if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1752 return false;
1753 if (type != BPF_READ)
1754 return false;
1755 if (off % size != 0)
1756 return false;
1757 return true;
1758}
1759
1760static inline bool bpf_tracing_btf_ctx_access(int off, int size,
1761 enum bpf_access_type type,
1762 const struct bpf_prog *prog,
1763 struct bpf_insn_access_aux *info)
1764{
1765 if (!bpf_tracing_ctx_access(off, size, type))
1766 return false;
1767 return btf_ctx_access(off, size, type, prog, info);
1768}
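
/* Hedged sketch: a tracing program type's is_valid_access() callback can
 * simply be (mirroring kernel/trace/bpf_trace.c):
 *
 *	static bool tracing_prog_is_valid_access(int off, int size,
 *						 enum bpf_access_type type,
 *						 const struct bpf_prog *prog,
 *						 struct bpf_insn_access_aux *info)
 *	{
 *		return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
 *	}
 */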
1769
22dc4a0f 1770int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
9e15db66
AS
1771 const struct btf_type *t, int off, int size,
1772 enum bpf_access_type atype,
c6f1bfe8 1773 u32 *next_btf_id, enum bpf_type_flag *flag);
faaf4a79 1774bool btf_struct_ids_match(struct bpf_verifier_log *log,
22dc4a0f 1775 const struct btf *btf, u32 id, int off,
2ab3b380
KKD
1776 const struct btf *need_btf, u32 need_type_id,
1777 bool strict);
9e15db66 1778
fec56f58
AS
1779int btf_distill_func_proto(struct bpf_verifier_log *log,
1780 struct btf *btf,
1781 const struct btf_type *func_proto,
1782 const char *func_name,
1783 struct btf_func_model *m);
1784
51c39bb1 1785struct bpf_reg_state;
34747c41
MKL
1786int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
1787 struct bpf_reg_state *regs);
e6ac2450
MKL
1788int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
1789 const struct btf *btf, u32 func_id,
1790 struct bpf_reg_state *regs);
51c39bb1
AS
1791int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
1792 struct bpf_reg_state *reg);
efc68158 1793int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
be8704ff 1794 struct btf *btf, const struct btf_type *t);
8c1b6e69 1795
7e6897f9 1796struct bpf_prog *bpf_prog_by_id(u32 id);
005142b8 1797struct bpf_link *bpf_link_by_id(u32 id);
7e6897f9 1798
6890896b 1799const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
a10787e6 1800void bpf_task_storage_free(struct task_struct *task);
e6ac2450
MKL
1801bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
1802const struct btf_func_model *
1803bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
1804 const struct bpf_insn *insn);
fbd94c7a
AS
1805struct bpf_core_ctx {
1806 struct bpf_verifier_log *log;
1807 const struct btf *btf;
1808};
1809
1810int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
1811 int relo_idx, void *insn);
1812
44a3918c
JP
1813static inline bool unprivileged_ebpf_enabled(void)
1814{
1815 return !sysctl_unprivileged_bpf_disabled;
1816}
1817
9c270af3 1818#else /* !CONFIG_BPF_SYSCALL */
0fc174de
DB
1819static inline struct bpf_prog *bpf_prog_get(u32 ufd)
1820{
1821 return ERR_PTR(-EOPNOTSUPP);
1822}
1823
248f346f
JK
1824static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
1825 enum bpf_prog_type type,
288b3de5 1826 bool attach_drv)
248f346f
JK
1827{
1828 return ERR_PTR(-EOPNOTSUPP);
1829}
1830
85192dbf 1831static inline void bpf_prog_add(struct bpf_prog *prog, int i)
cc2e0b3f 1832{
cc2e0b3f 1833}
113214be 1834
c540594f
DB
1835static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
1836{
1837}
1838
0fc174de
DB
1839static inline void bpf_prog_put(struct bpf_prog *prog)
1840{
1841}
6d67942d 1842
85192dbf 1843static inline void bpf_prog_inc(struct bpf_prog *prog)
aa6a5f3c 1844{
aa6a5f3c 1845}
5ccb071e 1846
a6f6df69
JF
1847static inline struct bpf_prog *__must_check
1848bpf_prog_inc_not_zero(struct bpf_prog *prog)
1849{
1850 return ERR_PTR(-EOPNOTSUPP);
1851}
1852
6cc7d1e8
AN
1853static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
1854 const struct bpf_link_ops *ops,
1855 struct bpf_prog *prog)
1856{
1857}
1858
1859static inline int bpf_link_prime(struct bpf_link *link,
1860 struct bpf_link_primer *primer)
1861{
1862 return -EOPNOTSUPP;
1863}
1864
1865static inline int bpf_link_settle(struct bpf_link_primer *primer)
1866{
1867 return -EOPNOTSUPP;
1868}
1869
1870static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
1871{
1872}
1873
1874static inline void bpf_link_inc(struct bpf_link *link)
1875{
1876}
1877
1878static inline void bpf_link_put(struct bpf_link *link)
1879{
1880}
1881
6e71b04a 1882static inline int bpf_obj_get_user(const char __user *pathname, int flags)
98589a09
SL
1883{
1884 return -EOPNOTSUPP;
1885}
1886
1d233886 1887static inline void __dev_flush(void)
46f55cff
JF
1888{
1889}
9c270af3 1890
d53ad5d8 1891struct xdp_frame;
67f29e07 1892struct bpf_dtab_netdev;
e6a4750f 1893struct bpf_cpu_map_entry;
67f29e07 1894
1d233886 1895static inline
d53ad5d8 1896int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
1d233886
THJ
1897 struct net_device *dev_rx)
1898{
1899 return 0;
1900}
1901
67f29e07 1902static inline
d53ad5d8 1903int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
38edddb8 1904 struct net_device *dev_rx)
67f29e07
JDB
1905{
1906 return 0;
1907}
1908
e624d4ed 1909static inline
d53ad5d8 1910int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
e624d4ed
HL
1911 struct bpf_map *map, bool exclude_ingress)
1912{
1913 return 0;
1914}
1915
6d5fc195
TM
1916struct sk_buff;
1917
1918static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
1919 struct sk_buff *skb,
1920 struct bpf_prog *xdp_prog)
1921{
1922 return 0;
1923}
1924
e624d4ed
HL
1925static inline
1926int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
1927 struct bpf_prog *xdp_prog, struct bpf_map *map,
1928 bool exclude_ingress)
1929{
1930 return 0;
1931}
1932
cdfafe98 1933static inline void __cpu_map_flush(void)
9c270af3
JDB
1934{
1935}
1936
9c270af3 1937static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
d53ad5d8 1938 struct xdp_frame *xdpf,
9c270af3
JDB
1939 struct net_device *dev_rx)
1940{
1941 return 0;
1942}
040ee692 1943
11941f8a
KKD
1944static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
1945 struct sk_buff *skb)
1946{
1947 return -EOPNOTSUPP;
1948}
1949
040ee692
AV
1950static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
1951 enum bpf_prog_type type)
1952{
1953 return ERR_PTR(-EOPNOTSUPP);
1954}
c695865c
SF
1955
1956static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
1957 const union bpf_attr *kattr,
1958 union bpf_attr __user *uattr)
1959{
1960 return -ENOTSUPP;
1961}
1962
1963static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
1964 const union bpf_attr *kattr,
1965 union bpf_attr __user *uattr)
1966{
1967 return -ENOTSUPP;
1968}
1969
da00d2f1
KS
1970static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1971 const union bpf_attr *kattr,
1972 union bpf_attr __user *uattr)
1973{
1974 return -ENOTSUPP;
1975}
1976
c695865c
SF
1977static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1978 const union bpf_attr *kattr,
1979 union bpf_attr __user *uattr)
1980{
1981 return -ENOTSUPP;
1982}
6332be04 1983
7c32e8f8
LB
1984static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
1985 const union bpf_attr *kattr,
1986 union bpf_attr __user *uattr)
1987{
1988 return -ENOTSUPP;
1989}
1990
6332be04
DB
1991static inline void bpf_map_put(struct bpf_map *map)
1992{
1993}
7e6897f9
BT
1994
1995static inline struct bpf_prog *bpf_prog_by_id(u32 id)
1996{
1997 return ERR_PTR(-ENOTSUPP);
1998}
6890896b
SF
1999
2000static inline const struct bpf_func_proto *
2001bpf_base_func_proto(enum bpf_func_id func_id)
2002{
2003 return NULL;
2004}
a10787e6
SL
2005
2006static inline void bpf_task_storage_free(struct task_struct *task)
2007{
2008}
e6ac2450
MKL
2009
2010static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2011{
2012 return false;
2013}
2014
2015static inline const struct btf_func_model *
2016bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2017 const struct bpf_insn *insn)
2018{
2019 return NULL;
2020}
44a3918c
JP
2021
2022static inline bool unprivileged_ebpf_enabled(void)
2023{
2024 return false;
2025}
2026
61e021f3 2027#endif /* CONFIG_BPF_SYSCALL */
09756af4 2028
541c3bad
AN
2029void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2030 struct btf_mod_pair *used_btfs, u32 len);
2031
479321e9
JK
2032static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
2033 enum bpf_prog_type type)
2034{
2035 return bpf_prog_get_type_dev(ufd, type, false);
2036}
2037
936f8946
AN
2038void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2039 struct bpf_map **used_maps, u32 len);
2040
040ee692
AV
2041bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
2042
ab3f0063
JK
2043int bpf_prog_offload_compile(struct bpf_prog *prog);
2044void bpf_prog_offload_destroy(struct bpf_prog *prog);
675fc275
JK
2045int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
2046 struct bpf_prog *prog);
ab3f0063 2047
52775b33
JK
2048int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
2049
a3884572
JK
2050int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
2051int bpf_map_offload_update_elem(struct bpf_map *map,
2052 void *key, void *value, u64 flags);
2053int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
2054int bpf_map_offload_get_next_key(struct bpf_map *map,
2055 void *key, void *next_key);
2056
09728266 2057bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
a3884572 2058
1385d755 2059struct bpf_offload_dev *
dd27c2e3 2060bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
602144c2 2061void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
dd27c2e3 2062void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
602144c2
JK
2063int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
2064 struct net_device *netdev);
2065void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
2066 struct net_device *netdev);
fd4f227d 2067bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
9fd7c555 2068
ab3f0063
JK
2069#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
2070int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
2071
0d830032 2072static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
ab3f0063 2073{
9a18eedb 2074 return aux->offload_requested;
ab3f0063 2075}
a3884572
JK
2076
2077static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
2078{
2079 return unlikely(map->ops == &bpf_map_offload_ops);
2080}
2081
2082struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
2083void bpf_map_offload_map_free(struct bpf_map *map);
79a7f8bd
AS
2084int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2085 const union bpf_attr *kattr,
2086 union bpf_attr __user *uattr);
17edea21
CW
2087
2088int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
2089int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
2090int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
748cd572
DZ
2091int sock_map_bpf_prog_query(const union bpf_attr *attr,
2092 union bpf_attr __user *uattr);
2093
17edea21
CW
2094void sock_map_unhash(struct sock *sk);
2095void sock_map_close(struct sock *sk, long timeout);
ab3f0063
JK
2096#else
2097static inline int bpf_prog_offload_init(struct bpf_prog *prog,
2098 union bpf_attr *attr)
2099{
2100 return -EOPNOTSUPP;
2101}
2102
2103static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
2104{
2105 return false;
2106}
a3884572
JK
2107
2108static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
2109{
2110 return false;
2111}
2112
2113static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
2114{
2115 return ERR_PTR(-EOPNOTSUPP);
2116}
2117
2118static inline void bpf_map_offload_map_free(struct bpf_map *map)
2119{
2120}
79a7f8bd
AS
2121
2122static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2123 const union bpf_attr *kattr,
2124 union bpf_attr __user *uattr)
2125{
2126 return -ENOTSUPP;
2127}
fdb5c453 2128
88759609 2129#ifdef CONFIG_BPF_SYSCALL
604326b4
DB
2130static inline int sock_map_get_from_fd(const union bpf_attr *attr,
2131 struct bpf_prog *prog)
fdb5c453
SY
2132{
2133 return -EINVAL;
2134}
bb0de313
LB
2135
2136static inline int sock_map_prog_detach(const union bpf_attr *attr,
2137 enum bpf_prog_type ptype)
2138{
2139 return -EOPNOTSUPP;
2140}
13b79d3f
LB
2141
2142static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
2143 u64 flags)
2144{
2145 return -EOPNOTSUPP;
2146}
748cd572
DZ
2147
2148static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
2149 union bpf_attr __user *uattr)
2150{
2151 return -EINVAL;
2152}
17edea21
CW
2153#endif /* CONFIG_BPF_SYSCALL */
2154#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
5dc4c4b7 2155
17edea21
CW
2156#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
2157void bpf_sk_reuseport_detach(struct sock *sk);
2158int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
2159 void *value);
2160int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
2161 void *value, u64 map_flags);
2162#else
2163static inline void bpf_sk_reuseport_detach(struct sock *sk)
2164{
2165}
5dc4c4b7 2166
17edea21 2167#ifdef CONFIG_BPF_SYSCALL
5dc4c4b7
MKL
2168static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
2169 void *key, void *value)
2170{
2171 return -EOPNOTSUPP;
2172}
2173
2174static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
2175 void *key, void *value,
2176 u64 map_flags)
2177{
2178 return -EOPNOTSUPP;
2179}
2180#endif /* CONFIG_BPF_SYSCALL */
2181#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
2182
d0003ec0 2183/* verifier prototypes for helper functions called from eBPF programs */
a2c83fff
DB
2184extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
2185extern const struct bpf_func_proto bpf_map_update_elem_proto;
2186extern const struct bpf_func_proto bpf_map_delete_elem_proto;
f1a2e44a
MV
2187extern const struct bpf_func_proto bpf_map_push_elem_proto;
2188extern const struct bpf_func_proto bpf_map_pop_elem_proto;
2189extern const struct bpf_func_proto bpf_map_peek_elem_proto;
07343110 2190extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;
d0003ec0 2191
03e69b50 2192extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
c04167ce 2193extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
2d0e30c3 2194extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
04fd61ab 2195extern const struct bpf_func_proto bpf_tail_call_proto;
17ca8cbf 2196extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
71d19214 2197extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
ffeedafb
AS
2198extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
2199extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
2200extern const struct bpf_func_proto bpf_get_current_comm_proto;
d5a3b1f6 2201extern const struct bpf_func_proto bpf_get_stackid_proto;
c195651e 2202extern const struct bpf_func_proto bpf_get_stack_proto;
fa28dcb8 2203extern const struct bpf_func_proto bpf_get_task_stack_proto;
7b04d6d6
SL
2204extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
2205extern const struct bpf_func_proto bpf_get_stack_proto_pe;
174a79ff 2206extern const struct bpf_func_proto bpf_sock_map_update_proto;
81110384 2207extern const struct bpf_func_proto bpf_sock_hash_update_proto;
bf6fa2c8 2208extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
0f09abd1 2209extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
604326b4
DB
2210extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
2211extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
2212extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
2213extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
d83525ca
AS
2214extern const struct bpf_func_proto bpf_spin_lock_proto;
2215extern const struct bpf_func_proto bpf_spin_unlock_proto;
cd339431 2216extern const struct bpf_func_proto bpf_get_local_storage_proto;
d7a4cb9b
AI
2217extern const struct bpf_func_proto bpf_strtol_proto;
2218extern const struct bpf_func_proto bpf_strtoul_proto;
0d01da6a 2219extern const struct bpf_func_proto bpf_tcp_sock_proto;
5576b991 2220extern const struct bpf_func_proto bpf_jiffies64_proto;
b4490c5c 2221extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
0456ea17 2222extern const struct bpf_func_proto bpf_event_output_data_proto;
457f4436
AN
2223extern const struct bpf_func_proto bpf_ringbuf_output_proto;
2224extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
2225extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
2226extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
2227extern const struct bpf_func_proto bpf_ringbuf_query_proto;
af7ec138 2228extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
478cfbdf
YS
2229extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
2230extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
2231extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
0d4fad3e 2232extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
9eeb3aa3 2233extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
07be4c4a 2234extern const struct bpf_func_proto bpf_copy_from_user_proto;
c4d0bfb4 2235extern const struct bpf_func_proto bpf_snprintf_btf_proto;
7b15523a 2236extern const struct bpf_func_proto bpf_snprintf_proto;
eaa6bcb7 2237extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
63d9b80d 2238extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
d0551261 2239extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
b60da495 2240extern const struct bpf_func_proto bpf_sock_from_file_proto;
c5dbb89f 2241extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
a10787e6
SL
2242extern const struct bpf_func_proto bpf_task_storage_get_proto;
2243extern const struct bpf_func_proto bpf_task_storage_delete_proto;
69c087ba 2244extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
3d78417b 2245extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
3cee6fb8
MKL
2246extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
2247extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
d6aef08a 2248extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
7c7e3d31 2249extern const struct bpf_func_proto bpf_find_vma_proto;
e6f2dd0f 2250extern const struct bpf_func_proto bpf_loop_proto;
c5fb1993 2251extern const struct bpf_func_proto bpf_strncmp_proto;
376040e4 2252extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
5b74c690 2253extern const struct bpf_func_proto bpf_kptr_xchg_proto;
cd339431 2254
958a3f2d
JO
2255const struct bpf_func_proto *tracing_prog_func_proto(
2256 enum bpf_func_id func_id, const struct bpf_prog *prog);
2257
3ad00405
DB
2258/* Shared helpers between cBPF and eBPF. */
2259void bpf_user_rnd_init_once(void);
2260u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
6890896b 2261u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
3ad00405 2262
c64b7983 2263#if defined(CONFIG_NET)
46f8bc92
MKL
2264bool bpf_sock_common_is_valid_access(int off, int size,
2265 enum bpf_access_type type,
2266 struct bpf_insn_access_aux *info);
c64b7983
JS
2267bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2268 struct bpf_insn_access_aux *info);
2269u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
2270 const struct bpf_insn *si,
2271 struct bpf_insn *insn_buf,
2272 struct bpf_prog *prog,
2273 u32 *target_size);
2274#else
46f8bc92
MKL
2275static inline bool bpf_sock_common_is_valid_access(int off, int size,
2276 enum bpf_access_type type,
2277 struct bpf_insn_access_aux *info)
2278{
2279 return false;
2280}
c64b7983
JS
2281static inline bool bpf_sock_is_valid_access(int off, int size,
2282 enum bpf_access_type type,
2283 struct bpf_insn_access_aux *info)
2284{
2285 return false;
2286}
2287static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
2288 const struct bpf_insn *si,
2289 struct bpf_insn *insn_buf,
2290 struct bpf_prog *prog,
2291 u32 *target_size)
2292{
2293 return 0;
2294}
2295#endif
2296
655a51e5 2297#ifdef CONFIG_INET
91cc1a99
AS
2298struct sk_reuseport_kern {
2299 struct sk_buff *skb;
2300 struct sock *sk;
2301 struct sock *selected_sk;
d5e4ddae 2302 struct sock *migrating_sk;
91cc1a99
AS
2303 void *data_end;
2304 u32 hash;
2305 u32 reuseport_id;
2306 bool bind_inany;
2307};
655a51e5
MKL
2308bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2309 struct bpf_insn_access_aux *info);
2310
2311u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
2312 const struct bpf_insn *si,
2313 struct bpf_insn *insn_buf,
2314 struct bpf_prog *prog,
2315 u32 *target_size);
7f94208c
Y
2316
2317bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2318 struct bpf_insn_access_aux *info);
2319
2320u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
2321 const struct bpf_insn *si,
2322 struct bpf_insn *insn_buf,
2323 struct bpf_prog *prog,
2324 u32 *target_size);
655a51e5
MKL
2325#else
2326static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
2327 enum bpf_access_type type,
2328 struct bpf_insn_access_aux *info)
2329{
2330 return false;
2331}
2332
2333static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
2334 const struct bpf_insn *si,
2335 struct bpf_insn *insn_buf,
2336 struct bpf_prog *prog,
2337 u32 *target_size)
2338{
2339 return 0;
2340}
7f94208c
Y
2341static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
2342 enum bpf_access_type type,
2343 struct bpf_insn_access_aux *info)
2344{
2345 return false;
2346}
2347
2348static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
2349 const struct bpf_insn *si,
2350 struct bpf_insn *insn_buf,
2351 struct bpf_prog *prog,
2352 u32 *target_size)
2353{
2354 return 0;
2355}
655a51e5
MKL
2356#endif /* CONFIG_INET */
2357
5964b200 2358enum bpf_text_poke_type {
b553a6ec
DB
2359 BPF_MOD_CALL,
2360 BPF_MOD_JUMP,
5964b200 2361};
4b3da77b 2362
5964b200
AS
2363int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2364 void *addr1, void *addr2);
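
/* Hedged sketch: the trampoline code live-patches an fentry call site with
 *
 *	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
 *
 * where old_addr is NULL on first attach and new_addr is NULL on detach;
 * nop<->jump transitions use BPF_MOD_JUMP instead.
 */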
2365
ebc1415d
SL
2366void *bpf_arch_text_copy(void *dst, void *src, size_t len);
2367
eae2e83e 2368struct btf_id_set;
2af30f11 2369bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
eae2e83e 2370
335ff499
DM
2371#define MAX_BPRINTF_VARARGS 12
2372
48cac3f4
FR
2373int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
2374 u32 **bin_buf, u32 num_args);
2375void bpf_bprintf_cleanup(void);
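
/* Hedged usage sketch (mirrors the bpf_snprintf helper): prepare decodes
 * the program-supplied args into a per-CPU binary buffer that must be
 * released again on every path:
 *
 *	err = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args, num_args);
 *	if (err < 0)
 *		return err;
 *	err = bstr_printf(str, str_size, fmt, bin_args);
 *	bpf_bprintf_cleanup();
 */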
d9c9e4db 2376
99c55f7d 2377#endif /* _LINUX_BPF_H */