Commit | Line | Data |
---|---|---|
25763b3c | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
99c55f7d | 2 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
99c55f7d AS |
3 | */ |
4 | #ifndef _LINUX_BPF_H | |
5 | #define _LINUX_BPF_H 1 | |
6 | ||
7 | #include <uapi/linux/bpf.h> | |
d687f621 | 8 | #include <uapi/linux/filter.h> |
74451e66 | 9 | |
99c55f7d | 10 | #include <linux/workqueue.h> |
db20fd2b | 11 | #include <linux/file.h> |
b121d1e7 | 12 | #include <linux/percpu.h> |
002245cc | 13 | #include <linux/err.h> |
74451e66 | 14 | #include <linux/rbtree_latch.h> |
d6e1e46f | 15 | #include <linux/numa.h> |
fc970227 | 16 | #include <linux/mm_types.h> |
ab3f0063 | 17 | #include <linux/wait.h> |
fec56f58 AS |
18 | #include <linux/refcount.h> |
19 | #include <linux/mutex.h> | |
85d33df3 | 20 | #include <linux/module.h> |
bfea9a85 | 21 | #include <linux/kallsyms.h> |
2c78ee89 | 22 | #include <linux/capability.h> |
48edc1f7 RG |
23 | #include <linux/sched/mm.h> |
24 | #include <linux/slab.h> | |
e21aa341 | 25 | #include <linux/percpu-refcount.h> |
d687f621 | 26 | #include <linux/stddef.h> |
af2ac3e1 | 27 | #include <linux/bpfptr.h> |
14a324f6 | 28 | #include <linux/btf.h> |
8c7dcb84 | 29 | #include <linux/rcupdate_trace.h> |
c86df29d | 30 | #include <linux/static_call.h> |
ee53cbfb | 31 | #include <linux/memcontrol.h> |
4f9087f1 | 32 | #include <linux/cfi.h> |
97eb35f3 | 33 | #include <asm/rqspinlock.h> |
99c55f7d | 34 | |
cae1927c | 35 | struct bpf_verifier_env; |
9e15db66 | 36 | struct bpf_verifier_log; |
3b1efb19 | 37 | struct perf_event; |
174a79ff | 38 | struct bpf_prog; |
da765a2f | 39 | struct bpf_prog_aux; |
99c55f7d | 40 | struct bpf_map; |
31746031 | 41 | struct bpf_arena; |
4f738adb | 42 | struct sock; |
a26ca7c9 | 43 | struct seq_file; |
1b2b234b | 44 | struct btf; |
e8d2bec0 | 45 | struct btf_type; |
3dec541b | 46 | struct exception_table_entry; |
ae24345d | 47 | struct seq_operations; |
f9c79272 | 48 | struct bpf_iter_aux_info; |
f836a56e KS |
49 | struct bpf_local_storage; |
50 | struct bpf_local_storage_map; | |
36e68442 | 51 | struct kobject; |
48edc1f7 | 52 | struct mem_cgroup; |
861de02e | 53 | struct module; |
69c087ba | 54 | struct bpf_func_state; |
00963a2e | 55 | struct ftrace_ops; |
d4ccaf58 | 56 | struct cgroup; |
35f96de0 AN |
57 | struct bpf_token; |
58 | struct user_namespace; | |
59 | struct super_block; | |
60 | struct inode; | |
99c55f7d | 61 | |
1b9ed84e QM |
62 | extern struct idr btf_idr; |
63 | extern spinlock_t btf_idr_lock; | |
36e68442 | 64 | extern struct kobject *btf_kobj; |
41a5db8d | 65 | extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma; |
1fda5bb6 | 66 | extern bool bpf_global_ma_set; |
1b9ed84e | 67 | |
102acbac | 68 | typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); |
f9c79272 YS |
69 | typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, |
70 | struct bpf_iter_aux_info *aux); | |
14fc6bd6 | 71 | typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); |
af3f4134 SF |
72 | typedef unsigned int (*bpf_func_t)(const void *, |
73 | const struct bpf_insn *); | |
14fc6bd6 YS |
74 | struct bpf_iter_seq_info { |
75 | const struct seq_operations *seq_ops; | |
76 | bpf_iter_init_seq_priv_t init_seq_private; | |
77 | bpf_iter_fini_seq_priv_t fini_seq_private; | |
78 | u32 seq_priv_size; | |
79 | }; | |
80 | ||
5d903493 | 81 | /* map is generic key/value storage optionally accessible by eBPF programs */ |
99c55f7d AS |
82 | struct bpf_map_ops { |
83 | /* funcs callable from userspace (via syscall) */ | |
1110f3a9 | 84 | int (*map_alloc_check)(union bpf_attr *attr); |
99c55f7d | 85 | struct bpf_map *(*map_alloc)(union bpf_attr *attr); |
61d1b6a4 DB |
86 | void (*map_release)(struct bpf_map *map, struct file *map_file); |
87 | void (*map_free)(struct bpf_map *map); | |
db20fd2b | 88 | int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); |
ba6b8de4 | 89 | void (*map_release_uref)(struct bpf_map *map); |
c6110222 | 90 | void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); |
cb4d03ab BV |
91 | int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, |
92 | union bpf_attr __user *uattr); | |
3e87f192 DS |
93 | int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key, |
94 | void *value, u64 flags); | |
05799638 YS |
95 | int (*map_lookup_and_delete_batch)(struct bpf_map *map, |
96 | const union bpf_attr *attr, | |
97 | union bpf_attr __user *uattr); | |
3af43ba4 HT |
98 | int (*map_update_batch)(struct bpf_map *map, struct file *map_file, |
99 | const union bpf_attr *attr, | |
aa2e93b8 BV |
100 | union bpf_attr __user *uattr); |
101 | int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, | |
102 | union bpf_attr __user *uattr); | |
db20fd2b AS |
103 | |
104 | /* funcs callable from userspace and from eBPF programs */ | |
105 | void *(*map_lookup_elem)(struct bpf_map *map, void *key); | |
d7ba4cc9 JK |
106 | long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); |
107 | long (*map_delete_elem)(struct bpf_map *map, void *key); | |
108 | long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); | |
109 | long (*map_pop_elem)(struct bpf_map *map, void *value); | |
110 | long (*map_peek_elem)(struct bpf_map *map, void *value); | |
07343110 | 111 | void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu); |
2a36f0b9 WN |
112 | |
113 | /* funcs called by prog_array and perf_event_array map */ | |
d056a788 DB |
114 | void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, |
115 | int fd); | |
20c20bd1 HT |
116 | /* If need_defer is true, the implementation should guarantee that |
117 | * the to-be-put element is still alive before the bpf program, which | |
118 | * may manipulate it, exists. | |
119 | */ | |
120 | void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer); | |
4a8f87e6 | 121 | int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); |
14dc6f04 | 122 | u32 (*map_fd_sys_lookup_elem)(void *ptr); |
a26ca7c9 MKL |
123 | void (*map_seq_show_elem)(struct bpf_map *map, void *key, |
124 | struct seq_file *m); | |
e8d2bec0 | 125 | int (*map_check_btf)(const struct bpf_map *map, |
1b2b234b | 126 | const struct btf *btf, |
e8d2bec0 DB |
127 | const struct btf_type *key_type, |
128 | const struct btf_type *value_type); | |
d8eca5bb | 129 | |
da765a2f DB |
130 | /* Prog poke tracking helpers. */ |
131 | int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux); | |
132 | void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux); | |
133 | void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, | |
134 | struct bpf_prog *new); | |
135 | ||
d8eca5bb DB |
136 | /* Direct value access helpers. */ |
137 | int (*map_direct_value_addr)(const struct bpf_map *map, | |
138 | u64 *imm, u32 off); | |
139 | int (*map_direct_value_meta)(const struct bpf_map *map, | |
140 | u64 imm, u32 *off); | |
fc970227 | 141 | int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); |
457f4436 AN |
142 | __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, |
143 | struct poll_table_struct *pts); | |
cf2c2e4a AS |
144 | unsigned long (*map_get_unmapped_area)(struct file *filep, unsigned long addr, |
145 | unsigned long len, unsigned long pgoff, | |
146 | unsigned long flags); | |
41c48f3a | 147 | |
f836a56e KS |
148 | /* Functions called by bpf_local_storage maps */ |
149 | int (*map_local_storage_charge)(struct bpf_local_storage_map *smap, | |
150 | void *owner, u32 size); | |
151 | void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap, | |
152 | void *owner, u32 size); | |
153 | struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner); | |
f4d05259 | 154 | |
e6a4750f | 155 | /* Misc helpers.*/ |
d7ba4cc9 | 156 | long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags); |
e6a4750f | 157 | |
f4d05259 MKL |
158 | /* map_meta_equal must be implemented for maps that can be |
159 | * used as an inner map. It is a runtime check to ensure | |
160 | * an inner map can be inserted to an outer map. | |
161 | * | |
162 | * Some properties of the inner map has been used during the | |
163 | * verification time. When inserting an inner map at the runtime, | |
164 | * map_meta_equal has to ensure the inserting map has the same | |
165 | * properties that the verifier has used earlier. | |
166 | */ | |
167 | bool (*map_meta_equal)(const struct bpf_map *meta0, | |
168 | const struct bpf_map *meta1); | |
169 | ||
69c087ba YS |
170 | |
171 | int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env, | |
172 | struct bpf_func_state *caller, | |
173 | struct bpf_func_state *callee); | |
d7ba4cc9 | 174 | long (*map_for_each_callback)(struct bpf_map *map, |
102acbac | 175 | bpf_callback_t callback_fn, |
69c087ba YS |
176 | void *callback_ctx, u64 flags); |
177 | ||
90a5527d YS |
178 | u64 (*map_mem_usage)(const struct bpf_map *map); |
179 | ||
c317ab71 | 180 | /* BTF id of struct allocated by map_alloc */ |
41c48f3a | 181 | int *map_btf_id; |
a5cbe05a YS |
182 | |
183 | /* bpf_iter info used to open a seq_file */ | |
184 | const struct bpf_iter_seq_info *iter_seq_info; | |
99c55f7d AS |
185 | }; |
186 | ||
61df10c7 | 187 | enum { |
95c07d58 | 188 | /* Support at most 11 fields in a BTF type */ |
d56b63cf | 189 | BTF_FIELDS_MAX = 11, |
61df10c7 KKD |
190 | }; |
191 | ||
aa3496ac | 192 | enum btf_field_type { |
db559117 KKD |
193 | BPF_SPIN_LOCK = (1 << 0), |
194 | BPF_TIMER = (1 << 1), | |
aa3496ac KKD |
195 | BPF_KPTR_UNREF = (1 << 2), |
196 | BPF_KPTR_REF = (1 << 3), | |
55db92f4 YS |
197 | BPF_KPTR_PERCPU = (1 << 4), |
198 | BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU, | |
199 | BPF_LIST_HEAD = (1 << 5), | |
200 | BPF_LIST_NODE = (1 << 6), | |
201 | BPF_RB_ROOT = (1 << 7), | |
202 | BPF_RB_NODE = (1 << 8), | |
790ce3cf DM |
203 | BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE, |
204 | BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD, | |
55db92f4 | 205 | BPF_REFCOUNT = (1 << 9), |
d56b63cf | 206 | BPF_WORKQUEUE = (1 << 10), |
1cb80d9e | 207 | BPF_UPTR = (1 << 11), |
0de20461 | 208 | BPF_RES_SPIN_LOCK = (1 << 12), |
c0a5a21c KKD |
209 | }; |
210 | ||
c8e18754 | 211 | typedef void (*btf_dtor_kfunc_t)(void *); |
c8e18754 | 212 | |
aa3496ac KKD |
213 | struct btf_field_kptr { |
214 | struct btf *btf; | |
215 | struct module *module; | |
9e36a204 DM |
216 | /* dtor used if btf_is_kernel(btf), otherwise the type is |
217 | * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used | |
218 | */ | |
219 | btf_dtor_kfunc_t dtor; | |
aa3496ac KKD |
220 | u32 btf_id; |
221 | }; | |
222 | ||
30465003 | 223 | struct btf_field_graph_root { |
f0c5941f KKD |
224 | struct btf *btf; |
225 | u32 value_btf_id; | |
226 | u32 node_offset; | |
865ce09a | 227 | struct btf_record *value_rec; |
f0c5941f KKD |
228 | }; |
229 | ||
aa3496ac | 230 | struct btf_field { |
61df10c7 | 231 | u32 offset; |
cd2a8079 | 232 | u32 size; |
aa3496ac KKD |
233 | enum btf_field_type type; |
234 | union { | |
235 | struct btf_field_kptr kptr; | |
30465003 | 236 | struct btf_field_graph_root graph_root; |
aa3496ac | 237 | }; |
61df10c7 KKD |
238 | }; |
239 | ||
aa3496ac KKD |
240 | struct btf_record { |
241 | u32 cnt; | |
242 | u32 field_mask; | |
db559117 | 243 | int spin_lock_off; |
0de20461 | 244 | int res_spin_lock_off; |
db559117 | 245 | int timer_off; |
d56b63cf | 246 | int wq_off; |
d54730b5 | 247 | int refcount_off; |
aa3496ac | 248 | struct btf_field fields[]; |
61df10c7 KKD |
249 | }; |
250 | ||
0a1f7bfe DM |
251 | /* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */ |
252 | struct bpf_rb_node_kern { | |
253 | struct rb_node rb_node; | |
c3c510ce | 254 | void *owner; |
0a1f7bfe DM |
255 | } __attribute__((aligned(8))); |
256 | ||
257 | /* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */ | |
258 | struct bpf_list_node_kern { | |
259 | struct list_head list_head; | |
c3c510ce | 260 | void *owner; |
0a1f7bfe DM |
261 | } __attribute__((aligned(8))); |
262 | ||
99c55f7d | 263 | struct bpf_map { |
a3c70a3c | 264 | const struct bpf_map_ops *ops; |
be95a845 DB |
265 | struct bpf_map *inner_map_meta; |
266 | #ifdef CONFIG_SECURITY | |
267 | void *security; | |
268 | #endif | |
99c55f7d AS |
269 | enum bpf_map_type map_type; |
270 | u32 key_size; | |
271 | u32 value_size; | |
272 | u32 max_entries; | |
9330986c | 273 | u64 map_extra; /* any per-map-type extra fields */ |
6c905981 | 274 | u32 map_flags; |
f3f1c054 | 275 | u32 id; |
db559117 | 276 | struct btf_record *record; |
96eabe7a | 277 | int numa_node; |
9b2cf328 MKL |
278 | u32 btf_key_type_id; |
279 | u32 btf_value_type_id; | |
8845b468 | 280 | u32 btf_vmlinux_value_type_id; |
a26ca7c9 | 281 | struct btf *btf; |
3a3b7fec | 282 | #ifdef CONFIG_MEMCG |
4201d9ab | 283 | struct obj_cgroup *objcg; |
48edc1f7 | 284 | #endif |
fc970227 | 285 | char name[BPF_OBJ_NAME_LEN]; |
a3c70a3c AS |
286 | struct mutex freeze_mutex; |
287 | atomic64_t refcnt; | |
1e0bd5a0 | 288 | atomic64_t usercnt; |
87667336 HT |
289 | /* rcu is used before freeing and work is only used during freeing */ |
290 | union { | |
291 | struct work_struct work; | |
292 | struct rcu_head rcu; | |
293 | }; | |
353050be | 294 | atomic64_t writecnt; |
f45d5b6c THJ |
295 | /* 'Ownership' of program-containing map is claimed by the first program |
296 | * that is going to use this map or by the first program which FD is | |
297 | * stored in the map to make sure that all callers and callees have the | |
298 | * same prog type, JITed flag and xdp_has_frags flag. | |
299 | */ | |
300 | struct { | |
28ead3ea | 301 | const struct btf_type *attach_func_proto; |
f45d5b6c THJ |
302 | spinlock_t lock; |
303 | enum bpf_prog_type type; | |
304 | bool jited; | |
305 | bool xdp_has_frags; | |
306 | } owner; | |
4d7d7f69 KKD |
307 | bool bypass_spec_v1; |
308 | bool frozen; /* write-once; write-protected by freeze_mutex */ | |
87667336 | 309 | bool free_after_mult_rcu_gp; |
af66bfd3 HT |
310 | bool free_after_rcu_gp; |
311 | atomic64_t sleepable_refcnt; | |
25954730 | 312 | s64 __percpu *elem_count; |
99c55f7d AS |
313 | }; |
314 | ||
db559117 KKD |
315 | static inline const char *btf_field_type_name(enum btf_field_type type) |
316 | { | |
317 | switch (type) { | |
318 | case BPF_SPIN_LOCK: | |
319 | return "bpf_spin_lock"; | |
0de20461 KKD |
320 | case BPF_RES_SPIN_LOCK: |
321 | return "bpf_res_spin_lock"; | |
db559117 KKD |
322 | case BPF_TIMER: |
323 | return "bpf_timer"; | |
d56b63cf BT |
324 | case BPF_WORKQUEUE: |
325 | return "bpf_wq"; | |
db559117 KKD |
326 | case BPF_KPTR_UNREF: |
327 | case BPF_KPTR_REF: | |
328 | return "kptr"; | |
55db92f4 YS |
329 | case BPF_KPTR_PERCPU: |
330 | return "percpu_kptr"; | |
1cb80d9e KFL |
331 | case BPF_UPTR: |
332 | return "uptr"; | |
f0c5941f KKD |
333 | case BPF_LIST_HEAD: |
334 | return "bpf_list_head"; | |
8ffa5cc1 KKD |
335 | case BPF_LIST_NODE: |
336 | return "bpf_list_node"; | |
9c395c1b DM |
337 | case BPF_RB_ROOT: |
338 | return "bpf_rb_root"; | |
339 | case BPF_RB_NODE: | |
340 | return "bpf_rb_node"; | |
d54730b5 DM |
341 | case BPF_REFCOUNT: |
342 | return "bpf_refcount"; | |
db559117 KKD |
343 | default: |
344 | WARN_ON_ONCE(1); | |
345 | return "unknown"; | |
346 | } | |
347 | } | |
348 | ||
1cb0f56d PC |
349 | #if IS_ENABLED(CONFIG_DEBUG_KERNEL) |
350 | #define BPF_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) | |
351 | #else | |
352 | #define BPF_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) | |
353 | #endif | |
354 | ||
aa3496ac KKD |
355 | static inline u32 btf_field_type_size(enum btf_field_type type) |
356 | { | |
357 | switch (type) { | |
db559117 KKD |
358 | case BPF_SPIN_LOCK: |
359 | return sizeof(struct bpf_spin_lock); | |
0de20461 KKD |
360 | case BPF_RES_SPIN_LOCK: |
361 | return sizeof(struct bpf_res_spin_lock); | |
db559117 KKD |
362 | case BPF_TIMER: |
363 | return sizeof(struct bpf_timer); | |
d56b63cf BT |
364 | case BPF_WORKQUEUE: |
365 | return sizeof(struct bpf_wq); | |
aa3496ac KKD |
366 | case BPF_KPTR_UNREF: |
367 | case BPF_KPTR_REF: | |
55db92f4 | 368 | case BPF_KPTR_PERCPU: |
1cb80d9e | 369 | case BPF_UPTR: |
aa3496ac | 370 | return sizeof(u64); |
f0c5941f KKD |
371 | case BPF_LIST_HEAD: |
372 | return sizeof(struct bpf_list_head); | |
8ffa5cc1 KKD |
373 | case BPF_LIST_NODE: |
374 | return sizeof(struct bpf_list_node); | |
9c395c1b DM |
375 | case BPF_RB_ROOT: |
376 | return sizeof(struct bpf_rb_root); | |
377 | case BPF_RB_NODE: | |
378 | return sizeof(struct bpf_rb_node); | |
d54730b5 DM |
379 | case BPF_REFCOUNT: |
380 | return sizeof(struct bpf_refcount); | |
aa3496ac KKD |
381 | default: |
382 | WARN_ON_ONCE(1); | |
383 | return 0; | |
384 | } | |
385 | } | |
386 | ||
387 | static inline u32 btf_field_type_align(enum btf_field_type type) | |
388 | { | |
389 | switch (type) { | |
db559117 KKD |
390 | case BPF_SPIN_LOCK: |
391 | return __alignof__(struct bpf_spin_lock); | |
0de20461 KKD |
392 | case BPF_RES_SPIN_LOCK: |
393 | return __alignof__(struct bpf_res_spin_lock); | |
db559117 KKD |
394 | case BPF_TIMER: |
395 | return __alignof__(struct bpf_timer); | |
d56b63cf BT |
396 | case BPF_WORKQUEUE: |
397 | return __alignof__(struct bpf_wq); | |
aa3496ac KKD |
398 | case BPF_KPTR_UNREF: |
399 | case BPF_KPTR_REF: | |
55db92f4 | 400 | case BPF_KPTR_PERCPU: |
1cb80d9e | 401 | case BPF_UPTR: |
aa3496ac | 402 | return __alignof__(u64); |
f0c5941f KKD |
403 | case BPF_LIST_HEAD: |
404 | return __alignof__(struct bpf_list_head); | |
8ffa5cc1 KKD |
405 | case BPF_LIST_NODE: |
406 | return __alignof__(struct bpf_list_node); | |
9c395c1b DM |
407 | case BPF_RB_ROOT: |
408 | return __alignof__(struct bpf_rb_root); | |
409 | case BPF_RB_NODE: | |
410 | return __alignof__(struct bpf_rb_node); | |
d54730b5 DM |
411 | case BPF_REFCOUNT: |
412 | return __alignof__(struct bpf_refcount); | |
aa3496ac KKD |
413 | default: |
414 | WARN_ON_ONCE(1); | |
415 | return 0; | |
416 | } | |
417 | } | |
418 | ||
3e81740a DM |
419 | static inline void bpf_obj_init_field(const struct btf_field *field, void *addr) |
420 | { | |
421 | memset(addr, 0, field->size); | |
422 | ||
423 | switch (field->type) { | |
424 | case BPF_REFCOUNT: | |
425 | refcount_set((refcount_t *)addr, 1); | |
426 | break; | |
427 | case BPF_RB_NODE: | |
428 | RB_CLEAR_NODE((struct rb_node *)addr); | |
429 | break; | |
430 | case BPF_LIST_HEAD: | |
431 | case BPF_LIST_NODE: | |
432 | INIT_LIST_HEAD((struct list_head *)addr); | |
433 | break; | |
434 | case BPF_RB_ROOT: | |
435 | /* RB_ROOT_CACHED 0-inits, no need to do anything after memset */ | |
436 | case BPF_SPIN_LOCK: | |
0de20461 | 437 | case BPF_RES_SPIN_LOCK: |
3e81740a | 438 | case BPF_TIMER: |
d56b63cf | 439 | case BPF_WORKQUEUE: |
3e81740a DM |
440 | case BPF_KPTR_UNREF: |
441 | case BPF_KPTR_REF: | |
55db92f4 | 442 | case BPF_KPTR_PERCPU: |
ba512b00 | 443 | case BPF_UPTR: |
3e81740a DM |
444 | break; |
445 | default: | |
446 | WARN_ON_ONCE(1); | |
447 | return; | |
448 | } | |
449 | } | |
450 | ||
aa3496ac KKD |
451 | static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type) |
452 | { | |
453 | if (IS_ERR_OR_NULL(rec)) | |
454 | return false; | |
455 | return rec->field_mask & type; | |
456 | } | |
457 | ||
cd2a8079 | 458 | static inline void bpf_obj_init(const struct btf_record *rec, void *obj) |
68134668 | 459 | { |
958cf2e2 | 460 | int i; |
4d7d7f69 | 461 | |
cd2a8079 | 462 | if (IS_ERR_OR_NULL(rec)) |
958cf2e2 | 463 | return; |
cd2a8079 | 464 | for (i = 0; i < rec->cnt; i++) |
3e81740a | 465 | bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset); |
958cf2e2 KKD |
466 | } |
467 | ||
997849c4 HT |
468 | /* 'dst' must be a temporary buffer and should not point to memory that is being |
469 | * used in parallel by a bpf program or bpf syscall, otherwise the access from | |
470 | * the bpf program or bpf syscall may be corrupted by the reinitialization, | |
471 | * leading to weird problems. Even 'dst' is newly-allocated from bpf memory | |
472 | * allocator, it is still possible for 'dst' to be used in parallel by a bpf | |
473 | * program or bpf syscall. | |
474 | */ | |
958cf2e2 KKD |
475 | static inline void check_and_init_map_value(struct bpf_map *map, void *dst) |
476 | { | |
cd2a8079 | 477 | bpf_obj_init(map->record, dst); |
68134668 AS |
478 | } |
479 | ||
44832519 KKD |
480 | /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and |
481 | * forced to use 'long' read/writes to try to atomically copy long counters. | |
482 | * Best-effort only. No barriers here, since it _will_ race with concurrent | |
483 | * updates from BPF programs. Called from bpf syscall and mostly used with | |
484 | * size 8 or 16 bytes, so ask compiler to inline it. | |
485 | */ | |
486 | static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) | |
487 | { | |
488 | const long *lsrc = src; | |
489 | long *ldst = dst; | |
490 | ||
491 | size /= sizeof(long); | |
492 | while (size--) | |
6a86b5b5 | 493 | data_race(*ldst++ = *lsrc++); |
44832519 KKD |
494 | } |
495 | ||
496 | /* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */ | |
cd2a8079 | 497 | static inline void bpf_obj_memcpy(struct btf_record *rec, |
f71b2f64 KKD |
498 | void *dst, void *src, u32 size, |
499 | bool long_memcpy) | |
d83525ca | 500 | { |
4d7d7f69 KKD |
501 | u32 curr_off = 0; |
502 | int i; | |
68134668 | 503 | |
cd2a8079 | 504 | if (IS_ERR_OR_NULL(rec)) { |
44832519 | 505 | if (long_memcpy) |
f71b2f64 | 506 | bpf_long_memcpy(dst, src, round_up(size, 8)); |
44832519 | 507 | else |
f71b2f64 | 508 | memcpy(dst, src, size); |
4d7d7f69 | 509 | return; |
68134668 | 510 | } |
d83525ca | 511 | |
cd2a8079 DM |
512 | for (i = 0; i < rec->cnt; i++) { |
513 | u32 next_off = rec->fields[i].offset; | |
aa3496ac | 514 | u32 sz = next_off - curr_off; |
4d7d7f69 | 515 | |
aa3496ac | 516 | memcpy(dst + curr_off, src + curr_off, sz); |
cd2a8079 | 517 | curr_off += rec->fields[i].size + sz; |
d83525ca | 518 | } |
f71b2f64 | 519 | memcpy(dst + curr_off, src + curr_off, size - curr_off); |
d83525ca | 520 | } |
44832519 KKD |
521 | |
522 | static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) | |
523 | { | |
cd2a8079 | 524 | bpf_obj_memcpy(map->record, dst, src, map->value_size, false); |
44832519 KKD |
525 | } |
526 | ||
527 | static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src) | |
528 | { | |
cd2a8079 | 529 | bpf_obj_memcpy(map->record, dst, src, map->value_size, true); |
44832519 KKD |
530 | } |
531 | ||
ba512b00 MKL |
532 | static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src) |
533 | { | |
534 | unsigned long *src_uptr, *dst_uptr; | |
535 | const struct btf_field *field; | |
536 | int i; | |
537 | ||
538 | if (!btf_record_has_field(rec, BPF_UPTR)) | |
539 | return; | |
540 | ||
541 | for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) { | |
542 | if (field->type != BPF_UPTR) | |
543 | continue; | |
544 | ||
545 | src_uptr = src + field->offset; | |
546 | dst_uptr = dst + field->offset; | |
547 | swap(*src_uptr, *dst_uptr); | |
548 | } | |
549 | } | |
550 | ||
cd2a8079 | 551 | static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size) |
cc487558 KKD |
552 | { |
553 | u32 curr_off = 0; | |
554 | int i; | |
555 | ||
cd2a8079 | 556 | if (IS_ERR_OR_NULL(rec)) { |
f71b2f64 | 557 | memset(dst, 0, size); |
cc487558 KKD |
558 | return; |
559 | } | |
560 | ||
cd2a8079 DM |
561 | for (i = 0; i < rec->cnt; i++) { |
562 | u32 next_off = rec->fields[i].offset; | |
aa3496ac | 563 | u32 sz = next_off - curr_off; |
cc487558 | 564 | |
aa3496ac | 565 | memset(dst + curr_off, 0, sz); |
cd2a8079 | 566 | curr_off += rec->fields[i].size + sz; |
cc487558 | 567 | } |
f71b2f64 KKD |
568 | memset(dst + curr_off, 0, size - curr_off); |
569 | } | |
570 | ||
571 | static inline void zero_map_value(struct bpf_map *map, void *dst) | |
572 | { | |
cd2a8079 | 573 | bpf_obj_memzero(map->record, dst, map->value_size); |
cc487558 KKD |
574 | } |
575 | ||
96049f3a AS |
576 | void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, |
577 | bool lock_src); | |
b00628b1 | 578 | void bpf_timer_cancel_and_free(void *timer); |
246331e3 | 579 | void bpf_wq_cancel_and_free(void *timer); |
f0c5941f KKD |
580 | void bpf_list_head_free(const struct btf_field *field, void *list_head, |
581 | struct bpf_spin_lock *spin_lock); | |
9c395c1b DM |
582 | void bpf_rb_root_free(const struct btf_field *field, void *rb_root, |
583 | struct bpf_spin_lock *spin_lock); | |
31746031 AS |
584 | u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena); |
585 | u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena); | |
8e7ae251 | 586 | int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); |
d83525ca | 587 | |
602144c2 | 588 | struct bpf_offload_dev; |
a3884572 JK |
589 | struct bpf_offloaded_map; |
590 | ||
591 | struct bpf_map_dev_ops { | |
592 | int (*map_get_next_key)(struct bpf_offloaded_map *map, | |
593 | void *key, void *next_key); | |
594 | int (*map_lookup_elem)(struct bpf_offloaded_map *map, | |
595 | void *key, void *value); | |
596 | int (*map_update_elem)(struct bpf_offloaded_map *map, | |
597 | void *key, void *value, u64 flags); | |
598 | int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); | |
599 | }; | |
600 | ||
601 | struct bpf_offloaded_map { | |
602 | struct bpf_map map; | |
603 | struct net_device *netdev; | |
604 | const struct bpf_map_dev_ops *dev_ops; | |
605 | void *dev_priv; | |
606 | struct list_head offloads; | |
607 | }; | |
608 | ||
609 | static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) | |
610 | { | |
611 | return container_of(map, struct bpf_offloaded_map, map); | |
612 | } | |
613 | ||
0cd3cbed JK |
614 | static inline bool bpf_map_offload_neutral(const struct bpf_map *map) |
615 | { | |
616 | return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; | |
617 | } | |
618 | ||
a26ca7c9 MKL |
619 | static inline bool bpf_map_support_seq_show(const struct bpf_map *map) |
620 | { | |
85d33df3 MKL |
621 | return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && |
622 | map->ops->map_seq_show_elem; | |
a26ca7c9 MKL |
623 | } |
624 | ||
e8d2bec0 | 625 | int map_check_no_btf(const struct bpf_map *map, |
1b2b234b | 626 | const struct btf *btf, |
e8d2bec0 DB |
627 | const struct btf_type *key_type, |
628 | const struct btf_type *value_type); | |
629 | ||
f4d05259 MKL |
630 | bool bpf_map_meta_equal(const struct bpf_map *meta0, |
631 | const struct bpf_map *meta1); | |
632 | ||
a3884572 JK |
633 | extern const struct bpf_map_ops bpf_map_offload_ops; |
634 | ||
d639b9d1 HL |
635 | /* bpf_type_flag contains a set of flags that are applicable to the values of |
636 | * arg_type, ret_type and reg_type. For example, a pointer value may be null, | |
637 | * or a memory is read-only. We classify types into two categories: base types | |
638 | * and extended types. Extended types are base types combined with a type flag. | |
639 | * | |
640 | * Currently there are no more than 32 base types in arg_type, ret_type and | |
641 | * reg_types. | |
642 | */ | |
643 | #define BPF_BASE_TYPE_BITS 8 | |
644 | ||
645 | enum bpf_type_flag { | |
646 | /* PTR may be NULL. */ | |
647 | PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS), | |
648 | ||
216e3cd2 HL |
649 | /* MEM is read-only. When applied on bpf_arg, it indicates the arg is |
650 | * compatible with both mutable and immutable memory. | |
651 | */ | |
20b2aff4 HL |
652 | MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS), |
653 | ||
894f2a8b KKD |
654 | /* MEM points to BPF ring buffer reservation. */ |
655 | MEM_RINGBUF = BIT(2 + BPF_BASE_TYPE_BITS), | |
a672b2e3 | 656 | |
c6f1bfe8 YS |
657 | /* MEM is in user address space. */ |
658 | MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS), | |
659 | ||
5844101a HL |
660 | /* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged |
661 | * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In | |
662 | * order to drop this tag, it must be passed into bpf_per_cpu_ptr() | |
663 | * or bpf_this_cpu_ptr(), which will return the pointer corresponding | |
664 | * to the specified cpu. | |
665 | */ | |
666 | MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS), | |
667 | ||
8f14852e KKD |
668 | /* Indicates that the argument will be released. */ |
669 | OBJ_RELEASE = BIT(5 + BPF_BASE_TYPE_BITS), | |
670 | ||
6efe152d KKD |
671 | /* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark |
672 | * unreferenced and referenced kptr loaded from map value using a load | |
673 | * instruction, so that they can only be dereferenced but not escape the | |
674 | * BPF program into the kernel (i.e. cannot be passed as arguments to | |
675 | * kfunc or bpf helpers). | |
676 | */ | |
677 | PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS), | |
678 | ||
6fad274f | 679 | /* MEM can be uninitialized. */ |
16d1e00c JK |
680 | MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS), |
681 | ||
97e03f52 JK |
682 | /* DYNPTR points to memory local to the bpf program. */ |
683 | DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS), | |
684 | ||
20571567 | 685 | /* DYNPTR points to a kernel-produced ringbuf record. */ |
bc34dee6 JK |
686 | DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS), |
687 | ||
508362ac MM |
688 | /* Size is known at compile time. */ |
689 | MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS), | |
690 | ||
282de143 KKD |
691 | /* MEM is of an allocated object of type in program BTF. This is used to |
692 | * tag PTR_TO_BTF_ID allocated using bpf_obj_new. | |
693 | */ | |
694 | MEM_ALLOC = BIT(11 + BPF_BASE_TYPE_BITS), | |
695 | ||
3f00c523 DV |
696 | /* PTR was passed from the kernel in a trusted context, and may be |
697 | * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions. | |
698 | * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above. | |
699 | * PTR_UNTRUSTED refers to a kptr that was read directly from a map | |
700 | * without invoking bpf_kptr_xchg(). What we really need to know is | |
701 | * whether a pointer is safe to pass to a kfunc or BPF helper function. | |
702 | * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF | |
703 | * helpers, they do not cover all possible instances of unsafe | |
704 | * pointers. For example, a pointer that was obtained from walking a | |
705 | * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the | |
706 | * fact that it may be NULL, invalid, etc. This is due to backwards | |
707 | * compatibility requirements, as this was the behavior that was first | |
708 | * introduced when kptrs were added. The behavior is now considered | |
709 | * deprecated, and PTR_UNTRUSTED will eventually be removed. | |
710 | * | |
711 | * PTR_TRUSTED, on the other hand, is a pointer that the kernel | |
712 | * guarantees to be valid and safe to pass to kfuncs and BPF helpers. | |
713 | * For example, pointers passed to tracepoint arguments are considered | |
714 | * PTR_TRUSTED, as are pointers that are passed to struct_ops | |
715 | * callbacks. As alluded to above, pointers that are obtained from | |
716 | * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a | |
717 | * struct task_struct *task is PTR_TRUSTED, then accessing | |
718 | * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored | |
719 | * in a BPF register. Similarly, pointers passed to certain programs | |
720 | * types such as kretprobes are not guaranteed to be valid, as they may | |
721 | * for example contain an object that was recently freed. | |
722 | */ | |
723 | PTR_TRUSTED = BIT(12 + BPF_BASE_TYPE_BITS), | |
724 | ||
9bb00b28 YS |
725 | /* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */ |
726 | MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS), | |
727 | ||
6a3cd331 | 728 | /* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning. |
0816b8c6 DM |
729 | * Currently only valid for linked-list and rbtree nodes. If the nodes |
730 | * have a bpf_refcount_field, they must be tagged MEM_RCU as well. | |
6a3cd331 DM |
731 | */ |
732 | NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS), | |
733 | ||
b5964b96 JK |
734 | /* DYNPTR points to sk_buff */ |
735 | DYNPTR_TYPE_SKB = BIT(15 + BPF_BASE_TYPE_BITS), | |
736 | ||
05421aec JK |
737 | /* DYNPTR points to xdp_buff */ |
738 | DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS), | |
739 | ||
32556ce9 DB |
740 | /* Memory must be aligned on some architectures, used in combination with |
741 | * MEM_FIXED_SIZE. | |
742 | */ | |
743 | MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS), | |
744 | ||
6fad274f DB |
745 | /* MEM is being written to, often combined with MEM_UNINIT. Non-presence |
746 | * of MEM_WRITE means that MEM is only being read. MEM_WRITE without the | |
747 | * MEM_UNINIT means that memory needs to be initialized since it is also | |
748 | * read. | |
749 | */ | |
750 | MEM_WRITE = BIT(18 + BPF_BASE_TYPE_BITS), | |
751 | ||
16d1e00c JK |
752 | __BPF_TYPE_FLAG_MAX, |
753 | __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, | |
d639b9d1 HL |
754 | }; |
755 | ||
05421aec JK |
756 | #define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \ |
757 | | DYNPTR_TYPE_XDP) | |
97e03f52 | 758 | |
d639b9d1 HL |
759 | /* Max number of base types. */ |
760 | #define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS) | |
761 | ||
762 | /* Max number of all types. */ | |
763 | #define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1)) | |
764 | ||
17a52670 AS |
765 | /* function argument constraints */ |
766 | enum bpf_arg_type { | |
80f1d68c | 767 | ARG_DONTCARE = 0, /* unused argument in helper function */ |
17a52670 AS |
768 | |
769 | /* the following constraints used to prototype | |
770 | * bpf_map_lookup/update/delete_elem() functions | |
771 | */ | |
772 | ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ | |
773 | ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ | |
774 | ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ | |
775 | ||
16d1e00c JK |
776 | /* Used to prototype bpf_memcmp() and other functions that access data |
777 | * on eBPF program stack | |
17a52670 | 778 | */ |
39f19ebb | 779 | ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ |
2edc3de6 | 780 | ARG_PTR_TO_ARENA, |
435faee1 | 781 | |
39f19ebb AS |
782 | ARG_CONST_SIZE, /* number of bytes accessed from memory */ |
783 | ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ | |
80f1d68c | 784 | |
608cd71a | 785 | ARG_PTR_TO_CTX, /* pointer to context */ |
80f1d68c | 786 | ARG_ANYTHING, /* any (initialized) argument is ok */ |
d83525ca | 787 | ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ |
46f8bc92 | 788 | ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ |
6ac99e8f | 789 | ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ |
a7658e1a | 790 | ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ |
894f2a8b | 791 | ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */ |
457f4436 | 792 | ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ |
1df8f55a | 793 | ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */ |
eaa6bcb7 | 794 | ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */ |
69c087ba | 795 | ARG_PTR_TO_FUNC, /* pointer to a bpf program function */ |
48946bd6 | 796 | ARG_PTR_TO_STACK, /* pointer to stack */ |
fff13c4b | 797 | ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ |
b00628b1 | 798 | ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ |
d59232af | 799 | ARG_KPTR_XCHG_DEST, /* pointer to destination that kptrs are bpf_kptr_xchg'd into */ |
97e03f52 | 800 | ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */ |
f79e7ea5 | 801 | __BPF_ARG_TYPE_MAX, |
d639b9d1 | 802 | |
48946bd6 HL |
803 | /* Extended arg_types. */ |
804 | ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE, | |
805 | ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM, | |
806 | ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX, | |
807 | ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET, | |
48946bd6 | 808 | ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK, |
c0a5a21c | 809 | ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID, |
6fad274f DB |
810 | /* Pointer to memory does not need to be initialized, since helper function |
811 | * fills all bytes or clears them in error case. | |
16d1e00c | 812 | */ |
6fad274f | 813 | ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | MEM_WRITE | ARG_PTR_TO_MEM, |
508362ac MM |
814 | /* Pointer to valid memory of size known at compile time. */ |
815 | ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM, | |
48946bd6 | 816 | |
d639b9d1 HL |
817 | /* This must be the last entry. Its purpose is to ensure the enum is |
818 | * wide enough to hold the higher bits reserved for bpf_type_flag. | |
819 | */ | |
820 | __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT, | |
17a52670 | 821 | }; |
d639b9d1 | 822 | static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); |
17a52670 AS |
823 | |
824 | /* type of values returned from helper functions */ | |
825 | enum bpf_return_type { | |
826 | RET_INTEGER, /* function returns integer */ | |
827 | RET_VOID, /* function doesn't return anything */ | |
3e6a4b3e | 828 | RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ |
3c480732 HL |
829 | RET_PTR_TO_SOCKET, /* returns a pointer to a socket */ |
830 | RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */ | |
831 | RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */ | |
2de2669b | 832 | RET_PTR_TO_MEM, /* returns a pointer to memory */ |
63d9b80d | 833 | RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */ |
3ca1032a | 834 | RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */ |
d639b9d1 HL |
835 | __BPF_RET_TYPE_MAX, |
836 | ||
3c480732 HL |
837 | /* Extended ret_types. */ |
838 | RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE, | |
839 | RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET, | |
840 | RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK, | |
841 | RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON, | |
894f2a8b | 842 | RET_PTR_TO_RINGBUF_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM, |
2de2669b | 843 | RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MEM, |
3c480732 | 844 | RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID, |
3f00c523 | 845 | RET_PTR_TO_BTF_ID_TRUSTED = PTR_TRUSTED | RET_PTR_TO_BTF_ID, |
3c480732 | 846 | |
d639b9d1 HL |
847 | /* This must be the last entry. Its purpose is to ensure the enum is |
848 | * wide enough to hold the higher bits reserved for bpf_type_flag. | |
849 | */ | |
850 | __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT, | |
17a52670 | 851 | }; |
d639b9d1 | 852 | static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); |
17a52670 | 853 | |
09756af4 AS |
854 | /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs |
855 | * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL | |
856 | * instructions after verifying | |
857 | */ | |
858 | struct bpf_func_proto { | |
859 | u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | |
860 | bool gpl_only; | |
36bbef52 | 861 | bool pkt_access; |
01685c5b | 862 | bool might_sleep; |
ae010757 EZ |
863 | /* set to true if helper follows contract for llvm |
864 | * attribute bpf_fastcall: | |
5b5f51bf EZ |
865 | * - void functions do not scratch r0 |
866 | * - functions taking N arguments scratch only registers r1-rN | |
867 | */ | |
ae010757 | 868 | bool allow_fastcall; |
17a52670 | 869 | enum bpf_return_type ret_type; |
a7658e1a AS |
870 | union { |
871 | struct { | |
872 | enum bpf_arg_type arg1_type; | |
873 | enum bpf_arg_type arg2_type; | |
874 | enum bpf_arg_type arg3_type; | |
875 | enum bpf_arg_type arg4_type; | |
876 | enum bpf_arg_type arg5_type; | |
877 | }; | |
878 | enum bpf_arg_type arg_type[5]; | |
879 | }; | |
9436ef6e LB |
880 | union { |
881 | struct { | |
882 | u32 *arg1_btf_id; | |
883 | u32 *arg2_btf_id; | |
884 | u32 *arg3_btf_id; | |
885 | u32 *arg4_btf_id; | |
886 | u32 *arg5_btf_id; | |
887 | }; | |
888 | u32 *arg_btf_id[5]; | |
508362ac MM |
889 | struct { |
890 | size_t arg1_size; | |
891 | size_t arg2_size; | |
892 | size_t arg3_size; | |
893 | size_t arg4_size; | |
894 | size_t arg5_size; | |
895 | }; | |
896 | size_t arg_size[5]; | |
9436ef6e | 897 | }; |
af7ec138 | 898 | int *ret_btf_id; /* return value btf_id */ |
eae2e83e | 899 | bool (*allowed)(const struct bpf_prog *prog); |
17a52670 AS |
900 | }; |
901 | ||
902 | /* bpf_context is intentionally undefined structure. Pointer to bpf_context is | |
903 | * the first argument to eBPF programs. | |
904 | * For socket filters: 'struct bpf_context *' == 'struct sk_buff *' | |
905 | */ | |
906 | struct bpf_context; | |
907 | ||
908 | enum bpf_access_type { | |
909 | BPF_READ = 1, | |
910 | BPF_WRITE = 2 | |
09756af4 AS |
911 | }; |
912 | ||
19de99f7 | 913 | /* types of values stored in eBPF registers */ |
f1174f77 EC |
914 | /* Pointer types represent: |
915 | * pointer | |
916 | * pointer + imm | |
917 | * pointer + (u16) var | |
918 | * pointer + (u16) var + imm | |
919 | * if (range > 0) then [ptr, ptr + range - off) is safe to access | |
920 | * if (id > 0) means that some 'var' was added | |
921 | * if (off > 0) means that 'imm' was added | |
922 | */ | |
19de99f7 AS |
923 | enum bpf_reg_type { |
924 | NOT_INIT = 0, /* nothing was written into register */ | |
f1174f77 | 925 | SCALAR_VALUE, /* reg doesn't contain a valid pointer */ |
19de99f7 AS |
926 | PTR_TO_CTX, /* reg points to bpf_context */ |
927 | CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ | |
928 | PTR_TO_MAP_VALUE, /* reg points to map element value */ | |
c25b2ae1 | 929 | PTR_TO_MAP_KEY, /* reg points to a map element key */ |
f1174f77 | 930 | PTR_TO_STACK, /* reg == frame_pointer + offset */ |
de8f3a83 | 931 | PTR_TO_PACKET_META, /* skb->data - meta_len */ |
f1174f77 | 932 | PTR_TO_PACKET, /* reg points to skb->data */ |
19de99f7 | 933 | PTR_TO_PACKET_END, /* skb->data + headlen */ |
d58e468b | 934 | PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ |
c64b7983 | 935 | PTR_TO_SOCKET, /* reg points to struct bpf_sock */ |
46f8bc92 | 936 | PTR_TO_SOCK_COMMON, /* reg points to sock_common */ |
655a51e5 | 937 | PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ |
9df1c28b | 938 | PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ |
fada7fdc | 939 | PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ |
ba5f4cfe JF |
940 | /* PTR_TO_BTF_ID points to a kernel struct that does not need |
941 | * to be null checked by the BPF program. This does not imply the | |
942 | * pointer is _not_ null and in practice this can easily be a null | |
943 | * pointer when reading pointer chains. The assumption is program | |
944 | * context will handle null pointer dereference typically via fault | |
945 | * handling. The verifier must keep this in mind and can make no | |
946 | * assumptions about null or non-null when doing branch analysis. | |
947 | * Further, when passed into helpers the helpers can not, without | |
948 | * additional context, assume the value is non-null. | |
949 | */ | |
950 | PTR_TO_BTF_ID, | |
457f4436 | 951 | PTR_TO_MEM, /* reg points to valid memory region */ |
6082b6c3 | 952 | PTR_TO_ARENA, |
20b2aff4 | 953 | PTR_TO_BUF, /* reg points to a read/write buffer */ |
69c087ba | 954 | PTR_TO_FUNC, /* reg points to a bpf program function */ |
27060531 | 955 | CONST_PTR_TO_DYNPTR, /* reg points to a const struct bpf_dynptr */ |
e6ac2450 | 956 | __BPF_REG_TYPE_MAX, |
d639b9d1 | 957 | |
c25b2ae1 HL |
958 | /* Extended reg_types. */ |
959 | PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE, | |
960 | PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET, | |
961 | PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON, | |
962 | PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK, | |
213a6952 MD |
963 | /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not |
964 | * been checked for null. Used primarily to inform the verifier | |
965 | * an explicit null check is required for this struct. | |
966 | */ | |
c25b2ae1 | 967 | PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID, |
c25b2ae1 | 968 | |
d639b9d1 HL |
969 | /* This must be the last entry. Its purpose is to ensure the enum is |
970 | * wide enough to hold the higher bits reserved for bpf_type_flag. | |
971 | */ | |
972 | __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT, | |
19de99f7 | 973 | }; |
d639b9d1 | 974 | static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); |
19de99f7 | 975 | |
23994631 YS |
976 | /* The information passed from prog-specific *_is_valid_access |
977 | * back to the verifier. | |
978 | */ | |
979 | struct bpf_insn_access_aux { | |
980 | enum bpf_reg_type reg_type; | |
92de3608 | 981 | bool is_ldsx; |
9e15db66 AS |
982 | union { |
983 | int ctx_field_size; | |
22dc4a0f AN |
984 | struct { |
985 | struct btf *btf; | |
986 | u32 btf_id; | |
a687df20 | 987 | u32 ref_obj_id; |
22dc4a0f | 988 | }; |
9e15db66 AS |
989 | }; |
990 | struct bpf_verifier_log *log; /* for verbose logs */ | |
5d99e198 | 991 | bool is_retval; /* is accessing function return value ? */ |
23994631 YS |
992 | }; |
993 | ||
f96da094 DB |
994 | static inline void |
995 | bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) | |
996 | { | |
997 | aux->ctx_field_size = size; | |
998 | } | |
999 | ||
3feb263b AN |
1000 | static bool bpf_is_ldimm64(const struct bpf_insn *insn) |
1001 | { | |
1002 | return insn->code == (BPF_LD | BPF_IMM | BPF_DW); | |
1003 | } | |
1004 | ||
3990ed4c MKL |
1005 | static inline bool bpf_pseudo_func(const struct bpf_insn *insn) |
1006 | { | |
3feb263b | 1007 | return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC; |
3990ed4c MKL |
1008 | } |
1009 | ||
88044230 PY |
1010 | /* Given a BPF_ATOMIC instruction @atomic_insn, return true if it is an |
1011 | * atomic load or store, and false if it is a read-modify-write instruction. | |
1012 | */ | |
1013 | static inline bool | |
1014 | bpf_atomic_is_load_store(const struct bpf_insn *atomic_insn) | |
1015 | { | |
1016 | switch (atomic_insn->imm) { | |
1017 | case BPF_LOAD_ACQ: | |
1018 | case BPF_STORE_REL: | |
1019 | return true; | |
1020 | default: | |
1021 | return false; | |
1022 | } | |
1023 | } | |
1024 | ||
7de16e3a JK |
1025 | struct bpf_prog_ops { |
1026 | int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, | |
1027 | union bpf_attr __user *uattr); | |
1028 | }; | |
1029 | ||
6728aea7 | 1030 | struct bpf_reg_state; |
09756af4 AS |
1031 | struct bpf_verifier_ops { |
1032 | /* return eBPF function prototype for verification */ | |
5e43f899 AI |
1033 | const struct bpf_func_proto * |
1034 | (*get_func_proto)(enum bpf_func_id func_id, | |
1035 | const struct bpf_prog *prog); | |
17a52670 AS |
1036 | |
1037 | /* return true if 'size' wide access at offset 'off' within bpf_context | |
1038 | * with 'type' (read or write) is allowed | |
1039 | */ | |
19de99f7 | 1040 | bool (*is_valid_access)(int off, int size, enum bpf_access_type type, |
5e43f899 | 1041 | const struct bpf_prog *prog, |
23994631 | 1042 | struct bpf_insn_access_aux *info); |
36bbef52 DB |
1043 | int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, |
1044 | const struct bpf_prog *prog); | |
169c3176 MKL |
1045 | int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog, |
1046 | s16 ctx_stack_off); | |
e0cea7ce DB |
1047 | int (*gen_ld_abs)(const struct bpf_insn *orig, |
1048 | struct bpf_insn *insn_buf); | |
6b8cc1d1 DB |
1049 | u32 (*convert_ctx_access)(enum bpf_access_type type, |
1050 | const struct bpf_insn *src, | |
1051 | struct bpf_insn *dst, | |
f96da094 | 1052 | struct bpf_prog *prog, u32 *target_size); |
27ae7997 | 1053 | int (*btf_struct_access)(struct bpf_verifier_log *log, |
6728aea7 | 1054 | const struct bpf_reg_state *reg, |
b7e852a9 | 1055 | int off, int size); |
09756af4 AS |
1056 | }; |
1057 | ||
cae1927c | 1058 | struct bpf_prog_offload_ops { |
08ca90af | 1059 | /* verifier basic callbacks */ |
cae1927c JK |
1060 | int (*insn_hook)(struct bpf_verifier_env *env, |
1061 | int insn_idx, int prev_insn_idx); | |
c941ce9c | 1062 | int (*finalize)(struct bpf_verifier_env *env); |
08ca90af JK |
1063 | /* verifier optimization callbacks (called after .finalize) */ |
1064 | int (*replace_insn)(struct bpf_verifier_env *env, u32 off, | |
1065 | struct bpf_insn *insn); | |
1066 | int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); | |
1067 | /* program management callbacks */ | |
16a8cb5c QM |
1068 | int (*prepare)(struct bpf_prog *prog); |
1069 | int (*translate)(struct bpf_prog *prog); | |
eb911947 | 1070 | void (*destroy)(struct bpf_prog *prog); |
cae1927c JK |
1071 | }; |
1072 | ||
0a9c1991 | 1073 | struct bpf_prog_offload { |
ab3f0063 JK |
1074 | struct bpf_prog *prog; |
1075 | struct net_device *netdev; | |
341b3e7b | 1076 | struct bpf_offload_dev *offdev; |
ab3f0063 JK |
1077 | void *dev_priv; |
1078 | struct list_head offloads; | |
1079 | bool dev_state; | |
08ca90af | 1080 | bool opt_failed; |
fcfb126d JW |
1081 | void *jited_image; |
1082 | u32 jited_len; | |
ab3f0063 JK |
1083 | }; |
1084 | ||
8bad74f9 RG |
1085 | enum bpf_cgroup_storage_type { |
1086 | BPF_CGROUP_STORAGE_SHARED, | |
b741f163 | 1087 | BPF_CGROUP_STORAGE_PERCPU, |
8bad74f9 RG |
1088 | __BPF_CGROUP_STORAGE_MAX |
1089 | }; | |
1090 | ||
1091 | #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX | |
1092 | ||
f1b9509c AS |
1093 | /* The longest tracepoint has 12 args. |
1094 | * See include/trace/bpf_probe.h | |
1095 | */ | |
1096 | #define MAX_BPF_FUNC_ARGS 12 | |
1097 | ||
523a4cf4 DB |
1098 | /* The maximum number of arguments passed through registers |
1099 | * a single function may have. | |
1100 | */ | |
1101 | #define MAX_BPF_FUNC_REG_ARGS 5 | |
1102 | ||
720e6a43 YS |
1103 | /* The argument is a structure. */ |
1104 | #define BTF_FMODEL_STRUCT_ARG BIT(0) | |
1105 | ||
49f67f39 IL |
1106 | /* The argument is signed. */ |
1107 | #define BTF_FMODEL_SIGNED_ARG BIT(1) | |
1108 | ||
fec56f58 AS |
1109 | struct btf_func_model { |
1110 | u8 ret_size; | |
49f67f39 | 1111 | u8 ret_flags; |
fec56f58 AS |
1112 | u8 nr_args; |
1113 | u8 arg_size[MAX_BPF_FUNC_ARGS]; | |
720e6a43 | 1114 | u8 arg_flags[MAX_BPF_FUNC_ARGS]; |
fec56f58 AS |
1115 | }; |
1116 | ||
1117 | /* Restore arguments before returning from trampoline to let original function | |
1118 | * continue executing. This flag is used for fentry progs when there are no | |
1119 | * fexit progs. | |
1120 | */ | |
1121 | #define BPF_TRAMP_F_RESTORE_REGS BIT(0) | |
1122 | /* Call original function after fentry progs, but before fexit progs. | |
1123 | * Makes sense for fentry/fexit, normal calls and indirect calls. | |
1124 | */ | |
1125 | #define BPF_TRAMP_F_CALL_ORIG BIT(1) | |
1126 | /* Skip current frame and return to parent. Makes sense for fentry/fexit | |
1127 | * programs only. Should not be used with normal calls and indirect calls. | |
1128 | */ | |
1129 | #define BPF_TRAMP_F_SKIP_FRAME BIT(2) | |
7e6f3cd8 JO |
1130 | /* Store IP address of the caller on the trampoline stack, |
1131 | * so it's available for trampoline's programs. | |
1132 | */ | |
1133 | #define BPF_TRAMP_F_IP_ARG BIT(3) | |
356ed649 HT |
1134 | /* Return the return value of fentry prog. Only used by bpf_struct_ops. */ |
1135 | #define BPF_TRAMP_F_RET_FENTRY_RET BIT(4) | |
7e6f3cd8 | 1136 | |
316cba62 JO |
1137 | /* Get original function from stack instead of from provided direct address. |
1138 | * Makes sense for trampolines with fexit or fmod_ret programs. | |
1139 | */ | |
1140 | #define BPF_TRAMP_F_ORIG_STACK BIT(5) | |
1141 | ||
00963a2e SL |
1142 | /* This trampoline is on a function with another ftrace_ops with IPMODIFY, |
1143 | * e.g., a live patch. This flag is set and cleared by ftrace call backs, | |
1144 | */ | |
1145 | #define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6) | |
1146 | ||
2b5dcb31 LH |
1147 | /* Indicate that current trampoline is in a tail call context. Then, it has to |
1148 | * cache and restore tail_call_cnt to avoid infinite tail call loop. | |
1149 | */ | |
1150 | #define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7) | |
1151 | ||
2cd3e377 PZ |
1152 | /* |
1153 | * Indicate the trampoline should be suitable to receive indirect calls; | |
1154 | * without this indirectly calling the generated code can result in #UD/#CP, | |
1155 | * depending on the CFI options. | |
1156 | * | |
1157 | * Used by bpf_struct_ops. | |
1158 | * | |
1159 | * Incompatible with FENTRY usage, overloads @func_addr argument. | |
1160 | */ | |
1161 | #define BPF_TRAMP_F_INDIRECT BIT(8) | |
1162 | ||
88fd9e53 | 1163 | /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 |
b23316aa | 1164 | * bytes on x86. |
88fd9e53 | 1165 | */ |
390a07a9 | 1166 | enum { |
528eb2cb IL |
1167 | #if defined(__s390x__) |
1168 | BPF_MAX_TRAMP_LINKS = 27, | |
1169 | #else | |
390a07a9 | 1170 | BPF_MAX_TRAMP_LINKS = 38, |
528eb2cb | 1171 | #endif |
390a07a9 | 1172 | }; |
88fd9e53 | 1173 | |
f7e0beaf KFL |
1174 | struct bpf_tramp_links { |
1175 | struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS]; | |
1176 | int nr_links; | |
88fd9e53 KS |
1177 | }; |
1178 | ||
e384c7b7 KFL |
1179 | struct bpf_tramp_run_ctx; |
1180 | ||
fec56f58 AS |
1181 | /* Different use cases for BPF trampoline: |
1182 | * 1. replace nop at the function entry (kprobe equivalent) | |
1183 | * flags = BPF_TRAMP_F_RESTORE_REGS | |
1184 | * fentry = a set of programs to run before returning from trampoline | |
1185 | * | |
1186 | * 2. replace nop at the function entry (kprobe + kretprobe equivalent) | |
1187 | * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME | |
1188 | * orig_call = fentry_ip + MCOUNT_INSN_SIZE | |
1189 | * fentry = a set of program to run before calling original function | |
1190 | * fexit = a set of program to run after original function | |
1191 | * | |
1192 | * 3. replace direct call instruction anywhere in the function body | |
1193 | * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid) | |
1194 | * With flags = 0 | |
1195 | * fentry = a set of programs to run before returning from trampoline | |
1196 | * With flags = BPF_TRAMP_F_CALL_ORIG | |
1197 | * orig_call = original callback addr or direct function addr | |
1198 | * fentry = a set of program to run before calling original function | |
1199 | * fexit = a set of program to run after original function | |
1200 | */ | |
e21aa341 | 1201 | struct bpf_tramp_image; |
7a3d9a15 | 1202 | int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, |
85d33df3 | 1203 | const struct btf_func_model *m, u32 flags, |
f7e0beaf | 1204 | struct bpf_tramp_links *tlinks, |
7a3d9a15 | 1205 | void *func_addr); |
82583daa SL |
1206 | void *arch_alloc_bpf_trampoline(unsigned int size); |
1207 | void arch_free_bpf_trampoline(void *image, unsigned int size); | |
c733239f | 1208 | int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size); |
96d1b7c0 SL |
1209 | int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, |
1210 | struct bpf_tramp_links *tlinks, void *func_addr); | |
82583daa | 1211 | |
271de525 MKL |
1212 | u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog, |
1213 | struct bpf_tramp_run_ctx *run_ctx); | |
1214 | void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start, | |
1215 | struct bpf_tramp_run_ctx *run_ctx); | |
e21aa341 AS |
1216 | void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr); |
1217 | void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr); | |
271de525 MKL |
1218 | typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog, |
1219 | struct bpf_tramp_run_ctx *run_ctx); | |
1220 | typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start, | |
1221 | struct bpf_tramp_run_ctx *run_ctx); | |
1222 | bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog); | |
1223 | bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog); | |
fec56f58 | 1224 | |
535911c8 JO |
1225 | struct bpf_ksym { |
1226 | unsigned long start; | |
1227 | unsigned long end; | |
bfea9a85 | 1228 | char name[KSYM_NAME_LEN]; |
ecb60d1c | 1229 | struct list_head lnode; |
ca4424c9 | 1230 | struct latch_tree_node tnode; |
cbd76f8d | 1231 | bool prog; |
535911c8 JO |
1232 | }; |
1233 | ||
fec56f58 AS |
1234 | enum bpf_tramp_prog_type { |
1235 | BPF_TRAMP_FENTRY, | |
1236 | BPF_TRAMP_FEXIT, | |
ae240823 | 1237 | BPF_TRAMP_MODIFY_RETURN, |
be8704ff AS |
1238 | BPF_TRAMP_MAX, |
1239 | BPF_TRAMP_REPLACE, /* more than MAX */ | |
fec56f58 AS |
1240 | }; |
1241 | ||
e21aa341 AS |
1242 | struct bpf_tramp_image { |
1243 | void *image; | |
26ef208c | 1244 | int size; |
e21aa341 AS |
1245 | struct bpf_ksym ksym; |
1246 | struct percpu_ref pcref; | |
1247 | void *ip_after_call; | |
1248 | void *ip_epilogue; | |
1249 | union { | |
1250 | struct rcu_head rcu; | |
1251 | struct work_struct work; | |
1252 | }; | |
1253 | }; | |
1254 | ||
fec56f58 AS |
1255 | struct bpf_trampoline { |
1256 | /* hlist for trampoline_table */ | |
1257 | struct hlist_node hlist; | |
00963a2e | 1258 | struct ftrace_ops *fops; |
fec56f58 AS |
1259 | /* serializes access to fields of this trampoline */ |
1260 | struct mutex mutex; | |
1261 | refcount_t refcnt; | |
00963a2e | 1262 | u32 flags; |
fec56f58 AS |
1263 | u64 key; |
1264 | struct { | |
1265 | struct btf_func_model model; | |
1266 | void *addr; | |
b91e014f | 1267 | bool ftrace_managed; |
fec56f58 | 1268 | } func; |
be8704ff AS |
1269 | /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF |
1270 | * program by replacing one of its functions. func.addr is the address | |
1271 | * of the function it replaced. | |
1272 | */ | |
1273 | struct bpf_prog *extension_prog; | |
fec56f58 AS |
1274 | /* list of BPF programs using this trampoline */ |
1275 | struct hlist_head progs_hlist[BPF_TRAMP_MAX]; | |
1276 | /* Number of attached programs. A counter per kind. */ | |
1277 | int progs_cnt[BPF_TRAMP_MAX]; | |
1278 | /* Executable image of trampoline */ | |
e21aa341 | 1279 | struct bpf_tramp_image *cur_image; |
fec56f58 | 1280 | }; |
75ccbef6 | 1281 | |
f7b12b6f THJ |
1282 | struct bpf_attach_target_info { |
1283 | struct btf_func_model fmodel; | |
1284 | long tgt_addr; | |
31bf1dbc | 1285 | struct module *tgt_mod; |
f7b12b6f THJ |
1286 | const char *tgt_name; |
1287 | const struct btf_type *tgt_type; | |
1288 | }; | |
1289 | ||
116eb788 | 1290 | #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */ |
75ccbef6 BT |
1291 | |
1292 | struct bpf_dispatcher_prog { | |
1293 | struct bpf_prog *prog; | |
1294 | refcount_t users; | |
1295 | }; | |
1296 | ||
1297 | struct bpf_dispatcher { | |
1298 | /* dispatcher mutex */ | |
1299 | struct mutex mutex; | |
1300 | void *func; | |
1301 | struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX]; | |
1302 | int num_progs; | |
1303 | void *image; | |
19c02415 | 1304 | void *rw_image; |
75ccbef6 | 1305 | u32 image_off; |
517b75e4 | 1306 | struct bpf_ksym ksym; |
c86df29d PZ |
1307 | #ifdef CONFIG_HAVE_STATIC_CALL |
1308 | struct static_call_key *sc_key; | |
1309 | void *sc_tramp; | |
1310 | #endif | |
75ccbef6 BT |
1311 | }; |
1312 | ||
4f9087f1 PZ |
1313 | #ifndef __bpfcall |
1314 | #define __bpfcall __nocfi | |
1315 | #endif | |
1316 | ||
1317 | static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func( | |
7e6897f9 BT |
1318 | const void *ctx, |
1319 | const struct bpf_insn *insnsi, | |
af3f4134 | 1320 | bpf_func_t bpf_func) |
7e6897f9 BT |
1321 | { |
1322 | return bpf_func(ctx, insnsi); | |
1323 | } | |
f7e0beaf | 1324 | |
8357b366 JK |
1325 | /* the implementation of the opaque uapi struct bpf_dynptr */ |
1326 | struct bpf_dynptr_kern { | |
1327 | void *data; | |
1328 | /* Size represents the number of usable bytes of dynptr data. | |
1329 | * If for example the offset is at 4 for a local dynptr whose data is | |
1330 | * of type u64, the number of usable bytes is 4. | |
1331 | * | |
1332 | * The upper 8 bits are reserved. It is as follows: | |
1333 | * Bits 0 - 23 = size | |
1334 | * Bits 24 - 30 = dynptr type | |
1335 | * Bit 31 = whether dynptr is read-only | |
1336 | */ | |
1337 | u32 size; | |
1338 | u32 offset; | |
1339 | } __aligned(8); | |
1340 | ||
1341 | enum bpf_dynptr_type { | |
1342 | BPF_DYNPTR_TYPE_INVALID, | |
1343 | /* Points to memory that is local to the bpf program */ | |
1344 | BPF_DYNPTR_TYPE_LOCAL, | |
1345 | /* Underlying data is a ringbuf record */ | |
1346 | BPF_DYNPTR_TYPE_RINGBUF, | |
b5964b96 JK |
1347 | /* Underlying data is a sk_buff */ |
1348 | BPF_DYNPTR_TYPE_SKB, | |
05421aec JK |
1349 | /* Underlying data is a xdp_buff */ |
1350 | BPF_DYNPTR_TYPE_XDP, | |
8357b366 JK |
1351 | }; |
1352 | ||
1353 | int bpf_dynptr_check_size(u32 size); | |
26662d73 | 1354 | u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr); |
74523c06 SL |
1355 | const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len); |
1356 | void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len); | |
3e1c6f35 | 1357 | bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr); |
d060b6aa MY |
1358 | int __bpf_dynptr_write(const struct bpf_dynptr_kern *dst, u32 offset, |
1359 | void *src, u32 len, u64 flags); | |
1360 | void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, | |
1361 | void *buffer__opt, u32 buffer__szk); | |
1362 | ||
1363 | static inline int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len) | |
1364 | { | |
1365 | u32 size = __bpf_dynptr_size(ptr); | |
1366 | ||
1367 | if (len > size || offset > size - len) | |
1368 | return -E2BIG; | |
1369 | ||
1370 | return 0; | |
1371 | } | |
8357b366 | 1372 | |
fec56f58 | 1373 | #ifdef CONFIG_BPF_JIT |
d6083f04 LH |
1374 | int bpf_trampoline_link_prog(struct bpf_tramp_link *link, |
1375 | struct bpf_trampoline *tr, | |
1376 | struct bpf_prog *tgt_prog); | |
1377 | int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, | |
1378 | struct bpf_trampoline *tr, | |
1379 | struct bpf_prog *tgt_prog); | |
f7b12b6f THJ |
1380 | struct bpf_trampoline *bpf_trampoline_get(u64 key, |
1381 | struct bpf_attach_target_info *tgt_info); | |
fec56f58 | 1382 | void bpf_trampoline_put(struct bpf_trampoline *tr); |
19c02415 | 1383 | int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs); |
c86df29d PZ |
1384 | |
1385 | /* | |
1386 | * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn | |
1387 | * indirection with a direct call to the bpf program. If the architecture does | |
1388 | * not have STATIC_CALL, avoid a double-indirection. | |
1389 | */ | |
1390 | #ifdef CONFIG_HAVE_STATIC_CALL | |
1391 | ||
1392 | #define __BPF_DISPATCHER_SC_INIT(_name) \ | |
1393 | .sc_key = &STATIC_CALL_KEY(_name), \ | |
1394 | .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name), | |
1395 | ||
1396 | #define __BPF_DISPATCHER_SC(name) \ | |
1397 | DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func) | |
1398 | ||
1399 | #define __BPF_DISPATCHER_CALL(name) \ | |
1400 | static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func) | |
1401 | ||
1402 | #define __BPF_DISPATCHER_UPDATE(_d, _new) \ | |
1403 | __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new)) | |
1404 | ||
1405 | #else | |
1406 | #define __BPF_DISPATCHER_SC_INIT(name) | |
1407 | #define __BPF_DISPATCHER_SC(name) | |
1408 | #define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi) | |
1409 | #define __BPF_DISPATCHER_UPDATE(_d, _new) | |
1410 | #endif | |
dbe69b29 | 1411 | |
517b75e4 JO |
1412 | #define BPF_DISPATCHER_INIT(_name) { \ |
1413 | .mutex = __MUTEX_INITIALIZER(_name.mutex), \ | |
1414 | .func = &_name##_func, \ | |
1415 | .progs = {}, \ | |
1416 | .num_progs = 0, \ | |
1417 | .image = NULL, \ | |
1418 | .image_off = 0, \ | |
1419 | .ksym = { \ | |
1420 | .name = #_name, \ | |
1421 | .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \ | |
1422 | }, \ | |
c86df29d | 1423 | __BPF_DISPATCHER_SC_INIT(_name##_call) \ |
75ccbef6 BT |
1424 | } |
1425 | ||
1426 | #define DEFINE_BPF_DISPATCHER(name) \ | |
c86df29d | 1427 | __BPF_DISPATCHER_SC(name); \ |
4f9087f1 | 1428 | noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \ |
75ccbef6 BT |
1429 | const void *ctx, \ |
1430 | const struct bpf_insn *insnsi, \ | |
af3f4134 | 1431 | bpf_func_t bpf_func) \ |
75ccbef6 | 1432 | { \ |
c86df29d | 1433 | return __BPF_DISPATCHER_CALL(name); \ |
75ccbef6 | 1434 | } \ |
6a64037d BT |
1435 | EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \ |
1436 | struct bpf_dispatcher bpf_dispatcher_##name = \ | |
18acb7fa | 1437 | BPF_DISPATCHER_INIT(bpf_dispatcher_##name); |
dbe69b29 | 1438 | |
75ccbef6 | 1439 | #define DECLARE_BPF_DISPATCHER(name) \ |
6a64037d | 1440 | unsigned int bpf_dispatcher_##name##_func( \ |
75ccbef6 BT |
1441 | const void *ctx, \ |
1442 | const struct bpf_insn *insnsi, \ | |
af3f4134 | 1443 | bpf_func_t bpf_func); \ |
6a64037d | 1444 | extern struct bpf_dispatcher bpf_dispatcher_##name; |
c86df29d | 1445 | |
6a64037d BT |
1446 | #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func |
1447 | #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name) | |
75ccbef6 BT |
1448 | void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, |
1449 | struct bpf_prog *to); | |
dba122fb | 1450 | /* Called only from JIT-enabled code, so there's no need for stubs. */ |
7c8ce4ff XK |
1451 | void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym); |
1452 | void bpf_image_ksym_add(struct bpf_ksym *ksym); | |
a108f7dc | 1453 | void bpf_image_ksym_del(struct bpf_ksym *ksym); |
dba122fb JO |
1454 | void bpf_ksym_add(struct bpf_ksym *ksym); |
1455 | void bpf_ksym_del(struct bpf_ksym *ksym); | |
3486bedd SL |
1456 | int bpf_jit_charge_modmem(u32 size); |
1457 | void bpf_jit_uncharge_modmem(u32 size); | |
f92c1e18 | 1458 | bool bpf_prog_has_trampoline(const struct bpf_prog *prog); |
fec56f58 | 1459 | #else |
f7e0beaf | 1460 | static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link, |
d6083f04 LH |
1461 | struct bpf_trampoline *tr, |
1462 | struct bpf_prog *tgt_prog) | |
fec56f58 AS |
1463 | { |
1464 | return -ENOTSUPP; | |
1465 | } | |
f7e0beaf | 1466 | static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, |
d6083f04 LH |
1467 | struct bpf_trampoline *tr, |
1468 | struct bpf_prog *tgt_prog) | |
fec56f58 AS |
1469 | { |
1470 | return -ENOTSUPP; | |
1471 | } | |
f7b12b6f THJ |
1472 | static inline struct bpf_trampoline *bpf_trampoline_get(u64 key, |
1473 | struct bpf_attach_target_info *tgt_info) | |
1474 | { | |
b724a641 | 1475 | return NULL; |
f7b12b6f | 1476 | } |
fec56f58 | 1477 | static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} |
75ccbef6 BT |
1478 | #define DEFINE_BPF_DISPATCHER(name) |
1479 | #define DECLARE_BPF_DISPATCHER(name) | |
6a64037d | 1480 | #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func |
75ccbef6 BT |
1481 | #define BPF_DISPATCHER_PTR(name) NULL |
1482 | static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, | |
1483 | struct bpf_prog *from, | |
1484 | struct bpf_prog *to) {} | |
e9b4e606 JO |
1485 | static inline bool is_bpf_image_address(unsigned long address) |
1486 | { | |
1487 | return false; | |
1488 | } | |
f92c1e18 JO |
1489 | static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog) |
1490 | { | |
1491 | return false; | |
1492 | } | |
fec56f58 AS |
1493 | #endif |
1494 | ||
8c1b6e69 | 1495 | struct bpf_func_info_aux { |
51c39bb1 | 1496 | u16 linkage; |
8c1b6e69 | 1497 | bool unreliable; |
2afae08c AN |
1498 | bool called : 1; |
1499 | bool verified : 1; | |
8c1b6e69 AS |
1500 | }; |
1501 | ||
a66886fe DB |
1502 | enum bpf_jit_poke_reason { |
1503 | BPF_POKE_REASON_TAIL_CALL, | |
1504 | }; | |
1505 | ||
1506 | /* Descriptor of pokes pointing /into/ the JITed image. */ | |
1507 | struct bpf_jit_poke_descriptor { | |
cf71b174 | 1508 | void *tailcall_target; |
ebf7d1f5 MF |
1509 | void *tailcall_bypass; |
1510 | void *bypass_addr; | |
f263a814 | 1511 | void *aux; |
a66886fe DB |
1512 | union { |
1513 | struct { | |
1514 | struct bpf_map *map; | |
1515 | u32 key; | |
1516 | } tail_call; | |
1517 | }; | |
cf71b174 | 1518 | bool tailcall_target_stable; |
a66886fe DB |
1519 | u8 adj_off; |
1520 | u16 reason; | |
a748c697 | 1521 | u32 insn_idx; |
a66886fe DB |
1522 | }; |
1523 | ||
3c32cc1b YS |
1524 | /* reg_type info for ctx arguments */ |
1525 | struct bpf_ctx_arg_aux { | |
1526 | u32 offset; | |
1527 | enum bpf_reg_type reg_type; | |
77c0208e | 1528 | struct btf *btf; |
951cf368 | 1529 | u32 btf_id; |
a687df20 AH |
1530 | u32 ref_obj_id; |
1531 | bool refcounted; | |
3c32cc1b YS |
1532 | }; |
1533 | ||
541c3bad AN |
1534 | struct btf_mod_pair { |
1535 | struct btf *btf; | |
1536 | struct module *module; | |
1537 | }; | |
1538 | ||
e6ac2450 MKL |
1539 | struct bpf_kfunc_desc_tab; |
1540 | ||
09756af4 | 1541 | struct bpf_prog_aux { |
85192dbf | 1542 | atomic64_t refcnt; |
24701ece | 1543 | u32 used_map_cnt; |
541c3bad | 1544 | u32 used_btf_cnt; |
32bbe007 | 1545 | u32 max_ctx_offset; |
e647815a | 1546 | u32 max_pkt_offset; |
9df1c28b | 1547 | u32 max_tp_access; |
8726679a | 1548 | u32 stack_depth; |
dc4bb0e2 | 1549 | u32 id; |
ba64e7d8 | 1550 | u32 func_cnt; /* used by non-func prog as the number of func progs */ |
335d1c5b | 1551 | u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */ |
ba64e7d8 | 1552 | u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ |
ccfe29eb | 1553 | u32 attach_btf_id; /* in-kernel BTF type id to attach to */ |
51d65049 | 1554 | u32 attach_st_ops_member_off; |
3c32cc1b | 1555 | u32 ctx_arg_info_size; |
afbf21dc YS |
1556 | u32 max_rdonly_access; |
1557 | u32 max_rdwr_access; | |
22dc4a0f | 1558 | struct btf *attach_btf; |
43205180 | 1559 | struct bpf_ctx_arg_aux *ctx_arg_info; |
7d1cd70d | 1560 | void __percpu *priv_stack_ptr; |
3aac1ead THJ |
1561 | struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */ |
1562 | struct bpf_prog *dst_prog; | |
1563 | struct bpf_trampoline *dst_trampoline; | |
4a1e7c0c THJ |
1564 | enum bpf_prog_type saved_dst_prog_type; |
1565 | enum bpf_attach_type saved_dst_attach_type; | |
a4b1d3c1 | 1566 | bool verifier_zext; /* Zero extensions has been inserted by verifier. */ |
2b3486bc SF |
1567 | bool dev_bound; /* Program is bound to the netdev. */ |
1568 | bool offload_requested; /* Program is bound and offloaded to the netdev. */ | |
38207291 | 1569 | bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ |
19bfcdf9 | 1570 | bool attach_tracing_prog; /* true if tracing another tracing program */ |
8c1b6e69 | 1571 | bool func_proto_unreliable; |
ebf7d1f5 | 1572 | bool tail_call_reachable; |
c2f2cdbe | 1573 | bool xdp_has_frags; |
f18b03fa KKD |
1574 | bool exception_cb; |
1575 | bool exception_boundary; | |
d6083f04 | 1576 | bool is_extended; /* true if extended by freplace program */ |
e00931c0 | 1577 | bool jits_use_priv_stack; |
5bd36da1 | 1578 | bool priv_stack_requested; |
81f6d053 | 1579 | bool changes_pkt_data; |
e2d8f560 | 1580 | bool might_sleep; |
d6083f04 LH |
1581 | u64 prog_array_member_cnt; /* counts how many times as member of prog_array */ |
1582 | struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */ | |
2fe99eb0 | 1583 | struct bpf_arena *arena; |
5bd36da1 | 1584 | void (*recursion_detected)(struct bpf_prog *prog); /* callback if recursion is detected */ |
38207291 MKL |
1585 | /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ |
1586 | const struct btf_type *attach_func_proto; | |
1587 | /* function name for valid attach_btf_id */ | |
1588 | const char *attach_func_name; | |
1c2a088a AS |
1589 | struct bpf_prog **func; |
1590 | void *jit_data; /* JIT specific data. arch dependent */ | |
a66886fe | 1591 | struct bpf_jit_poke_descriptor *poke_tab; |
e6ac2450 | 1592 | struct bpf_kfunc_desc_tab *kfunc_tab; |
2357672c | 1593 | struct bpf_kfunc_btf_tab *kfunc_btf_tab; |
a66886fe | 1594 | u32 size_poke_tab; |
4f9087f1 PZ |
1595 | #ifdef CONFIG_FINEIBT |
1596 | struct bpf_ksym ksym_prefix; | |
1597 | #endif | |
535911c8 | 1598 | struct bpf_ksym ksym; |
7de16e3a | 1599 | const struct bpf_prog_ops *ops; |
51d65049 | 1600 | const struct bpf_struct_ops *st_ops; |
09756af4 | 1601 | struct bpf_map **used_maps; |
984fe94f | 1602 | struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */ |
541c3bad | 1603 | struct btf_mod_pair *used_btfs; |
09756af4 | 1604 | struct bpf_prog *prog; |
aaac3ba9 | 1605 | struct user_struct *user; |
cb4d2b3f | 1606 | u64 load_time; /* ns since boottime */ |
aba64c7d | 1607 | u32 verified_insns; |
69fd337a | 1608 | int cgroup_atype; /* enum cgroup_bpf_attach_type */ |
8bad74f9 | 1609 | struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; |
067cae47 | 1610 | char name[BPF_OBJ_NAME_LEN]; |
852486b3 | 1611 | u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64); |
afdb09c7 CF |
1612 | #ifdef CONFIG_SECURITY |
1613 | void *security; | |
1614 | #endif | |
caf8f28e | 1615 | struct bpf_token *token; |
0a9c1991 | 1616 | struct bpf_prog_offload *offload; |
838e9690 | 1617 | struct btf *btf; |
ba64e7d8 | 1618 | struct bpf_func_info *func_info; |
8c1b6e69 | 1619 | struct bpf_func_info_aux *func_info_aux; |
c454a46b MKL |
1620 | /* bpf_line_info loaded from userspace. linfo->insn_off |
1621 | * has the xlated insn offset. | |
1622 | * Both the main and sub prog share the same linfo. | |
1623 | * The subprog can access its first linfo by | |
1624 | * using the linfo_idx. | |
1625 | */ | |
1626 | struct bpf_line_info *linfo; | |
1627 | /* jited_linfo is the jited addr of the linfo. It has a | |
1628 | * one to one mapping to linfo: | |
1629 | * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. | |
1630 | * Both the main and sub prog share the same jited_linfo. | |
1631 | * The subprog can access its first jited_linfo by | |
1632 | * using the linfo_idx. | |
1633 | */ | |
1634 | void **jited_linfo; | |
ba64e7d8 | 1635 | u32 func_info_cnt; |
c454a46b MKL |
1636 | u32 nr_linfo; |
1637 | /* subprog can use linfo_idx to access its first linfo and | |
1638 | * jited_linfo. | |
1639 | * main prog always has linfo_idx == 0 | |
1640 | */ | |
1641 | u32 linfo_idx; | |
31bf1dbc | 1642 | struct module *mod; |
3dec541b AS |
1643 | u32 num_exentries; |
1644 | struct exception_table_entry *extable; | |
abf2e7d6 AS |
1645 | union { |
1646 | struct work_struct work; | |
1647 | struct rcu_head rcu; | |
1648 | }; | |
09756af4 AS |
1649 | }; |
1650 | ||
d687f621 DK |
1651 | struct bpf_prog { |
1652 | u16 pages; /* Number of allocated pages */ | |
1653 | u16 jited:1, /* Is our filter JIT'ed? */ | |
1654 | jit_requested:1,/* archs need to JIT the prog */ | |
1655 | gpl_compatible:1, /* Is filter GPL compatible? */ | |
1656 | cb_access:1, /* Is control block accessed? */ | |
1657 | dst_needed:1, /* Do we need dst entry? */ | |
1658 | blinding_requested:1, /* needs constant blinding */ | |
1659 | blinded:1, /* Was blinded */ | |
1660 | is_func:1, /* program is a bpf function */ | |
1661 | kprobe_override:1, /* Do we override a kprobe? */ | |
1662 | has_callchain_buf:1, /* callchain buffer allocated? */ | |
1663 | enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ | |
1664 | call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ | |
1665 | call_get_func_ip:1, /* Do we call get_func_ip() */ | |
66c84731 AN |
1666 | tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */ |
1667 | sleepable:1; /* BPF program is sleepable */ | |
d687f621 DK |
1668 | enum bpf_prog_type type; /* Type of BPF program */ |
1669 | enum bpf_attach_type expected_attach_type; /* For some prog types */ | |
1670 | u32 len; /* Number of filter blocks */ | |
1671 | u32 jited_len; /* Size of jited insns in bytes */ | |
1672 | u8 tag[BPF_TAG_SIZE]; | |
1673 | struct bpf_prog_stats __percpu *stats; | |
1674 | int __percpu *active; | |
1675 | unsigned int (*bpf_func)(const void *ctx, | |
1676 | const struct bpf_insn *insn); | |
1677 | struct bpf_prog_aux *aux; /* Auxiliary fields */ | |
1678 | struct sock_fprog_kern *orig_prog; /* Original BPF program */ | |
1679 | /* Instructions for interpreter */ | |
1680 | union { | |
1681 | DECLARE_FLEX_ARRAY(struct sock_filter, insns); | |
1682 | DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi); | |
1683 | }; | |
1684 | }; | |
1685 | ||
2beee5f5 | 1686 | struct bpf_array_aux { |
da765a2f DB |
1687 | /* Programs with direct jumps into programs part of this array. */ |
1688 | struct list_head poke_progs; | |
1689 | struct bpf_map *map; | |
1690 | struct mutex poke_mutex; | |
1691 | struct work_struct work; | |
2beee5f5 DB |
1692 | }; |
1693 | ||
6cc7d1e8 AN |
1694 | struct bpf_link { |
1695 | atomic64_t refcnt; | |
1696 | u32 id; | |
1697 | enum bpf_link_type type; | |
1698 | const struct bpf_link_ops *ops; | |
1699 | struct bpf_prog *prog; | |
61c6fefa AN |
1700 | /* whether BPF link itself has "sleepable" semantics, which can differ |
1701 | * from underlying BPF program having a "sleepable" semantics, as BPF | |
1702 | * link's semantics is determined by target attach hook | |
1703 | */ | |
1704 | bool sleepable; | |
1a80dbcb AN |
1705 | /* rcu is used before freeing, work can be used to schedule that |
1706 | * RCU-based freeing before that, so they never overlap | |
1707 | */ | |
1708 | union { | |
1709 | struct rcu_head rcu; | |
1710 | struct work_struct work; | |
1711 | }; | |
6cc7d1e8 AN |
1712 | }; |
1713 | ||
1714 | struct bpf_link_ops { | |
1715 | void (*release)(struct bpf_link *link); | |
1a80dbcb AN |
1716 | /* deallocate link resources callback, called without RCU grace period |
1717 | * waiting | |
1718 | */ | |
6cc7d1e8 | 1719 | void (*dealloc)(struct bpf_link *link); |
1a80dbcb | 1720 | /* deallocate link resources callback, called after RCU grace period; |
61c6fefa AN |
1721 | * if either the underlying BPF program is sleepable or BPF link's |
1722 | * target hook is sleepable, we'll go through tasks trace RCU GP and | |
1723 | * then "classic" RCU GP; this need for chaining tasks trace and | |
1724 | * classic RCU GPs is designated by setting bpf_link->sleepable flag | |
1a80dbcb AN |
1725 | */ |
1726 | void (*dealloc_deferred)(struct bpf_link *link); | |
73b11c2a | 1727 | int (*detach)(struct bpf_link *link); |
6cc7d1e8 AN |
1728 | int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, |
1729 | struct bpf_prog *old_prog); | |
1730 | void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); | |
1731 | int (*fill_link_info)(const struct bpf_link *link, | |
1732 | struct bpf_link_info *info); | |
aef56f2e KFL |
1733 | int (*update_map)(struct bpf_link *link, struct bpf_map *new_map, |
1734 | struct bpf_map *old_map); | |
1adddc97 | 1735 | __poll_t (*poll)(struct file *file, struct poll_table_struct *pts); |
6cc7d1e8 AN |
1736 | }; |
1737 | ||
f7e0beaf KFL |
1738 | struct bpf_tramp_link { |
1739 | struct bpf_link link; | |
1740 | struct hlist_node tramp_hlist; | |
2fcc8241 | 1741 | u64 cookie; |
f7e0beaf KFL |
1742 | }; |
1743 | ||
69fd337a SF |
1744 | struct bpf_shim_tramp_link { |
1745 | struct bpf_tramp_link link; | |
1746 | struct bpf_trampoline *trampoline; | |
1747 | }; | |
1748 | ||
f7e0beaf KFL |
1749 | struct bpf_tracing_link { |
1750 | struct bpf_tramp_link link; | |
1751 | enum bpf_attach_type attach_type; | |
1752 | struct bpf_trampoline *trampoline; | |
1753 | struct bpf_prog *tgt_prog; | |
1754 | }; | |
1755 | ||
d4dfc570 AN |
1756 | struct bpf_raw_tp_link { |
1757 | struct bpf_link link; | |
1758 | struct bpf_raw_event_map *btp; | |
68ca5d4e | 1759 | u64 cookie; |
d4dfc570 AN |
1760 | }; |
1761 | ||
6cc7d1e8 AN |
1762 | struct bpf_link_primer { |
1763 | struct bpf_link *link; | |
1764 | struct file *file; | |
1765 | int fd; | |
1766 | u32 id; | |
1767 | }; | |
1768 | ||
6fe01d3c AN |
1769 | struct bpf_mount_opts { |
1770 | kuid_t uid; | |
1771 | kgid_t gid; | |
1772 | umode_t mode; | |
1773 | ||
1774 | /* BPF token-related delegation options */ | |
1775 | u64 delegate_cmds; | |
1776 | u64 delegate_maps; | |
1777 | u64 delegate_progs; | |
1778 | u64 delegate_attachs; | |
1779 | }; | |
1780 | ||
35f96de0 AN |
1781 | struct bpf_token { |
1782 | struct work_struct work; | |
1783 | atomic64_t refcnt; | |
1784 | struct user_namespace *userns; | |
1785 | u64 allowed_cmds; | |
a177fc2b | 1786 | u64 allowed_maps; |
caf8f28e AN |
1787 | u64 allowed_progs; |
1788 | u64 allowed_attachs; | |
f568a3d4 AN |
1789 | #ifdef CONFIG_SECURITY |
1790 | void *security; | |
1791 | #endif | |
35f96de0 AN |
1792 | }; |
1793 | ||
85d33df3 | 1794 | struct bpf_struct_ops_value; |
27ae7997 MKL |
1795 | struct btf_member; |
1796 | ||
1797 | #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64 | |
bb48cf16 DV |
1798 | /** |
1799 | * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to | |
1800 | * define a BPF_MAP_TYPE_STRUCT_OPS map type composed | |
1801 | * of BPF_PROG_TYPE_STRUCT_OPS progs. | |
1802 | * @verifier_ops: A structure of callbacks that are invoked by the verifier | |
1803 | * when determining whether the struct_ops progs in the | |
1804 | * struct_ops map are valid. | |
1805 | * @init: A callback that is invoked a single time, and before any other | |
1806 | * callback, to initialize the structure. A nonzero return value means | |
1807 | * the subsystem could not be initialized. | |
1808 | * @check_member: When defined, a callback invoked by the verifier to allow | |
1809 | * the subsystem to determine if an entry in the struct_ops map | |
1810 | * is valid. A nonzero return value means that the map is | |
1811 | * invalid and should be rejected by the verifier. | |
1812 | * @init_member: A callback that is invoked for each member of the struct_ops | |
1813 | * map to allow the subsystem to initialize the member. A nonzero | |
1814 | * value means the member could not be initialized. This callback | |
1815 | * is exclusive with the @type, @type_id, @value_type, and | |
1816 | * @value_id fields. | |
1817 | * @reg: A callback that is invoked when the struct_ops map has been | |
1818 | * initialized and is being attached to. Zero means the struct_ops map | |
1819 | * has been successfully registered and is live. A nonzero return value | |
1820 | * means the struct_ops map could not be registered. | |
1821 | * @unreg: A callback that is invoked when the struct_ops map should be | |
1822 | * unregistered. | |
1823 | * @update: A callback that is invoked when the live struct_ops map is being | |
1824 | * updated to contain new values. This callback is only invoked when | |
1825 | * the struct_ops map is loaded with BPF_F_LINK. If not defined, the | |
1826 | * it is assumed that the struct_ops map cannot be updated. | |
1827 | * @validate: A callback that is invoked after all of the members have been | |
1828 | * initialized. This callback should perform static checks on the | |
1829 | * map, meaning that it should either fail or succeed | |
1830 | * deterministically. A struct_ops map that has been validated may | |
1831 | * not necessarily succeed in being registered if the call to @reg | |
1832 | * fails. For example, a valid struct_ops map may be loaded, but | |
1833 | * then fail to be registered due to there being another active | |
1834 | * struct_ops map on the system in the subsystem already. For this | |
1835 | * reason, if this callback is not defined, the check is skipped as | |
1836 | * the struct_ops map will have final verification performed in | |
1837 | * @reg. | |
1838 | * @type: BTF type. | |
1839 | * @value_type: Value type. | |
1840 | * @name: The name of the struct bpf_struct_ops object. | |
1841 | * @func_models: Func models | |
1842 | * @type_id: BTF type id. | |
1843 | * @value_id: BTF value id. | |
1844 | */ | |
27ae7997 MKL |
1845 | struct bpf_struct_ops { |
1846 | const struct bpf_verifier_ops *verifier_ops; | |
1847 | int (*init)(struct btf *btf); | |
1848 | int (*check_member)(const struct btf_type *t, | |
51a52a29 DV |
1849 | const struct btf_member *member, |
1850 | const struct bpf_prog *prog); | |
85d33df3 MKL |
1851 | int (*init_member)(const struct btf_type *t, |
1852 | const struct btf_member *member, | |
1853 | void *kdata, const void *udata); | |
73287fe2 KFL |
1854 | int (*reg)(void *kdata, struct bpf_link *link); |
1855 | void (*unreg)(void *kdata, struct bpf_link *link); | |
1856 | int (*update)(void *kdata, void *old_kdata, struct bpf_link *link); | |
68b04864 | 1857 | int (*validate)(void *kdata); |
4c5763ed | 1858 | void *cfi_stubs; |
e3f87fdf | 1859 | struct module *owner; |
27ae7997 MKL |
1860 | const char *name; |
1861 | struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; | |
4c5763ed KFL |
1862 | }; |
1863 | ||
16116035 KFL |
1864 | /* Every member of a struct_ops type has an instance even a member is not |
1865 | * an operator (function pointer). The "info" field will be assigned to | |
1866 | * prog->aux->ctx_arg_info of BPF struct_ops programs to provide the | |
1867 | * argument information required by the verifier to verify the program. | |
1868 | * | |
1869 | * btf_ctx_access() will lookup prog->aux->ctx_arg_info to find the | |
1870 | * corresponding entry for an given argument. | |
1871 | */ | |
1872 | struct bpf_struct_ops_arg_info { | |
1873 | struct bpf_ctx_arg_aux *info; | |
1874 | u32 cnt; | |
1875 | }; | |
1876 | ||
4c5763ed KFL |
1877 | struct bpf_struct_ops_desc { |
1878 | struct bpf_struct_ops *st_ops; | |
1879 | ||
1880 | const struct btf_type *type; | |
1881 | const struct btf_type *value_type; | |
27ae7997 | 1882 | u32 type_id; |
85d33df3 | 1883 | u32 value_id; |
16116035 KFL |
1884 | |
1885 | /* Collection of argument information for each member */ | |
1886 | struct bpf_struct_ops_arg_info *arg_info; | |
27ae7997 MKL |
1887 | }; |
1888 | ||
612d087d KFL |
1889 | enum bpf_struct_ops_state { |
1890 | BPF_STRUCT_OPS_STATE_INIT, | |
1891 | BPF_STRUCT_OPS_STATE_INUSE, | |
1892 | BPF_STRUCT_OPS_STATE_TOBEFREE, | |
1893 | BPF_STRUCT_OPS_STATE_READY, | |
1894 | }; | |
1895 | ||
1896 | struct bpf_struct_ops_common_value { | |
1897 | refcount_t refcnt; | |
1898 | enum bpf_struct_ops_state state; | |
1899 | }; | |
1900 | ||
27ae7997 | 1901 | #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) |
f6be98d1 KFL |
1902 | /* This macro helps developer to register a struct_ops type and generate |
1903 | * type information correctly. Developers should use this macro to register | |
1904 | * a struct_ops type instead of calling __register_bpf_struct_ops() directly. | |
1905 | */ | |
1906 | #define register_bpf_struct_ops(st_ops, type) \ | |
1907 | ({ \ | |
1908 | struct bpf_struct_ops_##type { \ | |
1909 | struct bpf_struct_ops_common_value common; \ | |
1910 | struct type data ____cacheline_aligned_in_smp; \ | |
1911 | }; \ | |
1912 | BTF_TYPE_EMIT(struct bpf_struct_ops_##type); \ | |
1913 | __register_bpf_struct_ops(st_ops); \ | |
1914 | }) | |
85d33df3 | 1915 | #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) |
85d33df3 MKL |
1916 | bool bpf_struct_ops_get(const void *kdata); |
1917 | void bpf_struct_ops_put(const void *kdata); | |
e42ac141 | 1918 | int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff); |
85d33df3 MKL |
1919 | int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, |
1920 | void *value); | |
f7e0beaf KFL |
1921 | int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, |
1922 | struct bpf_tramp_link *link, | |
31a645ae | 1923 | const struct btf_func_model *model, |
2cd3e377 | 1924 | void *stub_func, |
187e2af0 KFL |
1925 | void **image, u32 *image_off, |
1926 | bool allow_alloc); | |
1927 | void bpf_struct_ops_image_free(void *image); | |
85d33df3 MKL |
1928 | static inline bool bpf_try_module_get(const void *data, struct module *owner) |
1929 | { | |
1930 | if (owner == BPF_MODULE_OWNER) | |
1931 | return bpf_struct_ops_get(data); | |
1932 | else | |
1933 | return try_module_get(owner); | |
1934 | } | |
1935 | static inline void bpf_module_put(const void *data, struct module *owner) | |
1936 | { | |
1937 | if (owner == BPF_MODULE_OWNER) | |
1938 | bpf_struct_ops_put(data); | |
1939 | else | |
1940 | module_put(owner); | |
1941 | } | |
68b04864 | 1942 | int bpf_struct_ops_link_create(union bpf_attr *attr); |
c196906d HT |
1943 | |
1944 | #ifdef CONFIG_NET | |
1945 | /* Define it here to avoid the use of forward declaration */ | |
1946 | struct bpf_dummy_ops_state { | |
1947 | int val; | |
1948 | }; | |
1949 | ||
1950 | struct bpf_dummy_ops { | |
1951 | int (*test_1)(struct bpf_dummy_ops_state *cb); | |
1952 | int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2, | |
1953 | char a3, unsigned long a4); | |
7dd88059 | 1954 | int (*test_sleepable)(struct bpf_dummy_ops_state *cb); |
c196906d HT |
1955 | }; |
1956 | ||
1957 | int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, | |
1958 | union bpf_attr __user *uattr); | |
1959 | #endif | |
f6be98d1 KFL |
1960 | int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, |
1961 | struct btf *btf, | |
1962 | struct bpf_verifier_log *log); | |
1338b933 | 1963 | void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map); |
16116035 | 1964 | void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc); |
27ae7997 | 1965 | #else |
f6be98d1 | 1966 | #define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; }) |
85d33df3 MKL |
1967 | static inline bool bpf_try_module_get(const void *data, struct module *owner) |
1968 | { | |
1969 | return try_module_get(owner); | |
1970 | } | |
1971 | static inline void bpf_module_put(const void *data, struct module *owner) | |
1972 | { | |
1973 | module_put(owner); | |
1974 | } | |
e42ac141 MKL |
1975 | static inline int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff) |
1976 | { | |
1977 | return -ENOTSUPP; | |
1978 | } | |
85d33df3 MKL |
1979 | static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, |
1980 | void *key, | |
1981 | void *value) | |
1982 | { | |
1983 | return -EINVAL; | |
1984 | } | |
68b04864 KFL |
1985 | static inline int bpf_struct_ops_link_create(union bpf_attr *attr) |
1986 | { | |
1987 | return -EOPNOTSUPP; | |
1988 | } | |
1338b933 KFL |
1989 | static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map) |
1990 | { | |
1991 | } | |
68b04864 | 1992 | |
16116035 KFL |
1993 | static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc) |
1994 | { | |
1995 | } | |
1996 | ||
9cb61fda SF |
1997 | #endif |
1998 | ||
43205180 AH |
1999 | int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog, |
2000 | const struct bpf_ctx_arg_aux *info, u32 cnt); | |
2001 | ||
9cb61fda SF |
2002 | #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM) |
2003 | int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, | |
2004 | int cgroup_atype); | |
2005 | void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog); | |
2006 | #else | |
69fd337a SF |
2007 | static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, |
2008 | int cgroup_atype) | |
2009 | { | |
2010 | return -EOPNOTSUPP; | |
2011 | } | |
2012 | static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog) | |
2013 | { | |
2014 | } | |
27ae7997 MKL |
2015 | #endif |
2016 | ||
04fd61ab AS |
2017 | struct bpf_array { |
2018 | struct bpf_map map; | |
2019 | u32 elem_size; | |
b2157399 | 2020 | u32 index_mask; |
2beee5f5 | 2021 | struct bpf_array_aux *aux; |
04fd61ab | 2022 | union { |
129d868e KC |
2023 | DECLARE_FLEX_ARRAY(char, value) __aligned(8); |
2024 | DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8); | |
2025 | DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8); | |
04fd61ab AS |
2026 | }; |
2027 | }; | |
3b1efb19 | 2028 | |
c04c0d2b | 2029 | #define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ |
ebf7f6f0 | 2030 | #define MAX_TAIL_CALL_CNT 33 |
04fd61ab | 2031 | |
6018e1f4 AN |
2032 | /* Maximum number of loops for bpf_loop and bpf_iter_num. |
2033 | * It's enum to expose it (and thus make it discoverable) through BTF. | |
2034 | */ | |
2035 | enum { | |
2036 | BPF_MAX_LOOPS = 8 * 1024 * 1024, | |
e723608b | 2037 | BPF_MAX_TIMED_LOOPS = 0xffff, |
6018e1f4 | 2038 | }; |
1ade2371 | 2039 | |
591fe988 DB |
2040 | #define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ |
2041 | BPF_F_RDONLY_PROG | \ | |
2042 | BPF_F_WRONLY | \ | |
2043 | BPF_F_WRONLY_PROG) | |
2044 | ||
2045 | #define BPF_MAP_CAN_READ BIT(0) | |
2046 | #define BPF_MAP_CAN_WRITE BIT(1) | |
2047 | ||
20571567 DV |
2048 | /* Maximum number of user-producer ring buffer samples that can be drained in |
2049 | * a call to bpf_user_ringbuf_drain(). | |
2050 | */ | |
2051 | #define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024) | |
2052 | ||
591fe988 DB |
2053 | static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) |
2054 | { | |
2055 | u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); | |
2056 | ||
2057 | /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is | |
2058 | * not possible. | |
2059 | */ | |
2060 | if (access_flags & BPF_F_RDONLY_PROG) | |
2061 | return BPF_MAP_CAN_READ; | |
2062 | else if (access_flags & BPF_F_WRONLY_PROG) | |
2063 | return BPF_MAP_CAN_WRITE; | |
2064 | else | |
2065 | return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; | |
2066 | } | |
2067 | ||
2068 | static inline bool bpf_map_flags_access_ok(u32 access_flags) | |
2069 | { | |
2070 | return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != | |
2071 | (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); | |
2072 | } | |
2073 | ||
3b1efb19 DB |
2074 | struct bpf_event_entry { |
2075 | struct perf_event *event; | |
2076 | struct file *perf_file; | |
2077 | struct file *map_file; | |
2078 | struct rcu_head rcu; | |
2079 | }; | |
2080 | ||
f45d5b6c THJ |
2081 | static inline bool map_type_contains_progs(struct bpf_map *map) |
2082 | { | |
2083 | return map->map_type == BPF_MAP_TYPE_PROG_ARRAY || | |
2084 | map->map_type == BPF_MAP_TYPE_DEVMAP || | |
2085 | map->map_type == BPF_MAP_TYPE_CPUMAP; | |
2086 | } | |
2087 | ||
2088 | bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp); | |
f1f7714e | 2089 | int bpf_prog_calc_tag(struct bpf_prog *fp); |
bd570ff9 | 2090 | |
0756ea3e | 2091 | const struct bpf_func_proto *bpf_get_trace_printk_proto(void); |
10aceb62 | 2092 | const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void); |
555c8a86 | 2093 | |
ae0a457f ET |
2094 | const struct bpf_func_proto *bpf_get_perf_event_read_value_proto(void); |
2095 | ||
555c8a86 | 2096 | typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, |
aa7145c1 | 2097 | unsigned long off, unsigned long len); |
c64b7983 JS |
2098 | typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, |
2099 | const struct bpf_insn *src, | |
2100 | struct bpf_insn *dst, | |
2101 | struct bpf_prog *prog, | |
2102 | u32 *target_size); | |
555c8a86 DB |
2103 | |
2104 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, | |
2105 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); | |
04fd61ab | 2106 | |
324bda9e AS |
2107 | /* an array of programs to be executed under rcu_lock. |
2108 | * | |
2109 | * Typical usage: | |
055eb955 | 2110 | * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run); |
324bda9e AS |
2111 | * |
2112 | * the structure returned by bpf_prog_array_alloc() should be populated | |
2113 | * with program pointers and the last pointer must be NULL. | |
2114 | * The user has to keep refcnt on the program and make sure the program | |
2115 | * is removed from the array before bpf_prog_put(). | |
2116 | * The 'struct bpf_prog_array *' should only be replaced with xchg() | |
2117 | * since other cpus are walking the array of pointers in parallel. | |
2118 | */ | |
394e40a2 RG |
2119 | struct bpf_prog_array_item { |
2120 | struct bpf_prog *prog; | |
82e6b1ee AN |
2121 | union { |
2122 | struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; | |
2123 | u64 bpf_cookie; | |
2124 | }; | |
394e40a2 RG |
2125 | }; |
2126 | ||
324bda9e AS |
2127 | struct bpf_prog_array { |
2128 | struct rcu_head rcu; | |
d7f10df8 | 2129 | struct bpf_prog_array_item items[]; |
324bda9e AS |
2130 | }; |
2131 | ||
46531a30 PB |
2132 | struct bpf_empty_prog_array { |
2133 | struct bpf_prog_array hdr; | |
2134 | struct bpf_prog *null_prog; | |
2135 | }; | |
2136 | ||
2137 | /* to avoid allocating empty bpf_prog_array for cgroups that | |
2138 | * don't have bpf program attached use one global 'bpf_empty_prog_array' | |
2139 | * It will not be modified the caller of bpf_prog_array_alloc() | |
2140 | * (since caller requested prog_cnt == 0) | |
2141 | * that pointer should be 'freed' by bpf_prog_array_free() | |
2142 | */ | |
2143 | extern struct bpf_empty_prog_array bpf_empty_prog_array; | |
2144 | ||
d29ab6e1 | 2145 | struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); |
54e9c9d4 | 2146 | void bpf_prog_array_free(struct bpf_prog_array *progs); |
8c7dcb84 DK |
2147 | /* Use when traversal over the bpf_prog_array uses tasks_trace rcu */ |
2148 | void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs); | |
54e9c9d4 | 2149 | int bpf_prog_array_length(struct bpf_prog_array *progs); |
0d01da6a | 2150 | bool bpf_prog_array_is_empty(struct bpf_prog_array *array); |
54e9c9d4 | 2151 | int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, |
468e2f64 | 2152 | __u32 __user *prog_ids, u32 cnt); |
324bda9e | 2153 | |
54e9c9d4 | 2154 | void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, |
e87c6bc3 | 2155 | struct bpf_prog *old_prog); |
ce3aa9cc JS |
2156 | int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index); |
2157 | int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, | |
2158 | struct bpf_prog *prog); | |
54e9c9d4 | 2159 | int bpf_prog_array_copy_info(struct bpf_prog_array *array, |
3a38bb98 YS |
2160 | u32 *prog_ids, u32 request_cnt, |
2161 | u32 *prog_cnt); | |
54e9c9d4 | 2162 | int bpf_prog_array_copy(struct bpf_prog_array *old_array, |
e87c6bc3 YS |
2163 | struct bpf_prog *exclude_prog, |
2164 | struct bpf_prog *include_prog, | |
82e6b1ee | 2165 | u64 bpf_cookie, |
e87c6bc3 YS |
2166 | struct bpf_prog_array **new_array); |
2167 | ||
c7603cfa AN |
2168 | struct bpf_run_ctx {}; |
2169 | ||
2170 | struct bpf_cg_run_ctx { | |
2171 | struct bpf_run_ctx run_ctx; | |
7d08c2c9 | 2172 | const struct bpf_prog_array_item *prog_item; |
c4dcfdd4 | 2173 | int retval; |
c7603cfa AN |
2174 | }; |
2175 | ||
82e6b1ee AN |
2176 | struct bpf_trace_run_ctx { |
2177 | struct bpf_run_ctx run_ctx; | |
2178 | u64 bpf_cookie; | |
a3c485a5 | 2179 | bool is_uprobe; |
82e6b1ee AN |
2180 | }; |
2181 | ||
e384c7b7 KFL |
2182 | struct bpf_tramp_run_ctx { |
2183 | struct bpf_run_ctx run_ctx; | |
2184 | u64 bpf_cookie; | |
2185 | struct bpf_run_ctx *saved_run_ctx; | |
2186 | }; | |
2187 | ||
7d08c2c9 AN |
2188 | static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx) |
2189 | { | |
2190 | struct bpf_run_ctx *old_ctx = NULL; | |
2191 | ||
2192 | #ifdef CONFIG_BPF_SYSCALL | |
2193 | old_ctx = current->bpf_ctx; | |
2194 | current->bpf_ctx = new_ctx; | |
2195 | #endif | |
2196 | return old_ctx; | |
2197 | } | |
2198 | ||
2199 | static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx) | |
2200 | { | |
2201 | #ifdef CONFIG_BPF_SYSCALL | |
2202 | current->bpf_ctx = old_ctx; | |
2203 | #endif | |
2204 | } | |
2205 | ||
77241217 SF |
2206 | /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */ |
2207 | #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0) | |
2208 | /* BPF program asks to set CN on the packet. */ | |
2209 | #define BPF_RET_SET_CN (1 << 0) | |
2210 | ||
7d08c2c9 AN |
2211 | typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); |
2212 | ||
7d08c2c9 | 2213 | static __always_inline u32 |
055eb955 | 2214 | bpf_prog_run_array(const struct bpf_prog_array *array, |
7d08c2c9 AN |
2215 | const void *ctx, bpf_prog_run_fn run_prog) |
2216 | { | |
2217 | const struct bpf_prog_array_item *item; | |
2218 | const struct bpf_prog *prog; | |
82e6b1ee AN |
2219 | struct bpf_run_ctx *old_run_ctx; |
2220 | struct bpf_trace_run_ctx run_ctx; | |
7d08c2c9 AN |
2221 | u32 ret = 1; |
2222 | ||
055eb955 SF |
2223 | RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held"); |
2224 | ||
7d08c2c9 | 2225 | if (unlikely(!array)) |
055eb955 SF |
2226 | return ret; |
2227 | ||
a3c485a5 JO |
2228 | run_ctx.is_uprobe = false; |
2229 | ||
055eb955 | 2230 | migrate_disable(); |
82e6b1ee | 2231 | old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); |
7d08c2c9 AN |
2232 | item = &array->items[0]; |
2233 | while ((prog = READ_ONCE(item->prog))) { | |
82e6b1ee | 2234 | run_ctx.bpf_cookie = item->bpf_cookie; |
7d08c2c9 AN |
2235 | ret &= run_prog(prog, ctx); |
2236 | item++; | |
2237 | } | |
82e6b1ee | 2238 | bpf_reset_run_ctx(old_run_ctx); |
7d08c2c9 AN |
2239 | migrate_enable(); |
2240 | return ret; | |
2241 | } | |
324bda9e | 2242 | |
8c7dcb84 DK |
2243 | /* Notes on RCU design for bpf_prog_arrays containing sleepable programs: |
2244 | * | |
2245 | * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array | |
2246 | * overall. As a result, we must use the bpf_prog_array_free_sleepable | |
2247 | * in order to use the tasks_trace rcu grace period. | |
2248 | * | |
2249 | * When a non-sleepable program is inside the array, we take the rcu read | |
2250 | * section and disable preemption for that program alone, so it can access | |
2251 | * rcu-protected dynamically sized maps. | |
2252 | */ | |
2253 | static __always_inline u32 | |
7d0d6736 | 2254 | bpf_prog_run_array_uprobe(const struct bpf_prog_array *array, |
a3c485a5 | 2255 | const void *ctx, bpf_prog_run_fn run_prog) |
8c7dcb84 DK |
2256 | { |
2257 | const struct bpf_prog_array_item *item; | |
2258 | const struct bpf_prog *prog; | |
8c7dcb84 DK |
2259 | struct bpf_run_ctx *old_run_ctx; |
2260 | struct bpf_trace_run_ctx run_ctx; | |
2261 | u32 ret = 1; | |
2262 | ||
2263 | might_fault(); | |
7d0d6736 JH |
2264 | RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held"); |
2265 | ||
2266 | if (unlikely(!array)) | |
2267 | return ret; | |
8c7dcb84 | 2268 | |
8c7dcb84 DK |
2269 | migrate_disable(); |
2270 | ||
a3c485a5 JO |
2271 | run_ctx.is_uprobe = true; |
2272 | ||
8c7dcb84 DK |
2273 | old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); |
2274 | item = &array->items[0]; | |
2275 | while ((prog = READ_ONCE(item->prog))) { | |
66c84731 | 2276 | if (!prog->sleepable) |
8c7dcb84 DK |
2277 | rcu_read_lock(); |
2278 | ||
2279 | run_ctx.bpf_cookie = item->bpf_cookie; | |
2280 | ret &= run_prog(prog, ctx); | |
2281 | item++; | |
2282 | ||
66c84731 | 2283 | if (!prog->sleepable) |
8c7dcb84 DK |
2284 | rcu_read_unlock(); |
2285 | } | |
2286 | bpf_reset_run_ctx(old_run_ctx); | |
8c7dcb84 | 2287 | migrate_enable(); |
8c7dcb84 DK |
2288 | return ret; |
2289 | } | |
2290 | ||
89aa0758 | 2291 | #ifdef CONFIG_BPF_SYSCALL |
b121d1e7 | 2292 | DECLARE_PER_CPU(int, bpf_prog_active); |
d46edd67 | 2293 | extern struct mutex bpf_stats_enabled_mutex; |
b121d1e7 | 2294 | |
c518cfa0 TG |
2295 | /* |
2296 | * Block execution of BPF programs attached to instrumentation (perf, | |
2297 | * kprobes, tracepoints) to prevent deadlocks on map operations as any of | |
2298 | * these events can happen inside a region which holds a map bucket lock | |
2299 | * and can deadlock on it. | |
c518cfa0 TG |
2300 | */ |
2301 | static inline void bpf_disable_instrumentation(void) | |
2302 | { | |
2303 | migrate_disable(); | |
79364031 | 2304 | this_cpu_inc(bpf_prog_active); |
c518cfa0 TG |
2305 | } |
2306 | ||
2307 | static inline void bpf_enable_instrumentation(void) | |
2308 | { | |
79364031 | 2309 | this_cpu_dec(bpf_prog_active); |
c518cfa0 TG |
2310 | migrate_enable(); |
2311 | } | |
2312 | ||
35f96de0 | 2313 | extern const struct super_operations bpf_super_ops; |
f66e448c CF |
2314 | extern const struct file_operations bpf_map_fops; |
2315 | extern const struct file_operations bpf_prog_fops; | |
367ec3e4 | 2316 | extern const struct file_operations bpf_iter_fops; |
f66e448c | 2317 | |
91cc1a99 | 2318 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
7de16e3a JK |
2319 | extern const struct bpf_prog_ops _name ## _prog_ops; \ |
2320 | extern const struct bpf_verifier_ops _name ## _verifier_ops; | |
40077e0c JB |
2321 | #define BPF_MAP_TYPE(_id, _ops) \ |
2322 | extern const struct bpf_map_ops _ops; | |
f2e10bff | 2323 | #define BPF_LINK_TYPE(_id, _name) |
be9370a7 JB |
2324 | #include <linux/bpf_types.h> |
2325 | #undef BPF_PROG_TYPE | |
40077e0c | 2326 | #undef BPF_MAP_TYPE |
f2e10bff | 2327 | #undef BPF_LINK_TYPE |
0fc174de | 2328 | |
ab3f0063 | 2329 | extern const struct bpf_prog_ops bpf_offload_prog_ops; |
4f9218aa JK |
2330 | extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; |
2331 | extern const struct bpf_verifier_ops xdp_analyzer_ops; | |
2332 | ||
0fc174de | 2333 | struct bpf_prog *bpf_prog_get(u32 ufd); |
248f346f | 2334 | struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, |
288b3de5 | 2335 | bool attach_drv); |
85192dbf | 2336 | void bpf_prog_add(struct bpf_prog *prog, int i); |
c540594f | 2337 | void bpf_prog_sub(struct bpf_prog *prog, int i); |
85192dbf | 2338 | void bpf_prog_inc(struct bpf_prog *prog); |
a6f6df69 | 2339 | struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); |
61e021f3 DB |
2340 | void bpf_prog_put(struct bpf_prog *prog); |
2341 | ||
e7895f01 | 2342 | void bpf_prog_free_id(struct bpf_prog *prog); |
158e5e9e | 2343 | void bpf_map_free_id(struct bpf_map *map); |
ad8ad79f | 2344 | |
aa3496ac | 2345 | struct btf_field *btf_record_find(const struct btf_record *rec, |
74843b57 | 2346 | u32 offset, u32 field_mask); |
aa3496ac KKD |
2347 | void btf_record_free(struct btf_record *rec); |
2348 | void bpf_map_free_record(struct bpf_map *map); | |
2349 | struct btf_record *btf_record_dup(const struct btf_record *rec); | |
2350 | bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b); | |
db559117 | 2351 | void bpf_obj_free_timer(const struct btf_record *rec, void *obj); |
246331e3 | 2352 | void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj); |
aa3496ac | 2353 | void bpf_obj_free_fields(const struct btf_record *rec, void *obj); |
e383a459 | 2354 | void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu); |
61df10c7 | 2355 | |
1ed4d924 | 2356 | struct bpf_map *bpf_map_get(u32 ufd); |
c9da161c | 2357 | struct bpf_map *bpf_map_get_with_uref(u32 ufd); |
55f32595 | 2358 | |
4e885fab AP |
2359 | /* |
2360 | * The __bpf_map_get() and __btf_get_by_fd() functions parse a file | |
2361 | * descriptor and return a corresponding map or btf object. | |
2362 | * Their names are double underscored to emphasize the fact that they | |
2363 | * do not increase refcnt. To also increase refcnt use corresponding | |
2364 | * bpf_map_get() and btf_get_by_fd() functions. | |
2365 | */ | |
2366 | ||
55f32595 AV |
2367 | static inline struct bpf_map *__bpf_map_get(struct fd f) |
2368 | { | |
2369 | if (fd_empty(f)) | |
2370 | return ERR_PTR(-EBADF); | |
2371 | if (unlikely(fd_file(f)->f_op != &bpf_map_fops)) | |
2372 | return ERR_PTR(-EINVAL); | |
2373 | return fd_file(f)->private_data; | |
2374 | } | |
2375 | ||
4e885fab AP |
2376 | static inline struct btf *__btf_get_by_fd(struct fd f) |
2377 | { | |
2378 | if (fd_empty(f)) | |
2379 | return ERR_PTR(-EBADF); | |
2380 | if (unlikely(fd_file(f)->f_op != &btf_fops)) | |
2381 | return ERR_PTR(-EINVAL); | |
2382 | return fd_file(f)->private_data; | |
2383 | } | |
2384 | ||
1e0bd5a0 AN |
2385 | void bpf_map_inc(struct bpf_map *map); |
2386 | void bpf_map_inc_with_uref(struct bpf_map *map); | |
b671c206 | 2387 | struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref); |
1e0bd5a0 | 2388 | struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); |
c9da161c | 2389 | void bpf_map_put_with_uref(struct bpf_map *map); |
61e021f3 | 2390 | void bpf_map_put(struct bpf_map *map); |
196e8ca7 DB |
2391 | void *bpf_map_area_alloc(u64 size, int numa_node); |
2392 | void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); | |
d407bd25 | 2393 | void bpf_map_area_free(void *base); |
353050be | 2394 | bool bpf_map_write_active(const struct bpf_map *map); |
bd475643 | 2395 | void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); |
cb4d03ab BV |
2396 | int generic_map_lookup_batch(struct bpf_map *map, |
2397 | const union bpf_attr *attr, | |
aa2e93b8 | 2398 | union bpf_attr __user *uattr); |
3af43ba4 | 2399 | int generic_map_update_batch(struct bpf_map *map, struct file *map_file, |
aa2e93b8 BV |
2400 | const union bpf_attr *attr, |
2401 | union bpf_attr __user *uattr); | |
2402 | int generic_map_delete_batch(struct bpf_map *map, | |
2403 | const union bpf_attr *attr, | |
cb4d03ab | 2404 | union bpf_attr __user *uattr); |
6086d29d | 2405 | struct bpf_map *bpf_map_get_curr_or_next(u32 *id); |
a228a64f | 2406 | struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); |
61e021f3 | 2407 | |
c9eb8102 | 2408 | int bpf_map_alloc_pages(const struct bpf_map *map, int nid, |
31746031 | 2409 | unsigned long nr_pages, struct page **page_array); |
3a3b7fec | 2410 | #ifdef CONFIG_MEMCG |
48edc1f7 RG |
2411 | void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, |
2412 | int node); | |
2413 | void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags); | |
ddef81b5 YS |
2414 | void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, |
2415 | gfp_t flags); | |
48edc1f7 RG |
2416 | void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, |
2417 | size_t align, gfp_t flags); | |
2418 | #else | |
3b0ba54d SB |
2419 | /* |
2420 | * These specialized allocators have to be macros for their allocations to be | |
2421 | * accounted separately (to have separate alloc_tag). | |
2422 | */ | |
2c321f3f SB |
2423 | #define bpf_map_kmalloc_node(_map, _size, _flags, _node) \ |
2424 | kmalloc_node(_size, _flags, _node) | |
2425 | #define bpf_map_kzalloc(_map, _size, _flags) \ | |
2426 | kzalloc(_size, _flags) | |
2427 | #define bpf_map_kvcalloc(_map, _n, _size, _flags) \ | |
2428 | kvcalloc(_n, _size, _flags) | |
2429 | #define bpf_map_alloc_percpu(_map, _size, _align, _flags) \ | |
2430 | __alloc_percpu_gfp(_size, _align, _flags) | |
48edc1f7 RG |
2431 | #endif |
2432 | ||
25954730 AP |
2433 | static inline int |
2434 | bpf_map_init_elem_count(struct bpf_map *map) | |
2435 | { | |
2436 | size_t size = sizeof(*map->elem_count), align = size; | |
2437 | gfp_t flags = GFP_USER | __GFP_NOWARN; | |
2438 | ||
2439 | map->elem_count = bpf_map_alloc_percpu(map, size, align, flags); | |
2440 | if (!map->elem_count) | |
2441 | return -ENOMEM; | |
2442 | ||
2443 | return 0; | |
2444 | } | |
2445 | ||
2446 | static inline void | |
2447 | bpf_map_free_elem_count(struct bpf_map *map) | |
2448 | { | |
2449 | free_percpu(map->elem_count); | |
2450 | } | |
2451 | ||
2452 | static inline void bpf_map_inc_elem_count(struct bpf_map *map) | |
2453 | { | |
2454 | this_cpu_inc(*map->elem_count); | |
2455 | } | |
2456 | ||
2457 | static inline void bpf_map_dec_elem_count(struct bpf_map *map) | |
2458 | { | |
2459 | this_cpu_dec(*map->elem_count); | |
2460 | } | |
2461 | ||
1be7f75d AS |
2462 | extern int sysctl_unprivileged_bpf_disabled; |
2463 | ||
35f96de0 AN |
2464 | bool bpf_token_capable(const struct bpf_token *token, int cap); |
2465 | ||
d79a3549 | 2466 | static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token) |
2c78ee89 | 2467 | { |
d79a3549 | 2468 | return bpf_token_capable(token, CAP_PERFMON); |
2c78ee89 AS |
2469 | } |
2470 | ||
d79a3549 | 2471 | static inline bool bpf_allow_uninit_stack(const struct bpf_token *token) |
01f810ac | 2472 | { |
d79a3549 | 2473 | return bpf_token_capable(token, CAP_PERFMON); |
01f810ac AM |
2474 | } |
2475 | ||
d79a3549 | 2476 | static inline bool bpf_bypass_spec_v1(const struct bpf_token *token) |
2c78ee89 | 2477 | { |
d79a3549 | 2478 | return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON); |
2c78ee89 AS |
2479 | } |
2480 | ||
d79a3549 | 2481 | static inline bool bpf_bypass_spec_v4(const struct bpf_token *token) |
2c78ee89 | 2482 | { |
d79a3549 | 2483 | return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON); |
2c78ee89 AS |
2484 | } |
2485 | ||
6e71b04a | 2486 | int bpf_map_new_fd(struct bpf_map *map, int flags); |
b2197755 DB |
2487 | int bpf_prog_new_fd(struct bpf_prog *prog); |
2488 | ||
f2e10bff | 2489 | void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, |
a3b80e10 | 2490 | const struct bpf_link_ops *ops, struct bpf_prog *prog); |
61c6fefa AN |
2491 | void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, |
2492 | const struct bpf_link_ops *ops, struct bpf_prog *prog, | |
2493 | bool sleepable); | |
a3b80e10 AN |
2494 | int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); |
2495 | int bpf_link_settle(struct bpf_link_primer *primer); | |
2496 | void bpf_link_cleanup(struct bpf_link_primer *primer); | |
70ed506c | 2497 | void bpf_link_inc(struct bpf_link *link); |
67c3e835 | 2498 | struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link); |
70ed506c AN |
2499 | void bpf_link_put(struct bpf_link *link); |
2500 | int bpf_link_new_fd(struct bpf_link *link); | |
2501 | struct bpf_link *bpf_link_get_from_fd(u32 ufd); | |
9f883612 | 2502 | struct bpf_link *bpf_link_get_curr_or_next(u32 *id); |
70ed506c | 2503 | |
35f96de0 AN |
2504 | void bpf_token_inc(struct bpf_token *token); |
2505 | void bpf_token_put(struct bpf_token *token); | |
2506 | int bpf_token_create(union bpf_attr *attr); | |
2507 | struct bpf_token *bpf_token_get_from_fd(u32 ufd); | |
2508 | ||
2509 | bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd); | |
a177fc2b | 2510 | bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type); |
caf8f28e AN |
2511 | bool bpf_token_allow_prog_type(const struct bpf_token *token, |
2512 | enum bpf_prog_type prog_type, | |
2513 | enum bpf_attach_type attach_type); | |
35f96de0 | 2514 | |
cb8edce2 AN |
2515 | int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname); |
2516 | int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags); | |
35f96de0 AN |
2517 | struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir, |
2518 | umode_t mode); | |
b2197755 | 2519 | |
21aef70e | 2520 | #define BPF_ITER_FUNC_PREFIX "bpf_iter_" |
e5158d98 | 2521 | #define DEFINE_BPF_ITER_FUNC(target, args...) \ |
21aef70e YS |
2522 | extern int bpf_iter_ ## target(args); \ |
2523 | int __init bpf_iter_ ## target(args) { return 0; } | |
15d83c4d | 2524 | |
f0d74c4d KFL |
2525 | /* |
2526 | * The task type of iterators. | |
2527 | * | |
2528 | * For BPF task iterators, they can be parameterized with various | |
2529 | * parameters to visit only some of tasks. | |
2530 | * | |
2531 | * BPF_TASK_ITER_ALL (default) | |
2532 | * Iterate over resources of every task. | |
2533 | * | |
2534 | * BPF_TASK_ITER_TID | |
2535 | * Iterate over resources of a task/tid. | |
2536 | * | |
2537 | * BPF_TASK_ITER_TGID | |
2538 | * Iterate over resources of every task of a process / task group. | |
2539 | */ | |
2540 | enum bpf_iter_task_type { | |
2541 | BPF_TASK_ITER_ALL = 0, | |
2542 | BPF_TASK_ITER_TID, | |
2543 | BPF_TASK_ITER_TGID, | |
2544 | }; | |
2545 | ||
f9c79272 | 2546 | struct bpf_iter_aux_info { |
d4ccaf58 | 2547 | /* for map_elem iter */ |
a5cbe05a | 2548 | struct bpf_map *map; |
d4ccaf58 HL |
2549 | |
2550 | /* for cgroup iter */ | |
2551 | struct { | |
2552 | struct cgroup *start; /* starting cgroup */ | |
2553 | enum bpf_cgroup_iter_order order; | |
2554 | } cgroup; | |
f0d74c4d KFL |
2555 | struct { |
2556 | enum bpf_iter_task_type type; | |
2557 | u32 pid; | |
2558 | } task; | |
f9c79272 YS |
2559 | }; |
2560 | ||
5e7b3020 YS |
2561 | typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, |
2562 | union bpf_iter_link_info *linfo, | |
2563 | struct bpf_iter_aux_info *aux); | |
2564 | typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); | |
6b0a249a YS |
2565 | typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, |
2566 | struct seq_file *seq); | |
2567 | typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, | |
2568 | struct bpf_link_info *info); | |
3cee6fb8 MKL |
2569 | typedef const struct bpf_func_proto * |
2570 | (*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id, | |
2571 | const struct bpf_prog *prog); | |
a5cbe05a | 2572 | |
cf83b2d2 YS |
2573 | enum bpf_iter_feature { |
2574 | BPF_ITER_RESCHED = BIT(0), | |
2575 | }; | |
2576 | ||
3c32cc1b | 2577 | #define BPF_ITER_CTX_ARG_MAX 2 |
ae24345d YS |
2578 | struct bpf_iter_reg { |
2579 | const char *target; | |
5e7b3020 YS |
2580 | bpf_iter_attach_target_t attach_target; |
2581 | bpf_iter_detach_target_t detach_target; | |
6b0a249a YS |
2582 | bpf_iter_show_fdinfo_t show_fdinfo; |
2583 | bpf_iter_fill_link_info_t fill_link_info; | |
3cee6fb8 | 2584 | bpf_iter_get_func_proto_t get_func_proto; |
3c32cc1b | 2585 | u32 ctx_arg_info_size; |
cf83b2d2 | 2586 | u32 feature; |
3c32cc1b | 2587 | struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; |
14fc6bd6 | 2588 | const struct bpf_iter_seq_info *seq_info; |
ae24345d YS |
2589 | }; |
2590 | ||
e5158d98 YS |
2591 | struct bpf_iter_meta { |
2592 | __bpf_md_ptr(struct seq_file *, seq); | |
2593 | u64 session_id; | |
2594 | u64 seq_num; | |
2595 | }; | |
2596 | ||
a5cbe05a YS |
2597 | struct bpf_iter__bpf_map_elem { |
2598 | __bpf_md_ptr(struct bpf_iter_meta *, meta); | |
2599 | __bpf_md_ptr(struct bpf_map *, map); | |
2600 | __bpf_md_ptr(void *, key); | |
2601 | __bpf_md_ptr(void *, value); | |
2602 | }; | |
2603 | ||
15172a46 | 2604 | int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); |
ab2ee4fc | 2605 | void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); |
43205180 | 2606 | int bpf_iter_prog_supported(struct bpf_prog *prog); |
3cee6fb8 MKL |
2607 | const struct bpf_func_proto * |
2608 | bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); | |
af2ac3e1 | 2609 | int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog); |
ac51d99b | 2610 | int bpf_iter_new_fd(struct bpf_link *link); |
367ec3e4 | 2611 | bool bpf_link_is_iter(struct bpf_link *link); |
e5158d98 YS |
2612 | struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); |
2613 | int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); | |
b76f2226 YS |
2614 | void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, |
2615 | struct seq_file *seq); | |
2616 | int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, | |
2617 | struct bpf_link_info *info); | |
ae24345d | 2618 | |
314ee05e YS |
2619 | int map_set_for_each_callback_args(struct bpf_verifier_env *env, |
2620 | struct bpf_func_state *caller, | |
2621 | struct bpf_func_state *callee); | |
2622 | ||
15a07b33 AS |
2623 | int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); |
2624 | int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); | |
2625 | int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, | |
2626 | u64 flags); | |
2627 | int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, | |
2628 | u64 flags); | |
d056a788 | 2629 | |
557c0c6e | 2630 | int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); |
15a07b33 | 2631 | |
d056a788 DB |
2632 | int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, |
2633 | void *key, void *value, u64 map_flags); | |
14dc6f04 | 2634 | int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); |
bcc6b1b7 MKL |
2635 | int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, |
2636 | void *key, void *value, u64 map_flags); | |
14dc6f04 | 2637 | int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); |
d056a788 | 2638 | |
6e71b04a | 2639 | int bpf_get_file_flag(int flags); |
af2ac3e1 | 2640 | int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size, |
dcab51f1 | 2641 | size_t actual_size); |
6e71b04a | 2642 | |
61e021f3 | 2643 | /* verify correctness of eBPF program */ |
47a71c1f | 2644 | int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size); |
a643bff7 AN |
2645 | |
2646 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON | |
1ea47e01 | 2647 | void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); |
a643bff7 | 2648 | #endif |
46f55cff | 2649 | |
76654e67 AM |
2650 | struct btf *bpf_get_btf_vmlinux(void); |
2651 | ||
46f55cff | 2652 | /* Map specifics */ |
d53ad5d8 | 2653 | struct xdp_frame; |
6d5fc195 | 2654 | struct sk_buff; |
e6a4750f BT |
2655 | struct bpf_dtab_netdev; |
2656 | struct bpf_cpu_map_entry; | |
67f29e07 | 2657 | |
d839a731 | 2658 | void __dev_flush(struct list_head *flush_list); |
d53ad5d8 | 2659 | int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, |
1d233886 | 2660 | struct net_device *dev_rx); |
d53ad5d8 | 2661 | int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, |
38edddb8 | 2662 | struct net_device *dev_rx); |
d53ad5d8 | 2663 | int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx, |
e624d4ed | 2664 | struct bpf_map *map, bool exclude_ingress); |
6d5fc195 | 2665 | int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, |
7cd1107f | 2666 | const struct bpf_prog *xdp_prog); |
e624d4ed | 2667 | int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, |
7cd1107f AL |
2668 | const struct bpf_prog *xdp_prog, |
2669 | struct bpf_map *map, bool exclude_ingress); | |
46f55cff | 2670 | |
d839a731 | 2671 | void __cpu_map_flush(struct list_head *flush_list); |
d53ad5d8 | 2672 | int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, |
9c270af3 | 2673 | struct net_device *dev_rx); |
11941f8a KKD |
2674 | int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, |
2675 | struct sk_buff *skb); | |
9c270af3 | 2676 | |
96eabe7a MKL |
2677 | /* Return map's numa specified by userspace */ |
2678 | static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) | |
2679 | { | |
2680 | return (attr->map_flags & BPF_F_NUMA_NODE) ? | |
2681 | attr->numa_node : NUMA_NO_NODE; | |
2682 | } | |
2683 | ||
040ee692 | 2684 | struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); |
5dc4c4b7 | 2685 | int array_map_alloc_check(union bpf_attr *attr); |
040ee692 | 2686 | |
c695865c SF |
2687 | int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, |
2688 | union bpf_attr __user *uattr); | |
2689 | int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, | |
2690 | union bpf_attr __user *uattr); | |
da00d2f1 KS |
2691 | int bpf_prog_test_run_tracing(struct bpf_prog *prog, |
2692 | const union bpf_attr *kattr, | |
2693 | union bpf_attr __user *uattr); | |
c695865c SF |
2694 | int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, |
2695 | const union bpf_attr *kattr, | |
2696 | union bpf_attr __user *uattr); | |
1b4d60ec SL |
2697 | int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, |
2698 | const union bpf_attr *kattr, | |
2699 | union bpf_attr __user *uattr); | |
7c32e8f8 LB |
2700 | int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, |
2701 | const union bpf_attr *kattr, | |
2702 | union bpf_attr __user *uattr); | |
2b99ef22 FW |
2703 | int bpf_prog_test_run_nf(struct bpf_prog *prog, |
2704 | const union bpf_attr *kattr, | |
2705 | union bpf_attr __user *uattr); | |
9e15db66 AS |
2706 | bool btf_ctx_access(int off, int size, enum bpf_access_type type, |
2707 | const struct bpf_prog *prog, | |
2708 | struct bpf_insn_access_aux *info); | |
35346ab6 HT |
2709 | |
2710 | static inline bool bpf_tracing_ctx_access(int off, int size, | |
2711 | enum bpf_access_type type) | |
2712 | { | |
2713 | if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) | |
2714 | return false; | |
2715 | if (type != BPF_READ) | |
2716 | return false; | |
2717 | if (off % size != 0) | |
2718 | return false; | |
2719 | return true; | |
2720 | } | |
2721 | ||
2722 | static inline bool bpf_tracing_btf_ctx_access(int off, int size, | |
2723 | enum bpf_access_type type, | |
2724 | const struct bpf_prog *prog, | |
2725 | struct bpf_insn_access_aux *info) | |
2726 | { | |
2727 | if (!bpf_tracing_ctx_access(off, size, type)) | |
2728 | return false; | |
2729 | return btf_ctx_access(off, size, type, prog, info); | |
2730 | } | |
2731 | ||
6728aea7 KKD |
2732 | int btf_struct_access(struct bpf_verifier_log *log, |
2733 | const struct bpf_reg_state *reg, | |
2734 | int off, int size, enum bpf_access_type atype, | |
63260df1 | 2735 | u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name); |
faaf4a79 | 2736 | bool btf_struct_ids_match(struct bpf_verifier_log *log, |
22dc4a0f | 2737 | const struct btf *btf, u32 id, int off, |
2ab3b380 KKD |
2738 | const struct btf *need_btf, u32 need_type_id, |
2739 | bool strict); | |
9e15db66 | 2740 | |
fec56f58 AS |
2741 | int btf_distill_func_proto(struct bpf_verifier_log *log, |
2742 | struct btf *btf, | |
2743 | const struct btf_type *func_proto, | |
2744 | const char *func_name, | |
2745 | struct btf_func_model *m); | |
2746 | ||
51c39bb1 | 2747 | struct bpf_reg_state; |
4ba1d0f2 | 2748 | int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog); |
efc68158 | 2749 | int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, |
be8704ff | 2750 | struct btf *btf, const struct btf_type *t); |
b9ae0c9d KKD |
2751 | const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt, |
2752 | int comp_idx, const char *tag_key); | |
522bb2c1 AN |
2753 | int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt, |
2754 | int comp_idx, const char *tag_key, int last_id); | |
8c1b6e69 | 2755 | |
7e6897f9 | 2756 | struct bpf_prog *bpf_prog_by_id(u32 id); |
005142b8 | 2757 | struct bpf_link *bpf_link_by_id(u32 id); |
7e6897f9 | 2758 | |
bbc1d247 AN |
2759 | const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id, |
2760 | const struct bpf_prog *prog); | |
a10787e6 | 2761 | void bpf_task_storage_free(struct task_struct *task); |
c4bcfb38 | 2762 | void bpf_cgrp_storage_free(struct cgroup *cgroup); |
e6ac2450 MKL |
2763 | bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog); |
2764 | const struct btf_func_model * | |
2765 | bpf_jit_find_kfunc_model(const struct bpf_prog *prog, | |
2766 | const struct bpf_insn *insn); | |
1cf3bfc6 IL |
2767 | int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, |
2768 | u16 btf_fd_idx, u8 **func_addr); | |
2769 | ||
fbd94c7a AS |
2770 | struct bpf_core_ctx { |
2771 | struct bpf_verifier_log *log; | |
2772 | const struct btf *btf; | |
2773 | }; | |
2774 | ||
57539b1c DV |
2775 | bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, |
2776 | const struct bpf_reg_state *reg, | |
63260df1 | 2777 | const char *field_name, u32 btf_id, const char *suffix); |
57539b1c | 2778 | |
b613d335 DV |
2779 | bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, |
2780 | const struct btf *reg_btf, u32 reg_id, | |
2781 | const struct btf *arg_btf, u32 arg_id); | |
2782 | ||
fbd94c7a AS |
2783 | int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, |
2784 | int relo_idx, void *insn); | |
2785 | ||
44a3918c JP |
2786 | static inline bool unprivileged_ebpf_enabled(void) |
2787 | { | |
2788 | return !sysctl_unprivileged_bpf_disabled; | |
2789 | } | |
2790 | ||
24426654 MKL |
2791 | /* Not all bpf prog type has the bpf_ctx. |
2792 | * For the bpf prog type that has initialized the bpf_ctx, | |
2793 | * this function can be used to decide if a kernel function | |
2794 | * is called by a bpf program. | |
2795 | */ | |
2796 | static inline bool has_current_bpf_ctx(void) | |
2797 | { | |
2798 | return !!current->bpf_ctx; | |
2799 | } | |
05b24ff9 JO |
2800 | |
2801 | void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog); | |
8357b366 JK |
2802 | |
2803 | void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, | |
2804 | enum bpf_dynptr_type type, u32 offset, u32 size); | |
2805 | void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); | |
2806 | void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr); | |
9a675ba5 | 2807 | |
9c270af3 | 2808 | #else /* !CONFIG_BPF_SYSCALL */ |
0fc174de DB |
2809 | static inline struct bpf_prog *bpf_prog_get(u32 ufd) |
2810 | { | |
2811 | return ERR_PTR(-EOPNOTSUPP); | |
2812 | } | |
2813 | ||
248f346f JK |
2814 | static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, |
2815 | enum bpf_prog_type type, | |
288b3de5 | 2816 | bool attach_drv) |
248f346f JK |
2817 | { |
2818 | return ERR_PTR(-EOPNOTSUPP); | |
2819 | } | |
2820 | ||
85192dbf | 2821 | static inline void bpf_prog_add(struct bpf_prog *prog, int i) |
cc2e0b3f | 2822 | { |
cc2e0b3f | 2823 | } |
113214be | 2824 | |
c540594f DB |
2825 | static inline void bpf_prog_sub(struct bpf_prog *prog, int i) |
2826 | { | |
2827 | } | |
2828 | ||
0fc174de DB |
2829 | static inline void bpf_prog_put(struct bpf_prog *prog) |
2830 | { | |
2831 | } | |
6d67942d | 2832 | |
85192dbf | 2833 | static inline void bpf_prog_inc(struct bpf_prog *prog) |
aa6a5f3c | 2834 | { |
aa6a5f3c | 2835 | } |
5ccb071e | 2836 | |
a6f6df69 JF |
2837 | static inline struct bpf_prog *__must_check |
2838 | bpf_prog_inc_not_zero(struct bpf_prog *prog) | |
2839 | { | |
2840 | return ERR_PTR(-EOPNOTSUPP); | |
2841 | } | |
2842 | ||
6cc7d1e8 AN |
2843 | static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, |
2844 | const struct bpf_link_ops *ops, | |
2845 | struct bpf_prog *prog) | |
2846 | { | |
2847 | } | |
2848 | ||
61c6fefa AN |
2849 | static inline void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, |
2850 | const struct bpf_link_ops *ops, struct bpf_prog *prog, | |
2851 | bool sleepable) | |
2852 | { | |
2853 | } | |
2854 | ||
6cc7d1e8 AN |
2855 | static inline int bpf_link_prime(struct bpf_link *link, |
2856 | struct bpf_link_primer *primer) | |
2857 | { | |
2858 | return -EOPNOTSUPP; | |
2859 | } | |
2860 | ||
2861 | static inline int bpf_link_settle(struct bpf_link_primer *primer) | |
2862 | { | |
2863 | return -EOPNOTSUPP; | |
2864 | } | |
2865 | ||
2866 | static inline void bpf_link_cleanup(struct bpf_link_primer *primer) | |
2867 | { | |
2868 | } | |
2869 | ||
2870 | static inline void bpf_link_inc(struct bpf_link *link) | |
2871 | { | |
2872 | } | |
2873 | ||
67c3e835 KFL |
2874 | static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) |
2875 | { | |
2876 | return NULL; | |
2877 | } | |
2878 | ||
6cc7d1e8 AN |
2879 | static inline void bpf_link_put(struct bpf_link *link) |
2880 | { | |
2881 | } | |
2882 | ||
6e71b04a | 2883 | static inline int bpf_obj_get_user(const char __user *pathname, int flags) |
98589a09 SL |
2884 | { |
2885 | return -EOPNOTSUPP; | |
2886 | } | |
2887 | ||
35f96de0 AN |
2888 | static inline bool bpf_token_capable(const struct bpf_token *token, int cap) |
2889 | { | |
2890 | return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN)); | |
2891 | } | |
2892 | ||
2893 | static inline void bpf_token_inc(struct bpf_token *token) | |
2894 | { | |
2895 | } | |
2896 | ||
2897 | static inline void bpf_token_put(struct bpf_token *token) | |
2898 | { | |
2899 | } | |
2900 | ||
2901 | static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd) | |
2902 | { | |
2903 | return ERR_PTR(-EOPNOTSUPP); | |
2904 | } | |
2905 | ||
d839a731 | 2906 | static inline void __dev_flush(struct list_head *flush_list) |
46f55cff JF |
2907 | { |
2908 | } | |
9c270af3 | 2909 | |
d53ad5d8 | 2910 | struct xdp_frame; |
67f29e07 | 2911 | struct bpf_dtab_netdev; |
e6a4750f | 2912 | struct bpf_cpu_map_entry; |
67f29e07 | 2913 | |
1d233886 | 2914 | static inline |
d53ad5d8 | 2915 | int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, |
1d233886 THJ |
2916 | struct net_device *dev_rx) |
2917 | { | |
2918 | return 0; | |
2919 | } | |
2920 | ||
67f29e07 | 2921 | static inline |
d53ad5d8 | 2922 | int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, |
38edddb8 | 2923 | struct net_device *dev_rx) |
67f29e07 JDB |
2924 | { |
2925 | return 0; | |
2926 | } | |
2927 | ||
e624d4ed | 2928 | static inline |
d53ad5d8 | 2929 | int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx, |
e624d4ed HL |
2930 | struct bpf_map *map, bool exclude_ingress) |
2931 | { | |
2932 | return 0; | |
2933 | } | |
2934 | ||
6d5fc195 TM |
2935 | struct sk_buff; |
2936 | ||
2937 | static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, | |
2938 | struct sk_buff *skb, | |
7cd1107f | 2939 | const struct bpf_prog *xdp_prog) |
6d5fc195 TM |
2940 | { |
2941 | return 0; | |
2942 | } | |
2943 | ||
e624d4ed HL |
2944 | static inline |
2945 | int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, | |
7cd1107f AL |
2946 | const struct bpf_prog *xdp_prog, |
2947 | struct bpf_map *map, bool exclude_ingress) | |
e624d4ed HL |
2948 | { |
2949 | return 0; | |
2950 | } | |
2951 | ||
d839a731 | 2952 | static inline void __cpu_map_flush(struct list_head *flush_list) |
9c270af3 JDB |
2953 | { |
2954 | } | |
2955 | ||
9c270af3 | 2956 | static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, |
d53ad5d8 | 2957 | struct xdp_frame *xdpf, |
9c270af3 JDB |
2958 | struct net_device *dev_rx) |
2959 | { | |
2960 | return 0; | |
2961 | } | |
040ee692 | 2962 | |
11941f8a KKD |
2963 | static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, |
2964 | struct sk_buff *skb) | |
2965 | { | |
2966 | return -EOPNOTSUPP; | |
2967 | } | |
2968 | ||
040ee692 AV |
2969 | static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, |
2970 | enum bpf_prog_type type) | |
2971 | { | |
2972 | return ERR_PTR(-EOPNOTSUPP); | |
2973 | } | |
c695865c SF |
2974 | |
2975 | static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, | |
2976 | const union bpf_attr *kattr, | |
2977 | union bpf_attr __user *uattr) | |
2978 | { | |
2979 | return -ENOTSUPP; | |
2980 | } | |
2981 | ||
2982 | static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, | |
2983 | const union bpf_attr *kattr, | |
2984 | union bpf_attr __user *uattr) | |
2985 | { | |
2986 | return -ENOTSUPP; | |
2987 | } | |
2988 | ||
da00d2f1 KS |
2989 | static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, |
2990 | const union bpf_attr *kattr, | |
2991 | union bpf_attr __user *uattr) | |
2992 | { | |
2993 | return -ENOTSUPP; | |
2994 | } | |
2995 | ||
c695865c SF |
2996 | static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, |
2997 | const union bpf_attr *kattr, | |
2998 | union bpf_attr __user *uattr) | |
2999 | { | |
3000 | return -ENOTSUPP; | |
3001 | } | |
6332be04 | 3002 | |
7c32e8f8 LB |
3003 | static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, |
3004 | const union bpf_attr *kattr, | |
3005 | union bpf_attr __user *uattr) | |
3006 | { | |
3007 | return -ENOTSUPP; | |
3008 | } | |
3009 | ||
6332be04 DB |
3010 | static inline void bpf_map_put(struct bpf_map *map) |
3011 | { | |
3012 | } | |
7e6897f9 BT |
3013 | |
3014 | static inline struct bpf_prog *bpf_prog_by_id(u32 id) | |
3015 | { | |
3016 | return ERR_PTR(-ENOTSUPP); | |
3017 | } | |
6890896b | 3018 | |
d4f7bdb2 | 3019 | static inline int btf_struct_access(struct bpf_verifier_log *log, |
6728aea7 KKD |
3020 | const struct bpf_reg_state *reg, |
3021 | int off, int size, enum bpf_access_type atype, | |
63260df1 AS |
3022 | u32 *next_btf_id, enum bpf_type_flag *flag, |
3023 | const char **field_name) | |
d4f7bdb2 DX |
3024 | { |
3025 | return -EACCES; | |
3026 | } | |
3027 | ||
6890896b | 3028 | static inline const struct bpf_func_proto * |
bbc1d247 | 3029 | bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
6890896b SF |
3030 | { |
3031 | return NULL; | |
3032 | } | |
a10787e6 SL |
3033 | |
3034 | static inline void bpf_task_storage_free(struct task_struct *task) | |
3035 | { | |
3036 | } | |
e6ac2450 MKL |
3037 | |
3038 | static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) | |
3039 | { | |
3040 | return false; | |
3041 | } | |
3042 | ||
3043 | static inline const struct btf_func_model * | |
3044 | bpf_jit_find_kfunc_model(const struct bpf_prog *prog, | |
3045 | const struct bpf_insn *insn) | |
3046 | { | |
3047 | return NULL; | |
3048 | } | |
44a3918c | 3049 | |
1cf3bfc6 IL |
3050 | static inline int |
3051 | bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, | |
3052 | u16 btf_fd_idx, u8 **func_addr) | |
3053 | { | |
3054 | return -ENOTSUPP; | |
3055 | } | |
3056 | ||
44a3918c JP |
3057 | static inline bool unprivileged_ebpf_enabled(void) |
3058 | { | |
3059 | return false; | |
3060 | } | |
3061 | ||
24426654 MKL |
3062 | static inline bool has_current_bpf_ctx(void) |
3063 | { | |
3064 | return false; | |
3065 | } | |
05b24ff9 JO |
3066 | |
3067 | static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog) | |
3068 | { | |
3069 | } | |
c4bcfb38 YS |
3070 | |
3071 | static inline void bpf_cgrp_storage_free(struct cgroup *cgroup) | |
3072 | { | |
3073 | } | |
8357b366 JK |
3074 | |
3075 | static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, | |
3076 | enum bpf_dynptr_type type, u32 offset, u32 size) | |
3077 | { | |
3078 | } | |
3079 | ||
3080 | static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) | |
3081 | { | |
3082 | } | |
3083 | ||
3084 | static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr) | |
3085 | { | |
3086 | } | |
61e021f3 | 3087 | #endif /* CONFIG_BPF_SYSCALL */ |
09756af4 | 3088 | |
6a5a148a AB |
3089 | static __always_inline int |
3090 | bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) | |
3091 | { | |
3092 | int ret = -EFAULT; | |
3093 | ||
3094 | if (IS_ENABLED(CONFIG_BPF_EVENTS)) | |
3095 | ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); | |
3096 | if (unlikely(ret < 0)) | |
3097 | memset(dst, 0, size); | |
3098 | return ret; | |
3099 | } | |
3100 | ||
ab224b9e | 3101 | void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len); |
541c3bad | 3102 | |
479321e9 JK |
3103 | static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, |
3104 | enum bpf_prog_type type) | |
3105 | { | |
3106 | return bpf_prog_get_type_dev(ufd, type, false); | |
3107 | } | |
3108 | ||
936f8946 AN |
3109 | void __bpf_free_used_maps(struct bpf_prog_aux *aux, |
3110 | struct bpf_map **used_maps, u32 len); | |
3111 | ||
040ee692 AV |
3112 | bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); |
3113 | ||
ab3f0063 | 3114 | int bpf_prog_offload_compile(struct bpf_prog *prog); |
2b3486bc | 3115 | void bpf_prog_dev_bound_destroy(struct bpf_prog *prog); |
675fc275 JK |
3116 | int bpf_prog_offload_info_fill(struct bpf_prog_info *info, |
3117 | struct bpf_prog *prog); | |
ab3f0063 | 3118 | |
52775b33 JK |
3119 | int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); |
3120 | ||
a3884572 JK |
3121 | int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); |
3122 | int bpf_map_offload_update_elem(struct bpf_map *map, | |
3123 | void *key, void *value, u64 flags); | |
3124 | int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); | |
3125 | int bpf_map_offload_get_next_key(struct bpf_map *map, | |
3126 | void *key, void *next_key); | |
3127 | ||
09728266 | 3128 | bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); |
a3884572 | 3129 | |
1385d755 | 3130 | struct bpf_offload_dev * |
dd27c2e3 | 3131 | bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); |
602144c2 | 3132 | void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); |
dd27c2e3 | 3133 | void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); |
602144c2 JK |
3134 | int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, |
3135 | struct net_device *netdev); | |
3136 | void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, | |
3137 | struct net_device *netdev); | |
fd4f227d | 3138 | bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); |
9fd7c555 | 3139 | |
2147c438 JP |
3140 | void unpriv_ebpf_notify(int new_state); |
3141 | ||
ab3f0063 | 3142 | #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) |
3d76a4d3 SF |
3143 | int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, |
3144 | struct bpf_prog_aux *prog_aux); | |
3145 | void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id); | |
2b3486bc | 3146 | int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr); |
fd7c211d | 3147 | int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog); |
2b3486bc | 3148 | void bpf_dev_bound_netdev_unregister(struct net_device *dev); |
ab3f0063 | 3149 | |
0d830032 | 3150 | static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) |
2b3486bc SF |
3151 | { |
3152 | return aux->dev_bound; | |
3153 | } | |
ab3f0063 | 3154 | |
9d03ebc7 | 3155 | static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux) |
ab3f0063 | 3156 | { |
9a18eedb | 3157 | return aux->offload_requested; |
ab3f0063 | 3158 | } |
a3884572 | 3159 | |
fd7c211d THJ |
3160 | bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs); |
3161 | ||
9d03ebc7 | 3162 | static inline bool bpf_map_is_offloaded(struct bpf_map *map) |
a3884572 JK |
3163 | { |
3164 | return unlikely(map->ops == &bpf_map_offload_ops); | |
3165 | } | |
3166 | ||
3167 | struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); | |
3168 | void bpf_map_offload_map_free(struct bpf_map *map); | |
9629363c | 3169 | u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map); |
79a7f8bd AS |
3170 | int bpf_prog_test_run_syscall(struct bpf_prog *prog, |
3171 | const union bpf_attr *kattr, | |
3172 | union bpf_attr __user *uattr); | |
17edea21 CW |
3173 | |
3174 | int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); | |
3175 | int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); | |
3176 | int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); | |
748cd572 DZ |
3177 | int sock_map_bpf_prog_query(const union bpf_attr *attr, |
3178 | union bpf_attr __user *uattr); | |
699c23f0 | 3179 | int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog); |
748cd572 | 3180 | |
17edea21 | 3181 | void sock_map_unhash(struct sock *sk); |
d8616ee2 | 3182 | void sock_map_destroy(struct sock *sk); |
17edea21 | 3183 | void sock_map_close(struct sock *sk, long timeout); |
ab3f0063 | 3184 | #else |
3d76a4d3 SF |
3185 | static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, |
3186 | struct bpf_prog_aux *prog_aux) | |
3187 | { | |
3188 | return -EOPNOTSUPP; | |
3189 | } | |
3190 | ||
3191 | static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, | |
3192 | u32 func_id) | |
3193 | { | |
3194 | return NULL; | |
3195 | } | |
3196 | ||
2b3486bc | 3197 | static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog, |
3d76a4d3 | 3198 | union bpf_attr *attr) |
ab3f0063 JK |
3199 | { |
3200 | return -EOPNOTSUPP; | |
3201 | } | |
3202 | ||
fd7c211d THJ |
3203 | static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, |
3204 | struct bpf_prog *old_prog) | |
3205 | { | |
3206 | return -EOPNOTSUPP; | |
3207 | } | |
3208 | ||
2b3486bc SF |
3209 | static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev) |
3210 | { | |
3211 | } | |
3212 | ||
3213 | static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) | |
3214 | { | |
3215 | return false; | |
3216 | } | |
3217 | ||
9d03ebc7 | 3218 | static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux) |
ab3f0063 JK |
3219 | { |
3220 | return false; | |
3221 | } | |
a3884572 | 3222 | |
fd7c211d | 3223 | static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs) |
ab3f0063 JK |
3224 | { |
3225 | return false; | |
3226 | } | |
a3884572 | 3227 | |
9d03ebc7 | 3228 | static inline bool bpf_map_is_offloaded(struct bpf_map *map) |
a3884572 JK |
3229 | { |
3230 | return false; | |
3231 | } | |
3232 | ||
3233 | static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) | |
3234 | { | |
3235 | return ERR_PTR(-EOPNOTSUPP); | |
3236 | } | |
3237 | ||
3238 | static inline void bpf_map_offload_map_free(struct bpf_map *map) | |
3239 | { | |
3240 | } | |
79a7f8bd | 3241 | |
9629363c YS |
3242 | static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map) |
3243 | { | |
3244 | return 0; | |
3245 | } | |
3246 | ||
79a7f8bd AS |
3247 | static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, |
3248 | const union bpf_attr *kattr, | |
3249 | union bpf_attr __user *uattr) | |
3250 | { | |
3251 | return -ENOTSUPP; | |
3252 | } | |
fdb5c453 | 3253 | |
88759609 | 3254 | #ifdef CONFIG_BPF_SYSCALL |
604326b4 DB |
3255 | static inline int sock_map_get_from_fd(const union bpf_attr *attr, |
3256 | struct bpf_prog *prog) | |
fdb5c453 SY |
3257 | { |
3258 | return -EINVAL; | |
3259 | } | |
bb0de313 LB |
3260 | |
3261 | static inline int sock_map_prog_detach(const union bpf_attr *attr, | |
3262 | enum bpf_prog_type ptype) | |
3263 | { | |
3264 | return -EOPNOTSUPP; | |
3265 | } | |
13b79d3f LB |
3266 | |
3267 | static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, | |
3268 | u64 flags) | |
3269 | { | |
3270 | return -EOPNOTSUPP; | |
3271 | } | |
748cd572 DZ |
3272 | |
3273 | static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, | |
3274 | union bpf_attr __user *uattr) | |
3275 | { | |
3276 | return -EINVAL; | |
3277 | } | |
699c23f0 YS |
3278 | |
3279 | static inline int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog) | |
3280 | { | |
3281 | return -EOPNOTSUPP; | |
3282 | } | |
17edea21 CW |
3283 | #endif /* CONFIG_BPF_SYSCALL */ |
3284 | #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ | |
5dc4c4b7 | 3285 | |
dd865789 JO |
3286 | static __always_inline void |
3287 | bpf_prog_inc_misses_counters(const struct bpf_prog_array *array) | |
3288 | { | |
3289 | const struct bpf_prog_array_item *item; | |
3290 | struct bpf_prog *prog; | |
3291 | ||
3292 | if (unlikely(!array)) | |
3293 | return; | |
3294 | ||
3295 | item = &array->items[0]; | |
3296 | while ((prog = READ_ONCE(item->prog))) { | |
3297 | bpf_prog_inc_misses_counter(prog); | |
3298 | item++; | |
3299 | } | |
3300 | } | |
3301 | ||
17edea21 CW |
3302 | #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) |
3303 | void bpf_sk_reuseport_detach(struct sock *sk); | |
3304 | int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, | |
3305 | void *value); | |
3306 | int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, | |
3307 | void *value, u64 map_flags); | |
3308 | #else | |
3309 | static inline void bpf_sk_reuseport_detach(struct sock *sk) | |
3310 | { | |
3311 | } | |
5dc4c4b7 | 3312 | |
17edea21 | 3313 | #ifdef CONFIG_BPF_SYSCALL |
5dc4c4b7 MKL |
3314 | static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, |
3315 | void *key, void *value) | |
3316 | { | |
3317 | return -EOPNOTSUPP; | |
3318 | } | |
3319 | ||
3320 | static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, | |
3321 | void *key, void *value, | |
3322 | u64 map_flags) | |
3323 | { | |
3324 | return -EOPNOTSUPP; | |
3325 | } | |
3326 | #endif /* CONFIG_BPF_SYSCALL */ | |
3327 | #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ | |
3328 | ||
d0003ec0 | 3329 | /* verifier prototypes for helper functions called from eBPF programs */ |
a2c83fff DB |
3330 | extern const struct bpf_func_proto bpf_map_lookup_elem_proto; |
3331 | extern const struct bpf_func_proto bpf_map_update_elem_proto; | |
3332 | extern const struct bpf_func_proto bpf_map_delete_elem_proto; | |
f1a2e44a MV |
3333 | extern const struct bpf_func_proto bpf_map_push_elem_proto; |
3334 | extern const struct bpf_func_proto bpf_map_pop_elem_proto; | |
3335 | extern const struct bpf_func_proto bpf_map_peek_elem_proto; | |
07343110 | 3336 | extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto; |
d0003ec0 | 3337 | |
03e69b50 | 3338 | extern const struct bpf_func_proto bpf_get_prandom_u32_proto; |
c04167ce | 3339 | extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; |
2d0e30c3 | 3340 | extern const struct bpf_func_proto bpf_get_numa_node_id_proto; |
04fd61ab | 3341 | extern const struct bpf_func_proto bpf_tail_call_proto; |
17ca8cbf | 3342 | extern const struct bpf_func_proto bpf_ktime_get_ns_proto; |
71d19214 | 3343 | extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto; |
c8996c98 | 3344 | extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto; |
ffeedafb AS |
3345 | extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; |
3346 | extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; | |
3347 | extern const struct bpf_func_proto bpf_get_current_comm_proto; | |
d5a3b1f6 | 3348 | extern const struct bpf_func_proto bpf_get_stackid_proto; |
c195651e | 3349 | extern const struct bpf_func_proto bpf_get_stack_proto; |
d4dd9775 | 3350 | extern const struct bpf_func_proto bpf_get_stack_sleepable_proto; |
fa28dcb8 | 3351 | extern const struct bpf_func_proto bpf_get_task_stack_proto; |
d4dd9775 | 3352 | extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto; |
7b04d6d6 SL |
3353 | extern const struct bpf_func_proto bpf_get_stackid_proto_pe; |
3354 | extern const struct bpf_func_proto bpf_get_stack_proto_pe; | |
174a79ff | 3355 | extern const struct bpf_func_proto bpf_sock_map_update_proto; |
81110384 | 3356 | extern const struct bpf_func_proto bpf_sock_hash_update_proto; |
bf6fa2c8 | 3357 | extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; |
0f09abd1 | 3358 | extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; |
bed89185 | 3359 | extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto; |
7f628741 | 3360 | extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto; |
604326b4 DB |
3361 | extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; |
3362 | extern const struct bpf_func_proto bpf_msg_redirect_map_proto; | |
3363 | extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; | |
3364 | extern const struct bpf_func_proto bpf_sk_redirect_map_proto; | |
d83525ca AS |
3365 | extern const struct bpf_func_proto bpf_spin_lock_proto; |
3366 | extern const struct bpf_func_proto bpf_spin_unlock_proto; | |
cd339431 | 3367 | extern const struct bpf_func_proto bpf_get_local_storage_proto; |
d7a4cb9b AI |
3368 | extern const struct bpf_func_proto bpf_strtol_proto; |
3369 | extern const struct bpf_func_proto bpf_strtoul_proto; | |
0d01da6a | 3370 | extern const struct bpf_func_proto bpf_tcp_sock_proto; |
5576b991 | 3371 | extern const struct bpf_func_proto bpf_jiffies64_proto; |
b4490c5c | 3372 | extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; |
0456ea17 | 3373 | extern const struct bpf_func_proto bpf_event_output_data_proto; |
457f4436 AN |
3374 | extern const struct bpf_func_proto bpf_ringbuf_output_proto; |
3375 | extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; | |
3376 | extern const struct bpf_func_proto bpf_ringbuf_submit_proto; | |
3377 | extern const struct bpf_func_proto bpf_ringbuf_discard_proto; | |
3378 | extern const struct bpf_func_proto bpf_ringbuf_query_proto; | |
bc34dee6 JK |
3379 | extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto; |
3380 | extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto; | |
3381 | extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto; | |
af7ec138 | 3382 | extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; |
478cfbdf YS |
3383 | extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; |
3384 | extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; | |
3385 | extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; | |
0d4fad3e | 3386 | extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; |
9eeb3aa3 | 3387 | extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto; |
3bc253c2 | 3388 | extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto; |
07be4c4a | 3389 | extern const struct bpf_func_proto bpf_copy_from_user_proto; |
c4d0bfb4 | 3390 | extern const struct bpf_func_proto bpf_snprintf_btf_proto; |
7b15523a | 3391 | extern const struct bpf_func_proto bpf_snprintf_proto; |
eaa6bcb7 | 3392 | extern const struct bpf_func_proto bpf_per_cpu_ptr_proto; |
63d9b80d | 3393 | extern const struct bpf_func_proto bpf_this_cpu_ptr_proto; |
d0551261 | 3394 | extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto; |
b60da495 | 3395 | extern const struct bpf_func_proto bpf_sock_from_file_proto; |
c5dbb89f | 3396 | extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto; |
0593dd34 | 3397 | extern const struct bpf_func_proto bpf_task_storage_get_recur_proto; |
a10787e6 | 3398 | extern const struct bpf_func_proto bpf_task_storage_get_proto; |
0593dd34 | 3399 | extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto; |
a10787e6 | 3400 | extern const struct bpf_func_proto bpf_task_storage_delete_proto; |
69c087ba | 3401 | extern const struct bpf_func_proto bpf_for_each_map_elem_proto; |
3d78417b | 3402 | extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; |
3cee6fb8 MKL |
3403 | extern const struct bpf_func_proto bpf_sk_setsockopt_proto; |
3404 | extern const struct bpf_func_proto bpf_sk_getsockopt_proto; | |
9113d7e4 SF |
3405 | extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto; |
3406 | extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto; | |
7c7e3d31 | 3407 | extern const struct bpf_func_proto bpf_find_vma_proto; |
e6f2dd0f | 3408 | extern const struct bpf_func_proto bpf_loop_proto; |
376040e4 | 3409 | extern const struct bpf_func_proto bpf_copy_from_user_task_proto; |
69fd337a SF |
3410 | extern const struct bpf_func_proto bpf_set_retval_proto; |
3411 | extern const struct bpf_func_proto bpf_get_retval_proto; | |
20571567 | 3412 | extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto; |
c4bcfb38 YS |
3413 | extern const struct bpf_func_proto bpf_cgrp_storage_get_proto; |
3414 | extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto; | |
cd339431 | 3415 | |
958a3f2d JO |
3416 | const struct bpf_func_proto *tracing_prog_func_proto( |
3417 | enum bpf_func_id func_id, const struct bpf_prog *prog); | |
3418 | ||
3ad00405 DB |
3419 | /* Shared helpers among cBPF and eBPF. */ |
3420 | void bpf_user_rnd_init_once(void); | |
3421 | u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | |
6890896b | 3422 | u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
3ad00405 | 3423 | |
c64b7983 | 3424 | #if defined(CONFIG_NET) |
46f8bc92 MKL |
3425 | bool bpf_sock_common_is_valid_access(int off, int size, |
3426 | enum bpf_access_type type, | |
3427 | struct bpf_insn_access_aux *info); | |
c64b7983 JS |
3428 | bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, |
3429 | struct bpf_insn_access_aux *info); | |
3430 | u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, | |
3431 | const struct bpf_insn *si, | |
3432 | struct bpf_insn *insn_buf, | |
3433 | struct bpf_prog *prog, | |
3434 | u32 *target_size); | |
cce4c40b DX |
3435 | int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, |
3436 | struct bpf_dynptr *ptr); | |
c64b7983 | 3437 | #else |
46f8bc92 MKL |
3438 | static inline bool bpf_sock_common_is_valid_access(int off, int size, |
3439 | enum bpf_access_type type, | |
3440 | struct bpf_insn_access_aux *info) | |
3441 | { | |
3442 | return false; | |
3443 | } | |
c64b7983 JS |
3444 | static inline bool bpf_sock_is_valid_access(int off, int size, |
3445 | enum bpf_access_type type, | |
3446 | struct bpf_insn_access_aux *info) | |
3447 | { | |
3448 | return false; | |
3449 | } | |
3450 | static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, | |
3451 | const struct bpf_insn *si, | |
3452 | struct bpf_insn *insn_buf, | |
3453 | struct bpf_prog *prog, | |
3454 | u32 *target_size) | |
3455 | { | |
3456 | return 0; | |
3457 | } | |
cce4c40b DX |
3458 | static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, |
3459 | struct bpf_dynptr *ptr) | |
b5964b96 JK |
3460 | { |
3461 | return -EOPNOTSUPP; | |
3462 | } | |
c64b7983 JS |
3463 | #endif |
3464 | ||
655a51e5 | 3465 | #ifdef CONFIG_INET |
91cc1a99 AS |
3466 | struct sk_reuseport_kern { |
3467 | struct sk_buff *skb; | |
3468 | struct sock *sk; | |
3469 | struct sock *selected_sk; | |
d5e4ddae | 3470 | struct sock *migrating_sk; |
91cc1a99 AS |
3471 | void *data_end; |
3472 | u32 hash; | |
3473 | u32 reuseport_id; | |
3474 | bool bind_inany; | |
3475 | }; | |
655a51e5 MKL |
3476 | bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, |
3477 | struct bpf_insn_access_aux *info); | |
3478 | ||
3479 | u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, | |
3480 | const struct bpf_insn *si, | |
3481 | struct bpf_insn *insn_buf, | |
3482 | struct bpf_prog *prog, | |
3483 | u32 *target_size); | |
7f94208c Y |
3484 | |
3485 | bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, | |
3486 | struct bpf_insn_access_aux *info); | |
3487 | ||
3488 | u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, | |
3489 | const struct bpf_insn *si, | |
3490 | struct bpf_insn *insn_buf, | |
3491 | struct bpf_prog *prog, | |
3492 | u32 *target_size); | |
655a51e5 MKL |
3493 | #else |
3494 | static inline bool bpf_tcp_sock_is_valid_access(int off, int size, | |
3495 | enum bpf_access_type type, | |
3496 | struct bpf_insn_access_aux *info) | |
3497 | { | |
3498 | return false; | |
3499 | } | |
3500 | ||
3501 | static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, | |
3502 | const struct bpf_insn *si, | |
3503 | struct bpf_insn *insn_buf, | |
3504 | struct bpf_prog *prog, | |
3505 | u32 *target_size) | |
3506 | { | |
3507 | return 0; | |
3508 | } | |
7f94208c Y |
3509 | static inline bool bpf_xdp_sock_is_valid_access(int off, int size, |
3510 | enum bpf_access_type type, | |
3511 | struct bpf_insn_access_aux *info) | |
3512 | { | |
3513 | return false; | |
3514 | } | |
3515 | ||
3516 | static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, | |
3517 | const struct bpf_insn *si, | |
3518 | struct bpf_insn *insn_buf, | |
3519 | struct bpf_prog *prog, | |
3520 | u32 *target_size) | |
3521 | { | |
3522 | return 0; | |
3523 | } | |
655a51e5 MKL |
3524 | #endif /* CONFIG_INET */ |
3525 | ||
5964b200 | 3526 | enum bpf_text_poke_type { |
b553a6ec DB |
3527 | BPF_MOD_CALL, |
3528 | BPF_MOD_JUMP, | |
5964b200 | 3529 | }; |
4b3da77b | 3530 | |
5964b200 AS |
3531 | int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, |
3532 | void *addr1, void *addr2); | |
3533 | ||
4b7de801 JO |
3534 | void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, |
3535 | struct bpf_prog *new, struct bpf_prog *old); | |
3536 | ||
ebc1415d | 3537 | void *bpf_arch_text_copy(void *dst, void *src, size_t len); |
fe736565 | 3538 | int bpf_arch_text_invalidate(void *dst, size_t len); |
ebc1415d | 3539 | |
eae2e83e | 3540 | struct btf_id_set; |
2af30f11 | 3541 | bool btf_id_set_contains(const struct btf_id_set *set, u32 id); |
eae2e83e | 3542 | |
335ff499 | 3543 | #define MAX_BPRINTF_VARARGS 12 |
e2bb9e01 | 3544 | #define MAX_BPRINTF_BUF 1024 |
335ff499 | 3545 | |
78aa1cc9 JO |
3546 | struct bpf_bprintf_data { |
3547 | u32 *bin_args; | |
e2bb9e01 | 3548 | char *buf; |
78aa1cc9 | 3549 | bool get_bin_args; |
e2bb9e01 | 3550 | bool get_buf; |
78aa1cc9 JO |
3551 | }; |
3552 | ||
48cac3f4 | 3553 | int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, |
78aa1cc9 | 3554 | u32 num_args, struct bpf_bprintf_data *data); |
f19a4050 | 3555 | void bpf_bprintf_cleanup(struct bpf_bprintf_data *data); |
d9c9e4db | 3556 | |
c0e19f2c SF |
3557 | #ifdef CONFIG_BPF_LSM |
3558 | void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype); | |
3559 | void bpf_cgroup_atype_put(int cgroup_atype); | |
3560 | #else | |
3561 | static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {} | |
3562 | static inline void bpf_cgroup_atype_put(int cgroup_atype) {} | |
3563 | #endif /* CONFIG_BPF_LSM */ | |
3564 | ||
f3cf4134 RS |
3565 | struct key; |
3566 | ||
3567 | #ifdef CONFIG_KEYS | |
3568 | struct bpf_key { | |
3569 | struct key *key; | |
3570 | bool has_ref; | |
3571 | }; | |
3572 | #endif /* CONFIG_KEYS */ | |
282de143 KKD |
3573 | |
3574 | static inline bool type_is_alloc(u32 type) | |
3575 | { | |
3576 | return type & MEM_ALLOC; | |
3577 | } | |
3578 | ||
ee53cbfb YS |
3579 | static inline gfp_t bpf_memcg_flags(gfp_t flags) |
3580 | { | |
3581 | if (memcg_bpf_enabled()) | |
3582 | return flags | __GFP_ACCOUNT; | |
3583 | return flags; | |
3584 | } | |
3585 | ||
9af27da6 KKD |
3586 | static inline bool bpf_is_subprog(const struct bpf_prog *prog) |
3587 | { | |
3588 | return prog->aux->func_idx != 0; | |
3589 | } | |
3590 | ||
99c55f7d | 3591 | #endif /* _LINUX_BPF_H */ |