Commit | Line | Data |
---|---|---|
25763b3c | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
99c55f7d | 2 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
99c55f7d AS |
3 | */ |
4 | #ifndef _LINUX_BPF_H | |
5 | #define _LINUX_BPF_H 1 | |
6 | ||
7 | #include <uapi/linux/bpf.h> | |
d687f621 | 8 | #include <uapi/linux/filter.h> |
74451e66 | 9 | |
99c55f7d | 10 | #include <linux/workqueue.h> |
db20fd2b | 11 | #include <linux/file.h> |
b121d1e7 | 12 | #include <linux/percpu.h> |
002245cc | 13 | #include <linux/err.h> |
74451e66 | 14 | #include <linux/rbtree_latch.h> |
d6e1e46f | 15 | #include <linux/numa.h> |
fc970227 | 16 | #include <linux/mm_types.h> |
ab3f0063 | 17 | #include <linux/wait.h> |
fec56f58 AS |
18 | #include <linux/refcount.h> |
19 | #include <linux/mutex.h> | |
85d33df3 | 20 | #include <linux/module.h> |
bfea9a85 | 21 | #include <linux/kallsyms.h> |
2c78ee89 | 22 | #include <linux/capability.h> |
48edc1f7 RG |
23 | #include <linux/sched/mm.h> |
24 | #include <linux/slab.h> | |
e21aa341 | 25 | #include <linux/percpu-refcount.h> |
d687f621 | 26 | #include <linux/stddef.h> |
af2ac3e1 | 27 | #include <linux/bpfptr.h> |
14a324f6 | 28 | #include <linux/btf.h> |
8c7dcb84 | 29 | #include <linux/rcupdate_trace.h> |
c86df29d | 30 | #include <linux/static_call.h> |
ee53cbfb | 31 | #include <linux/memcontrol.h> |
4f9087f1 | 32 | #include <linux/cfi.h> |
99c55f7d | 33 | |
cae1927c | 34 | struct bpf_verifier_env; |
9e15db66 | 35 | struct bpf_verifier_log; |
3b1efb19 | 36 | struct perf_event; |
174a79ff | 37 | struct bpf_prog; |
da765a2f | 38 | struct bpf_prog_aux; |
99c55f7d | 39 | struct bpf_map; |
4f738adb | 40 | struct sock; |
a26ca7c9 | 41 | struct seq_file; |
1b2b234b | 42 | struct btf; |
e8d2bec0 | 43 | struct btf_type; |
3dec541b | 44 | struct exception_table_entry; |
ae24345d | 45 | struct seq_operations; |
f9c79272 | 46 | struct bpf_iter_aux_info; |
f836a56e KS |
47 | struct bpf_local_storage; |
48 | struct bpf_local_storage_map; | |
36e68442 | 49 | struct kobject; |
48edc1f7 | 50 | struct mem_cgroup; |
861de02e | 51 | struct module; |
69c087ba | 52 | struct bpf_func_state; |
00963a2e | 53 | struct ftrace_ops; |
d4ccaf58 | 54 | struct cgroup; |
99c55f7d | 55 | |
1b9ed84e QM |
56 | extern struct idr btf_idr; |
57 | extern spinlock_t btf_idr_lock; | |
36e68442 | 58 | extern struct kobject *btf_kobj; |
41a5db8d | 59 | extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma; |
1fda5bb6 | 60 | extern bool bpf_global_ma_set; |
1b9ed84e | 61 | |
102acbac | 62 | typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); |
f9c79272 YS |
63 | typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, |
64 | struct bpf_iter_aux_info *aux); | |
14fc6bd6 | 65 | typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); |
af3f4134 SF |
66 | typedef unsigned int (*bpf_func_t)(const void *, |
67 | const struct bpf_insn *); | |
14fc6bd6 YS |
68 | struct bpf_iter_seq_info { |
69 | const struct seq_operations *seq_ops; | |
70 | bpf_iter_init_seq_priv_t init_seq_private; | |
71 | bpf_iter_fini_seq_priv_t fini_seq_private; | |
72 | u32 seq_priv_size; | |
73 | }; | |
74 | ||
5d903493 | 75 | /* map is generic key/value storage optionally accessible by eBPF programs */ |
99c55f7d AS |
76 | struct bpf_map_ops { |
77 | /* funcs callable from userspace (via syscall) */ | |
1110f3a9 | 78 | int (*map_alloc_check)(union bpf_attr *attr); |
99c55f7d | 79 | struct bpf_map *(*map_alloc)(union bpf_attr *attr); |
61d1b6a4 DB |
80 | void (*map_release)(struct bpf_map *map, struct file *map_file); |
81 | void (*map_free)(struct bpf_map *map); | |
db20fd2b | 82 | int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); |
ba6b8de4 | 83 | void (*map_release_uref)(struct bpf_map *map); |
c6110222 | 84 | void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); |
cb4d03ab BV |
85 | int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, |
86 | union bpf_attr __user *uattr); | |
3e87f192 DS |
87 | int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key, |
88 | void *value, u64 flags); | |
05799638 YS |
89 | int (*map_lookup_and_delete_batch)(struct bpf_map *map, |
90 | const union bpf_attr *attr, | |
91 | union bpf_attr __user *uattr); | |
3af43ba4 HT |
92 | int (*map_update_batch)(struct bpf_map *map, struct file *map_file, |
93 | const union bpf_attr *attr, | |
aa2e93b8 BV |
94 | union bpf_attr __user *uattr); |
95 | int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, | |
96 | union bpf_attr __user *uattr); | |
db20fd2b AS |
97 | |
98 | /* funcs callable from userspace and from eBPF programs */ | |
99 | void *(*map_lookup_elem)(struct bpf_map *map, void *key); | |
d7ba4cc9 JK |
100 | long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); |
101 | long (*map_delete_elem)(struct bpf_map *map, void *key); | |
102 | long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); | |
103 | long (*map_pop_elem)(struct bpf_map *map, void *value); | |
104 | long (*map_peek_elem)(struct bpf_map *map, void *value); | |
07343110 | 105 | void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu); |
2a36f0b9 WN |
106 | |
107 | /* funcs called by prog_array and perf_event_array map */ | |
d056a788 DB |
108 | void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, |
109 | int fd); | |
20c20bd1 HT |
110 | /* If need_defer is true, the implementation should guarantee that |
111 | * the to-be-put element is still alive before the bpf program, which | |
112 | * may manipulate it, exists. | |
113 | */ | |
114 | void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer); | |
4a8f87e6 | 115 | int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); |
14dc6f04 | 116 | u32 (*map_fd_sys_lookup_elem)(void *ptr); |
a26ca7c9 MKL |
117 | void (*map_seq_show_elem)(struct bpf_map *map, void *key, |
118 | struct seq_file *m); | |
e8d2bec0 | 119 | int (*map_check_btf)(const struct bpf_map *map, |
1b2b234b | 120 | const struct btf *btf, |
e8d2bec0 DB |
121 | const struct btf_type *key_type, |
122 | const struct btf_type *value_type); | |
d8eca5bb | 123 | |
da765a2f DB |
124 | /* Prog poke tracking helpers. */ |
125 | int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux); | |
126 | void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux); | |
127 | void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, | |
128 | struct bpf_prog *new); | |
129 | ||
d8eca5bb DB |
130 | /* Direct value access helpers. */ |
131 | int (*map_direct_value_addr)(const struct bpf_map *map, | |
132 | u64 *imm, u32 off); | |
133 | int (*map_direct_value_meta)(const struct bpf_map *map, | |
134 | u64 imm, u32 *off); | |
fc970227 | 135 | int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); |
457f4436 AN |
136 | __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, |
137 | struct poll_table_struct *pts); | |
41c48f3a | 138 | |
f836a56e KS |
139 | /* Functions called by bpf_local_storage maps */ |
140 | int (*map_local_storage_charge)(struct bpf_local_storage_map *smap, | |
141 | void *owner, u32 size); | |
142 | void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap, | |
143 | void *owner, u32 size); | |
144 | struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner); | |
f4d05259 | 145 | |
e6a4750f | 146 | /* Misc helpers.*/ |
d7ba4cc9 | 147 | long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags); |
e6a4750f | 148 | |
f4d05259 MKL |
149 | /* map_meta_equal must be implemented for maps that can be |
150 | * used as an inner map. It is a runtime check to ensure | |
151 | * an inner map can be inserted to an outer map. | |
152 | * | |
153 | * Some properties of the inner map has been used during the | |
154 | * verification time. When inserting an inner map at the runtime, | |
155 | * map_meta_equal has to ensure the inserting map has the same | |
156 | * properties that the verifier has used earlier. | |
157 | */ | |
158 | bool (*map_meta_equal)(const struct bpf_map *meta0, | |
159 | const struct bpf_map *meta1); | |
160 | ||
69c087ba YS |
161 | |
162 | int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env, | |
163 | struct bpf_func_state *caller, | |
164 | struct bpf_func_state *callee); | |
d7ba4cc9 | 165 | long (*map_for_each_callback)(struct bpf_map *map, |
102acbac | 166 | bpf_callback_t callback_fn, |
69c087ba YS |
167 | void *callback_ctx, u64 flags); |
168 | ||
90a5527d YS |
169 | u64 (*map_mem_usage)(const struct bpf_map *map); |
170 | ||
c317ab71 | 171 | /* BTF id of struct allocated by map_alloc */ |
41c48f3a | 172 | int *map_btf_id; |
a5cbe05a YS |
173 | |
174 | /* bpf_iter info used to open a seq_file */ | |
175 | const struct bpf_iter_seq_info *iter_seq_info; | |
99c55f7d AS |
176 | }; |
177 | ||
61df10c7 | 178 | enum { |
2d577252 KKD |
179 | /* Support at most 10 fields in a BTF type */ |
180 | BTF_FIELDS_MAX = 10, | |
61df10c7 KKD |
181 | }; |
182 | ||
aa3496ac | 183 | enum btf_field_type { |
db559117 KKD |
184 | BPF_SPIN_LOCK = (1 << 0), |
185 | BPF_TIMER = (1 << 1), | |
aa3496ac KKD |
186 | BPF_KPTR_UNREF = (1 << 2), |
187 | BPF_KPTR_REF = (1 << 3), | |
55db92f4 YS |
188 | BPF_KPTR_PERCPU = (1 << 4), |
189 | BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU, | |
190 | BPF_LIST_HEAD = (1 << 5), | |
191 | BPF_LIST_NODE = (1 << 6), | |
192 | BPF_RB_ROOT = (1 << 7), | |
193 | BPF_RB_NODE = (1 << 8), | |
790ce3cf DM |
194 | BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE, |
195 | BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD, | |
55db92f4 | 196 | BPF_REFCOUNT = (1 << 9), |
c0a5a21c KKD |
197 | }; |
198 | ||
c8e18754 | 199 | typedef void (*btf_dtor_kfunc_t)(void *); |
c8e18754 | 200 | |
aa3496ac KKD |
201 | struct btf_field_kptr { |
202 | struct btf *btf; | |
203 | struct module *module; | |
9e36a204 DM |
204 | /* dtor used if btf_is_kernel(btf), otherwise the type is |
205 | * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used | |
206 | */ | |
207 | btf_dtor_kfunc_t dtor; | |
aa3496ac KKD |
208 | u32 btf_id; |
209 | }; | |
210 | ||
30465003 | 211 | struct btf_field_graph_root { |
f0c5941f KKD |
212 | struct btf *btf; |
213 | u32 value_btf_id; | |
214 | u32 node_offset; | |
865ce09a | 215 | struct btf_record *value_rec; |
f0c5941f KKD |
216 | }; |
217 | ||
aa3496ac | 218 | struct btf_field { |
61df10c7 | 219 | u32 offset; |
cd2a8079 | 220 | u32 size; |
aa3496ac KKD |
221 | enum btf_field_type type; |
222 | union { | |
223 | struct btf_field_kptr kptr; | |
30465003 | 224 | struct btf_field_graph_root graph_root; |
aa3496ac | 225 | }; |
61df10c7 KKD |
226 | }; |
227 | ||
aa3496ac KKD |
228 | struct btf_record { |
229 | u32 cnt; | |
230 | u32 field_mask; | |
db559117 KKD |
231 | int spin_lock_off; |
232 | int timer_off; | |
d54730b5 | 233 | int refcount_off; |
aa3496ac | 234 | struct btf_field fields[]; |
61df10c7 KKD |
235 | }; |
236 | ||
0a1f7bfe DM |
237 | /* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */ |
238 | struct bpf_rb_node_kern { | |
239 | struct rb_node rb_node; | |
c3c510ce | 240 | void *owner; |
0a1f7bfe DM |
241 | } __attribute__((aligned(8))); |
242 | ||
243 | /* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */ | |
244 | struct bpf_list_node_kern { | |
245 | struct list_head list_head; | |
c3c510ce | 246 | void *owner; |
0a1f7bfe DM |
247 | } __attribute__((aligned(8))); |
248 | ||
99c55f7d | 249 | struct bpf_map { |
a26ca7c9 | 250 | /* The first two cachelines with read-mostly members of which some |
be95a845 DB |
251 | * are also accessed in fast-path (e.g. ops, max_entries). |
252 | */ | |
253 | const struct bpf_map_ops *ops ____cacheline_aligned; | |
254 | struct bpf_map *inner_map_meta; | |
255 | #ifdef CONFIG_SECURITY | |
256 | void *security; | |
257 | #endif | |
99c55f7d AS |
258 | enum bpf_map_type map_type; |
259 | u32 key_size; | |
260 | u32 value_size; | |
261 | u32 max_entries; | |
9330986c | 262 | u64 map_extra; /* any per-map-type extra fields */ |
6c905981 | 263 | u32 map_flags; |
f3f1c054 | 264 | u32 id; |
db559117 | 265 | struct btf_record *record; |
96eabe7a | 266 | int numa_node; |
9b2cf328 MKL |
267 | u32 btf_key_type_id; |
268 | u32 btf_value_type_id; | |
8845b468 | 269 | u32 btf_vmlinux_value_type_id; |
a26ca7c9 | 270 | struct btf *btf; |
48edc1f7 | 271 | #ifdef CONFIG_MEMCG_KMEM |
4201d9ab | 272 | struct obj_cgroup *objcg; |
48edc1f7 | 273 | #endif |
fc970227 | 274 | char name[BPF_OBJ_NAME_LEN]; |
a26ca7c9 | 275 | /* The 3rd and 4th cacheline with misc members to avoid false sharing |
be95a845 DB |
276 | * particularly with refcounting. |
277 | */ | |
1e0bd5a0 AN |
278 | atomic64_t refcnt ____cacheline_aligned; |
279 | atomic64_t usercnt; | |
87667336 HT |
280 | /* rcu is used before freeing and work is only used during freeing */ |
281 | union { | |
282 | struct work_struct work; | |
283 | struct rcu_head rcu; | |
284 | }; | |
fc970227 | 285 | struct mutex freeze_mutex; |
353050be | 286 | atomic64_t writecnt; |
f45d5b6c THJ |
287 | /* 'Ownership' of program-containing map is claimed by the first program |
288 | * that is going to use this map or by the first program which FD is | |
289 | * stored in the map to make sure that all callers and callees have the | |
290 | * same prog type, JITed flag and xdp_has_frags flag. | |
291 | */ | |
292 | struct { | |
293 | spinlock_t lock; | |
294 | enum bpf_prog_type type; | |
295 | bool jited; | |
296 | bool xdp_has_frags; | |
297 | } owner; | |
4d7d7f69 KKD |
298 | bool bypass_spec_v1; |
299 | bool frozen; /* write-once; write-protected by freeze_mutex */ | |
87667336 | 300 | bool free_after_mult_rcu_gp; |
af66bfd3 HT |
301 | bool free_after_rcu_gp; |
302 | atomic64_t sleepable_refcnt; | |
25954730 | 303 | s64 __percpu *elem_count; |
99c55f7d AS |
304 | }; |
305 | ||
db559117 KKD |
306 | static inline const char *btf_field_type_name(enum btf_field_type type) |
307 | { | |
308 | switch (type) { | |
309 | case BPF_SPIN_LOCK: | |
310 | return "bpf_spin_lock"; | |
311 | case BPF_TIMER: | |
312 | return "bpf_timer"; | |
313 | case BPF_KPTR_UNREF: | |
314 | case BPF_KPTR_REF: | |
315 | return "kptr"; | |
55db92f4 YS |
316 | case BPF_KPTR_PERCPU: |
317 | return "percpu_kptr"; | |
f0c5941f KKD |
318 | case BPF_LIST_HEAD: |
319 | return "bpf_list_head"; | |
8ffa5cc1 KKD |
320 | case BPF_LIST_NODE: |
321 | return "bpf_list_node"; | |
9c395c1b DM |
322 | case BPF_RB_ROOT: |
323 | return "bpf_rb_root"; | |
324 | case BPF_RB_NODE: | |
325 | return "bpf_rb_node"; | |
d54730b5 DM |
326 | case BPF_REFCOUNT: |
327 | return "bpf_refcount"; | |
db559117 KKD |
328 | default: |
329 | WARN_ON_ONCE(1); | |
330 | return "unknown"; | |
331 | } | |
332 | } | |
333 | ||
aa3496ac KKD |
334 | static inline u32 btf_field_type_size(enum btf_field_type type) |
335 | { | |
336 | switch (type) { | |
db559117 KKD |
337 | case BPF_SPIN_LOCK: |
338 | return sizeof(struct bpf_spin_lock); | |
339 | case BPF_TIMER: | |
340 | return sizeof(struct bpf_timer); | |
aa3496ac KKD |
341 | case BPF_KPTR_UNREF: |
342 | case BPF_KPTR_REF: | |
55db92f4 | 343 | case BPF_KPTR_PERCPU: |
aa3496ac | 344 | return sizeof(u64); |
f0c5941f KKD |
345 | case BPF_LIST_HEAD: |
346 | return sizeof(struct bpf_list_head); | |
8ffa5cc1 KKD |
347 | case BPF_LIST_NODE: |
348 | return sizeof(struct bpf_list_node); | |
9c395c1b DM |
349 | case BPF_RB_ROOT: |
350 | return sizeof(struct bpf_rb_root); | |
351 | case BPF_RB_NODE: | |
352 | return sizeof(struct bpf_rb_node); | |
d54730b5 DM |
353 | case BPF_REFCOUNT: |
354 | return sizeof(struct bpf_refcount); | |
aa3496ac KKD |
355 | default: |
356 | WARN_ON_ONCE(1); | |
357 | return 0; | |
358 | } | |
359 | } | |
360 | ||
361 | static inline u32 btf_field_type_align(enum btf_field_type type) | |
362 | { | |
363 | switch (type) { | |
db559117 KKD |
364 | case BPF_SPIN_LOCK: |
365 | return __alignof__(struct bpf_spin_lock); | |
366 | case BPF_TIMER: | |
367 | return __alignof__(struct bpf_timer); | |
aa3496ac KKD |
368 | case BPF_KPTR_UNREF: |
369 | case BPF_KPTR_REF: | |
55db92f4 | 370 | case BPF_KPTR_PERCPU: |
aa3496ac | 371 | return __alignof__(u64); |
f0c5941f KKD |
372 | case BPF_LIST_HEAD: |
373 | return __alignof__(struct bpf_list_head); | |
8ffa5cc1 KKD |
374 | case BPF_LIST_NODE: |
375 | return __alignof__(struct bpf_list_node); | |
9c395c1b DM |
376 | case BPF_RB_ROOT: |
377 | return __alignof__(struct bpf_rb_root); | |
378 | case BPF_RB_NODE: | |
379 | return __alignof__(struct bpf_rb_node); | |
d54730b5 DM |
380 | case BPF_REFCOUNT: |
381 | return __alignof__(struct bpf_refcount); | |
aa3496ac KKD |
382 | default: |
383 | WARN_ON_ONCE(1); | |
384 | return 0; | |
385 | } | |
386 | } | |
387 | ||
3e81740a DM |
388 | static inline void bpf_obj_init_field(const struct btf_field *field, void *addr) |
389 | { | |
390 | memset(addr, 0, field->size); | |
391 | ||
392 | switch (field->type) { | |
393 | case BPF_REFCOUNT: | |
394 | refcount_set((refcount_t *)addr, 1); | |
395 | break; | |
396 | case BPF_RB_NODE: | |
397 | RB_CLEAR_NODE((struct rb_node *)addr); | |
398 | break; | |
399 | case BPF_LIST_HEAD: | |
400 | case BPF_LIST_NODE: | |
401 | INIT_LIST_HEAD((struct list_head *)addr); | |
402 | break; | |
403 | case BPF_RB_ROOT: | |
404 | /* RB_ROOT_CACHED 0-inits, no need to do anything after memset */ | |
405 | case BPF_SPIN_LOCK: | |
406 | case BPF_TIMER: | |
407 | case BPF_KPTR_UNREF: | |
408 | case BPF_KPTR_REF: | |
55db92f4 | 409 | case BPF_KPTR_PERCPU: |
3e81740a DM |
410 | break; |
411 | default: | |
412 | WARN_ON_ONCE(1); | |
413 | return; | |
414 | } | |
415 | } | |
416 | ||
aa3496ac KKD |
417 | static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type) |
418 | { | |
419 | if (IS_ERR_OR_NULL(rec)) | |
420 | return false; | |
421 | return rec->field_mask & type; | |
422 | } | |
423 | ||
cd2a8079 | 424 | static inline void bpf_obj_init(const struct btf_record *rec, void *obj) |
68134668 | 425 | { |
958cf2e2 | 426 | int i; |
4d7d7f69 | 427 | |
cd2a8079 | 428 | if (IS_ERR_OR_NULL(rec)) |
958cf2e2 | 429 | return; |
cd2a8079 | 430 | for (i = 0; i < rec->cnt; i++) |
3e81740a | 431 | bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset); |
958cf2e2 KKD |
432 | } |
433 | ||
997849c4 HT |
434 | /* 'dst' must be a temporary buffer and should not point to memory that is being |
435 | * used in parallel by a bpf program or bpf syscall, otherwise the access from | |
436 | * the bpf program or bpf syscall may be corrupted by the reinitialization, | |
437 | * leading to weird problems. Even 'dst' is newly-allocated from bpf memory | |
438 | * allocator, it is still possible for 'dst' to be used in parallel by a bpf | |
439 | * program or bpf syscall. | |
440 | */ | |
958cf2e2 KKD |
441 | static inline void check_and_init_map_value(struct bpf_map *map, void *dst) |
442 | { | |
cd2a8079 | 443 | bpf_obj_init(map->record, dst); |
68134668 AS |
444 | } |
445 | ||
44832519 KKD |
446 | /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and |
447 | * forced to use 'long' read/writes to try to atomically copy long counters. | |
448 | * Best-effort only. No barriers here, since it _will_ race with concurrent | |
449 | * updates from BPF programs. Called from bpf syscall and mostly used with | |
450 | * size 8 or 16 bytes, so ask compiler to inline it. | |
451 | */ | |
452 | static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) | |
453 | { | |
454 | const long *lsrc = src; | |
455 | long *ldst = dst; | |
456 | ||
457 | size /= sizeof(long); | |
458 | while (size--) | |
6a86b5b5 | 459 | data_race(*ldst++ = *lsrc++); |
44832519 KKD |
460 | } |
461 | ||
462 | /* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */ | |
cd2a8079 | 463 | static inline void bpf_obj_memcpy(struct btf_record *rec, |
f71b2f64 KKD |
464 | void *dst, void *src, u32 size, |
465 | bool long_memcpy) | |
d83525ca | 466 | { |
4d7d7f69 KKD |
467 | u32 curr_off = 0; |
468 | int i; | |
68134668 | 469 | |
cd2a8079 | 470 | if (IS_ERR_OR_NULL(rec)) { |
44832519 | 471 | if (long_memcpy) |
f71b2f64 | 472 | bpf_long_memcpy(dst, src, round_up(size, 8)); |
44832519 | 473 | else |
f71b2f64 | 474 | memcpy(dst, src, size); |
4d7d7f69 | 475 | return; |
68134668 | 476 | } |
d83525ca | 477 | |
cd2a8079 DM |
478 | for (i = 0; i < rec->cnt; i++) { |
479 | u32 next_off = rec->fields[i].offset; | |
aa3496ac | 480 | u32 sz = next_off - curr_off; |
4d7d7f69 | 481 | |
aa3496ac | 482 | memcpy(dst + curr_off, src + curr_off, sz); |
cd2a8079 | 483 | curr_off += rec->fields[i].size + sz; |
d83525ca | 484 | } |
f71b2f64 | 485 | memcpy(dst + curr_off, src + curr_off, size - curr_off); |
d83525ca | 486 | } |
44832519 KKD |
487 | |
488 | static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) | |
489 | { | |
cd2a8079 | 490 | bpf_obj_memcpy(map->record, dst, src, map->value_size, false); |
44832519 KKD |
491 | } |
492 | ||
493 | static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src) | |
494 | { | |
cd2a8079 | 495 | bpf_obj_memcpy(map->record, dst, src, map->value_size, true); |
44832519 KKD |
496 | } |
497 | ||
cd2a8079 | 498 | static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size) |
cc487558 KKD |
499 | { |
500 | u32 curr_off = 0; | |
501 | int i; | |
502 | ||
cd2a8079 | 503 | if (IS_ERR_OR_NULL(rec)) { |
f71b2f64 | 504 | memset(dst, 0, size); |
cc487558 KKD |
505 | return; |
506 | } | |
507 | ||
cd2a8079 DM |
508 | for (i = 0; i < rec->cnt; i++) { |
509 | u32 next_off = rec->fields[i].offset; | |
aa3496ac | 510 | u32 sz = next_off - curr_off; |
cc487558 | 511 | |
aa3496ac | 512 | memset(dst + curr_off, 0, sz); |
cd2a8079 | 513 | curr_off += rec->fields[i].size + sz; |
cc487558 | 514 | } |
f71b2f64 KKD |
515 | memset(dst + curr_off, 0, size - curr_off); |
516 | } | |
517 | ||
518 | static inline void zero_map_value(struct bpf_map *map, void *dst) | |
519 | { | |
cd2a8079 | 520 | bpf_obj_memzero(map->record, dst, map->value_size); |
cc487558 KKD |
521 | } |
522 | ||
96049f3a AS |
523 | void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, |
524 | bool lock_src); | |
b00628b1 | 525 | void bpf_timer_cancel_and_free(void *timer); |
f0c5941f KKD |
526 | void bpf_list_head_free(const struct btf_field *field, void *list_head, |
527 | struct bpf_spin_lock *spin_lock); | |
9c395c1b DM |
528 | void bpf_rb_root_free(const struct btf_field *field, void *rb_root, |
529 | struct bpf_spin_lock *spin_lock); | |
530 | ||
f0c5941f | 531 | |
8e7ae251 | 532 | int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); |
d83525ca | 533 | |
602144c2 | 534 | struct bpf_offload_dev; |
a3884572 JK |
535 | struct bpf_offloaded_map; |
536 | ||
537 | struct bpf_map_dev_ops { | |
538 | int (*map_get_next_key)(struct bpf_offloaded_map *map, | |
539 | void *key, void *next_key); | |
540 | int (*map_lookup_elem)(struct bpf_offloaded_map *map, | |
541 | void *key, void *value); | |
542 | int (*map_update_elem)(struct bpf_offloaded_map *map, | |
543 | void *key, void *value, u64 flags); | |
544 | int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); | |
545 | }; | |
546 | ||
547 | struct bpf_offloaded_map { | |
548 | struct bpf_map map; | |
549 | struct net_device *netdev; | |
550 | const struct bpf_map_dev_ops *dev_ops; | |
551 | void *dev_priv; | |
552 | struct list_head offloads; | |
553 | }; | |
554 | ||
555 | static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) | |
556 | { | |
557 | return container_of(map, struct bpf_offloaded_map, map); | |
558 | } | |
559 | ||
0cd3cbed JK |
560 | static inline bool bpf_map_offload_neutral(const struct bpf_map *map) |
561 | { | |
562 | return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; | |
563 | } | |
564 | ||
a26ca7c9 MKL |
565 | static inline bool bpf_map_support_seq_show(const struct bpf_map *map) |
566 | { | |
85d33df3 MKL |
567 | return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && |
568 | map->ops->map_seq_show_elem; | |
a26ca7c9 MKL |
569 | } |
570 | ||
e8d2bec0 | 571 | int map_check_no_btf(const struct bpf_map *map, |
1b2b234b | 572 | const struct btf *btf, |
e8d2bec0 DB |
573 | const struct btf_type *key_type, |
574 | const struct btf_type *value_type); | |
575 | ||
f4d05259 MKL |
576 | bool bpf_map_meta_equal(const struct bpf_map *meta0, |
577 | const struct bpf_map *meta1); | |
578 | ||
a3884572 JK |
579 | extern const struct bpf_map_ops bpf_map_offload_ops; |
580 | ||
d639b9d1 HL |
581 | /* bpf_type_flag contains a set of flags that are applicable to the values of |
582 | * arg_type, ret_type and reg_type. For example, a pointer value may be null, | |
583 | * or a memory is read-only. We classify types into two categories: base types | |
584 | * and extended types. Extended types are base types combined with a type flag. | |
585 | * | |
586 | * Currently there are no more than 32 base types in arg_type, ret_type and | |
587 | * reg_types. | |
588 | */ | |
589 | #define BPF_BASE_TYPE_BITS 8 | |
590 | ||
591 | enum bpf_type_flag { | |
592 | /* PTR may be NULL. */ | |
593 | PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS), | |
594 | ||
216e3cd2 HL |
595 | /* MEM is read-only. When applied on bpf_arg, it indicates the arg is |
596 | * compatible with both mutable and immutable memory. | |
597 | */ | |
20b2aff4 HL |
598 | MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS), |
599 | ||
894f2a8b KKD |
600 | /* MEM points to BPF ring buffer reservation. */ |
601 | MEM_RINGBUF = BIT(2 + BPF_BASE_TYPE_BITS), | |
a672b2e3 | 602 | |
c6f1bfe8 YS |
603 | /* MEM is in user address space. */ |
604 | MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS), | |
605 | ||
5844101a HL |
606 | /* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged |
607 | * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In | |
608 | * order to drop this tag, it must be passed into bpf_per_cpu_ptr() | |
609 | * or bpf_this_cpu_ptr(), which will return the pointer corresponding | |
610 | * to the specified cpu. | |
611 | */ | |
612 | MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS), | |
613 | ||
8f14852e KKD |
614 | /* Indicates that the argument will be released. */ |
615 | OBJ_RELEASE = BIT(5 + BPF_BASE_TYPE_BITS), | |
616 | ||
6efe152d KKD |
617 | /* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark |
618 | * unreferenced and referenced kptr loaded from map value using a load | |
619 | * instruction, so that they can only be dereferenced but not escape the | |
620 | * BPF program into the kernel (i.e. cannot be passed as arguments to | |
621 | * kfunc or bpf helpers). | |
622 | */ | |
623 | PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS), | |
624 | ||
16d1e00c JK |
625 | MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS), |
626 | ||
97e03f52 JK |
627 | /* DYNPTR points to memory local to the bpf program. */ |
628 | DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS), | |
629 | ||
20571567 | 630 | /* DYNPTR points to a kernel-produced ringbuf record. */ |
bc34dee6 JK |
631 | DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS), |
632 | ||
508362ac MM |
633 | /* Size is known at compile time. */ |
634 | MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS), | |
635 | ||
282de143 KKD |
636 | /* MEM is of an allocated object of type in program BTF. This is used to |
637 | * tag PTR_TO_BTF_ID allocated using bpf_obj_new. | |
638 | */ | |
639 | MEM_ALLOC = BIT(11 + BPF_BASE_TYPE_BITS), | |
640 | ||
3f00c523 DV |
641 | /* PTR was passed from the kernel in a trusted context, and may be |
642 | * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions. | |
643 | * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above. | |
644 | * PTR_UNTRUSTED refers to a kptr that was read directly from a map | |
645 | * without invoking bpf_kptr_xchg(). What we really need to know is | |
646 | * whether a pointer is safe to pass to a kfunc or BPF helper function. | |
647 | * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF | |
648 | * helpers, they do not cover all possible instances of unsafe | |
649 | * pointers. For example, a pointer that was obtained from walking a | |
650 | * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the | |
651 | * fact that it may be NULL, invalid, etc. This is due to backwards | |
652 | * compatibility requirements, as this was the behavior that was first | |
653 | * introduced when kptrs were added. The behavior is now considered | |
654 | * deprecated, and PTR_UNTRUSTED will eventually be removed. | |
655 | * | |
656 | * PTR_TRUSTED, on the other hand, is a pointer that the kernel | |
657 | * guarantees to be valid and safe to pass to kfuncs and BPF helpers. | |
658 | * For example, pointers passed to tracepoint arguments are considered | |
659 | * PTR_TRUSTED, as are pointers that are passed to struct_ops | |
660 | * callbacks. As alluded to above, pointers that are obtained from | |
661 | * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a | |
662 | * struct task_struct *task is PTR_TRUSTED, then accessing | |
663 | * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored | |
664 | * in a BPF register. Similarly, pointers passed to certain programs | |
665 | * types such as kretprobes are not guaranteed to be valid, as they may | |
666 | * for example contain an object that was recently freed. | |
667 | */ | |
668 | PTR_TRUSTED = BIT(12 + BPF_BASE_TYPE_BITS), | |
669 | ||
9bb00b28 YS |
670 | /* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */ |
671 | MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS), | |
672 | ||
6a3cd331 | 673 | /* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning. |
0816b8c6 DM |
674 | * Currently only valid for linked-list and rbtree nodes. If the nodes |
675 | * have a bpf_refcount_field, they must be tagged MEM_RCU as well. | |
6a3cd331 DM |
676 | */ |
677 | NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS), | |
678 | ||
b5964b96 JK |
679 | /* DYNPTR points to sk_buff */ |
680 | DYNPTR_TYPE_SKB = BIT(15 + BPF_BASE_TYPE_BITS), | |
681 | ||
05421aec JK |
682 | /* DYNPTR points to xdp_buff */ |
683 | DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS), | |
684 | ||
16d1e00c JK |
685 | __BPF_TYPE_FLAG_MAX, |
686 | __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, | |
d639b9d1 HL |
687 | }; |
688 | ||
05421aec JK |
689 | #define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \ |
690 | | DYNPTR_TYPE_XDP) | |
97e03f52 | 691 | |
d639b9d1 HL |
692 | /* Max number of base types. */ |
693 | #define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS) | |
694 | ||
695 | /* Max number of all types. */ | |
696 | #define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1)) | |
697 | ||
17a52670 AS |
698 | /* function argument constraints */ |
699 | enum bpf_arg_type { | |
80f1d68c | 700 | ARG_DONTCARE = 0, /* unused argument in helper function */ |
17a52670 AS |
701 | |
702 | /* the following constraints used to prototype | |
703 | * bpf_map_lookup/update/delete_elem() functions | |
704 | */ | |
705 | ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ | |
706 | ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ | |
707 | ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ | |
708 | ||
16d1e00c JK |
709 | /* Used to prototype bpf_memcmp() and other functions that access data |
710 | * on eBPF program stack | |
17a52670 | 711 | */ |
39f19ebb | 712 | ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ |
435faee1 | 713 | |
39f19ebb AS |
714 | ARG_CONST_SIZE, /* number of bytes accessed from memory */ |
715 | ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ | |
80f1d68c | 716 | |
608cd71a | 717 | ARG_PTR_TO_CTX, /* pointer to context */ |
80f1d68c | 718 | ARG_ANYTHING, /* any (initialized) argument is ok */ |
d83525ca | 719 | ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ |
46f8bc92 | 720 | ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ |
57c3bb72 AI |
721 | ARG_PTR_TO_INT, /* pointer to int */ |
722 | ARG_PTR_TO_LONG, /* pointer to long */ | |
6ac99e8f | 723 | ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ |
a7658e1a | 724 | ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ |
894f2a8b | 725 | ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */ |
457f4436 | 726 | ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ |
1df8f55a | 727 | ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */ |
eaa6bcb7 | 728 | ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */ |
69c087ba | 729 | ARG_PTR_TO_FUNC, /* pointer to a bpf program function */ |
48946bd6 | 730 | ARG_PTR_TO_STACK, /* pointer to stack */ |
fff13c4b | 731 | ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ |
b00628b1 | 732 | ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ |
c0a5a21c | 733 | ARG_PTR_TO_KPTR, /* pointer to referenced kptr */ |
97e03f52 | 734 | ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */ |
f79e7ea5 | 735 | __BPF_ARG_TYPE_MAX, |
d639b9d1 | 736 | |
48946bd6 HL |
737 | /* Extended arg_types. */ |
738 | ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE, | |
739 | ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM, | |
740 | ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX, | |
741 | ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET, | |
48946bd6 | 742 | ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK, |
c0a5a21c | 743 | ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID, |
16d1e00c JK |
744 | /* pointer to memory does not need to be initialized, helper function must fill |
745 | * all bytes or clear them in error case. | |
746 | */ | |
747 | ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM, | |
508362ac MM |
748 | /* Pointer to valid memory of size known at compile time. */ |
749 | ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM, | |
48946bd6 | 750 | |
d639b9d1 HL |
751 | /* This must be the last entry. Its purpose is to ensure the enum is |
752 | * wide enough to hold the higher bits reserved for bpf_type_flag. | |
753 | */ | |
754 | __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT, | |
17a52670 | 755 | }; |
d639b9d1 | 756 | static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); |
17a52670 AS |
757 | |
758 | /* type of values returned from helper functions */ | |
759 | enum bpf_return_type { | |
760 | RET_INTEGER, /* function returns integer */ | |
761 | RET_VOID, /* function doesn't return anything */ | |
3e6a4b3e | 762 | RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ |
3c480732 HL |
763 | RET_PTR_TO_SOCKET, /* returns a pointer to a socket */ |
764 | RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */ | |
765 | RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */ | |
2de2669b | 766 | RET_PTR_TO_MEM, /* returns a pointer to memory */ |
63d9b80d | 767 | RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */ |
3ca1032a | 768 | RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */ |
d639b9d1 HL |
769 | __BPF_RET_TYPE_MAX, |
770 | ||
3c480732 HL |
771 | /* Extended ret_types. */ |
772 | RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE, | |
773 | RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET, | |
774 | RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK, | |
775 | RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON, | |
894f2a8b | 776 | RET_PTR_TO_RINGBUF_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM, |
2de2669b | 777 | RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MEM, |
3c480732 | 778 | RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID, |
3f00c523 | 779 | RET_PTR_TO_BTF_ID_TRUSTED = PTR_TRUSTED | RET_PTR_TO_BTF_ID, |
3c480732 | 780 | |
d639b9d1 HL |
781 | /* This must be the last entry. Its purpose is to ensure the enum is |
782 | * wide enough to hold the higher bits reserved for bpf_type_flag. | |
783 | */ | |
784 | __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT, | |
17a52670 | 785 | }; |
d639b9d1 | 786 | static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); |
17a52670 | 787 | |
09756af4 AS |
788 | /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs |
789 | * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL | |
790 | * instructions after verifying | |
791 | */ | |
792 | struct bpf_func_proto { | |
793 | u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | |
794 | bool gpl_only; | |
36bbef52 | 795 | bool pkt_access; |
01685c5b | 796 | bool might_sleep; |
17a52670 | 797 | enum bpf_return_type ret_type; |
a7658e1a AS |
798 | union { |
799 | struct { | |
800 | enum bpf_arg_type arg1_type; | |
801 | enum bpf_arg_type arg2_type; | |
802 | enum bpf_arg_type arg3_type; | |
803 | enum bpf_arg_type arg4_type; | |
804 | enum bpf_arg_type arg5_type; | |
805 | }; | |
806 | enum bpf_arg_type arg_type[5]; | |
807 | }; | |
9436ef6e LB |
808 | union { |
809 | struct { | |
810 | u32 *arg1_btf_id; | |
811 | u32 *arg2_btf_id; | |
812 | u32 *arg3_btf_id; | |
813 | u32 *arg4_btf_id; | |
814 | u32 *arg5_btf_id; | |
815 | }; | |
816 | u32 *arg_btf_id[5]; | |
508362ac MM |
817 | struct { |
818 | size_t arg1_size; | |
819 | size_t arg2_size; | |
820 | size_t arg3_size; | |
821 | size_t arg4_size; | |
822 | size_t arg5_size; | |
823 | }; | |
824 | size_t arg_size[5]; | |
9436ef6e | 825 | }; |
af7ec138 | 826 | int *ret_btf_id; /* return value btf_id */ |
eae2e83e | 827 | bool (*allowed)(const struct bpf_prog *prog); |
17a52670 AS |
828 | }; |
829 | ||
830 | /* bpf_context is intentionally undefined structure. Pointer to bpf_context is | |
831 | * the first argument to eBPF programs. | |
832 | * For socket filters: 'struct bpf_context *' == 'struct sk_buff *' | |
833 | */ | |
834 | struct bpf_context; | |
835 | ||
836 | enum bpf_access_type { | |
837 | BPF_READ = 1, | |
838 | BPF_WRITE = 2 | |
09756af4 AS |
839 | }; |
840 | ||
19de99f7 | 841 | /* types of values stored in eBPF registers */ |
f1174f77 EC |
842 | /* Pointer types represent: |
843 | * pointer | |
844 | * pointer + imm | |
845 | * pointer + (u16) var | |
846 | * pointer + (u16) var + imm | |
847 | * if (range > 0) then [ptr, ptr + range - off) is safe to access | |
848 | * if (id > 0) means that some 'var' was added | |
849 | * if (off > 0) means that 'imm' was added | |
850 | */ | |
19de99f7 AS |
851 | enum bpf_reg_type { |
852 | NOT_INIT = 0, /* nothing was written into register */ | |
f1174f77 | 853 | SCALAR_VALUE, /* reg doesn't contain a valid pointer */ |
19de99f7 AS |
854 | PTR_TO_CTX, /* reg points to bpf_context */ |
855 | CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ | |
856 | PTR_TO_MAP_VALUE, /* reg points to map element value */ | |
c25b2ae1 | 857 | PTR_TO_MAP_KEY, /* reg points to a map element key */ |
f1174f77 | 858 | PTR_TO_STACK, /* reg == frame_pointer + offset */ |
de8f3a83 | 859 | PTR_TO_PACKET_META, /* skb->data - meta_len */ |
f1174f77 | 860 | PTR_TO_PACKET, /* reg points to skb->data */ |
19de99f7 | 861 | PTR_TO_PACKET_END, /* skb->data + headlen */ |
d58e468b | 862 | PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ |
c64b7983 | 863 | PTR_TO_SOCKET, /* reg points to struct bpf_sock */ |
46f8bc92 | 864 | PTR_TO_SOCK_COMMON, /* reg points to sock_common */ |
655a51e5 | 865 | PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ |
9df1c28b | 866 | PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ |
fada7fdc | 867 | PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ |
ba5f4cfe JF |
868 | /* PTR_TO_BTF_ID points to a kernel struct that does not need |
869 | * to be null checked by the BPF program. This does not imply the | |
870 | * pointer is _not_ null and in practice this can easily be a null | |
871 | * pointer when reading pointer chains. The assumption is program | |
872 | * context will handle null pointer dereference typically via fault | |
873 | * handling. The verifier must keep this in mind and can make no | |
874 | * assumptions about null or non-null when doing branch analysis. | |
875 | * Further, when passed into helpers the helpers can not, without | |
876 | * additional context, assume the value is non-null. | |
877 | */ | |
878 | PTR_TO_BTF_ID, | |
879 | /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not | |
880 | * been checked for null. Used primarily to inform the verifier | |
881 | * an explicit null check is required for this struct. | |
882 | */ | |
457f4436 | 883 | PTR_TO_MEM, /* reg points to valid memory region */ |
20b2aff4 | 884 | PTR_TO_BUF, /* reg points to a read/write buffer */ |
69c087ba | 885 | PTR_TO_FUNC, /* reg points to a bpf program function */ |
27060531 | 886 | CONST_PTR_TO_DYNPTR, /* reg points to a const struct bpf_dynptr */ |
e6ac2450 | 887 | __BPF_REG_TYPE_MAX, |
d639b9d1 | 888 | |
c25b2ae1 HL |
889 | /* Extended reg_types. */ |
890 | PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE, | |
891 | PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET, | |
892 | PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON, | |
893 | PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK, | |
894 | PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID, | |
c25b2ae1 | 895 | |
d639b9d1 HL |
896 | /* This must be the last entry. Its purpose is to ensure the enum is |
897 | * wide enough to hold the higher bits reserved for bpf_type_flag. | |
898 | */ | |
899 | __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT, | |
19de99f7 | 900 | }; |
d639b9d1 | 901 | static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); |
19de99f7 | 902 | |
23994631 YS |
903 | /* The information passed from prog-specific *_is_valid_access |
904 | * back to the verifier. | |
905 | */ | |
906 | struct bpf_insn_access_aux { | |
907 | enum bpf_reg_type reg_type; | |
9e15db66 AS |
908 | union { |
909 | int ctx_field_size; | |
22dc4a0f AN |
910 | struct { |
911 | struct btf *btf; | |
912 | u32 btf_id; | |
913 | }; | |
9e15db66 AS |
914 | }; |
915 | struct bpf_verifier_log *log; /* for verbose logs */ | |
23994631 YS |
916 | }; |
917 | ||
f96da094 DB |
918 | static inline void |
919 | bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) | |
920 | { | |
921 | aux->ctx_field_size = size; | |
922 | } | |
923 | ||
3feb263b AN |
924 | static bool bpf_is_ldimm64(const struct bpf_insn *insn) |
925 | { | |
926 | return insn->code == (BPF_LD | BPF_IMM | BPF_DW); | |
927 | } | |
928 | ||
3990ed4c MKL |
929 | static inline bool bpf_pseudo_func(const struct bpf_insn *insn) |
930 | { | |
3feb263b | 931 | return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC; |
3990ed4c MKL |
932 | } |
933 | ||
7de16e3a JK |
934 | struct bpf_prog_ops { |
935 | int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, | |
936 | union bpf_attr __user *uattr); | |
937 | }; | |
938 | ||
6728aea7 | 939 | struct bpf_reg_state; |
09756af4 AS |
940 | struct bpf_verifier_ops { |
941 | /* return eBPF function prototype for verification */ | |
5e43f899 AI |
942 | const struct bpf_func_proto * |
943 | (*get_func_proto)(enum bpf_func_id func_id, | |
944 | const struct bpf_prog *prog); | |
17a52670 AS |
945 | |
946 | /* return true if 'size' wide access at offset 'off' within bpf_context | |
947 | * with 'type' (read or write) is allowed | |
948 | */ | |
19de99f7 | 949 | bool (*is_valid_access)(int off, int size, enum bpf_access_type type, |
5e43f899 | 950 | const struct bpf_prog *prog, |
23994631 | 951 | struct bpf_insn_access_aux *info); |
36bbef52 DB |
952 | int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, |
953 | const struct bpf_prog *prog); | |
e0cea7ce DB |
954 | int (*gen_ld_abs)(const struct bpf_insn *orig, |
955 | struct bpf_insn *insn_buf); | |
6b8cc1d1 DB |
956 | u32 (*convert_ctx_access)(enum bpf_access_type type, |
957 | const struct bpf_insn *src, | |
958 | struct bpf_insn *dst, | |
f96da094 | 959 | struct bpf_prog *prog, u32 *target_size); |
27ae7997 | 960 | int (*btf_struct_access)(struct bpf_verifier_log *log, |
6728aea7 | 961 | const struct bpf_reg_state *reg, |
b7e852a9 | 962 | int off, int size); |
09756af4 AS |
963 | }; |
964 | ||
cae1927c | 965 | struct bpf_prog_offload_ops { |
08ca90af | 966 | /* verifier basic callbacks */ |
cae1927c JK |
967 | int (*insn_hook)(struct bpf_verifier_env *env, |
968 | int insn_idx, int prev_insn_idx); | |
c941ce9c | 969 | int (*finalize)(struct bpf_verifier_env *env); |
08ca90af JK |
970 | /* verifier optimization callbacks (called after .finalize) */ |
971 | int (*replace_insn)(struct bpf_verifier_env *env, u32 off, | |
972 | struct bpf_insn *insn); | |
973 | int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); | |
974 | /* program management callbacks */ | |
16a8cb5c QM |
975 | int (*prepare)(struct bpf_prog *prog); |
976 | int (*translate)(struct bpf_prog *prog); | |
eb911947 | 977 | void (*destroy)(struct bpf_prog *prog); |
cae1927c JK |
978 | }; |
979 | ||
0a9c1991 | 980 | struct bpf_prog_offload { |
ab3f0063 JK |
981 | struct bpf_prog *prog; |
982 | struct net_device *netdev; | |
341b3e7b | 983 | struct bpf_offload_dev *offdev; |
ab3f0063 JK |
984 | void *dev_priv; |
985 | struct list_head offloads; | |
986 | bool dev_state; | |
08ca90af | 987 | bool opt_failed; |
fcfb126d JW |
988 | void *jited_image; |
989 | u32 jited_len; | |
ab3f0063 JK |
990 | }; |
991 | ||
8bad74f9 RG |
992 | enum bpf_cgroup_storage_type { |
993 | BPF_CGROUP_STORAGE_SHARED, | |
b741f163 | 994 | BPF_CGROUP_STORAGE_PERCPU, |
8bad74f9 RG |
995 | __BPF_CGROUP_STORAGE_MAX |
996 | }; | |
997 | ||
998 | #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX | |
999 | ||
f1b9509c AS |
1000 | /* The longest tracepoint has 12 args. |
1001 | * See include/trace/bpf_probe.h | |
1002 | */ | |
1003 | #define MAX_BPF_FUNC_ARGS 12 | |
1004 | ||
523a4cf4 DB |
1005 | /* The maximum number of arguments passed through registers |
1006 | * a single function may have. | |
1007 | */ | |
1008 | #define MAX_BPF_FUNC_REG_ARGS 5 | |
1009 | ||
720e6a43 YS |
1010 | /* The argument is a structure. */ |
1011 | #define BTF_FMODEL_STRUCT_ARG BIT(0) | |
1012 | ||
49f67f39 IL |
1013 | /* The argument is signed. */ |
1014 | #define BTF_FMODEL_SIGNED_ARG BIT(1) | |
1015 | ||
fec56f58 AS |
1016 | struct btf_func_model { |
1017 | u8 ret_size; | |
49f67f39 | 1018 | u8 ret_flags; |
fec56f58 AS |
1019 | u8 nr_args; |
1020 | u8 arg_size[MAX_BPF_FUNC_ARGS]; | |
720e6a43 | 1021 | u8 arg_flags[MAX_BPF_FUNC_ARGS]; |
fec56f58 AS |
1022 | }; |
1023 | ||
1024 | /* Restore arguments before returning from trampoline to let original function | |
1025 | * continue executing. This flag is used for fentry progs when there are no | |
1026 | * fexit progs. | |
1027 | */ | |
1028 | #define BPF_TRAMP_F_RESTORE_REGS BIT(0) | |
1029 | /* Call original function after fentry progs, but before fexit progs. | |
1030 | * Makes sense for fentry/fexit, normal calls and indirect calls. | |
1031 | */ | |
1032 | #define BPF_TRAMP_F_CALL_ORIG BIT(1) | |
1033 | /* Skip current frame and return to parent. Makes sense for fentry/fexit | |
1034 | * programs only. Should not be used with normal calls and indirect calls. | |
1035 | */ | |
1036 | #define BPF_TRAMP_F_SKIP_FRAME BIT(2) | |
7e6f3cd8 JO |
1037 | /* Store IP address of the caller on the trampoline stack, |
1038 | * so it's available for trampoline's programs. | |
1039 | */ | |
1040 | #define BPF_TRAMP_F_IP_ARG BIT(3) | |
356ed649 HT |
1041 | /* Return the return value of fentry prog. Only used by bpf_struct_ops. */ |
1042 | #define BPF_TRAMP_F_RET_FENTRY_RET BIT(4) | |
7e6f3cd8 | 1043 | |
316cba62 JO |
1044 | /* Get original function from stack instead of from provided direct address. |
1045 | * Makes sense for trampolines with fexit or fmod_ret programs. | |
1046 | */ | |
1047 | #define BPF_TRAMP_F_ORIG_STACK BIT(5) | |
1048 | ||
00963a2e SL |
1049 | /* This trampoline is on a function with another ftrace_ops with IPMODIFY, |
1050 | * e.g., a live patch. This flag is set and cleared by ftrace call backs, | |
1051 | */ | |
1052 | #define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6) | |
1053 | ||
2b5dcb31 LH |
1054 | /* Indicate that current trampoline is in a tail call context. Then, it has to |
1055 | * cache and restore tail_call_cnt to avoid infinite tail call loop. | |
1056 | */ | |
1057 | #define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7) | |
1058 | ||
2cd3e377 PZ |
1059 | /* |
1060 | * Indicate the trampoline should be suitable to receive indirect calls; | |
1061 | * without this indirectly calling the generated code can result in #UD/#CP, | |
1062 | * depending on the CFI options. | |
1063 | * | |
1064 | * Used by bpf_struct_ops. | |
1065 | * | |
1066 | * Incompatible with FENTRY usage, overloads @func_addr argument. | |
1067 | */ | |
1068 | #define BPF_TRAMP_F_INDIRECT BIT(8) | |
1069 | ||
88fd9e53 | 1070 | /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 |
b23316aa | 1071 | * bytes on x86. |
88fd9e53 | 1072 | */ |
390a07a9 | 1073 | enum { |
528eb2cb IL |
1074 | #if defined(__s390x__) |
1075 | BPF_MAX_TRAMP_LINKS = 27, | |
1076 | #else | |
390a07a9 | 1077 | BPF_MAX_TRAMP_LINKS = 38, |
528eb2cb | 1078 | #endif |
390a07a9 | 1079 | }; |
88fd9e53 | 1080 | |
f7e0beaf KFL |
1081 | struct bpf_tramp_links { |
1082 | struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS]; | |
1083 | int nr_links; | |
88fd9e53 KS |
1084 | }; |
1085 | ||
e384c7b7 KFL |
1086 | struct bpf_tramp_run_ctx; |
1087 | ||
fec56f58 AS |
1088 | /* Different use cases for BPF trampoline: |
1089 | * 1. replace nop at the function entry (kprobe equivalent) | |
1090 | * flags = BPF_TRAMP_F_RESTORE_REGS | |
1091 | * fentry = a set of programs to run before returning from trampoline | |
1092 | * | |
1093 | * 2. replace nop at the function entry (kprobe + kretprobe equivalent) | |
1094 | * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME | |
1095 | * orig_call = fentry_ip + MCOUNT_INSN_SIZE | |
1096 | * fentry = a set of program to run before calling original function | |
1097 | * fexit = a set of program to run after original function | |
1098 | * | |
1099 | * 3. replace direct call instruction anywhere in the function body | |
1100 | * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid) | |
1101 | * With flags = 0 | |
1102 | * fentry = a set of programs to run before returning from trampoline | |
1103 | * With flags = BPF_TRAMP_F_CALL_ORIG | |
1104 | * orig_call = original callback addr or direct function addr | |
1105 | * fentry = a set of program to run before calling original function | |
1106 | * fexit = a set of program to run after original function | |
1107 | */ | |
e21aa341 | 1108 | struct bpf_tramp_image; |
7a3d9a15 | 1109 | int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, |
85d33df3 | 1110 | const struct btf_func_model *m, u32 flags, |
f7e0beaf | 1111 | struct bpf_tramp_links *tlinks, |
7a3d9a15 | 1112 | void *func_addr); |
82583daa SL |
1113 | void *arch_alloc_bpf_trampoline(unsigned int size); |
1114 | void arch_free_bpf_trampoline(void *image, unsigned int size); | |
1115 | void arch_protect_bpf_trampoline(void *image, unsigned int size); | |
1116 | void arch_unprotect_bpf_trampoline(void *image, unsigned int size); | |
96d1b7c0 SL |
1117 | int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, |
1118 | struct bpf_tramp_links *tlinks, void *func_addr); | |
82583daa | 1119 | |
271de525 MKL |
1120 | u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog, |
1121 | struct bpf_tramp_run_ctx *run_ctx); | |
1122 | void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start, | |
1123 | struct bpf_tramp_run_ctx *run_ctx); | |
e21aa341 AS |
1124 | void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr); |
1125 | void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr); | |
271de525 MKL |
1126 | typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog, |
1127 | struct bpf_tramp_run_ctx *run_ctx); | |
1128 | typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start, | |
1129 | struct bpf_tramp_run_ctx *run_ctx); | |
1130 | bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog); | |
1131 | bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog); | |
fec56f58 | 1132 | |
535911c8 JO |
1133 | struct bpf_ksym { |
1134 | unsigned long start; | |
1135 | unsigned long end; | |
bfea9a85 | 1136 | char name[KSYM_NAME_LEN]; |
ecb60d1c | 1137 | struct list_head lnode; |
ca4424c9 | 1138 | struct latch_tree_node tnode; |
cbd76f8d | 1139 | bool prog; |
535911c8 JO |
1140 | }; |
1141 | ||
fec56f58 AS |
1142 | enum bpf_tramp_prog_type { |
1143 | BPF_TRAMP_FENTRY, | |
1144 | BPF_TRAMP_FEXIT, | |
ae240823 | 1145 | BPF_TRAMP_MODIFY_RETURN, |
be8704ff AS |
1146 | BPF_TRAMP_MAX, |
1147 | BPF_TRAMP_REPLACE, /* more than MAX */ | |
fec56f58 AS |
1148 | }; |
1149 | ||
e21aa341 AS |
1150 | struct bpf_tramp_image { |
1151 | void *image; | |
26ef208c | 1152 | int size; |
e21aa341 AS |
1153 | struct bpf_ksym ksym; |
1154 | struct percpu_ref pcref; | |
1155 | void *ip_after_call; | |
1156 | void *ip_epilogue; | |
1157 | union { | |
1158 | struct rcu_head rcu; | |
1159 | struct work_struct work; | |
1160 | }; | |
1161 | }; | |
1162 | ||
fec56f58 AS |
1163 | struct bpf_trampoline { |
1164 | /* hlist for trampoline_table */ | |
1165 | struct hlist_node hlist; | |
00963a2e | 1166 | struct ftrace_ops *fops; |
fec56f58 AS |
1167 | /* serializes access to fields of this trampoline */ |
1168 | struct mutex mutex; | |
1169 | refcount_t refcnt; | |
00963a2e | 1170 | u32 flags; |
fec56f58 AS |
1171 | u64 key; |
1172 | struct { | |
1173 | struct btf_func_model model; | |
1174 | void *addr; | |
b91e014f | 1175 | bool ftrace_managed; |
fec56f58 | 1176 | } func; |
be8704ff AS |
1177 | /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF |
1178 | * program by replacing one of its functions. func.addr is the address | |
1179 | * of the function it replaced. | |
1180 | */ | |
1181 | struct bpf_prog *extension_prog; | |
fec56f58 AS |
1182 | /* list of BPF programs using this trampoline */ |
1183 | struct hlist_head progs_hlist[BPF_TRAMP_MAX]; | |
1184 | /* Number of attached programs. A counter per kind. */ | |
1185 | int progs_cnt[BPF_TRAMP_MAX]; | |
1186 | /* Executable image of trampoline */ | |
e21aa341 | 1187 | struct bpf_tramp_image *cur_image; |
861de02e | 1188 | struct module *mod; |
fec56f58 | 1189 | }; |
75ccbef6 | 1190 | |
f7b12b6f THJ |
1191 | struct bpf_attach_target_info { |
1192 | struct btf_func_model fmodel; | |
1193 | long tgt_addr; | |
31bf1dbc | 1194 | struct module *tgt_mod; |
f7b12b6f THJ |
1195 | const char *tgt_name; |
1196 | const struct btf_type *tgt_type; | |
1197 | }; | |
1198 | ||
116eb788 | 1199 | #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */ |
75ccbef6 BT |
1200 | |
1201 | struct bpf_dispatcher_prog { | |
1202 | struct bpf_prog *prog; | |
1203 | refcount_t users; | |
1204 | }; | |
1205 | ||
1206 | struct bpf_dispatcher { | |
1207 | /* dispatcher mutex */ | |
1208 | struct mutex mutex; | |
1209 | void *func; | |
1210 | struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX]; | |
1211 | int num_progs; | |
1212 | void *image; | |
19c02415 | 1213 | void *rw_image; |
75ccbef6 | 1214 | u32 image_off; |
517b75e4 | 1215 | struct bpf_ksym ksym; |
c86df29d PZ |
1216 | #ifdef CONFIG_HAVE_STATIC_CALL |
1217 | struct static_call_key *sc_key; | |
1218 | void *sc_tramp; | |
1219 | #endif | |
75ccbef6 BT |
1220 | }; |
1221 | ||
4f9087f1 PZ |
1222 | #ifndef __bpfcall |
1223 | #define __bpfcall __nocfi | |
1224 | #endif | |
1225 | ||
1226 | static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func( | |
7e6897f9 BT |
1227 | const void *ctx, |
1228 | const struct bpf_insn *insnsi, | |
af3f4134 | 1229 | bpf_func_t bpf_func) |
7e6897f9 BT |
1230 | { |
1231 | return bpf_func(ctx, insnsi); | |
1232 | } | |
f7e0beaf | 1233 | |
8357b366 JK |
1234 | /* the implementation of the opaque uapi struct bpf_dynptr */ |
1235 | struct bpf_dynptr_kern { | |
1236 | void *data; | |
1237 | /* Size represents the number of usable bytes of dynptr data. | |
1238 | * If for example the offset is at 4 for a local dynptr whose data is | |
1239 | * of type u64, the number of usable bytes is 4. | |
1240 | * | |
1241 | * The upper 8 bits are reserved. It is as follows: | |
1242 | * Bits 0 - 23 = size | |
1243 | * Bits 24 - 30 = dynptr type | |
1244 | * Bit 31 = whether dynptr is read-only | |
1245 | */ | |
1246 | u32 size; | |
1247 | u32 offset; | |
1248 | } __aligned(8); | |
1249 | ||
1250 | enum bpf_dynptr_type { | |
1251 | BPF_DYNPTR_TYPE_INVALID, | |
1252 | /* Points to memory that is local to the bpf program */ | |
1253 | BPF_DYNPTR_TYPE_LOCAL, | |
1254 | /* Underlying data is a ringbuf record */ | |
1255 | BPF_DYNPTR_TYPE_RINGBUF, | |
b5964b96 JK |
1256 | /* Underlying data is a sk_buff */ |
1257 | BPF_DYNPTR_TYPE_SKB, | |
05421aec JK |
1258 | /* Underlying data is a xdp_buff */ |
1259 | BPF_DYNPTR_TYPE_XDP, | |
8357b366 JK |
1260 | }; |
1261 | ||
1262 | int bpf_dynptr_check_size(u32 size); | |
26662d73 | 1263 | u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr); |
74523c06 SL |
1264 | const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len); |
1265 | void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len); | |
8357b366 | 1266 | |
fec56f58 | 1267 | #ifdef CONFIG_BPF_JIT |
f7e0beaf KFL |
1268 | int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); |
1269 | int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); | |
f7b12b6f THJ |
1270 | struct bpf_trampoline *bpf_trampoline_get(u64 key, |
1271 | struct bpf_attach_target_info *tgt_info); | |
fec56f58 | 1272 | void bpf_trampoline_put(struct bpf_trampoline *tr); |
19c02415 | 1273 | int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs); |
c86df29d PZ |
1274 | |
1275 | /* | |
1276 | * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn | |
1277 | * indirection with a direct call to the bpf program. If the architecture does | |
1278 | * not have STATIC_CALL, avoid a double-indirection. | |
1279 | */ | |
1280 | #ifdef CONFIG_HAVE_STATIC_CALL | |
1281 | ||
1282 | #define __BPF_DISPATCHER_SC_INIT(_name) \ | |
1283 | .sc_key = &STATIC_CALL_KEY(_name), \ | |
1284 | .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name), | |
1285 | ||
1286 | #define __BPF_DISPATCHER_SC(name) \ | |
1287 | DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func) | |
1288 | ||
1289 | #define __BPF_DISPATCHER_CALL(name) \ | |
1290 | static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func) | |
1291 | ||
1292 | #define __BPF_DISPATCHER_UPDATE(_d, _new) \ | |
1293 | __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new)) | |
1294 | ||
1295 | #else | |
1296 | #define __BPF_DISPATCHER_SC_INIT(name) | |
1297 | #define __BPF_DISPATCHER_SC(name) | |
1298 | #define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi) | |
1299 | #define __BPF_DISPATCHER_UPDATE(_d, _new) | |
1300 | #endif | |
dbe69b29 | 1301 | |
517b75e4 JO |
1302 | #define BPF_DISPATCHER_INIT(_name) { \ |
1303 | .mutex = __MUTEX_INITIALIZER(_name.mutex), \ | |
1304 | .func = &_name##_func, \ | |
1305 | .progs = {}, \ | |
1306 | .num_progs = 0, \ | |
1307 | .image = NULL, \ | |
1308 | .image_off = 0, \ | |
1309 | .ksym = { \ | |
1310 | .name = #_name, \ | |
1311 | .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \ | |
1312 | }, \ | |
c86df29d | 1313 | __BPF_DISPATCHER_SC_INIT(_name##_call) \ |
75ccbef6 BT |
1314 | } |
1315 | ||
1316 | #define DEFINE_BPF_DISPATCHER(name) \ | |
c86df29d | 1317 | __BPF_DISPATCHER_SC(name); \ |
4f9087f1 | 1318 | noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \ |
75ccbef6 BT |
1319 | const void *ctx, \ |
1320 | const struct bpf_insn *insnsi, \ | |
af3f4134 | 1321 | bpf_func_t bpf_func) \ |
75ccbef6 | 1322 | { \ |
c86df29d | 1323 | return __BPF_DISPATCHER_CALL(name); \ |
75ccbef6 | 1324 | } \ |
6a64037d BT |
1325 | EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \ |
1326 | struct bpf_dispatcher bpf_dispatcher_##name = \ | |
18acb7fa | 1327 | BPF_DISPATCHER_INIT(bpf_dispatcher_##name); |
dbe69b29 | 1328 | |
75ccbef6 | 1329 | #define DECLARE_BPF_DISPATCHER(name) \ |
6a64037d | 1330 | unsigned int bpf_dispatcher_##name##_func( \ |
75ccbef6 BT |
1331 | const void *ctx, \ |
1332 | const struct bpf_insn *insnsi, \ | |
af3f4134 | 1333 | bpf_func_t bpf_func); \ |
6a64037d | 1334 | extern struct bpf_dispatcher bpf_dispatcher_##name; |
c86df29d | 1335 | |
6a64037d BT |
1336 | #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func |
1337 | #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name) | |
75ccbef6 BT |
1338 | void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, |
1339 | struct bpf_prog *to); | |
dba122fb | 1340 | /* Called only from JIT-enabled code, so there's no need for stubs. */ |
26ef208c | 1341 | void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym); |
a108f7dc | 1342 | void bpf_image_ksym_del(struct bpf_ksym *ksym); |
dba122fb JO |
1343 | void bpf_ksym_add(struct bpf_ksym *ksym); |
1344 | void bpf_ksym_del(struct bpf_ksym *ksym); | |
3486bedd SL |
1345 | int bpf_jit_charge_modmem(u32 size); |
1346 | void bpf_jit_uncharge_modmem(u32 size); | |
f92c1e18 | 1347 | bool bpf_prog_has_trampoline(const struct bpf_prog *prog); |
fec56f58 | 1348 | #else |
f7e0beaf | 1349 | static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link, |
3aac1ead | 1350 | struct bpf_trampoline *tr) |
fec56f58 AS |
1351 | { |
1352 | return -ENOTSUPP; | |
1353 | } | |
f7e0beaf | 1354 | static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, |
3aac1ead | 1355 | struct bpf_trampoline *tr) |
fec56f58 AS |
1356 | { |
1357 | return -ENOTSUPP; | |
1358 | } | |
f7b12b6f THJ |
1359 | static inline struct bpf_trampoline *bpf_trampoline_get(u64 key, |
1360 | struct bpf_attach_target_info *tgt_info) | |
1361 | { | |
b724a641 | 1362 | return NULL; |
f7b12b6f | 1363 | } |
fec56f58 | 1364 | static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} |
75ccbef6 BT |
1365 | #define DEFINE_BPF_DISPATCHER(name) |
1366 | #define DECLARE_BPF_DISPATCHER(name) | |
6a64037d | 1367 | #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func |
75ccbef6 BT |
1368 | #define BPF_DISPATCHER_PTR(name) NULL |
1369 | static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, | |
1370 | struct bpf_prog *from, | |
1371 | struct bpf_prog *to) {} | |
e9b4e606 JO |
1372 | static inline bool is_bpf_image_address(unsigned long address) |
1373 | { | |
1374 | return false; | |
1375 | } | |
f92c1e18 JO |
1376 | static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog) |
1377 | { | |
1378 | return false; | |
1379 | } | |
fec56f58 AS |
1380 | #endif |
1381 | ||
8c1b6e69 | 1382 | struct bpf_func_info_aux { |
51c39bb1 | 1383 | u16 linkage; |
8c1b6e69 | 1384 | bool unreliable; |
2afae08c AN |
1385 | bool called : 1; |
1386 | bool verified : 1; | |
8c1b6e69 AS |
1387 | }; |
1388 | ||
a66886fe DB |
1389 | enum bpf_jit_poke_reason { |
1390 | BPF_POKE_REASON_TAIL_CALL, | |
1391 | }; | |
1392 | ||
1393 | /* Descriptor of pokes pointing /into/ the JITed image. */ | |
1394 | struct bpf_jit_poke_descriptor { | |
cf71b174 | 1395 | void *tailcall_target; |
ebf7d1f5 MF |
1396 | void *tailcall_bypass; |
1397 | void *bypass_addr; | |
f263a814 | 1398 | void *aux; |
a66886fe DB |
1399 | union { |
1400 | struct { | |
1401 | struct bpf_map *map; | |
1402 | u32 key; | |
1403 | } tail_call; | |
1404 | }; | |
cf71b174 | 1405 | bool tailcall_target_stable; |
a66886fe DB |
1406 | u8 adj_off; |
1407 | u16 reason; | |
a748c697 | 1408 | u32 insn_idx; |
a66886fe DB |
1409 | }; |
1410 | ||
3c32cc1b YS |
1411 | /* reg_type info for ctx arguments */ |
1412 | struct bpf_ctx_arg_aux { | |
1413 | u32 offset; | |
1414 | enum bpf_reg_type reg_type; | |
951cf368 | 1415 | u32 btf_id; |
3c32cc1b YS |
1416 | }; |
1417 | ||
541c3bad AN |
1418 | struct btf_mod_pair { |
1419 | struct btf *btf; | |
1420 | struct module *module; | |
1421 | }; | |
1422 | ||
e6ac2450 MKL |
1423 | struct bpf_kfunc_desc_tab; |
1424 | ||
09756af4 | 1425 | struct bpf_prog_aux { |
85192dbf | 1426 | atomic64_t refcnt; |
24701ece | 1427 | u32 used_map_cnt; |
541c3bad | 1428 | u32 used_btf_cnt; |
32bbe007 | 1429 | u32 max_ctx_offset; |
e647815a | 1430 | u32 max_pkt_offset; |
9df1c28b | 1431 | u32 max_tp_access; |
8726679a | 1432 | u32 stack_depth; |
dc4bb0e2 | 1433 | u32 id; |
ba64e7d8 | 1434 | u32 func_cnt; /* used by non-func prog as the number of func progs */ |
335d1c5b | 1435 | u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */ |
ba64e7d8 | 1436 | u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ |
ccfe29eb | 1437 | u32 attach_btf_id; /* in-kernel BTF type id to attach to */ |
3c32cc1b | 1438 | u32 ctx_arg_info_size; |
afbf21dc YS |
1439 | u32 max_rdonly_access; |
1440 | u32 max_rdwr_access; | |
22dc4a0f | 1441 | struct btf *attach_btf; |
3c32cc1b | 1442 | const struct bpf_ctx_arg_aux *ctx_arg_info; |
3aac1ead THJ |
1443 | struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */ |
1444 | struct bpf_prog *dst_prog; | |
1445 | struct bpf_trampoline *dst_trampoline; | |
4a1e7c0c THJ |
1446 | enum bpf_prog_type saved_dst_prog_type; |
1447 | enum bpf_attach_type saved_dst_attach_type; | |
a4b1d3c1 | 1448 | bool verifier_zext; /* Zero extensions has been inserted by verifier. */ |
2b3486bc SF |
1449 | bool dev_bound; /* Program is bound to the netdev. */ |
1450 | bool offload_requested; /* Program is bound and offloaded to the netdev. */ | |
38207291 | 1451 | bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ |
19bfcdf9 | 1452 | bool attach_tracing_prog; /* true if tracing another tracing program */ |
8c1b6e69 | 1453 | bool func_proto_unreliable; |
1e6c62a8 | 1454 | bool sleepable; |
ebf7d1f5 | 1455 | bool tail_call_reachable; |
c2f2cdbe | 1456 | bool xdp_has_frags; |
f18b03fa KKD |
1457 | bool exception_cb; |
1458 | bool exception_boundary; | |
38207291 MKL |
1459 | /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ |
1460 | const struct btf_type *attach_func_proto; | |
1461 | /* function name for valid attach_btf_id */ | |
1462 | const char *attach_func_name; | |
1c2a088a AS |
1463 | struct bpf_prog **func; |
1464 | void *jit_data; /* JIT specific data. arch dependent */ | |
a66886fe | 1465 | struct bpf_jit_poke_descriptor *poke_tab; |
e6ac2450 | 1466 | struct bpf_kfunc_desc_tab *kfunc_tab; |
2357672c | 1467 | struct bpf_kfunc_btf_tab *kfunc_btf_tab; |
a66886fe | 1468 | u32 size_poke_tab; |
4f9087f1 PZ |
1469 | #ifdef CONFIG_FINEIBT |
1470 | struct bpf_ksym ksym_prefix; | |
1471 | #endif | |
535911c8 | 1472 | struct bpf_ksym ksym; |
7de16e3a | 1473 | const struct bpf_prog_ops *ops; |
09756af4 | 1474 | struct bpf_map **used_maps; |
984fe94f | 1475 | struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */ |
541c3bad | 1476 | struct btf_mod_pair *used_btfs; |
09756af4 | 1477 | struct bpf_prog *prog; |
aaac3ba9 | 1478 | struct user_struct *user; |
cb4d2b3f | 1479 | u64 load_time; /* ns since boottime */ |
aba64c7d | 1480 | u32 verified_insns; |
69fd337a | 1481 | int cgroup_atype; /* enum cgroup_bpf_attach_type */ |
8bad74f9 | 1482 | struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; |
067cae47 | 1483 | char name[BPF_OBJ_NAME_LEN]; |
852486b3 | 1484 | u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64); |
afdb09c7 CF |
1485 | #ifdef CONFIG_SECURITY |
1486 | void *security; | |
1487 | #endif | |
0a9c1991 | 1488 | struct bpf_prog_offload *offload; |
838e9690 | 1489 | struct btf *btf; |
ba64e7d8 | 1490 | struct bpf_func_info *func_info; |
8c1b6e69 | 1491 | struct bpf_func_info_aux *func_info_aux; |
c454a46b MKL |
1492 | /* bpf_line_info loaded from userspace. linfo->insn_off |
1493 | * has the xlated insn offset. | |
1494 | * Both the main and sub prog share the same linfo. | |
1495 | * The subprog can access its first linfo by | |
1496 | * using the linfo_idx. | |
1497 | */ | |
1498 | struct bpf_line_info *linfo; | |
1499 | /* jited_linfo is the jited addr of the linfo. It has a | |
1500 | * one to one mapping to linfo: | |
1501 | * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. | |
1502 | * Both the main and sub prog share the same jited_linfo. | |
1503 | * The subprog can access its first jited_linfo by | |
1504 | * using the linfo_idx. | |
1505 | */ | |
1506 | void **jited_linfo; | |
ba64e7d8 | 1507 | u32 func_info_cnt; |
c454a46b MKL |
1508 | u32 nr_linfo; |
1509 | /* subprog can use linfo_idx to access its first linfo and | |
1510 | * jited_linfo. | |
1511 | * main prog always has linfo_idx == 0 | |
1512 | */ | |
1513 | u32 linfo_idx; | |
31bf1dbc | 1514 | struct module *mod; |
3dec541b AS |
1515 | u32 num_exentries; |
1516 | struct exception_table_entry *extable; | |
abf2e7d6 AS |
1517 | union { |
1518 | struct work_struct work; | |
1519 | struct rcu_head rcu; | |
1520 | }; | |
09756af4 AS |
1521 | }; |
1522 | ||
d687f621 DK |
1523 | struct bpf_prog { |
1524 | u16 pages; /* Number of allocated pages */ | |
1525 | u16 jited:1, /* Is our filter JIT'ed? */ | |
1526 | jit_requested:1,/* archs need to JIT the prog */ | |
1527 | gpl_compatible:1, /* Is filter GPL compatible? */ | |
1528 | cb_access:1, /* Is control block accessed? */ | |
1529 | dst_needed:1, /* Do we need dst entry? */ | |
1530 | blinding_requested:1, /* needs constant blinding */ | |
1531 | blinded:1, /* Was blinded */ | |
1532 | is_func:1, /* program is a bpf function */ | |
1533 | kprobe_override:1, /* Do we override a kprobe? */ | |
1534 | has_callchain_buf:1, /* callchain buffer allocated? */ | |
1535 | enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ | |
1536 | call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ | |
1537 | call_get_func_ip:1, /* Do we call get_func_ip() */ | |
1538 | tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */ | |
1539 | enum bpf_prog_type type; /* Type of BPF program */ | |
1540 | enum bpf_attach_type expected_attach_type; /* For some prog types */ | |
1541 | u32 len; /* Number of filter blocks */ | |
1542 | u32 jited_len; /* Size of jited insns in bytes */ | |
1543 | u8 tag[BPF_TAG_SIZE]; | |
1544 | struct bpf_prog_stats __percpu *stats; | |
1545 | int __percpu *active; | |
1546 | unsigned int (*bpf_func)(const void *ctx, | |
1547 | const struct bpf_insn *insn); | |
1548 | struct bpf_prog_aux *aux; /* Auxiliary fields */ | |
1549 | struct sock_fprog_kern *orig_prog; /* Original BPF program */ | |
1550 | /* Instructions for interpreter */ | |
1551 | union { | |
1552 | DECLARE_FLEX_ARRAY(struct sock_filter, insns); | |
1553 | DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi); | |
1554 | }; | |
1555 | }; | |
1556 | ||
2beee5f5 | 1557 | struct bpf_array_aux { |
da765a2f DB |
1558 | /* Programs with direct jumps into programs part of this array. */ |
1559 | struct list_head poke_progs; | |
1560 | struct bpf_map *map; | |
1561 | struct mutex poke_mutex; | |
1562 | struct work_struct work; | |
2beee5f5 DB |
1563 | }; |
1564 | ||
6cc7d1e8 AN |
1565 | struct bpf_link { |
1566 | atomic64_t refcnt; | |
1567 | u32 id; | |
1568 | enum bpf_link_type type; | |
1569 | const struct bpf_link_ops *ops; | |
1570 | struct bpf_prog *prog; | |
1571 | struct work_struct work; | |
1572 | }; | |
1573 | ||
1574 | struct bpf_link_ops { | |
1575 | void (*release)(struct bpf_link *link); | |
1576 | void (*dealloc)(struct bpf_link *link); | |
73b11c2a | 1577 | int (*detach)(struct bpf_link *link); |
6cc7d1e8 AN |
1578 | int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, |
1579 | struct bpf_prog *old_prog); | |
1580 | void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); | |
1581 | int (*fill_link_info)(const struct bpf_link *link, | |
1582 | struct bpf_link_info *info); | |
aef56f2e KFL |
1583 | int (*update_map)(struct bpf_link *link, struct bpf_map *new_map, |
1584 | struct bpf_map *old_map); | |
6cc7d1e8 AN |
1585 | }; |
1586 | ||
f7e0beaf KFL |
1587 | struct bpf_tramp_link { |
1588 | struct bpf_link link; | |
1589 | struct hlist_node tramp_hlist; | |
2fcc8241 | 1590 | u64 cookie; |
f7e0beaf KFL |
1591 | }; |
1592 | ||
69fd337a SF |
1593 | struct bpf_shim_tramp_link { |
1594 | struct bpf_tramp_link link; | |
1595 | struct bpf_trampoline *trampoline; | |
1596 | }; | |
1597 | ||
f7e0beaf KFL |
1598 | struct bpf_tracing_link { |
1599 | struct bpf_tramp_link link; | |
1600 | enum bpf_attach_type attach_type; | |
1601 | struct bpf_trampoline *trampoline; | |
1602 | struct bpf_prog *tgt_prog; | |
1603 | }; | |
1604 | ||
6cc7d1e8 AN |
1605 | struct bpf_link_primer { |
1606 | struct bpf_link *link; | |
1607 | struct file *file; | |
1608 | int fd; | |
1609 | u32 id; | |
1610 | }; | |
1611 | ||
85d33df3 | 1612 | struct bpf_struct_ops_value; |
27ae7997 MKL |
1613 | struct btf_member; |
1614 | ||
1615 | #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64 | |
bb48cf16 DV |
1616 | /** |
1617 | * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to | |
1618 | * define a BPF_MAP_TYPE_STRUCT_OPS map type composed | |
1619 | * of BPF_PROG_TYPE_STRUCT_OPS progs. | |
1620 | * @verifier_ops: A structure of callbacks that are invoked by the verifier | |
1621 | * when determining whether the struct_ops progs in the | |
1622 | * struct_ops map are valid. | |
1623 | * @init: A callback that is invoked a single time, and before any other | |
1624 | * callback, to initialize the structure. A nonzero return value means | |
1625 | * the subsystem could not be initialized. | |
1626 | * @check_member: When defined, a callback invoked by the verifier to allow | |
1627 | * the subsystem to determine if an entry in the struct_ops map | |
1628 | * is valid. A nonzero return value means that the map is | |
1629 | * invalid and should be rejected by the verifier. | |
1630 | * @init_member: A callback that is invoked for each member of the struct_ops | |
1631 | * map to allow the subsystem to initialize the member. A nonzero | |
1632 | * value means the member could not be initialized. This callback | |
1633 | * is exclusive with the @type, @type_id, @value_type, and | |
1634 | * @value_id fields. | |
1635 | * @reg: A callback that is invoked when the struct_ops map has been | |
1636 | * initialized and is being attached to. Zero means the struct_ops map | |
1637 | * has been successfully registered and is live. A nonzero return value | |
1638 | * means the struct_ops map could not be registered. | |
1639 | * @unreg: A callback that is invoked when the struct_ops map should be | |
1640 | * unregistered. | |
1641 | * @update: A callback that is invoked when the live struct_ops map is being | |
1642 | * updated to contain new values. This callback is only invoked when | |
1643 | * the struct_ops map is loaded with BPF_F_LINK. If not defined, the | |
1644 | * it is assumed that the struct_ops map cannot be updated. | |
1645 | * @validate: A callback that is invoked after all of the members have been | |
1646 | * initialized. This callback should perform static checks on the | |
1647 | * map, meaning that it should either fail or succeed | |
1648 | * deterministically. A struct_ops map that has been validated may | |
1649 | * not necessarily succeed in being registered if the call to @reg | |
1650 | * fails. For example, a valid struct_ops map may be loaded, but | |
1651 | * then fail to be registered due to there being another active | |
1652 | * struct_ops map on the system in the subsystem already. For this | |
1653 | * reason, if this callback is not defined, the check is skipped as | |
1654 | * the struct_ops map will have final verification performed in | |
1655 | * @reg. | |
1656 | * @type: BTF type. | |
1657 | * @value_type: Value type. | |
1658 | * @name: The name of the struct bpf_struct_ops object. | |
1659 | * @func_models: Func models | |
1660 | * @type_id: BTF type id. | |
1661 | * @value_id: BTF value id. | |
1662 | */ | |
27ae7997 MKL |
1663 | struct bpf_struct_ops { |
1664 | const struct bpf_verifier_ops *verifier_ops; | |
1665 | int (*init)(struct btf *btf); | |
1666 | int (*check_member)(const struct btf_type *t, | |
51a52a29 DV |
1667 | const struct btf_member *member, |
1668 | const struct bpf_prog *prog); | |
85d33df3 MKL |
1669 | int (*init_member)(const struct btf_type *t, |
1670 | const struct btf_member *member, | |
1671 | void *kdata, const void *udata); | |
1672 | int (*reg)(void *kdata); | |
1673 | void (*unreg)(void *kdata); | |
aef56f2e | 1674 | int (*update)(void *kdata, void *old_kdata); |
68b04864 | 1675 | int (*validate)(void *kdata); |
27ae7997 | 1676 | const struct btf_type *type; |
85d33df3 | 1677 | const struct btf_type *value_type; |
27ae7997 MKL |
1678 | const char *name; |
1679 | struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; | |
1680 | u32 type_id; | |
85d33df3 | 1681 | u32 value_id; |
2cd3e377 | 1682 | void *cfi_stubs; |
27ae7997 MKL |
1683 | }; |
1684 | ||
1685 | #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) | |
85d33df3 | 1686 | #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) |
27ae7997 | 1687 | const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id); |
d3e42bb0 | 1688 | void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log); |
85d33df3 MKL |
1689 | bool bpf_struct_ops_get(const void *kdata); |
1690 | void bpf_struct_ops_put(const void *kdata); | |
1691 | int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, | |
1692 | void *value); | |
f7e0beaf KFL |
1693 | int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, |
1694 | struct bpf_tramp_link *link, | |
31a645ae | 1695 | const struct btf_func_model *model, |
2cd3e377 | 1696 | void *stub_func, |
31a645ae | 1697 | void *image, void *image_end); |
85d33df3 MKL |
1698 | static inline bool bpf_try_module_get(const void *data, struct module *owner) |
1699 | { | |
1700 | if (owner == BPF_MODULE_OWNER) | |
1701 | return bpf_struct_ops_get(data); | |
1702 | else | |
1703 | return try_module_get(owner); | |
1704 | } | |
1705 | static inline void bpf_module_put(const void *data, struct module *owner) | |
1706 | { | |
1707 | if (owner == BPF_MODULE_OWNER) | |
1708 | bpf_struct_ops_put(data); | |
1709 | else | |
1710 | module_put(owner); | |
1711 | } | |
68b04864 | 1712 | int bpf_struct_ops_link_create(union bpf_attr *attr); |
c196906d HT |
1713 | |
1714 | #ifdef CONFIG_NET | |
1715 | /* Define it here to avoid the use of forward declaration */ | |
1716 | struct bpf_dummy_ops_state { | |
1717 | int val; | |
1718 | }; | |
1719 | ||
1720 | struct bpf_dummy_ops { | |
1721 | int (*test_1)(struct bpf_dummy_ops_state *cb); | |
1722 | int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2, | |
1723 | char a3, unsigned long a4); | |
7dd88059 | 1724 | int (*test_sleepable)(struct bpf_dummy_ops_state *cb); |
c196906d HT |
1725 | }; |
1726 | ||
1727 | int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, | |
1728 | union bpf_attr __user *uattr); | |
1729 | #endif | |
27ae7997 MKL |
1730 | #else |
1731 | static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id) | |
1732 | { | |
1733 | return NULL; | |
1734 | } | |
d3e42bb0 MKL |
1735 | static inline void bpf_struct_ops_init(struct btf *btf, |
1736 | struct bpf_verifier_log *log) | |
1737 | { | |
1738 | } | |
85d33df3 MKL |
1739 | static inline bool bpf_try_module_get(const void *data, struct module *owner) |
1740 | { | |
1741 | return try_module_get(owner); | |
1742 | } | |
1743 | static inline void bpf_module_put(const void *data, struct module *owner) | |
1744 | { | |
1745 | module_put(owner); | |
1746 | } | |
1747 | static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, | |
1748 | void *key, | |
1749 | void *value) | |
1750 | { | |
1751 | return -EINVAL; | |
1752 | } | |
68b04864 KFL |
1753 | static inline int bpf_struct_ops_link_create(union bpf_attr *attr) |
1754 | { | |
1755 | return -EOPNOTSUPP; | |
1756 | } | |
1757 | ||
9cb61fda SF |
1758 | #endif |
1759 | ||
1760 | #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM) | |
1761 | int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, | |
1762 | int cgroup_atype); | |
1763 | void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog); | |
1764 | #else | |
69fd337a SF |
1765 | static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, |
1766 | int cgroup_atype) | |
1767 | { | |
1768 | return -EOPNOTSUPP; | |
1769 | } | |
1770 | static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog) | |
1771 | { | |
1772 | } | |
27ae7997 MKL |
1773 | #endif |
1774 | ||
04fd61ab AS |
1775 | struct bpf_array { |
1776 | struct bpf_map map; | |
1777 | u32 elem_size; | |
b2157399 | 1778 | u32 index_mask; |
2beee5f5 | 1779 | struct bpf_array_aux *aux; |
04fd61ab | 1780 | union { |
129d868e KC |
1781 | DECLARE_FLEX_ARRAY(char, value) __aligned(8); |
1782 | DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8); | |
1783 | DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8); | |
04fd61ab AS |
1784 | }; |
1785 | }; | |
3b1efb19 | 1786 | |
c04c0d2b | 1787 | #define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ |
ebf7f6f0 | 1788 | #define MAX_TAIL_CALL_CNT 33 |
04fd61ab | 1789 | |
6018e1f4 AN |
1790 | /* Maximum number of loops for bpf_loop and bpf_iter_num. |
1791 | * It's enum to expose it (and thus make it discoverable) through BTF. | |
1792 | */ | |
1793 | enum { | |
1794 | BPF_MAX_LOOPS = 8 * 1024 * 1024, | |
1795 | }; | |
1ade2371 | 1796 | |
591fe988 DB |
1797 | #define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ |
1798 | BPF_F_RDONLY_PROG | \ | |
1799 | BPF_F_WRONLY | \ | |
1800 | BPF_F_WRONLY_PROG) | |
1801 | ||
1802 | #define BPF_MAP_CAN_READ BIT(0) | |
1803 | #define BPF_MAP_CAN_WRITE BIT(1) | |
1804 | ||
20571567 DV |
1805 | /* Maximum number of user-producer ring buffer samples that can be drained in |
1806 | * a call to bpf_user_ringbuf_drain(). | |
1807 | */ | |
1808 | #define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024) | |
1809 | ||
591fe988 DB |
1810 | static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) |
1811 | { | |
1812 | u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); | |
1813 | ||
1814 | /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is | |
1815 | * not possible. | |
1816 | */ | |
1817 | if (access_flags & BPF_F_RDONLY_PROG) | |
1818 | return BPF_MAP_CAN_READ; | |
1819 | else if (access_flags & BPF_F_WRONLY_PROG) | |
1820 | return BPF_MAP_CAN_WRITE; | |
1821 | else | |
1822 | return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; | |
1823 | } | |
1824 | ||
1825 | static inline bool bpf_map_flags_access_ok(u32 access_flags) | |
1826 | { | |
1827 | return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != | |
1828 | (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); | |
1829 | } | |
1830 | ||
3b1efb19 DB |
1831 | struct bpf_event_entry { |
1832 | struct perf_event *event; | |
1833 | struct file *perf_file; | |
1834 | struct file *map_file; | |
1835 | struct rcu_head rcu; | |
1836 | }; | |
1837 | ||
f45d5b6c THJ |
1838 | static inline bool map_type_contains_progs(struct bpf_map *map) |
1839 | { | |
1840 | return map->map_type == BPF_MAP_TYPE_PROG_ARRAY || | |
1841 | map->map_type == BPF_MAP_TYPE_DEVMAP || | |
1842 | map->map_type == BPF_MAP_TYPE_CPUMAP; | |
1843 | } | |
1844 | ||
1845 | bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp); | |
f1f7714e | 1846 | int bpf_prog_calc_tag(struct bpf_prog *fp); |
bd570ff9 | 1847 | |
0756ea3e | 1848 | const struct bpf_func_proto *bpf_get_trace_printk_proto(void); |
10aceb62 | 1849 | const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void); |
555c8a86 DB |
1850 | |
1851 | typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, | |
aa7145c1 | 1852 | unsigned long off, unsigned long len); |
c64b7983 JS |
1853 | typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, |
1854 | const struct bpf_insn *src, | |
1855 | struct bpf_insn *dst, | |
1856 | struct bpf_prog *prog, | |
1857 | u32 *target_size); | |
555c8a86 DB |
1858 | |
1859 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, | |
1860 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); | |
04fd61ab | 1861 | |
324bda9e AS |
1862 | /* an array of programs to be executed under rcu_lock. |
1863 | * | |
1864 | * Typical usage: | |
055eb955 | 1865 | * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run); |
324bda9e AS |
1866 | * |
1867 | * the structure returned by bpf_prog_array_alloc() should be populated | |
1868 | * with program pointers and the last pointer must be NULL. | |
1869 | * The user has to keep refcnt on the program and make sure the program | |
1870 | * is removed from the array before bpf_prog_put(). | |
1871 | * The 'struct bpf_prog_array *' should only be replaced with xchg() | |
1872 | * since other cpus are walking the array of pointers in parallel. | |
1873 | */ | |
394e40a2 RG |
1874 | struct bpf_prog_array_item { |
1875 | struct bpf_prog *prog; | |
82e6b1ee AN |
1876 | union { |
1877 | struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; | |
1878 | u64 bpf_cookie; | |
1879 | }; | |
394e40a2 RG |
1880 | }; |
1881 | ||
324bda9e AS |
1882 | struct bpf_prog_array { |
1883 | struct rcu_head rcu; | |
d7f10df8 | 1884 | struct bpf_prog_array_item items[]; |
324bda9e AS |
1885 | }; |
1886 | ||
46531a30 PB |
1887 | struct bpf_empty_prog_array { |
1888 | struct bpf_prog_array hdr; | |
1889 | struct bpf_prog *null_prog; | |
1890 | }; | |
1891 | ||
1892 | /* to avoid allocating empty bpf_prog_array for cgroups that | |
1893 | * don't have bpf program attached use one global 'bpf_empty_prog_array' | |
1894 | * It will not be modified the caller of bpf_prog_array_alloc() | |
1895 | * (since caller requested prog_cnt == 0) | |
1896 | * that pointer should be 'freed' by bpf_prog_array_free() | |
1897 | */ | |
1898 | extern struct bpf_empty_prog_array bpf_empty_prog_array; | |
1899 | ||
d29ab6e1 | 1900 | struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); |
54e9c9d4 | 1901 | void bpf_prog_array_free(struct bpf_prog_array *progs); |
8c7dcb84 DK |
1902 | /* Use when traversal over the bpf_prog_array uses tasks_trace rcu */ |
1903 | void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs); | |
54e9c9d4 | 1904 | int bpf_prog_array_length(struct bpf_prog_array *progs); |
0d01da6a | 1905 | bool bpf_prog_array_is_empty(struct bpf_prog_array *array); |
54e9c9d4 | 1906 | int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, |
468e2f64 | 1907 | __u32 __user *prog_ids, u32 cnt); |
324bda9e | 1908 | |
54e9c9d4 | 1909 | void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, |
e87c6bc3 | 1910 | struct bpf_prog *old_prog); |
ce3aa9cc JS |
1911 | int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index); |
1912 | int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, | |
1913 | struct bpf_prog *prog); | |
54e9c9d4 | 1914 | int bpf_prog_array_copy_info(struct bpf_prog_array *array, |
3a38bb98 YS |
1915 | u32 *prog_ids, u32 request_cnt, |
1916 | u32 *prog_cnt); | |
54e9c9d4 | 1917 | int bpf_prog_array_copy(struct bpf_prog_array *old_array, |
e87c6bc3 YS |
1918 | struct bpf_prog *exclude_prog, |
1919 | struct bpf_prog *include_prog, | |
82e6b1ee | 1920 | u64 bpf_cookie, |
e87c6bc3 YS |
1921 | struct bpf_prog_array **new_array); |
1922 | ||
c7603cfa AN |
1923 | struct bpf_run_ctx {}; |
1924 | ||
1925 | struct bpf_cg_run_ctx { | |
1926 | struct bpf_run_ctx run_ctx; | |
7d08c2c9 | 1927 | const struct bpf_prog_array_item *prog_item; |
c4dcfdd4 | 1928 | int retval; |
c7603cfa AN |
1929 | }; |
1930 | ||
82e6b1ee AN |
1931 | struct bpf_trace_run_ctx { |
1932 | struct bpf_run_ctx run_ctx; | |
1933 | u64 bpf_cookie; | |
a3c485a5 | 1934 | bool is_uprobe; |
82e6b1ee AN |
1935 | }; |
1936 | ||
e384c7b7 KFL |
1937 | struct bpf_tramp_run_ctx { |
1938 | struct bpf_run_ctx run_ctx; | |
1939 | u64 bpf_cookie; | |
1940 | struct bpf_run_ctx *saved_run_ctx; | |
1941 | }; | |
1942 | ||
7d08c2c9 AN |
1943 | static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx) |
1944 | { | |
1945 | struct bpf_run_ctx *old_ctx = NULL; | |
1946 | ||
1947 | #ifdef CONFIG_BPF_SYSCALL | |
1948 | old_ctx = current->bpf_ctx; | |
1949 | current->bpf_ctx = new_ctx; | |
1950 | #endif | |
1951 | return old_ctx; | |
1952 | } | |
1953 | ||
1954 | static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx) | |
1955 | { | |
1956 | #ifdef CONFIG_BPF_SYSCALL | |
1957 | current->bpf_ctx = old_ctx; | |
1958 | #endif | |
1959 | } | |
1960 | ||
77241217 SF |
1961 | /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */ |
1962 | #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0) | |
1963 | /* BPF program asks to set CN on the packet. */ | |
1964 | #define BPF_RET_SET_CN (1 << 0) | |
1965 | ||
7d08c2c9 AN |
1966 | typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); |
1967 | ||
7d08c2c9 | 1968 | static __always_inline u32 |
055eb955 | 1969 | bpf_prog_run_array(const struct bpf_prog_array *array, |
7d08c2c9 AN |
1970 | const void *ctx, bpf_prog_run_fn run_prog) |
1971 | { | |
1972 | const struct bpf_prog_array_item *item; | |
1973 | const struct bpf_prog *prog; | |
82e6b1ee AN |
1974 | struct bpf_run_ctx *old_run_ctx; |
1975 | struct bpf_trace_run_ctx run_ctx; | |
7d08c2c9 AN |
1976 | u32 ret = 1; |
1977 | ||
055eb955 SF |
1978 | RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held"); |
1979 | ||
7d08c2c9 | 1980 | if (unlikely(!array)) |
055eb955 SF |
1981 | return ret; |
1982 | ||
a3c485a5 JO |
1983 | run_ctx.is_uprobe = false; |
1984 | ||
055eb955 | 1985 | migrate_disable(); |
82e6b1ee | 1986 | old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); |
7d08c2c9 AN |
1987 | item = &array->items[0]; |
1988 | while ((prog = READ_ONCE(item->prog))) { | |
82e6b1ee | 1989 | run_ctx.bpf_cookie = item->bpf_cookie; |
7d08c2c9 AN |
1990 | ret &= run_prog(prog, ctx); |
1991 | item++; | |
1992 | } | |
82e6b1ee | 1993 | bpf_reset_run_ctx(old_run_ctx); |
7d08c2c9 AN |
1994 | migrate_enable(); |
1995 | return ret; | |
1996 | } | |
324bda9e | 1997 | |
8c7dcb84 DK |
1998 | /* Notes on RCU design for bpf_prog_arrays containing sleepable programs: |
1999 | * | |
2000 | * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array | |
2001 | * overall. As a result, we must use the bpf_prog_array_free_sleepable | |
2002 | * in order to use the tasks_trace rcu grace period. | |
2003 | * | |
2004 | * When a non-sleepable program is inside the array, we take the rcu read | |
2005 | * section and disable preemption for that program alone, so it can access | |
2006 | * rcu-protected dynamically sized maps. | |
2007 | */ | |
2008 | static __always_inline u32 | |
a3c485a5 JO |
2009 | bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu, |
2010 | const void *ctx, bpf_prog_run_fn run_prog) | |
8c7dcb84 DK |
2011 | { |
2012 | const struct bpf_prog_array_item *item; | |
2013 | const struct bpf_prog *prog; | |
2014 | const struct bpf_prog_array *array; | |
2015 | struct bpf_run_ctx *old_run_ctx; | |
2016 | struct bpf_trace_run_ctx run_ctx; | |
2017 | u32 ret = 1; | |
2018 | ||
2019 | might_fault(); | |
2020 | ||
2021 | rcu_read_lock_trace(); | |
2022 | migrate_disable(); | |
2023 | ||
a3c485a5 JO |
2024 | run_ctx.is_uprobe = true; |
2025 | ||
8c7dcb84 DK |
2026 | array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held()); |
2027 | if (unlikely(!array)) | |
2028 | goto out; | |
2029 | old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); | |
2030 | item = &array->items[0]; | |
2031 | while ((prog = READ_ONCE(item->prog))) { | |
2032 | if (!prog->aux->sleepable) | |
2033 | rcu_read_lock(); | |
2034 | ||
2035 | run_ctx.bpf_cookie = item->bpf_cookie; | |
2036 | ret &= run_prog(prog, ctx); | |
2037 | item++; | |
2038 | ||
2039 | if (!prog->aux->sleepable) | |
2040 | rcu_read_unlock(); | |
2041 | } | |
2042 | bpf_reset_run_ctx(old_run_ctx); | |
2043 | out: | |
2044 | migrate_enable(); | |
2045 | rcu_read_unlock_trace(); | |
2046 | return ret; | |
2047 | } | |
2048 | ||
89aa0758 | 2049 | #ifdef CONFIG_BPF_SYSCALL |
b121d1e7 | 2050 | DECLARE_PER_CPU(int, bpf_prog_active); |
d46edd67 | 2051 | extern struct mutex bpf_stats_enabled_mutex; |
b121d1e7 | 2052 | |
c518cfa0 TG |
2053 | /* |
2054 | * Block execution of BPF programs attached to instrumentation (perf, | |
2055 | * kprobes, tracepoints) to prevent deadlocks on map operations as any of | |
2056 | * these events can happen inside a region which holds a map bucket lock | |
2057 | * and can deadlock on it. | |
c518cfa0 TG |
2058 | */ |
2059 | static inline void bpf_disable_instrumentation(void) | |
2060 | { | |
2061 | migrate_disable(); | |
79364031 | 2062 | this_cpu_inc(bpf_prog_active); |
c518cfa0 TG |
2063 | } |
2064 | ||
2065 | static inline void bpf_enable_instrumentation(void) | |
2066 | { | |
79364031 | 2067 | this_cpu_dec(bpf_prog_active); |
c518cfa0 TG |
2068 | migrate_enable(); |
2069 | } | |
2070 | ||
f66e448c CF |
2071 | extern const struct file_operations bpf_map_fops; |
2072 | extern const struct file_operations bpf_prog_fops; | |
367ec3e4 | 2073 | extern const struct file_operations bpf_iter_fops; |
f66e448c | 2074 | |
91cc1a99 | 2075 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
7de16e3a JK |
2076 | extern const struct bpf_prog_ops _name ## _prog_ops; \ |
2077 | extern const struct bpf_verifier_ops _name ## _verifier_ops; | |
40077e0c JB |
2078 | #define BPF_MAP_TYPE(_id, _ops) \ |
2079 | extern const struct bpf_map_ops _ops; | |
f2e10bff | 2080 | #define BPF_LINK_TYPE(_id, _name) |
be9370a7 JB |
2081 | #include <linux/bpf_types.h> |
2082 | #undef BPF_PROG_TYPE | |
40077e0c | 2083 | #undef BPF_MAP_TYPE |
f2e10bff | 2084 | #undef BPF_LINK_TYPE |
0fc174de | 2085 | |
ab3f0063 | 2086 | extern const struct bpf_prog_ops bpf_offload_prog_ops; |
4f9218aa JK |
2087 | extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; |
2088 | extern const struct bpf_verifier_ops xdp_analyzer_ops; | |
2089 | ||
0fc174de | 2090 | struct bpf_prog *bpf_prog_get(u32 ufd); |
248f346f | 2091 | struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, |
288b3de5 | 2092 | bool attach_drv); |
85192dbf | 2093 | void bpf_prog_add(struct bpf_prog *prog, int i); |
c540594f | 2094 | void bpf_prog_sub(struct bpf_prog *prog, int i); |
85192dbf | 2095 | void bpf_prog_inc(struct bpf_prog *prog); |
a6f6df69 | 2096 | struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); |
61e021f3 DB |
2097 | void bpf_prog_put(struct bpf_prog *prog); |
2098 | ||
e7895f01 | 2099 | void bpf_prog_free_id(struct bpf_prog *prog); |
158e5e9e | 2100 | void bpf_map_free_id(struct bpf_map *map); |
ad8ad79f | 2101 | |
aa3496ac | 2102 | struct btf_field *btf_record_find(const struct btf_record *rec, |
74843b57 | 2103 | u32 offset, u32 field_mask); |
aa3496ac KKD |
2104 | void btf_record_free(struct btf_record *rec); |
2105 | void bpf_map_free_record(struct bpf_map *map); | |
2106 | struct btf_record *btf_record_dup(const struct btf_record *rec); | |
2107 | bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b); | |
db559117 | 2108 | void bpf_obj_free_timer(const struct btf_record *rec, void *obj); |
aa3496ac | 2109 | void bpf_obj_free_fields(const struct btf_record *rec, void *obj); |
e383a459 | 2110 | void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu); |
61df10c7 | 2111 | |
1ed4d924 | 2112 | struct bpf_map *bpf_map_get(u32 ufd); |
c9da161c | 2113 | struct bpf_map *bpf_map_get_with_uref(u32 ufd); |
c2101297 | 2114 | struct bpf_map *__bpf_map_get(struct fd f); |
1e0bd5a0 AN |
2115 | void bpf_map_inc(struct bpf_map *map); |
2116 | void bpf_map_inc_with_uref(struct bpf_map *map); | |
b671c206 | 2117 | struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref); |
1e0bd5a0 | 2118 | struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); |
c9da161c | 2119 | void bpf_map_put_with_uref(struct bpf_map *map); |
61e021f3 | 2120 | void bpf_map_put(struct bpf_map *map); |
196e8ca7 DB |
2121 | void *bpf_map_area_alloc(u64 size, int numa_node); |
2122 | void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); | |
d407bd25 | 2123 | void bpf_map_area_free(void *base); |
353050be | 2124 | bool bpf_map_write_active(const struct bpf_map *map); |
bd475643 | 2125 | void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); |
cb4d03ab BV |
2126 | int generic_map_lookup_batch(struct bpf_map *map, |
2127 | const union bpf_attr *attr, | |
aa2e93b8 | 2128 | union bpf_attr __user *uattr); |
3af43ba4 | 2129 | int generic_map_update_batch(struct bpf_map *map, struct file *map_file, |
aa2e93b8 BV |
2130 | const union bpf_attr *attr, |
2131 | union bpf_attr __user *uattr); | |
2132 | int generic_map_delete_batch(struct bpf_map *map, | |
2133 | const union bpf_attr *attr, | |
cb4d03ab | 2134 | union bpf_attr __user *uattr); |
6086d29d | 2135 | struct bpf_map *bpf_map_get_curr_or_next(u32 *id); |
a228a64f | 2136 | struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); |
61e021f3 | 2137 | |
48edc1f7 RG |
2138 | #ifdef CONFIG_MEMCG_KMEM |
2139 | void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, | |
2140 | int node); | |
2141 | void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags); | |
ddef81b5 YS |
2142 | void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, |
2143 | gfp_t flags); | |
48edc1f7 RG |
2144 | void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, |
2145 | size_t align, gfp_t flags); | |
2146 | #else | |
2147 | static inline void * | |
2148 | bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, | |
2149 | int node) | |
2150 | { | |
2151 | return kmalloc_node(size, flags, node); | |
2152 | } | |
2153 | ||
2154 | static inline void * | |
2155 | bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) | |
2156 | { | |
2157 | return kzalloc(size, flags); | |
2158 | } | |
2159 | ||
ddef81b5 YS |
2160 | static inline void * |
2161 | bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags) | |
2162 | { | |
2163 | return kvcalloc(n, size, flags); | |
2164 | } | |
2165 | ||
48edc1f7 RG |
2166 | static inline void __percpu * |
2167 | bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, | |
2168 | gfp_t flags) | |
2169 | { | |
2170 | return __alloc_percpu_gfp(size, align, flags); | |
2171 | } | |
2172 | #endif | |
2173 | ||
25954730 AP |
2174 | static inline int |
2175 | bpf_map_init_elem_count(struct bpf_map *map) | |
2176 | { | |
2177 | size_t size = sizeof(*map->elem_count), align = size; | |
2178 | gfp_t flags = GFP_USER | __GFP_NOWARN; | |
2179 | ||
2180 | map->elem_count = bpf_map_alloc_percpu(map, size, align, flags); | |
2181 | if (!map->elem_count) | |
2182 | return -ENOMEM; | |
2183 | ||
2184 | return 0; | |
2185 | } | |
2186 | ||
2187 | static inline void | |
2188 | bpf_map_free_elem_count(struct bpf_map *map) | |
2189 | { | |
2190 | free_percpu(map->elem_count); | |
2191 | } | |
2192 | ||
2193 | static inline void bpf_map_inc_elem_count(struct bpf_map *map) | |
2194 | { | |
2195 | this_cpu_inc(*map->elem_count); | |
2196 | } | |
2197 | ||
2198 | static inline void bpf_map_dec_elem_count(struct bpf_map *map) | |
2199 | { | |
2200 | this_cpu_dec(*map->elem_count); | |
2201 | } | |
2202 | ||
1be7f75d AS |
2203 | extern int sysctl_unprivileged_bpf_disabled; |
2204 | ||
d17aff80 | 2205 | static inline bool bpf_allow_ptr_leaks(void) |
2c78ee89 | 2206 | { |
d17aff80 | 2207 | return perfmon_capable(); |
2c78ee89 AS |
2208 | } |
2209 | ||
d17aff80 | 2210 | static inline bool bpf_allow_uninit_stack(void) |
01f810ac | 2211 | { |
d17aff80 | 2212 | return perfmon_capable(); |
01f810ac AM |
2213 | } |
2214 | ||
d17aff80 | 2215 | static inline bool bpf_bypass_spec_v1(void) |
2c78ee89 | 2216 | { |
d17aff80 | 2217 | return cpu_mitigations_off() || perfmon_capable(); |
2c78ee89 AS |
2218 | } |
2219 | ||
d17aff80 | 2220 | static inline bool bpf_bypass_spec_v4(void) |
2c78ee89 | 2221 | { |
d17aff80 | 2222 | return cpu_mitigations_off() || perfmon_capable(); |
2c78ee89 AS |
2223 | } |
2224 | ||
6e71b04a | 2225 | int bpf_map_new_fd(struct bpf_map *map, int flags); |
b2197755 DB |
2226 | int bpf_prog_new_fd(struct bpf_prog *prog); |
2227 | ||
f2e10bff | 2228 | void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, |
a3b80e10 AN |
2229 | const struct bpf_link_ops *ops, struct bpf_prog *prog); |
2230 | int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); | |
2231 | int bpf_link_settle(struct bpf_link_primer *primer); | |
2232 | void bpf_link_cleanup(struct bpf_link_primer *primer); | |
70ed506c AN |
2233 | void bpf_link_inc(struct bpf_link *link); |
2234 | void bpf_link_put(struct bpf_link *link); | |
2235 | int bpf_link_new_fd(struct bpf_link *link); | |
2236 | struct bpf_link *bpf_link_get_from_fd(u32 ufd); | |
9f883612 | 2237 | struct bpf_link *bpf_link_get_curr_or_next(u32 *id); |
70ed506c | 2238 | |
cb8edce2 AN |
2239 | int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname); |
2240 | int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags); | |
b2197755 | 2241 | |
21aef70e | 2242 | #define BPF_ITER_FUNC_PREFIX "bpf_iter_" |
e5158d98 | 2243 | #define DEFINE_BPF_ITER_FUNC(target, args...) \ |
21aef70e YS |
2244 | extern int bpf_iter_ ## target(args); \ |
2245 | int __init bpf_iter_ ## target(args) { return 0; } | |
15d83c4d | 2246 | |
f0d74c4d KFL |
2247 | /* |
2248 | * The task type of iterators. | |
2249 | * | |
2250 | * For BPF task iterators, they can be parameterized with various | |
2251 | * parameters to visit only some of tasks. | |
2252 | * | |
2253 | * BPF_TASK_ITER_ALL (default) | |
2254 | * Iterate over resources of every task. | |
2255 | * | |
2256 | * BPF_TASK_ITER_TID | |
2257 | * Iterate over resources of a task/tid. | |
2258 | * | |
2259 | * BPF_TASK_ITER_TGID | |
2260 | * Iterate over resources of every task of a process / task group. | |
2261 | */ | |
2262 | enum bpf_iter_task_type { | |
2263 | BPF_TASK_ITER_ALL = 0, | |
2264 | BPF_TASK_ITER_TID, | |
2265 | BPF_TASK_ITER_TGID, | |
2266 | }; | |
2267 | ||
f9c79272 | 2268 | struct bpf_iter_aux_info { |
d4ccaf58 | 2269 | /* for map_elem iter */ |
a5cbe05a | 2270 | struct bpf_map *map; |
d4ccaf58 HL |
2271 | |
2272 | /* for cgroup iter */ | |
2273 | struct { | |
2274 | struct cgroup *start; /* starting cgroup */ | |
2275 | enum bpf_cgroup_iter_order order; | |
2276 | } cgroup; | |
f0d74c4d KFL |
2277 | struct { |
2278 | enum bpf_iter_task_type type; | |
2279 | u32 pid; | |
2280 | } task; | |
f9c79272 YS |
2281 | }; |
2282 | ||
5e7b3020 YS |
2283 | typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, |
2284 | union bpf_iter_link_info *linfo, | |
2285 | struct bpf_iter_aux_info *aux); | |
2286 | typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); | |
6b0a249a YS |
2287 | typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, |
2288 | struct seq_file *seq); | |
2289 | typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, | |
2290 | struct bpf_link_info *info); | |
3cee6fb8 MKL |
2291 | typedef const struct bpf_func_proto * |
2292 | (*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id, | |
2293 | const struct bpf_prog *prog); | |
a5cbe05a | 2294 | |
cf83b2d2 YS |
2295 | enum bpf_iter_feature { |
2296 | BPF_ITER_RESCHED = BIT(0), | |
2297 | }; | |
2298 | ||
3c32cc1b | 2299 | #define BPF_ITER_CTX_ARG_MAX 2 |
ae24345d YS |
2300 | struct bpf_iter_reg { |
2301 | const char *target; | |
5e7b3020 YS |
2302 | bpf_iter_attach_target_t attach_target; |
2303 | bpf_iter_detach_target_t detach_target; | |
6b0a249a YS |
2304 | bpf_iter_show_fdinfo_t show_fdinfo; |
2305 | bpf_iter_fill_link_info_t fill_link_info; | |
3cee6fb8 | 2306 | bpf_iter_get_func_proto_t get_func_proto; |
3c32cc1b | 2307 | u32 ctx_arg_info_size; |
cf83b2d2 | 2308 | u32 feature; |
3c32cc1b | 2309 | struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; |
14fc6bd6 | 2310 | const struct bpf_iter_seq_info *seq_info; |
ae24345d YS |
2311 | }; |
2312 | ||
e5158d98 YS |
2313 | struct bpf_iter_meta { |
2314 | __bpf_md_ptr(struct seq_file *, seq); | |
2315 | u64 session_id; | |
2316 | u64 seq_num; | |
2317 | }; | |
2318 | ||
a5cbe05a YS |
2319 | struct bpf_iter__bpf_map_elem { |
2320 | __bpf_md_ptr(struct bpf_iter_meta *, meta); | |
2321 | __bpf_md_ptr(struct bpf_map *, map); | |
2322 | __bpf_md_ptr(void *, key); | |
2323 | __bpf_md_ptr(void *, value); | |
2324 | }; | |
2325 | ||
15172a46 | 2326 | int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); |
ab2ee4fc | 2327 | void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); |
15d83c4d | 2328 | bool bpf_iter_prog_supported(struct bpf_prog *prog); |
3cee6fb8 MKL |
2329 | const struct bpf_func_proto * |
2330 | bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); | |
af2ac3e1 | 2331 | int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog); |
ac51d99b | 2332 | int bpf_iter_new_fd(struct bpf_link *link); |
367ec3e4 | 2333 | bool bpf_link_is_iter(struct bpf_link *link); |
e5158d98 YS |
2334 | struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); |
2335 | int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); | |
b76f2226 YS |
2336 | void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, |
2337 | struct seq_file *seq); | |
2338 | int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, | |
2339 | struct bpf_link_info *info); | |
ae24345d | 2340 | |
314ee05e YS |
2341 | int map_set_for_each_callback_args(struct bpf_verifier_env *env, |
2342 | struct bpf_func_state *caller, | |
2343 | struct bpf_func_state *callee); | |
2344 | ||
15a07b33 AS |
2345 | int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); |
2346 | int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); | |
2347 | int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, | |
2348 | u64 flags); | |
2349 | int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, | |
2350 | u64 flags); | |
d056a788 | 2351 | |
557c0c6e | 2352 | int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); |
15a07b33 | 2353 | |
d056a788 DB |
2354 | int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, |
2355 | void *key, void *value, u64 map_flags); | |
14dc6f04 | 2356 | int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); |
bcc6b1b7 MKL |
2357 | int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, |
2358 | void *key, void *value, u64 map_flags); | |
14dc6f04 | 2359 | int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); |
d056a788 | 2360 | |
6e71b04a | 2361 | int bpf_get_file_flag(int flags); |
af2ac3e1 | 2362 | int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size, |
dcab51f1 | 2363 | size_t actual_size); |
6e71b04a | 2364 | |
61e021f3 | 2365 | /* verify correctness of eBPF program */ |
47a71c1f | 2366 | int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size); |
a643bff7 AN |
2367 | |
2368 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON | |
1ea47e01 | 2369 | void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); |
a643bff7 | 2370 | #endif |
46f55cff | 2371 | |
76654e67 AM |
2372 | struct btf *bpf_get_btf_vmlinux(void); |
2373 | ||
46f55cff | 2374 | /* Map specifics */ |
d53ad5d8 | 2375 | struct xdp_frame; |
6d5fc195 | 2376 | struct sk_buff; |
e6a4750f BT |
2377 | struct bpf_dtab_netdev; |
2378 | struct bpf_cpu_map_entry; | |
67f29e07 | 2379 | |
1d233886 | 2380 | void __dev_flush(void); |
d53ad5d8 | 2381 | int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, |
1d233886 | 2382 | struct net_device *dev_rx); |
d53ad5d8 | 2383 | int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, |
38edddb8 | 2384 | struct net_device *dev_rx); |
d53ad5d8 | 2385 | int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx, |
e624d4ed | 2386 | struct bpf_map *map, bool exclude_ingress); |
6d5fc195 TM |
2387 | int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, |
2388 | struct bpf_prog *xdp_prog); | |
e624d4ed HL |
2389 | int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, |
2390 | struct bpf_prog *xdp_prog, struct bpf_map *map, | |
2391 | bool exclude_ingress); | |
46f55cff | 2392 | |
cdfafe98 | 2393 | void __cpu_map_flush(void); |
d53ad5d8 | 2394 | int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, |
9c270af3 | 2395 | struct net_device *dev_rx); |
11941f8a KKD |
2396 | int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, |
2397 | struct sk_buff *skb); | |
9c270af3 | 2398 | |
96eabe7a MKL |
2399 | /* Return map's numa specified by userspace */ |
2400 | static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) | |
2401 | { | |
2402 | return (attr->map_flags & BPF_F_NUMA_NODE) ? | |
2403 | attr->numa_node : NUMA_NO_NODE; | |
2404 | } | |
2405 | ||
040ee692 | 2406 | struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); |
5dc4c4b7 | 2407 | int array_map_alloc_check(union bpf_attr *attr); |
040ee692 | 2408 | |
c695865c SF |
2409 | int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, |
2410 | union bpf_attr __user *uattr); | |
2411 | int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, | |
2412 | union bpf_attr __user *uattr); | |
da00d2f1 KS |
2413 | int bpf_prog_test_run_tracing(struct bpf_prog *prog, |
2414 | const union bpf_attr *kattr, | |
2415 | union bpf_attr __user *uattr); | |
c695865c SF |
2416 | int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, |
2417 | const union bpf_attr *kattr, | |
2418 | union bpf_attr __user *uattr); | |
1b4d60ec SL |
2419 | int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, |
2420 | const union bpf_attr *kattr, | |
2421 | union bpf_attr __user *uattr); | |
7c32e8f8 LB |
2422 | int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, |
2423 | const union bpf_attr *kattr, | |
2424 | union bpf_attr __user *uattr); | |
2b99ef22 FW |
2425 | int bpf_prog_test_run_nf(struct bpf_prog *prog, |
2426 | const union bpf_attr *kattr, | |
2427 | union bpf_attr __user *uattr); | |
9e15db66 AS |
2428 | bool btf_ctx_access(int off, int size, enum bpf_access_type type, |
2429 | const struct bpf_prog *prog, | |
2430 | struct bpf_insn_access_aux *info); | |
35346ab6 HT |
2431 | |
2432 | static inline bool bpf_tracing_ctx_access(int off, int size, | |
2433 | enum bpf_access_type type) | |
2434 | { | |
2435 | if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) | |
2436 | return false; | |
2437 | if (type != BPF_READ) | |
2438 | return false; | |
2439 | if (off % size != 0) | |
2440 | return false; | |
2441 | return true; | |
2442 | } | |
2443 | ||
2444 | static inline bool bpf_tracing_btf_ctx_access(int off, int size, | |
2445 | enum bpf_access_type type, | |
2446 | const struct bpf_prog *prog, | |
2447 | struct bpf_insn_access_aux *info) | |
2448 | { | |
2449 | if (!bpf_tracing_ctx_access(off, size, type)) | |
2450 | return false; | |
2451 | return btf_ctx_access(off, size, type, prog, info); | |
2452 | } | |
2453 | ||
6728aea7 KKD |
2454 | int btf_struct_access(struct bpf_verifier_log *log, |
2455 | const struct bpf_reg_state *reg, | |
2456 | int off, int size, enum bpf_access_type atype, | |
63260df1 | 2457 | u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name); |
faaf4a79 | 2458 | bool btf_struct_ids_match(struct bpf_verifier_log *log, |
22dc4a0f | 2459 | const struct btf *btf, u32 id, int off, |
2ab3b380 KKD |
2460 | const struct btf *need_btf, u32 need_type_id, |
2461 | bool strict); | |
9e15db66 | 2462 | |
fec56f58 AS |
2463 | int btf_distill_func_proto(struct bpf_verifier_log *log, |
2464 | struct btf *btf, | |
2465 | const struct btf_type *func_proto, | |
2466 | const char *func_name, | |
2467 | struct btf_func_model *m); | |
2468 | ||
51c39bb1 | 2469 | struct bpf_reg_state; |
4ba1d0f2 | 2470 | int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog); |
efc68158 | 2471 | int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, |
be8704ff | 2472 | struct btf *btf, const struct btf_type *t); |
b9ae0c9d KKD |
2473 | const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt, |
2474 | int comp_idx, const char *tag_key); | |
8c1b6e69 | 2475 | |
7e6897f9 | 2476 | struct bpf_prog *bpf_prog_by_id(u32 id); |
005142b8 | 2477 | struct bpf_link *bpf_link_by_id(u32 id); |
7e6897f9 | 2478 | |
d17aff80 | 2479 | const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); |
a10787e6 | 2480 | void bpf_task_storage_free(struct task_struct *task); |
c4bcfb38 | 2481 | void bpf_cgrp_storage_free(struct cgroup *cgroup); |
e6ac2450 MKL |
2482 | bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog); |
2483 | const struct btf_func_model * | |
2484 | bpf_jit_find_kfunc_model(const struct bpf_prog *prog, | |
2485 | const struct bpf_insn *insn); | |
1cf3bfc6 IL |
2486 | int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, |
2487 | u16 btf_fd_idx, u8 **func_addr); | |
2488 | ||
fbd94c7a AS |
2489 | struct bpf_core_ctx { |
2490 | struct bpf_verifier_log *log; | |
2491 | const struct btf *btf; | |
2492 | }; | |
2493 | ||
57539b1c DV |
2494 | bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, |
2495 | const struct bpf_reg_state *reg, | |
63260df1 | 2496 | const char *field_name, u32 btf_id, const char *suffix); |
57539b1c | 2497 | |
b613d335 DV |
2498 | bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, |
2499 | const struct btf *reg_btf, u32 reg_id, | |
2500 | const struct btf *arg_btf, u32 arg_id); | |
2501 | ||
fbd94c7a AS |
2502 | int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, |
2503 | int relo_idx, void *insn); | |
2504 | ||
44a3918c JP |
2505 | static inline bool unprivileged_ebpf_enabled(void) |
2506 | { | |
2507 | return !sysctl_unprivileged_bpf_disabled; | |
2508 | } | |
2509 | ||
24426654 MKL |
2510 | /* Not all bpf prog type has the bpf_ctx. |
2511 | * For the bpf prog type that has initialized the bpf_ctx, | |
2512 | * this function can be used to decide if a kernel function | |
2513 | * is called by a bpf program. | |
2514 | */ | |
2515 | static inline bool has_current_bpf_ctx(void) | |
2516 | { | |
2517 | return !!current->bpf_ctx; | |
2518 | } | |
05b24ff9 JO |
2519 | |
2520 | void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog); | |
8357b366 JK |
2521 | |
2522 | void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, | |
2523 | enum bpf_dynptr_type type, u32 offset, u32 size); | |
2524 | void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); | |
2525 | void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr); | |
9a675ba5 SAS |
2526 | |
2527 | bool dev_check_flush(void); | |
2528 | bool cpu_map_check_flush(void); | |
9c270af3 | 2529 | #else /* !CONFIG_BPF_SYSCALL */ |
0fc174de DB |
2530 | static inline struct bpf_prog *bpf_prog_get(u32 ufd) |
2531 | { | |
2532 | return ERR_PTR(-EOPNOTSUPP); | |
2533 | } | |
2534 | ||
248f346f JK |
2535 | static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, |
2536 | enum bpf_prog_type type, | |
288b3de5 | 2537 | bool attach_drv) |
248f346f JK |
2538 | { |
2539 | return ERR_PTR(-EOPNOTSUPP); | |
2540 | } | |
2541 | ||
85192dbf | 2542 | static inline void bpf_prog_add(struct bpf_prog *prog, int i) |
cc2e0b3f | 2543 | { |
cc2e0b3f | 2544 | } |
113214be | 2545 | |
c540594f DB |
2546 | static inline void bpf_prog_sub(struct bpf_prog *prog, int i) |
2547 | { | |
2548 | } | |
2549 | ||
0fc174de DB |
2550 | static inline void bpf_prog_put(struct bpf_prog *prog) |
2551 | { | |
2552 | } | |
6d67942d | 2553 | |
85192dbf | 2554 | static inline void bpf_prog_inc(struct bpf_prog *prog) |
aa6a5f3c | 2555 | { |
aa6a5f3c | 2556 | } |
5ccb071e | 2557 | |
a6f6df69 JF |
2558 | static inline struct bpf_prog *__must_check |
2559 | bpf_prog_inc_not_zero(struct bpf_prog *prog) | |
2560 | { | |
2561 | return ERR_PTR(-EOPNOTSUPP); | |
2562 | } | |
2563 | ||
6cc7d1e8 AN |
2564 | static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, |
2565 | const struct bpf_link_ops *ops, | |
2566 | struct bpf_prog *prog) | |
2567 | { | |
2568 | } | |
2569 | ||
2570 | static inline int bpf_link_prime(struct bpf_link *link, | |
2571 | struct bpf_link_primer *primer) | |
2572 | { | |
2573 | return -EOPNOTSUPP; | |
2574 | } | |
2575 | ||
2576 | static inline int bpf_link_settle(struct bpf_link_primer *primer) | |
2577 | { | |
2578 | return -EOPNOTSUPP; | |
2579 | } | |
2580 | ||
2581 | static inline void bpf_link_cleanup(struct bpf_link_primer *primer) | |
2582 | { | |
2583 | } | |
2584 | ||
2585 | static inline void bpf_link_inc(struct bpf_link *link) | |
2586 | { | |
2587 | } | |
2588 | ||
2589 | static inline void bpf_link_put(struct bpf_link *link) | |
2590 | { | |
2591 | } | |
2592 | ||
6e71b04a | 2593 | static inline int bpf_obj_get_user(const char __user *pathname, int flags) |
98589a09 SL |
2594 | { |
2595 | return -EOPNOTSUPP; | |
2596 | } | |
2597 | ||
1d233886 | 2598 | static inline void __dev_flush(void) |
46f55cff JF |
2599 | { |
2600 | } | |
9c270af3 | 2601 | |
d53ad5d8 | 2602 | struct xdp_frame; |
67f29e07 | 2603 | struct bpf_dtab_netdev; |
e6a4750f | 2604 | struct bpf_cpu_map_entry; |
67f29e07 | 2605 | |
1d233886 | 2606 | static inline |
d53ad5d8 | 2607 | int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, |
1d233886 THJ |
2608 | struct net_device *dev_rx) |
2609 | { | |
2610 | return 0; | |
2611 | } | |
2612 | ||
67f29e07 | 2613 | static inline |
d53ad5d8 | 2614 | int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, |
38edddb8 | 2615 | struct net_device *dev_rx) |
67f29e07 JDB |
2616 | { |
2617 | return 0; | |
2618 | } | |
2619 | ||
e624d4ed | 2620 | static inline |
d53ad5d8 | 2621 | int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx, |
e624d4ed HL |
2622 | struct bpf_map *map, bool exclude_ingress) |
2623 | { | |
2624 | return 0; | |
2625 | } | |
2626 | ||
6d5fc195 TM |
2627 | struct sk_buff; |
2628 | ||
2629 | static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, | |
2630 | struct sk_buff *skb, | |
2631 | struct bpf_prog *xdp_prog) | |
2632 | { | |
2633 | return 0; | |
2634 | } | |
2635 | ||
e624d4ed HL |
2636 | static inline |
2637 | int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, | |
2638 | struct bpf_prog *xdp_prog, struct bpf_map *map, | |
2639 | bool exclude_ingress) | |
2640 | { | |
2641 | return 0; | |
2642 | } | |
2643 | ||
cdfafe98 | 2644 | static inline void __cpu_map_flush(void) |
9c270af3 JDB |
2645 | { |
2646 | } | |
2647 | ||
9c270af3 | 2648 | static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, |
d53ad5d8 | 2649 | struct xdp_frame *xdpf, |
9c270af3 JDB |
2650 | struct net_device *dev_rx) |
2651 | { | |
2652 | return 0; | |
2653 | } | |
040ee692 | 2654 | |
11941f8a KKD |
2655 | static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, |
2656 | struct sk_buff *skb) | |
2657 | { | |
2658 | return -EOPNOTSUPP; | |
2659 | } | |
2660 | ||
040ee692 AV |
2661 | static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, |
2662 | enum bpf_prog_type type) | |
2663 | { | |
2664 | return ERR_PTR(-EOPNOTSUPP); | |
2665 | } | |
c695865c SF |
2666 | |
2667 | static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, | |
2668 | const union bpf_attr *kattr, | |
2669 | union bpf_attr __user *uattr) | |
2670 | { | |
2671 | return -ENOTSUPP; | |
2672 | } | |
2673 | ||
2674 | static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, | |
2675 | const union bpf_attr *kattr, | |
2676 | union bpf_attr __user *uattr) | |
2677 | { | |
2678 | return -ENOTSUPP; | |
2679 | } | |
2680 | ||
da00d2f1 KS |
2681 | static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, |
2682 | const union bpf_attr *kattr, | |
2683 | union bpf_attr __user *uattr) | |
2684 | { | |
2685 | return -ENOTSUPP; | |
2686 | } | |
2687 | ||
c695865c SF |
2688 | static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, |
2689 | const union bpf_attr *kattr, | |
2690 | union bpf_attr __user *uattr) | |
2691 | { | |
2692 | return -ENOTSUPP; | |
2693 | } | |
6332be04 | 2694 | |
7c32e8f8 LB |
2695 | static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, |
2696 | const union bpf_attr *kattr, | |
2697 | union bpf_attr __user *uattr) | |
2698 | { | |
2699 | return -ENOTSUPP; | |
2700 | } | |
2701 | ||
6332be04 DB |
2702 | static inline void bpf_map_put(struct bpf_map *map) |
2703 | { | |
2704 | } | |
7e6897f9 BT |
2705 | |
2706 | static inline struct bpf_prog *bpf_prog_by_id(u32 id) | |
2707 | { | |
2708 | return ERR_PTR(-ENOTSUPP); | |
2709 | } | |
6890896b | 2710 | |
d4f7bdb2 | 2711 | static inline int btf_struct_access(struct bpf_verifier_log *log, |
6728aea7 KKD |
2712 | const struct bpf_reg_state *reg, |
2713 | int off, int size, enum bpf_access_type atype, | |
63260df1 AS |
2714 | u32 *next_btf_id, enum bpf_type_flag *flag, |
2715 | const char **field_name) | |
d4f7bdb2 DX |
2716 | { |
2717 | return -EACCES; | |
2718 | } | |
2719 | ||
6890896b | 2720 | static inline const struct bpf_func_proto * |
d17aff80 | 2721 | bpf_base_func_proto(enum bpf_func_id func_id) |
6890896b SF |
2722 | { |
2723 | return NULL; | |
2724 | } | |
a10787e6 SL |
2725 | |
2726 | static inline void bpf_task_storage_free(struct task_struct *task) | |
2727 | { | |
2728 | } | |
e6ac2450 MKL |
2729 | |
2730 | static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) | |
2731 | { | |
2732 | return false; | |
2733 | } | |
2734 | ||
2735 | static inline const struct btf_func_model * | |
2736 | bpf_jit_find_kfunc_model(const struct bpf_prog *prog, | |
2737 | const struct bpf_insn *insn) | |
2738 | { | |
2739 | return NULL; | |
2740 | } | |
44a3918c | 2741 | |
1cf3bfc6 IL |
2742 | static inline int |
2743 | bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, | |
2744 | u16 btf_fd_idx, u8 **func_addr) | |
2745 | { | |
2746 | return -ENOTSUPP; | |
2747 | } | |
2748 | ||
44a3918c JP |
2749 | static inline bool unprivileged_ebpf_enabled(void) |
2750 | { | |
2751 | return false; | |
2752 | } | |
2753 | ||
24426654 MKL |
2754 | static inline bool has_current_bpf_ctx(void) |
2755 | { | |
2756 | return false; | |
2757 | } | |
05b24ff9 JO |
2758 | |
2759 | static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog) | |
2760 | { | |
2761 | } | |
c4bcfb38 YS |
2762 | |
2763 | static inline void bpf_cgrp_storage_free(struct cgroup *cgroup) | |
2764 | { | |
2765 | } | |
8357b366 JK |
2766 | |
2767 | static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, | |
2768 | enum bpf_dynptr_type type, u32 offset, u32 size) | |
2769 | { | |
2770 | } | |
2771 | ||
2772 | static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) | |
2773 | { | |
2774 | } | |
2775 | ||
2776 | static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr) | |
2777 | { | |
2778 | } | |
61e021f3 | 2779 | #endif /* CONFIG_BPF_SYSCALL */ |
09756af4 | 2780 | |
6a5a148a AB |
2781 | static __always_inline int |
2782 | bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) | |
2783 | { | |
2784 | int ret = -EFAULT; | |
2785 | ||
2786 | if (IS_ENABLED(CONFIG_BPF_EVENTS)) | |
2787 | ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); | |
2788 | if (unlikely(ret < 0)) | |
2789 | memset(dst, 0, size); | |
2790 | return ret; | |
2791 | } | |
2792 | ||
541c3bad AN |
2793 | void __bpf_free_used_btfs(struct bpf_prog_aux *aux, |
2794 | struct btf_mod_pair *used_btfs, u32 len); | |
2795 | ||
479321e9 JK |
2796 | static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, |
2797 | enum bpf_prog_type type) | |
2798 | { | |
2799 | return bpf_prog_get_type_dev(ufd, type, false); | |
2800 | } | |
2801 | ||
936f8946 AN |
2802 | void __bpf_free_used_maps(struct bpf_prog_aux *aux, |
2803 | struct bpf_map **used_maps, u32 len); | |
2804 | ||
040ee692 AV |
2805 | bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); |
2806 | ||
ab3f0063 | 2807 | int bpf_prog_offload_compile(struct bpf_prog *prog); |
2b3486bc | 2808 | void bpf_prog_dev_bound_destroy(struct bpf_prog *prog); |
675fc275 JK |
2809 | int bpf_prog_offload_info_fill(struct bpf_prog_info *info, |
2810 | struct bpf_prog *prog); | |
ab3f0063 | 2811 | |
52775b33 JK |
2812 | int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); |
2813 | ||
a3884572 JK |
2814 | int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); |
2815 | int bpf_map_offload_update_elem(struct bpf_map *map, | |
2816 | void *key, void *value, u64 flags); | |
2817 | int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); | |
2818 | int bpf_map_offload_get_next_key(struct bpf_map *map, | |
2819 | void *key, void *next_key); | |
2820 | ||
09728266 | 2821 | bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); |
a3884572 | 2822 | |
1385d755 | 2823 | struct bpf_offload_dev * |
dd27c2e3 | 2824 | bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); |
602144c2 | 2825 | void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); |
dd27c2e3 | 2826 | void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); |
602144c2 JK |
2827 | int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, |
2828 | struct net_device *netdev); | |
2829 | void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, | |
2830 | struct net_device *netdev); | |
fd4f227d | 2831 | bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); |
9fd7c555 | 2832 | |
2147c438 JP |
2833 | void unpriv_ebpf_notify(int new_state); |
2834 | ||
ab3f0063 | 2835 | #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) |
3d76a4d3 SF |
2836 | int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, |
2837 | struct bpf_prog_aux *prog_aux); | |
2838 | void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id); | |
2b3486bc | 2839 | int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr); |
fd7c211d | 2840 | int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog); |
2b3486bc | 2841 | void bpf_dev_bound_netdev_unregister(struct net_device *dev); |
ab3f0063 | 2842 | |
0d830032 | 2843 | static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) |
2b3486bc SF |
2844 | { |
2845 | return aux->dev_bound; | |
2846 | } | |
ab3f0063 | 2847 | |
9d03ebc7 | 2848 | static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux) |
ab3f0063 | 2849 | { |
9a18eedb | 2850 | return aux->offload_requested; |
ab3f0063 | 2851 | } |
a3884572 | 2852 | |
fd7c211d THJ |
2853 | bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs); |
2854 | ||
9d03ebc7 | 2855 | static inline bool bpf_map_is_offloaded(struct bpf_map *map) |
a3884572 JK |
2856 | { |
2857 | return unlikely(map->ops == &bpf_map_offload_ops); | |
2858 | } | |
2859 | ||
2860 | struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); | |
2861 | void bpf_map_offload_map_free(struct bpf_map *map); | |
9629363c | 2862 | u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map); |
79a7f8bd AS |
2863 | int bpf_prog_test_run_syscall(struct bpf_prog *prog, |
2864 | const union bpf_attr *kattr, | |
2865 | union bpf_attr __user *uattr); | |
17edea21 CW |
2866 | |
2867 | int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); | |
2868 | int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); | |
2869 | int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); | |
748cd572 DZ |
2870 | int sock_map_bpf_prog_query(const union bpf_attr *attr, |
2871 | union bpf_attr __user *uattr); | |
2872 | ||
17edea21 | 2873 | void sock_map_unhash(struct sock *sk); |
d8616ee2 | 2874 | void sock_map_destroy(struct sock *sk); |
17edea21 | 2875 | void sock_map_close(struct sock *sk, long timeout); |
ab3f0063 | 2876 | #else |
3d76a4d3 SF |
2877 | static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, |
2878 | struct bpf_prog_aux *prog_aux) | |
2879 | { | |
2880 | return -EOPNOTSUPP; | |
2881 | } | |
2882 | ||
2883 | static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, | |
2884 | u32 func_id) | |
2885 | { | |
2886 | return NULL; | |
2887 | } | |
2888 | ||
2b3486bc | 2889 | static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog, |
3d76a4d3 | 2890 | union bpf_attr *attr) |
ab3f0063 JK |
2891 | { |
2892 | return -EOPNOTSUPP; | |
2893 | } | |
2894 | ||
fd7c211d THJ |
2895 | static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, |
2896 | struct bpf_prog *old_prog) | |
2897 | { | |
2898 | return -EOPNOTSUPP; | |
2899 | } | |
2900 | ||
2b3486bc SF |
2901 | static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev) |
2902 | { | |
2903 | } | |
2904 | ||
2905 | static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) | |
2906 | { | |
2907 | return false; | |
2908 | } | |
2909 | ||
9d03ebc7 | 2910 | static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux) |
ab3f0063 JK |
2911 | { |
2912 | return false; | |
2913 | } | |
a3884572 | 2914 | |
fd7c211d | 2915 | static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs) |
ab3f0063 JK |
2916 | { |
2917 | return false; | |
2918 | } | |
a3884572 | 2919 | |
9d03ebc7 | 2920 | static inline bool bpf_map_is_offloaded(struct bpf_map *map) |
a3884572 JK |
2921 | { |
2922 | return false; | |
2923 | } | |
2924 | ||
2925 | static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) | |
2926 | { | |
2927 | return ERR_PTR(-EOPNOTSUPP); | |
2928 | } | |
2929 | ||
2930 | static inline void bpf_map_offload_map_free(struct bpf_map *map) | |
2931 | { | |
2932 | } | |
79a7f8bd | 2933 | |
9629363c YS |
2934 | static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map) |
2935 | { | |
2936 | return 0; | |
2937 | } | |
2938 | ||
79a7f8bd AS |
2939 | static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, |
2940 | const union bpf_attr *kattr, | |
2941 | union bpf_attr __user *uattr) | |
2942 | { | |
2943 | return -ENOTSUPP; | |
2944 | } | |
fdb5c453 | 2945 | |
88759609 | 2946 | #ifdef CONFIG_BPF_SYSCALL |
604326b4 DB |
2947 | static inline int sock_map_get_from_fd(const union bpf_attr *attr, |
2948 | struct bpf_prog *prog) | |
fdb5c453 SY |
2949 | { |
2950 | return -EINVAL; | |
2951 | } | |
bb0de313 LB |
2952 | |
2953 | static inline int sock_map_prog_detach(const union bpf_attr *attr, | |
2954 | enum bpf_prog_type ptype) | |
2955 | { | |
2956 | return -EOPNOTSUPP; | |
2957 | } | |
13b79d3f LB |
2958 | |
2959 | static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, | |
2960 | u64 flags) | |
2961 | { | |
2962 | return -EOPNOTSUPP; | |
2963 | } | |
748cd572 DZ |
2964 | |
2965 | static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, | |
2966 | union bpf_attr __user *uattr) | |
2967 | { | |
2968 | return -EINVAL; | |
2969 | } | |
17edea21 CW |
2970 | #endif /* CONFIG_BPF_SYSCALL */ |
2971 | #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ | |
5dc4c4b7 | 2972 | |
dd865789 JO |
2973 | static __always_inline void |
2974 | bpf_prog_inc_misses_counters(const struct bpf_prog_array *array) | |
2975 | { | |
2976 | const struct bpf_prog_array_item *item; | |
2977 | struct bpf_prog *prog; | |
2978 | ||
2979 | if (unlikely(!array)) | |
2980 | return; | |
2981 | ||
2982 | item = &array->items[0]; | |
2983 | while ((prog = READ_ONCE(item->prog))) { | |
2984 | bpf_prog_inc_misses_counter(prog); | |
2985 | item++; | |
2986 | } | |
2987 | } | |
2988 | ||
17edea21 CW |
2989 | #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) |
2990 | void bpf_sk_reuseport_detach(struct sock *sk); | |
2991 | int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, | |
2992 | void *value); | |
2993 | int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, | |
2994 | void *value, u64 map_flags); | |
2995 | #else | |
2996 | static inline void bpf_sk_reuseport_detach(struct sock *sk) | |
2997 | { | |
2998 | } | |
5dc4c4b7 | 2999 | |
17edea21 | 3000 | #ifdef CONFIG_BPF_SYSCALL |
5dc4c4b7 MKL |
3001 | static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, |
3002 | void *key, void *value) | |
3003 | { | |
3004 | return -EOPNOTSUPP; | |
3005 | } | |
3006 | ||
3007 | static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, | |
3008 | void *key, void *value, | |
3009 | u64 map_flags) | |
3010 | { | |
3011 | return -EOPNOTSUPP; | |
3012 | } | |
3013 | #endif /* CONFIG_BPF_SYSCALL */ | |
3014 | #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ | |
3015 | ||
d0003ec0 | 3016 | /* verifier prototypes for helper functions called from eBPF programs */ |
a2c83fff DB |
3017 | extern const struct bpf_func_proto bpf_map_lookup_elem_proto; |
3018 | extern const struct bpf_func_proto bpf_map_update_elem_proto; | |
3019 | extern const struct bpf_func_proto bpf_map_delete_elem_proto; | |
f1a2e44a MV |
3020 | extern const struct bpf_func_proto bpf_map_push_elem_proto; |
3021 | extern const struct bpf_func_proto bpf_map_pop_elem_proto; | |
3022 | extern const struct bpf_func_proto bpf_map_peek_elem_proto; | |
07343110 | 3023 | extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto; |
d0003ec0 | 3024 | |
03e69b50 | 3025 | extern const struct bpf_func_proto bpf_get_prandom_u32_proto; |
c04167ce | 3026 | extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; |
2d0e30c3 | 3027 | extern const struct bpf_func_proto bpf_get_numa_node_id_proto; |
04fd61ab | 3028 | extern const struct bpf_func_proto bpf_tail_call_proto; |
17ca8cbf | 3029 | extern const struct bpf_func_proto bpf_ktime_get_ns_proto; |
71d19214 | 3030 | extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto; |
c8996c98 | 3031 | extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto; |
ffeedafb AS |
3032 | extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; |
3033 | extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; | |
3034 | extern const struct bpf_func_proto bpf_get_current_comm_proto; | |
d5a3b1f6 | 3035 | extern const struct bpf_func_proto bpf_get_stackid_proto; |
c195651e | 3036 | extern const struct bpf_func_proto bpf_get_stack_proto; |
fa28dcb8 | 3037 | extern const struct bpf_func_proto bpf_get_task_stack_proto; |
7b04d6d6 SL |
3038 | extern const struct bpf_func_proto bpf_get_stackid_proto_pe; |
3039 | extern const struct bpf_func_proto bpf_get_stack_proto_pe; | |
174a79ff | 3040 | extern const struct bpf_func_proto bpf_sock_map_update_proto; |
81110384 | 3041 | extern const struct bpf_func_proto bpf_sock_hash_update_proto; |
bf6fa2c8 | 3042 | extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; |
0f09abd1 | 3043 | extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; |
bed89185 | 3044 | extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto; |
604326b4 DB |
3045 | extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; |
3046 | extern const struct bpf_func_proto bpf_msg_redirect_map_proto; | |
3047 | extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; | |
3048 | extern const struct bpf_func_proto bpf_sk_redirect_map_proto; | |
d83525ca AS |
3049 | extern const struct bpf_func_proto bpf_spin_lock_proto; |
3050 | extern const struct bpf_func_proto bpf_spin_unlock_proto; | |
cd339431 | 3051 | extern const struct bpf_func_proto bpf_get_local_storage_proto; |
d7a4cb9b AI |
3052 | extern const struct bpf_func_proto bpf_strtol_proto; |
3053 | extern const struct bpf_func_proto bpf_strtoul_proto; | |
0d01da6a | 3054 | extern const struct bpf_func_proto bpf_tcp_sock_proto; |
5576b991 | 3055 | extern const struct bpf_func_proto bpf_jiffies64_proto; |
b4490c5c | 3056 | extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; |
0456ea17 | 3057 | extern const struct bpf_func_proto bpf_event_output_data_proto; |
457f4436 AN |
3058 | extern const struct bpf_func_proto bpf_ringbuf_output_proto; |
3059 | extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; | |
3060 | extern const struct bpf_func_proto bpf_ringbuf_submit_proto; | |
3061 | extern const struct bpf_func_proto bpf_ringbuf_discard_proto; | |
3062 | extern const struct bpf_func_proto bpf_ringbuf_query_proto; | |
bc34dee6 JK |
3063 | extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto; |
3064 | extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto; | |
3065 | extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto; | |
af7ec138 | 3066 | extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; |
478cfbdf YS |
3067 | extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; |
3068 | extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; | |
3069 | extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; | |
0d4fad3e | 3070 | extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; |
9eeb3aa3 | 3071 | extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto; |
3bc253c2 | 3072 | extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto; |
07be4c4a | 3073 | extern const struct bpf_func_proto bpf_copy_from_user_proto; |
c4d0bfb4 | 3074 | extern const struct bpf_func_proto bpf_snprintf_btf_proto; |
7b15523a | 3075 | extern const struct bpf_func_proto bpf_snprintf_proto; |
eaa6bcb7 | 3076 | extern const struct bpf_func_proto bpf_per_cpu_ptr_proto; |
63d9b80d | 3077 | extern const struct bpf_func_proto bpf_this_cpu_ptr_proto; |
d0551261 | 3078 | extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto; |
b60da495 | 3079 | extern const struct bpf_func_proto bpf_sock_from_file_proto; |
c5dbb89f | 3080 | extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto; |
0593dd34 | 3081 | extern const struct bpf_func_proto bpf_task_storage_get_recur_proto; |
a10787e6 | 3082 | extern const struct bpf_func_proto bpf_task_storage_get_proto; |
0593dd34 | 3083 | extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto; |
a10787e6 | 3084 | extern const struct bpf_func_proto bpf_task_storage_delete_proto; |
69c087ba | 3085 | extern const struct bpf_func_proto bpf_for_each_map_elem_proto; |
3d78417b | 3086 | extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; |
3cee6fb8 MKL |
3087 | extern const struct bpf_func_proto bpf_sk_setsockopt_proto; |
3088 | extern const struct bpf_func_proto bpf_sk_getsockopt_proto; | |
9113d7e4 SF |
3089 | extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto; |
3090 | extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto; | |
7c7e3d31 | 3091 | extern const struct bpf_func_proto bpf_find_vma_proto; |
e6f2dd0f | 3092 | extern const struct bpf_func_proto bpf_loop_proto; |
376040e4 | 3093 | extern const struct bpf_func_proto bpf_copy_from_user_task_proto; |
69fd337a SF |
3094 | extern const struct bpf_func_proto bpf_set_retval_proto; |
3095 | extern const struct bpf_func_proto bpf_get_retval_proto; | |
20571567 | 3096 | extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto; |
c4bcfb38 YS |
3097 | extern const struct bpf_func_proto bpf_cgrp_storage_get_proto; |
3098 | extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto; | |
cd339431 | 3099 | |
958a3f2d JO |
3100 | const struct bpf_func_proto *tracing_prog_func_proto( |
3101 | enum bpf_func_id func_id, const struct bpf_prog *prog); | |
3102 | ||
3ad00405 DB |
3103 | /* Shared helpers among cBPF and eBPF. */ |
3104 | void bpf_user_rnd_init_once(void); | |
3105 | u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | |
6890896b | 3106 | u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
3ad00405 | 3107 | |
c64b7983 | 3108 | #if defined(CONFIG_NET) |
46f8bc92 MKL |
3109 | bool bpf_sock_common_is_valid_access(int off, int size, |
3110 | enum bpf_access_type type, | |
3111 | struct bpf_insn_access_aux *info); | |
c64b7983 JS |
3112 | bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, |
3113 | struct bpf_insn_access_aux *info); | |
3114 | u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, | |
3115 | const struct bpf_insn *si, | |
3116 | struct bpf_insn *insn_buf, | |
3117 | struct bpf_prog *prog, | |
3118 | u32 *target_size); | |
b5964b96 JK |
3119 | int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, |
3120 | struct bpf_dynptr_kern *ptr); | |
c64b7983 | 3121 | #else |
46f8bc92 MKL |
3122 | static inline bool bpf_sock_common_is_valid_access(int off, int size, |
3123 | enum bpf_access_type type, | |
3124 | struct bpf_insn_access_aux *info) | |
3125 | { | |
3126 | return false; | |
3127 | } | |
c64b7983 JS |
3128 | static inline bool bpf_sock_is_valid_access(int off, int size, |
3129 | enum bpf_access_type type, | |
3130 | struct bpf_insn_access_aux *info) | |
3131 | { | |
3132 | return false; | |
3133 | } | |
3134 | static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, | |
3135 | const struct bpf_insn *si, | |
3136 | struct bpf_insn *insn_buf, | |
3137 | struct bpf_prog *prog, | |
3138 | u32 *target_size) | |
3139 | { | |
3140 | return 0; | |
3141 | } | |
b5964b96 JK |
3142 | static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, |
3143 | struct bpf_dynptr_kern *ptr) | |
3144 | { | |
3145 | return -EOPNOTSUPP; | |
3146 | } | |
c64b7983 JS |
3147 | #endif |
3148 | ||
655a51e5 | 3149 | #ifdef CONFIG_INET |
91cc1a99 AS |
3150 | struct sk_reuseport_kern { |
3151 | struct sk_buff *skb; | |
3152 | struct sock *sk; | |
3153 | struct sock *selected_sk; | |
d5e4ddae | 3154 | struct sock *migrating_sk; |
91cc1a99 AS |
3155 | void *data_end; |
3156 | u32 hash; | |
3157 | u32 reuseport_id; | |
3158 | bool bind_inany; | |
3159 | }; | |
655a51e5 MKL |
3160 | bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, |
3161 | struct bpf_insn_access_aux *info); | |
3162 | ||
3163 | u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, | |
3164 | const struct bpf_insn *si, | |
3165 | struct bpf_insn *insn_buf, | |
3166 | struct bpf_prog *prog, | |
3167 | u32 *target_size); | |
7f94208c Y |
3168 | |
3169 | bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, | |
3170 | struct bpf_insn_access_aux *info); | |
3171 | ||
3172 | u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, | |
3173 | const struct bpf_insn *si, | |
3174 | struct bpf_insn *insn_buf, | |
3175 | struct bpf_prog *prog, | |
3176 | u32 *target_size); | |
655a51e5 MKL |
3177 | #else |
3178 | static inline bool bpf_tcp_sock_is_valid_access(int off, int size, | |
3179 | enum bpf_access_type type, | |
3180 | struct bpf_insn_access_aux *info) | |
3181 | { | |
3182 | return false; | |
3183 | } | |
3184 | ||
3185 | static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, | |
3186 | const struct bpf_insn *si, | |
3187 | struct bpf_insn *insn_buf, | |
3188 | struct bpf_prog *prog, | |
3189 | u32 *target_size) | |
3190 | { | |
3191 | return 0; | |
3192 | } | |
7f94208c Y |
3193 | static inline bool bpf_xdp_sock_is_valid_access(int off, int size, |
3194 | enum bpf_access_type type, | |
3195 | struct bpf_insn_access_aux *info) | |
3196 | { | |
3197 | return false; | |
3198 | } | |
3199 | ||
3200 | static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, | |
3201 | const struct bpf_insn *si, | |
3202 | struct bpf_insn *insn_buf, | |
3203 | struct bpf_prog *prog, | |
3204 | u32 *target_size) | |
3205 | { | |
3206 | return 0; | |
3207 | } | |
655a51e5 MKL |
3208 | #endif /* CONFIG_INET */ |
3209 | ||
5964b200 | 3210 | enum bpf_text_poke_type { |
b553a6ec DB |
3211 | BPF_MOD_CALL, |
3212 | BPF_MOD_JUMP, | |
5964b200 | 3213 | }; |
4b3da77b | 3214 | |
5964b200 AS |
3215 | int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, |
3216 | void *addr1, void *addr2); | |
3217 | ||
4b7de801 JO |
3218 | void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, |
3219 | struct bpf_prog *new, struct bpf_prog *old); | |
3220 | ||
ebc1415d | 3221 | void *bpf_arch_text_copy(void *dst, void *src, size_t len); |
fe736565 | 3222 | int bpf_arch_text_invalidate(void *dst, size_t len); |
ebc1415d | 3223 | |
eae2e83e | 3224 | struct btf_id_set; |
2af30f11 | 3225 | bool btf_id_set_contains(const struct btf_id_set *set, u32 id); |
eae2e83e | 3226 | |
335ff499 | 3227 | #define MAX_BPRINTF_VARARGS 12 |
e2bb9e01 | 3228 | #define MAX_BPRINTF_BUF 1024 |
335ff499 | 3229 | |
78aa1cc9 JO |
3230 | struct bpf_bprintf_data { |
3231 | u32 *bin_args; | |
e2bb9e01 | 3232 | char *buf; |
78aa1cc9 | 3233 | bool get_bin_args; |
e2bb9e01 | 3234 | bool get_buf; |
78aa1cc9 JO |
3235 | }; |
3236 | ||
48cac3f4 | 3237 | int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, |
78aa1cc9 | 3238 | u32 num_args, struct bpf_bprintf_data *data); |
f19a4050 | 3239 | void bpf_bprintf_cleanup(struct bpf_bprintf_data *data); |
d9c9e4db | 3240 | |
c0e19f2c SF |
3241 | #ifdef CONFIG_BPF_LSM |
3242 | void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype); | |
3243 | void bpf_cgroup_atype_put(int cgroup_atype); | |
3244 | #else | |
3245 | static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {} | |
3246 | static inline void bpf_cgroup_atype_put(int cgroup_atype) {} | |
3247 | #endif /* CONFIG_BPF_LSM */ | |
3248 | ||
f3cf4134 RS |
3249 | struct key; |
3250 | ||
3251 | #ifdef CONFIG_KEYS | |
3252 | struct bpf_key { | |
3253 | struct key *key; | |
3254 | bool has_ref; | |
3255 | }; | |
3256 | #endif /* CONFIG_KEYS */ | |
282de143 KKD |
3257 | |
3258 | static inline bool type_is_alloc(u32 type) | |
3259 | { | |
3260 | return type & MEM_ALLOC; | |
3261 | } | |
3262 | ||
ee53cbfb YS |
3263 | static inline gfp_t bpf_memcg_flags(gfp_t flags) |
3264 | { | |
3265 | if (memcg_bpf_enabled()) | |
3266 | return flags | __GFP_ACCOUNT; | |
3267 | return flags; | |
3268 | } | |
3269 | ||
9af27da6 KKD |
3270 | static inline bool bpf_is_subprog(const struct bpf_prog *prog) |
3271 | { | |
3272 | return prog->aux->func_idx != 0; | |
3273 | } | |
3274 | ||
99c55f7d | 3275 | #endif /* _LINUX_BPF_H */ |