/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
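/* A worked bound for the claim above (illustrative, not from the original
 * source): with umax_value capped at 2^29 and 32-bit off/size each below
 * 2^31, the largest access end is under 2^29 + 2^31 + 2^31 < 2^33, far
 * inside u64 range, so the verifier's bounds arithmetic cannot wrap.
 */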

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent. See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
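
/* An illustrative example (not from the original source) of how the marks
 * above interact:
 *
 * 1: r1 = 0			// parent state P
 * 2: if r2 > 7 goto +1		// children A (fallthrough) and B (taken)
 * 3: r1 = 5			// A only: r1 gets REG_LIVE_WRITTEN
 * 4: r0 = r1			// r1 gets a read mark
 *
 * On the fallthrough path the read at insn 4 is screened off by the write
 * at insn 3 and does not propagate to P; on the taken path it propagates
 * up, so r1's value in P stays relevant for states_equal().
 */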

struct bpf_reg_state {
	/* Ordering of fields matters. See states_equal() */
	enum bpf_reg_type type;
	union {
		/* valid when type == PTR_TO_PACKET */
		u16 range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;

		u32 btf_id; /* for PTR_TO_BTF_ID */

		/* Max size from any of the above. */
		unsigned long raw;
	};
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 */
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK can be ptrs returned from a
	 * pointer-cast helper such as bpf_sk_fullsock() or bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
	 * the "tp" ptr should also be invalidated. To do that, the regs
	 * holding "fullsock" and "sk" need to remember the original
	 * refcounted ptr id (i.e. sk_reg->id) in ref_obj_id, so that the
	 * verifier can reset all regs whose ref_obj_id matches sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only; once
	 * NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id,
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier's perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type. In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
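	/* An illustrative example (a sketch; see include/linux/tnum.h):
	 * after "r1 &= 0x7" on an unknown scalar, var_off becomes
	 * {.value = 0, .mask = 0x7}: the low three bits are unknown and
	 * all higher bits are known to be zero.
	 */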
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can both be PTR_TO_STACK, e.g.
	 * R1=fp-8 and R2=fp-8, but one of them points to this function's stack
	 * while the other points to the caller's stack. To differentiate them,
	 * 'frameno' is used: an index into the bpf_verifier_state->frame[]
	 * array pointing to the bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching, which happens only after main verification has finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};
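
/* A clarifying example (not from the original source): there is one
 * slot_type byte per byte of the 8-byte slot. Spilling a register with
 * "*(u64 *)(r10 - 8) = r1" marks all eight bytes STACK_SPILL and records
 * r1's state in spilled_ptr; a one-byte store like "*(u8 *)(r10 - 8) = r2"
 * instead marks just the written byte STACK_MISC.
 */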

struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (e.g., via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from the point of view
	 * of the enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info[]
	 * zero == main subprog
	 */
	u32 subprogno;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
	u32 prev_idx;
	u32 idx;
};

#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit.
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 *     One is a fallthrough branch with branches==1 and another
	 *     state is pushed into the stack (to be explored later) also with
	 *     branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack(), do_check() will resume at the second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such a state is exactly equal to the current
	 * state it's an infinite loop. Note that states_equal() checks for
	 * state equivalence, so two states being 'states_equal' does not
	 * mean an infinite loop. The exact comparison is provided by the
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops, or the
	 * loop iteration count may be too high. In such cases the
	 * BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;
	u32 active_spin_lock;
	bool speculative;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* jmp history recorded from first to last.
	 * backtracking uses it to go from last to first.
	 * For most states jmp_history_cnt is in [0, 3];
	 * for loops it can go up to ~40.
	 */
	struct bpf_idx_pair *jmp_history;
	u32 jmp_history_cnt;
};

#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))
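
/* A usage sketch (hypothetical caller, mirroring how verifier.c walks
 * spilled registers, e.g. when invalidating pointers):
 *
 *	struct bpf_reg_state *reg;
 *	int i;
 *
 *	bpf_for_each_spilled_reg(i, state, reg) {
 *		if (!reg)
 *			continue;	// slot is not a spilled register
 *		// inspect or reset *reg here
 *	}
 */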

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC	1U
#define BPF_ALU_SANITIZE_DST	2U
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC |	\
				 BPF_ALU_SANITIZE_DST)
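
/* A hedged reading of how these combine (based on sanitize_ptr_alu() in
 * kernel/bpf/verifier.c): the SANITIZE bit names the operand holding the
 * scalar to be masked, so for "ptr += scalar" (pointer in the dst reg)
 * the verifier sets BPF_ALU_SANITIZE_SRC; BPF_ALU_NEG_VALUE is OR'd in
 * when the offset is negative, and BPF_ALU_NON_POINTER records that
 * neither operand was a pointer, so no masking is needed at all.
 */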

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
	};
	int ctx_field_size;	/* the ctx field size for load insn, maybe 0 */
	int sanitize_stack_off; /* stack slot to be cleared */
	bool seen;		/* this insn was processed by the verifier */
	bool zext_dst;		/* this insn zero extends dst reg */
	u8 alu_state;		/* used in combination with alu_limit */
	bool prune_point;
	unsigned int orig_idx;	/* original instruction index */
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	u32 level;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
	char __user *ubuf;
	u32 len_used;
	u32 len_total;
};

static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
	return log->len_used >= log->len_total - 1;
}

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
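
/* An illustrative example (not from the original source): a user passing
 * log_level = BPF_LOG_LEVEL2 | BPF_LOG_STATS gets both the verbose
 * per-insn state dumps and the final verification statistics, while
 * BPF_LOG_KERNEL sits outside BPF_LOG_MASK and flags log requests that
 * originate inside the kernel itself.
 */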

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return (log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
	       log->level == BPF_LOG_KERNEL;
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by the eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool allow_ptr_leaks;
	bool seen_direct_write;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is the peak number of states, which dominates kernel
	 * memory consumption during verification.
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}
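
/* A usage sketch (illustrative): the current frame's R0 is
 * cur_regs(env)[BPF_REG_0], and cur_func(env)->allocated_stack bounds
 * that frame's stack slots.
 */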

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

#endif /* _LINUX_BPF_VERIFIER_H */