/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
/* Size of tmp_str_buf in bpf_verifier.
 * We need at least 306 bytes to fit the full stack mask representation
 * (in the "-8,-16,...,-512" form).
 */
#define TMP_STR_BUF_LEN 320
/* Patch buffer size */
#define INSN_BUF_SIZE 32

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent. See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
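
/* Illustrative sketch (not part of the kernel sources): how the marks above
 * combine for a short program, assuming r2 arrives live from the parent state:
 *
 *	r1 = 0;			// r1 gets REG_LIVE_WRITTEN in this state;
 *				// later reads of r1 are screened off here
 *	if (r2 > 10) goto out;	// 64-bit read of r2 propagates REG_LIVE_READ64
 *				// up the parentage chain until a state that
 *				// wrote r2 is found
 *
 * Only r2 stays relevant for states_equal() at the parent, since its initial
 * value can affect this branch; r1's initial value cannot.
 */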

#define ITER_PREFIX "bpf_iter_"

enum bpf_iter_state {
	BPF_ITER_STATE_INVALID, /* for non-first slot */
	BPF_ITER_STATE_ACTIVE,
	BPF_ITER_STATE_DRAINED,
};

struct bpf_reg_state {
	/* Ordering of fields matters. See states_equal() */
	enum bpf_reg_type type;
	/*
	 * Fixed part of pointer offset, pointer types only.
	 * Or constant delta between "linked" scalars with the same ID.
	 */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish inner-map lookups from the outer
			 * map, map_uid is non-zero for registers pointing
			 * to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
			u32 mem_size;
			u32 dynptr_id; /* for dynptr slices */
		};

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of
			 * the dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* For bpf_iter stack slots */
		struct {
			/* BTF container and BTF type ID describing
			 * struct bpf_iter_<type> of an iterator state
			 */
			struct btf *btf;
			u32 btf_id;
			/* packing following two fields to fit iter state into 16 bytes */
			enum bpf_iter_state state:2;
			int depth:30;
		} iter;

		/* For irq stack slots */
		struct {
			enum {
				IRQ_NATIVE_KFUNC,
				IRQ_LOCK_KFUNC,
			} kfunc_class;
		} irq;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 * Similarly to dynptrs, we use ID to track "belonging" of a reference
	 * to a specific instance of bpf_iter.
	 */
	/*
	 * Upper bit of ID is used to remember relationship between "linked"
	 * registers. Example:
	 *   r1 = r2;    both will have r1->id == r2->id == N
	 *   r1 += 10;   r1->id == N | BPF_ADD_CONST and r1->off == 10
	 */
#define BPF_ADD_CONST (1U << 31)
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
	 * the "tp" ptr should be invalidated as well. In order to do that,
	 * the regs holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id will stay for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier's perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type. In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function's stack
	 * while the other points to the caller's stack. To differentiate them,
	 * 'frameno' is used: an index into the bpf_verifier_state->frame[]
	 * array pointing to a bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching, which only happens after main verification finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};
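
/* Illustrative sketch (not part of the kernel sources): how var_off and the
 * min/max bounds above evolve for a scalar, in tnum (value; mask) notation:
 *
 *	r1 = *(u64 *)(r2 + 0);	// unknown: var_off = (0; ~0), umax_value = U64_MAX
 *	r1 &= 0xff;		// var_off = (0; 0xff), umin_value = 0,
 *				// umax_value = 255, smin_value = 0, smax_value = 255
 *
 * After the AND, every bit above bit 7 is known to be zero, so r1 can pass a
 * bounds check against, e.g., an array of 256 entries.
 */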

enum bpf_stack_slot_type {
	STACK_INVALID,	/* nothing was stored in this stack slot */
	STACK_SPILL,	/* register spilled into stack */
	STACK_MISC,	/* BPF program wrote some data into this slot */
	STACK_ZERO,	/* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
	STACK_ITER,
	STACK_IRQ_FLAG,
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

#define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
			  (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
			  (1 << BPF_REG_5))

#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS	(BPF_DYNPTR_SIZE / BPF_REG_SIZE)
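
/* Note (editorial): struct bpf_dynptr_kern is 16 bytes, so with
 * BPF_REG_SIZE == 8 a dynptr occupies BPF_DYNPTR_NR_SLOTS == 2 stack slots,
 * hence the 'first_slot' tracking in bpf_reg_state above.
 */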

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};

struct bpf_reference_state {
	/* Each reference object has a type. Note that REF_TYPE_PTR is no
	 * longer zero, so zero initialization of a state does not by itself
	 * yield a valid pointer reference.
	 */
	enum ref_state_type {
		REF_TYPE_PTR		= (1 << 1),
		REF_TYPE_IRQ		= (1 << 2),
		REF_TYPE_LOCK		= (1 << 3),
		REF_TYPE_RES_LOCK	= (1 << 4),
		REF_TYPE_RES_LOCK_IRQ	= (1 << 5),
		REF_TYPE_LOCK_MASK	= REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
	} type;
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (eg, via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
	/* Used to keep track of the source object of a lock, to ensure
	 * it matches on unlock.
	 */
	void *ptr;
};

struct bpf_retval_range {
	s32 minval;
	s32 maxval;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	struct bpf_retval_range callback_ret_range;
	bool in_callback_fn;
	bool in_async_callback_fn;
	bool in_exception_callback_fn;
	/* For callback calling functions that limit the number of possible
	 * callback executions (e.g. bpf_loop), this keeps track of the
	 * current simulated iteration number.
	 * The value in frame N refers to the number of times the callback
	 * with frame N+1 was simulated, e.g. for the following call:
	 *
	 * bpf_loop(..., fn, ...); | suppose current frame is N
	 *                         | fn would be simulated in frame N+1
	 *                         | number of simulations is tracked in frame N
	 */
	u32 callback_depth;

	/* The following fields should be last. See copy_func_state() */
	/* The state of the stack. Each element of the array describes BPF_REG_SIZE
	 * (i.e. 8) bytes worth of stack memory.
	 * stack[0] represents bytes [*(r10-8)..*(r10-1)]
	 * stack[1] represents bytes [*(r10-16)..*(r10-9)]
	 * ...
	 * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
	 */
	struct bpf_stack_state *stack;
	/* Size of the current stack, in bytes. The stack state is tracked above,
	 * in `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
	 */
	int allocated_stack;
};
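
/* Illustrative sketch (mirrors the layout comment above, not a kernel API):
 * the stack slot index ("spi") for an access at negative offset 'off' from
 * r10 is
 *
 *	spi = (-off - 1) / BPF_REG_SIZE;
 *
 * so *(u64 *)(r10 - 8) touches stack[0] and *(u64 *)(r10 - 16) touches
 * stack[1].
 */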

#define MAX_CALL_FRAMES 8

/* instruction history flags, used in bpf_insn_hist_entry.flags field */
enum {
	/* instruction references stack slot through PTR_TO_STACK register;
	 * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
	 * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
	 * 8 bytes per slot, so slot index (spi) is [0, 63])
	 */
	INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */

	INSN_F_SPI_MASK = 0x3f, /* 6 bits */
	INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */

	INSN_F_STACK_ACCESS = BIT(9),

	INSN_F_DST_REG_STACK = BIT(10), /* dst_reg is PTR_TO_STACK */
	INSN_F_SRC_REG_STACK = BIT(11), /* src_reg is PTR_TO_STACK */
	/* total 12 bits are used now. */
};

static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
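
/* Illustrative sketch (not part of the kernel sources): a stack access in
 * frame 2 at slot index 5 could be encoded and decoded as
 *
 *	u32 flags = 2 | (5 << INSN_F_SPI_SHIFT) | INSN_F_STACK_ACCESS;
 *	u32 frameno = flags & INSN_F_FRAMENO_MASK;			// 2
 *	u32 spi = (flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;	// 5
 */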

struct bpf_insn_hist_entry {
	u32 idx;
	/* insn idx can't be bigger than 1 million */
	u32 prev_idx : 20;
	/* special INSN_F_xxx flags */
	u32 flags : 12;
	/* additional registers that need precision tracking when this
	 * jump is backtracked, vector of six 10-bit records
	 */
	u64 linked_regs;
};

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/* Acquired reference states */
	struct bpf_reference_state *refs;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit.
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 *     One is a fallthrough branch with branches==1 and the other
	 *     state is pushed into the stack (to be explored later) also with
	 *     branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack(), do_check() will resume at the second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such a state is exactly equal to the current
	 * state it's an infinite loop. Note that states_equal() checks for
	 * state equivalence, so two states being 'states_equal' does not mean
	 * an infinite loop. The exact comparison is provided by the
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops, or the
	 * loop iteration count may be too high.
	 * In such cases the BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;

	u32 acquired_refs;
	u32 active_locks;
	u32 active_preempt_locks;
	u32 active_irq_id;
	u32 active_lock_id;
	void *active_lock_ptr;
	bool active_rcu_lock;

	bool speculative;
	bool in_sleepable;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* If this state is a part of a states loop this field points to some
	 * parent of this state such that:
	 * - it is also a member of the same states loop;
	 * - DFS states traversal starting from initial state visits loop_entry
	 *   state before this state.
	 * Used to compute topmost loop entry for state loops.
	 * State loops might appear because of open coded iterators logic.
	 * See get_loop_entry() for more information.
	 */
	struct bpf_verifier_state *loop_entry;
	/* Sub-range of env->insn_hist[] corresponding to this state's
	 * instruction history.
	 * Backtracking uses it to go from last to first instruction.
	 * For most states instruction history is short, 0-3 instructions.
	 * For loops it can go up to ~40.
	 */
	u32 insn_hist_start;
	u32 insn_hist_end;
	u32 dfs_depth;
	u32 callback_unroll_depth;
	u32 may_goto_depth;
	/* If this state was ever pointed to by another state's loop_entry
	 * field, this flag is set to true. Used to avoid freeing such states
	 * while they are still in use.
	 */
	u32 used_as_loop_entry;
};

#define bpf_get_spilled_reg(slot, frame, mask)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg, mask)		\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask);	\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame, mask))

#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \
	({								\
		struct bpf_verifier_state *___vstate = __vst;		\
		int ___i, ___j;						\
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {	\
			struct bpf_reg_state *___regs;			\
			__state = ___vstate->frame[___i];		\
			___regs = __state->regs;			\
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {	\
				__reg = &___regs[___j];			\
				(void)(__expr);				\
			}						\
			bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
				if (!__reg)				\
					continue;			\
				(void)(__expr);				\
			}						\
		}							\
	})

/* Invoke __expr over registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
	bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
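
/* Illustrative usage sketch (assumes a mark_reg_invalid() helper like the one
 * in kernel/bpf/verifier.c): invalidate every register and spilled register
 * holding a given reference:
 *
 *	struct bpf_func_state *state;
 *	struct bpf_reg_state *reg;
 *
 *	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
 *		if (reg->ref_obj_id == ref_obj_id)
 *			mark_reg_invalid(env, reg);
 *	}));
 */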

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct list_head node;
	u32 miss_cnt;
	u32 hit_cnt:31;
	u32 in_free_list:1;
};

struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};

/* pointer and state for maps */
struct bpf_map_ptr_state {
	struct bpf_map *map_ptr;
	bool poison;
	bool unpriv;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC	(1U << 0)
#define BPF_ALU_SANITIZE_DST	(1U << 1)
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_IMMEDIATE	(1U << 4)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | \
				 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		struct bpf_map_ptr_state map_ptr_state;
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
		/* if instruction is a call to bpf_loop, this field tracks
		 * the state of the relevant registers to make a decision
		 * about inlining
		 */
		struct bpf_loop_inline_state loop_inline_state;
	};
	union {
		/* remember the size of type passed to bpf_obj_new to rewrite R1 */
		u64 obj_new_size;
		/* remember the offset of node field within type to rewrite */
		u64 insert_off;
	};
	struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
	bool zext_dst; /* this insn zero extends dst reg */
	bool needs_zext; /* alu op needs to clear upper bits */
	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
	bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
	bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
	u8 alu_state; /* used in combination with alu_limit */
	/* true if STX or LDX instruction is a part of a spill/fill
	 * pattern for a bpf_fastcall call.
	 */
	u8 fastcall_pattern:1;
	/* for CALL instructions, a number of spill/fill pairs in the
	 * bpf_fastcall pattern.
	 */
	u8 fastcall_spills_num:3;
	u8 arg_prog:4;

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool jmp_point;
	bool prune_point;
	/* ensure we check state equivalence and save state checkpoint at
	 * this instruction, regardless of any heuristics
	 */
	bool force_checkpoint;
	/* true if instruction is a call to a helper function that
	 * accepts callback function as a parameter.
	 */
	bool calls_callback;
	/* registers alive before this instruction. */
	u16 live_regs_before;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	/* Logical start and end positions of a "log window" of the verifier log.
	 * start_pos == 0 means we haven't truncated anything.
	 * Once truncation starts to happen, start_pos + len_total == end_pos,
	 * except during log reset situations, in which (end_pos - start_pos)
	 * might get smaller than len_total (see bpf_vlog_reset()).
	 * Generally, (end_pos - start_pos) gives the number of useful bytes
	 * in the user log buffer.
	 */
	u64 start_pos;
	u64 end_pos;
	char __user *ubuf;
	u32 level;
	u32 len_total;
	u32 len_max;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
};

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_FIXED	8
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log && log->level;
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_arg_info {
	enum bpf_arg_type arg_type;
	union {
		u32 mem_size;
		u32 btf_id;
	};
};

enum priv_stack_mode {
	PRIV_STACK_UNKNOWN,
	NO_PRIV_STACK,
	PRIV_STACK_ADAPTIVE,
};

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	u16 stack_extra;
	/* offsets in range [stack_depth .. fastcall_stack_off)
	 * are used for bpf_fastcall spills and fills.
	 */
	s16 fastcall_stack_off;
	bool has_tail_call: 1;
	bool tail_call_reachable: 1;
	bool has_ld_abs: 1;
	bool is_cb: 1;
	bool is_async_cb: 1;
	bool is_exception_cb: 1;
	bool args_cached: 1;
	/* true if bpf_fastcall stack region is used by functions that can't be inlined */
	bool keep_fastcall_stack: 1;
	bool changes_pkt_data: 1;
	bool might_sleep: 1;

	enum priv_stack_mode priv_stack_mode;
	u8 arg_cnt;
	struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
};

struct bpf_verifier_env;

struct backtrack_state {
	struct bpf_verifier_env *env;
	u32 frame;
	u32 reg_masks[MAX_CALL_FRAMES];
	u64 stack_masks[MAX_CALL_FRAMES];
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

struct bpf_idmap {
	u32 tmp_id_gen;
	struct bpf_id_pair map[BPF_ID_MAP_SIZE];
};

struct bpf_idset {
	u32 count;
	u32 ids[BPF_ID_MAP_SIZE];
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct module *attach_btf_mod;	/* The owner module of prog->aux->attach_btf */
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	bool test_reg_invariants;	/* fail verification on register invariants violations */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	/* Search pruning optimization, array of list_heads for
	 * lists of struct bpf_verifier_state_list.
	 */
	struct list_head *explored_states;
	struct list_head free_list;	/* list of struct bpf_verifier_state_list */
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	u32 hidden_subprog_cnt;		/* number of hidden subprogs */
	int exception_callback_subprog;
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	/* Allow access to uninitialized stack memory. Writes with fixed offset are
	 * always allowed, so this refers to reads (with fixed or variable offset),
	 * to writes with variable offset and to indirect (helper) accesses.
	 */
	bool allow_uninit_stack;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	bool seen_exception;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
	union {
		struct bpf_idmap idmap_scratch;
		struct bpf_idset idset_scratch;
	};
	struct {
		int *insn_state;
		int *insn_stack;
		/* vector of instruction indexes sorted in post-order */
		int *insn_postorder;
		int cur_stack;
		/* current position in the insn_postorder vector */
		int cur_postorder;
	} cfg;
	struct backtrack_state bt;
	struct bpf_insn_hist_entry *insn_hist;
	struct bpf_insn_hist_entry *cur_hist_ent;
	u32 insn_hist_cap;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is peak number of states. this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	u32 free_list_size;
	u32 explored_states_size;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u64 prev_log_pos, prev_insn_print_pos;
	/* buffer used to temporarily hold constants as scalar registers */
	struct bpf_reg_state fake_reg[2];
	/* buffer used to generate temporary string representations,
	 * e.g., in reg_type_str() to generate reg_type string
	 */
	char tmp_str_buf[TMP_STR_BUF_LEN];
	struct bpf_insn insn_buf[INSN_BUF_SIZE];
	struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
};

static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
{
	return &env->prog->aux->func_info_aux[subprog];
}

static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
{
	return &env->subprog_info[subprog];
}

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size);
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);

__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
				  u32 insn_off,
				  const char *prefix_fmt, ...);

#define verifier_bug_if(cond, env, fmt, args...)				\
	({									\
		bool __cond = (cond);						\
		if (unlikely(__cond)) {						\
			BPF_WARN_ONCE(1, "verifier bug: " fmt "(" #cond ")\n", ##args); \
			bpf_log(&env->log, "verifier bug: " fmt "(" #cond ")\n", ##args); \
		}								\
		(__cond);							\
	})
#define verifier_bug(env, fmt, args...) verifier_bug_if(1, env, fmt, ##args)
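
/* Illustrative usage sketch (not from the kernel sources): both macros report
 * via BPF_WARN_ONCE and the verifier log, and evaluate to the condition, so
 * they can drive an error path directly:
 *
 *	if (verifier_bug_if(regno >= MAX_BPF_REG, env, "invalid regno %d", regno))
 *		return -EFAULT;
 */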

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
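
/* Illustrative sketch (not from the kernel sources): the pack/unpack pair
 * round-trips, with the 0x80000000 marker in the low word masked off:
 *
 *	u64 key = bpf_trampoline_compute_key(NULL, btf, btf_id);
 *	u32 obj_id, unpacked;
 *
 *	bpf_trampoline_unpack_key(key, &obj_id, &unpacked);
 *	// obj_id == btf_obj_id(btf), unpacked == btf_id
 */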

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

int mark_chain_precision(struct bpf_verifier_env *env, int regno);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
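
/* Example: for a register typed PTR_TO_MAP_VALUE | PTR_MAYBE_NULL,
 * base_type() returns PTR_TO_MAP_VALUE and type_flag() returns
 * PTR_MAYBE_NULL.
 */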

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
		prog->aux->saved_dst_prog_type : prog->type;
}

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
		return prog->aux->jits_use_priv_stack;
	case BPF_PROG_TYPE_LSM:
		return false;
	default:
		return true;
	}
}

#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)

static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
	return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}

static inline bool type_is_ptr_alloc_obj(u32 type)
{
	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
}

static inline bool type_is_non_owning_ref(u32 type)
{
	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
}

static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	type = base_type(type);
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static inline bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
	       type == PTR_TO_SOCK_COMMON ||
	       type == PTR_TO_TCP_SOCK ||
	       type == PTR_TO_XDP_SOCK;
}

static inline bool type_may_be_null(u32 type)
{
	return type & PTR_MAYBE_NULL;
}

static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
	env->scratched_regs |= 1U << regno;
}

static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
	env->scratched_stack_slots |= 1ULL << spi;
}

static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
	return (env->scratched_regs >> regno) & 1;
}

static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
	return (env->scratched_stack_slots >> regno) & 1;
}

static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
	return env->scratched_regs || env->scratched_stack_slots;
}

static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
	env->scratched_regs = 0U;
	env->scratched_stack_slots = 0ULL;
}

/* Used for printing the entire verifier state. */
static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
	env->scratched_regs = ~0U;
	env->scratched_stack_slots = ~0ULL;
}

static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
{
#ifdef __BIG_ENDIAN
	off -= spill_size - fill_size;
#endif

	return !(off % BPF_REG_SIZE);
}
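
/* Illustrative sketch (editorial assumption about intent): reading the low
 * 4 bytes of an 8-byte spill at r10-8 means off == -8 on little-endian (the
 * low bytes sit at the slot's base) but off == -4 on big-endian; the
 * adjustment above maps the big-endian case back to the slot boundary so
 * both return true.
 */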

const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
const char *dynptr_type_str(enum bpf_dynptr_type type);
const char *iter_type_str(const struct btf *btf, u32 btf_id);
const char *iter_state_str(enum bpf_iter_state state);

void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
			  u32 frameno, bool print_all);
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
		      u32 frameno);

#endif /* _LINUX_BPF_VERIFIER_H */
58e2af8b 1036#endif /* _LINUX_BPF_VERIFIER_H */