Merge branch 'add-support-cpu-v4-insns-for-rv64'
[linux-block.git] / kernel / bpf / verifier.c
CommitLineData
5b497af4 1// SPDX-License-Identifier: GPL-2.0-only
51580e79 2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
969bf05e 3 * Copyright (c) 2016 Facebook
fd978bf7 4 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
51580e79 5 */
838e9690 6#include <uapi/linux/btf.h>
aef2feda 7#include <linux/bpf-cgroup.h>
51580e79
AS
8#include <linux/kernel.h>
9#include <linux/types.h>
10#include <linux/slab.h>
11#include <linux/bpf.h>
838e9690 12#include <linux/btf.h>
58e2af8b 13#include <linux/bpf_verifier.h>
51580e79
AS
14#include <linux/filter.h>
15#include <net/netlink.h>
16#include <linux/file.h>
17#include <linux/vmalloc.h>
ebb676da 18#include <linux/stringify.h>
cc8b0b92
AS
19#include <linux/bsearch.h>
20#include <linux/sort.h>
c195651e 21#include <linux/perf_event.h>
d9762e84 22#include <linux/ctype.h>
6ba43b76 23#include <linux/error-injection.h>
9e4e01df 24#include <linux/bpf_lsm.h>
1e6c62a8 25#include <linux/btf_ids.h>
47e34cb7 26#include <linux/poison.h>
bd5314f8 27#include <linux/module.h>
f42bcd16 28#include <linux/cpumask.h>
680ee045 29#include <net/xdp.h>
51580e79 30
f4ac7e0b
JK
31#include "disasm.h"
32
00176a34 33static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
91cc1a99 34#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
00176a34
JK
35 [_id] = & _name ## _verifier_ops,
36#define BPF_MAP_TYPE(_id, _ops)
f2e10bff 37#define BPF_LINK_TYPE(_id, _name)
00176a34
JK
38#include <linux/bpf_types.h>
39#undef BPF_PROG_TYPE
40#undef BPF_MAP_TYPE
f2e10bff 41#undef BPF_LINK_TYPE
00176a34
JK
42};
43
51580e79
AS
44/* bpf_check() is a static code analyzer that walks eBPF program
45 * instruction by instruction and updates register/stack state.
46 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
47 *
48 * The first pass is depth-first-search to check that the program is a DAG.
49 * It rejects the following programs:
50 * - larger than BPF_MAXINSNS insns
51 * - if loop is present (detected via back-edge)
52 * - unreachable insns exist (shouldn't be a forest. program = one function)
53 * - out of bounds or malformed jumps
54 * The second pass is all possible path descent from the 1st insn.
8fb33b60 55 * Since it's analyzing all paths through the program, the length of the
eba38a96 56 * analysis is limited to 64k insn, which may be hit even if total number of
51580e79
AS
57 * insn is less then 4K, but there are too many branches that change stack/regs.
58 * Number of 'branches to be analyzed' is limited to 1k
59 *
60 * On entry to each instruction, each register has a type, and the instruction
61 * changes the types of the registers depending on instruction semantics.
62 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
63 * copied to R1.
64 *
65 * All registers are 64-bit.
66 * R0 - return register
67 * R1-R5 argument passing registers
68 * R6-R9 callee saved registers
69 * R10 - frame pointer read-only
70 *
71 * At the start of BPF program the register R1 contains a pointer to bpf_context
72 * and has type PTR_TO_CTX.
73 *
74 * Verifier tracks arithmetic operations on pointers in case:
75 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
76 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
77 * 1st insn copies R10 (which has FRAME_PTR) type into R1
78 * and 2nd arithmetic instruction is pattern matched to recognize
79 * that it wants to construct a pointer to some element within stack.
80 * So after 2nd insn, the register R1 has type PTR_TO_STACK
81 * (and -20 constant is saved for further stack bounds checking).
82 * Meaning that this reg is a pointer to stack plus known immediate constant.
83 *
f1174f77 84 * Most of the time the registers have SCALAR_VALUE type, which
51580e79 85 * means the register has some value, but it's not a valid pointer.
f1174f77 86 * (like pointer plus pointer becomes SCALAR_VALUE type)
51580e79
AS
87 *
88 * When verifier sees load or store instructions the type of base register
c64b7983
JS
89 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
90 * four pointer types recognized by check_mem_access() function.
51580e79
AS
91 *
92 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
93 * and the range of [ptr, ptr + map's value_size) is accessible.
94 *
95 * registers used to pass values to function calls are checked against
96 * function argument constraints.
97 *
98 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
99 * It means that the register type passed to this function must be
100 * PTR_TO_STACK and it will be used inside the function as
101 * 'pointer to map element key'
102 *
103 * For example the argument constraints for bpf_map_lookup_elem():
104 * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
105 * .arg1_type = ARG_CONST_MAP_PTR,
106 * .arg2_type = ARG_PTR_TO_MAP_KEY,
107 *
108 * ret_type says that this function returns 'pointer to map elem value or null'
109 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
110 * 2nd argument should be a pointer to stack, which will be used inside
111 * the helper function as a pointer to map element key.
112 *
113 * On the kernel side the helper function looks like:
114 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
115 * {
116 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
117 * void *key = (void *) (unsigned long) r2;
118 * void *value;
119 *
120 * here kernel can access 'key' and 'map' pointers safely, knowing that
121 * [key, key + map->key_size) bytes are valid and were initialized on
122 * the stack of eBPF program.
123 * }
124 *
125 * Corresponding eBPF program may look like:
126 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
127 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
128 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
129 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
130 * here verifier looks at prototype of map_lookup_elem() and sees:
131 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
132 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
133 *
134 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
135 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
136 * and were initialized prior to this call.
137 * If it's ok, then verifier allows this BPF_CALL insn and looks at
138 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
139 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
8fb33b60 140 * returns either pointer to map value or NULL.
51580e79
AS
141 *
142 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
143 * insn, the register holding that pointer in the true branch changes state to
144 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
145 * branch. See check_cond_jmp_op().
146 *
147 * After the call R0 is set to return type of the function and registers R1-R5
148 * are set to NOT_INIT to indicate that they are no longer readable.
fd978bf7
JS
149 *
150 * The following reference types represent a potential reference to a kernel
151 * resource which, after first being allocated, must be checked and freed by
152 * the BPF program:
153 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
154 *
155 * When the verifier sees a helper call return a reference type, it allocates a
156 * pointer id for the reference and stores it in the current function state.
157 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
158 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
159 * passes through a NULL-check conditional. For the branch wherein the state is
160 * changed to CONST_IMM, the verifier releases the reference.
6acc9b43
JS
161 *
162 * For each helper function that allocates a reference, such as
163 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
164 * bpf_sk_release(). When a reference type passes into the release function,
165 * the verifier also releases the reference. If any unchecked or unreleased
166 * reference remains at the end of the program, the verifier rejects it.
51580e79
AS
167 */
168
17a52670 169/* verifier_state + insn_idx are pushed to stack when branch is encountered */
58e2af8b 170struct bpf_verifier_stack_elem {
17a52670
AS
171 /* verifer state is 'st'
172 * before processing instruction 'insn_idx'
173 * and after processing instruction 'prev_insn_idx'
174 */
58e2af8b 175 struct bpf_verifier_state st;
17a52670
AS
176 int insn_idx;
177 int prev_insn_idx;
58e2af8b 178 struct bpf_verifier_stack_elem *next;
6f8a57cc
AN
179 /* length of verifier log at the time this state was pushed on stack */
180 u32 log_pos;
cbd35700
AS
181};
182
b285fcb7 183#define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192
ceefbc96 184#define BPF_COMPLEXITY_LIMIT_STATES 64
07016151 185
d2e4c1e6
DB
186#define BPF_MAP_KEY_POISON (1ULL << 63)
187#define BPF_MAP_KEY_SEEN (1ULL << 62)
188
c93552c4
DB
189#define BPF_MAP_PTR_UNPRIV 1UL
190#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
191 POISON_POINTER_DELTA))
192#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
193
bc34dee6
JK
194static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
195static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
6a3cd331 196static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
5d92ddc3 197static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
6a3cd331
DM
198static int ref_set_non_owning(struct bpf_verifier_env *env,
199 struct bpf_reg_state *reg);
1cf3bfc6
IL
200static void specialize_kfunc(struct bpf_verifier_env *env,
201 u32 func_id, u16 offset, unsigned long *addr);
51302c95 202static bool is_trusted_reg(const struct bpf_reg_state *reg);
bc34dee6 203
c93552c4
DB
204static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
205{
d2e4c1e6 206 return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
c93552c4
DB
207}
208
209static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
210{
d2e4c1e6 211 return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
c93552c4
DB
212}
213
214static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
215 const struct bpf_map *map, bool unpriv)
216{
217 BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
218 unpriv |= bpf_map_ptr_unpriv(aux);
d2e4c1e6
DB
219 aux->map_ptr_state = (unsigned long)map |
220 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
221}
222
223static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
224{
225 return aux->map_key_state & BPF_MAP_KEY_POISON;
226}
227
228static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
229{
230 return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
231}
232
233static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
234{
235 return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
236}
237
238static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
239{
240 bool poisoned = bpf_map_key_poisoned(aux);
241
242 aux->map_key_state = state | BPF_MAP_KEY_SEEN |
243 (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
c93552c4 244}
fad73a1a 245
fde2a388
AN
246static bool bpf_helper_call(const struct bpf_insn *insn)
247{
248 return insn->code == (BPF_JMP | BPF_CALL) &&
249 insn->src_reg == 0;
250}
251
23a2d70c
YS
252static bool bpf_pseudo_call(const struct bpf_insn *insn)
253{
254 return insn->code == (BPF_JMP | BPF_CALL) &&
255 insn->src_reg == BPF_PSEUDO_CALL;
256}
257
e6ac2450
MKL
258static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
259{
260 return insn->code == (BPF_JMP | BPF_CALL) &&
261 insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
262}
263
33ff9823
DB
264struct bpf_call_arg_meta {
265 struct bpf_map *map_ptr;
435faee1 266 bool raw_mode;
36bbef52 267 bool pkt_access;
8f14852e 268 u8 release_regno;
435faee1
DB
269 int regno;
270 int access_size;
457f4436 271 int mem_size;
10060503 272 u64 msize_max_value;
1b986589 273 int ref_obj_id;
f8064ab9 274 int dynptr_id;
3e8ce298 275 int map_uid;
d83525ca 276 int func_id;
22dc4a0f 277 struct btf *btf;
eaa6bcb7 278 u32 btf_id;
22dc4a0f 279 struct btf *ret_btf;
eaa6bcb7 280 u32 ret_btf_id;
69c087ba 281 u32 subprogno;
aa3496ac 282 struct btf_field *kptr_field;
33ff9823
DB
283};
284
d0e1ac22
AN
285struct bpf_kfunc_call_arg_meta {
286 /* In parameters */
287 struct btf *btf;
288 u32 func_id;
289 u32 kfunc_flags;
290 const struct btf_type *func_proto;
291 const char *func_name;
292 /* Out parameters */
293 u32 ref_obj_id;
294 u8 release_regno;
295 bool r0_rdonly;
296 u32 ret_btf_id;
297 u64 r0_size;
298 u32 subprogno;
299 struct {
300 u64 value;
301 bool found;
302 } arg_constant;
4d585f48 303
7793fc3b 304 /* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
4d585f48
DM
305 * generally to pass info about user-defined local kptr types to later
306 * verification logic
307 * bpf_obj_drop
308 * Record the local kptr type to be drop'd
309 * bpf_refcount_acquire (via KF_ARG_PTR_TO_REFCOUNTED_KPTR arg type)
7793fc3b
DM
310 * Record the local kptr type to be refcount_incr'd and use
311 * arg_owning_ref to determine whether refcount_acquire should be
312 * fallible
4d585f48
DM
313 */
314 struct btf *arg_btf;
315 u32 arg_btf_id;
7793fc3b 316 bool arg_owning_ref;
4d585f48 317
d0e1ac22
AN
318 struct {
319 struct btf_field *field;
320 } arg_list_head;
321 struct {
322 struct btf_field *field;
323 } arg_rbtree_root;
324 struct {
325 enum bpf_dynptr_type type;
326 u32 id;
361f129f 327 u32 ref_obj_id;
d0e1ac22 328 } initialized_dynptr;
06accc87
AN
329 struct {
330 u8 spi;
331 u8 frameno;
332 } iter;
d0e1ac22
AN
333 u64 mem_size;
334};
335
8580ac94
AS
336struct btf *btf_vmlinux;
337
cbd35700
AS
338static DEFINE_MUTEX(bpf_verifier_lock);
339
d9762e84
MKL
340static const struct bpf_line_info *
341find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
342{
343 const struct bpf_line_info *linfo;
344 const struct bpf_prog *prog;
345 u32 i, nr_linfo;
346
347 prog = env->prog;
348 nr_linfo = prog->aux->nr_linfo;
349
350 if (!nr_linfo || insn_off >= prog->len)
351 return NULL;
352
353 linfo = prog->aux->linfo;
354 for (i = 1; i < nr_linfo; i++)
355 if (insn_off < linfo[i].insn_off)
356 break;
357
358 return &linfo[i - 1];
359}
360
abe08840
JO
361__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
362{
77d2e05a 363 struct bpf_verifier_env *env = private_data;
abe08840
JO
364 va_list args;
365
77d2e05a
MKL
366 if (!bpf_verifier_log_needed(&env->log))
367 return;
368
abe08840 369 va_start(args, fmt);
77d2e05a 370 bpf_verifier_vlog(&env->log, fmt, args);
abe08840
JO
371 va_end(args);
372}
cbd35700 373
d9762e84
MKL
374static const char *ltrim(const char *s)
375{
376 while (isspace(*s))
377 s++;
378
379 return s;
380}
381
382__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
383 u32 insn_off,
384 const char *prefix_fmt, ...)
385{
386 const struct bpf_line_info *linfo;
387
388 if (!bpf_verifier_log_needed(&env->log))
389 return;
390
391 linfo = find_linfo(env, insn_off);
392 if (!linfo || linfo == env->prev_linfo)
393 return;
394
395 if (prefix_fmt) {
396 va_list args;
397
398 va_start(args, prefix_fmt);
399 bpf_verifier_vlog(&env->log, prefix_fmt, args);
400 va_end(args);
401 }
402
403 verbose(env, "%s\n",
404 ltrim(btf_name_by_offset(env->prog->aux->btf,
405 linfo->line_off)));
406
407 env->prev_linfo = linfo;
408}
409
bc2591d6
YS
410static void verbose_invalid_scalar(struct bpf_verifier_env *env,
411 struct bpf_reg_state *reg,
412 struct tnum *range, const char *ctx,
413 const char *reg_name)
414{
415 char tn_buf[48];
416
417 verbose(env, "At %s the register %s ", ctx, reg_name);
418 if (!tnum_is_unknown(reg->var_off)) {
419 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
420 verbose(env, "has value %s", tn_buf);
421 } else {
422 verbose(env, "has unknown scalar value");
423 }
424 tnum_strn(tn_buf, sizeof(tn_buf), *range);
425 verbose(env, " should have been in %s\n", tn_buf);
426}
427
de8f3a83
DB
428static bool type_is_pkt_pointer(enum bpf_reg_type type)
429{
0c9a7a7e 430 type = base_type(type);
de8f3a83
DB
431 return type == PTR_TO_PACKET ||
432 type == PTR_TO_PACKET_META;
433}
434
46f8bc92
MKL
435static bool type_is_sk_pointer(enum bpf_reg_type type)
436{
437 return type == PTR_TO_SOCKET ||
655a51e5 438 type == PTR_TO_SOCK_COMMON ||
fada7fdc
JL
439 type == PTR_TO_TCP_SOCK ||
440 type == PTR_TO_XDP_SOCK;
46f8bc92
MKL
441}
442
1057d299
AS
443static bool type_may_be_null(u32 type)
444{
445 return type & PTR_MAYBE_NULL;
446}
447
51302c95 448static bool reg_not_null(const struct bpf_reg_state *reg)
cac616db 449{
51302c95
DV
450 enum bpf_reg_type type;
451
452 type = reg->type;
1057d299
AS
453 if (type_may_be_null(type))
454 return false;
455
456 type = base_type(type);
cac616db
JF
457 return type == PTR_TO_SOCKET ||
458 type == PTR_TO_TCP_SOCK ||
459 type == PTR_TO_MAP_VALUE ||
69c087ba 460 type == PTR_TO_MAP_KEY ||
d5271c5b 461 type == PTR_TO_SOCK_COMMON ||
51302c95 462 (type == PTR_TO_BTF_ID && is_trusted_reg(reg)) ||
d5271c5b 463 type == PTR_TO_MEM;
cac616db
JF
464}
465
d8939cb0
DM
466static bool type_is_ptr_alloc_obj(u32 type)
467{
468 return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
469}
470
6a3cd331
DM
471static bool type_is_non_owning_ref(u32 type)
472{
473 return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
474}
475
4e814da0
KKD
476static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
477{
478 struct btf_record *rec = NULL;
479 struct btf_struct_meta *meta;
480
481 if (reg->type == PTR_TO_MAP_VALUE) {
482 rec = reg->map_ptr->record;
d8939cb0 483 } else if (type_is_ptr_alloc_obj(reg->type)) {
4e814da0
KKD
484 meta = btf_find_struct_meta(reg->btf, reg->btf_id);
485 if (meta)
486 rec = meta->record;
487 }
488 return rec;
489}
490
fde2a388
AN
491static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
492{
493 struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux;
494
495 return aux && aux[subprog].linkage == BTF_FUNC_GLOBAL;
496}
497
d83525ca
AS
498static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
499{
4e814da0 500 return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
cba368c1
MKL
501}
502
20b2aff4
HL
503static bool type_is_rdonly_mem(u32 type)
504{
505 return type & MEM_RDONLY;
cba368c1
MKL
506}
507
64d85290
JS
508static bool is_acquire_function(enum bpf_func_id func_id,
509 const struct bpf_map *map)
510{
511 enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;
512
513 if (func_id == BPF_FUNC_sk_lookup_tcp ||
514 func_id == BPF_FUNC_sk_lookup_udp ||
457f4436 515 func_id == BPF_FUNC_skc_lookup_tcp ||
c0a5a21c
KKD
516 func_id == BPF_FUNC_ringbuf_reserve ||
517 func_id == BPF_FUNC_kptr_xchg)
64d85290
JS
518 return true;
519
520 if (func_id == BPF_FUNC_map_lookup_elem &&
521 (map_type == BPF_MAP_TYPE_SOCKMAP ||
522 map_type == BPF_MAP_TYPE_SOCKHASH))
523 return true;
524
525 return false;
46f8bc92
MKL
526}
527
1b986589
MKL
528static bool is_ptr_cast_function(enum bpf_func_id func_id)
529{
530 return func_id == BPF_FUNC_tcp_sock ||
1df8f55a
MKL
531 func_id == BPF_FUNC_sk_fullsock ||
532 func_id == BPF_FUNC_skc_to_tcp_sock ||
533 func_id == BPF_FUNC_skc_to_tcp6_sock ||
534 func_id == BPF_FUNC_skc_to_udp6_sock ||
3bc253c2 535 func_id == BPF_FUNC_skc_to_mptcp_sock ||
1df8f55a
MKL
536 func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
537 func_id == BPF_FUNC_skc_to_tcp_request_sock;
1b986589
MKL
538}
539
88374342 540static bool is_dynptr_ref_function(enum bpf_func_id func_id)
b2d8ef19
DM
541{
542 return func_id == BPF_FUNC_dynptr_data;
543}
544
fde2a388
AN
545static bool is_callback_calling_kfunc(u32 btf_id);
546
be2ef816
AN
547static bool is_callback_calling_function(enum bpf_func_id func_id)
548{
549 return func_id == BPF_FUNC_for_each_map_elem ||
550 func_id == BPF_FUNC_timer_set_callback ||
551 func_id == BPF_FUNC_find_vma ||
552 func_id == BPF_FUNC_loop ||
553 func_id == BPF_FUNC_user_ringbuf_drain;
554}
555
fde2a388
AN
556static bool is_async_callback_calling_function(enum bpf_func_id func_id)
557{
558 return func_id == BPF_FUNC_timer_set_callback;
559}
560
9bb00b28
YS
561static bool is_storage_get_function(enum bpf_func_id func_id)
562{
563 return func_id == BPF_FUNC_sk_storage_get ||
564 func_id == BPF_FUNC_inode_storage_get ||
565 func_id == BPF_FUNC_task_storage_get ||
566 func_id == BPF_FUNC_cgrp_storage_get;
567}
568
b2d8ef19
DM
569static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
570 const struct bpf_map *map)
571{
572 int ref_obj_uses = 0;
573
574 if (is_ptr_cast_function(func_id))
575 ref_obj_uses++;
576 if (is_acquire_function(func_id, map))
577 ref_obj_uses++;
88374342 578 if (is_dynptr_ref_function(func_id))
b2d8ef19
DM
579 ref_obj_uses++;
580
581 return ref_obj_uses > 1;
582}
583
39491867
BJ
584static bool is_cmpxchg_insn(const struct bpf_insn *insn)
585{
586 return BPF_CLASS(insn->code) == BPF_STX &&
587 BPF_MODE(insn->code) == BPF_ATOMIC &&
588 insn->imm == BPF_CMPXCHG;
589}
590
c25b2ae1
HL
591/* string representation of 'enum bpf_reg_type'
592 *
593 * Note that reg_type_str() can not appear more than once in a single verbose()
594 * statement.
595 */
596static const char *reg_type_str(struct bpf_verifier_env *env,
597 enum bpf_reg_type type)
598{
ef66c547 599 char postfix[16] = {0}, prefix[64] = {0};
c25b2ae1
HL
600 static const char * const str[] = {
601 [NOT_INIT] = "?",
7df5072c 602 [SCALAR_VALUE] = "scalar",
c25b2ae1
HL
603 [PTR_TO_CTX] = "ctx",
604 [CONST_PTR_TO_MAP] = "map_ptr",
605 [PTR_TO_MAP_VALUE] = "map_value",
606 [PTR_TO_STACK] = "fp",
607 [PTR_TO_PACKET] = "pkt",
608 [PTR_TO_PACKET_META] = "pkt_meta",
609 [PTR_TO_PACKET_END] = "pkt_end",
610 [PTR_TO_FLOW_KEYS] = "flow_keys",
611 [PTR_TO_SOCKET] = "sock",
612 [PTR_TO_SOCK_COMMON] = "sock_common",
613 [PTR_TO_TCP_SOCK] = "tcp_sock",
614 [PTR_TO_TP_BUFFER] = "tp_buffer",
615 [PTR_TO_XDP_SOCK] = "xdp_sock",
616 [PTR_TO_BTF_ID] = "ptr_",
c25b2ae1 617 [PTR_TO_MEM] = "mem",
20b2aff4 618 [PTR_TO_BUF] = "buf",
c25b2ae1
HL
619 [PTR_TO_FUNC] = "func",
620 [PTR_TO_MAP_KEY] = "map_key",
27060531 621 [CONST_PTR_TO_DYNPTR] = "dynptr_ptr",
c25b2ae1
HL
622 };
623
624 if (type & PTR_MAYBE_NULL) {
5844101a 625 if (base_type(type) == PTR_TO_BTF_ID)
c25b2ae1
HL
626 strncpy(postfix, "or_null_", 16);
627 else
628 strncpy(postfix, "_or_null", 16);
629 }
630
9bb00b28 631 snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
ef66c547
DV
632 type & MEM_RDONLY ? "rdonly_" : "",
633 type & MEM_RINGBUF ? "ringbuf_" : "",
634 type & MEM_USER ? "user_" : "",
635 type & MEM_PERCPU ? "percpu_" : "",
9bb00b28 636 type & MEM_RCU ? "rcu_" : "",
3f00c523
DV
637 type & PTR_UNTRUSTED ? "untrusted_" : "",
638 type & PTR_TRUSTED ? "trusted_" : ""
ef66c547 639 );
20b2aff4 640
d9439c21 641 snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s",
20b2aff4 642 prefix, str[base_type(type)], postfix);
d9439c21 643 return env->tmp_str_buf;
c25b2ae1 644}
17a52670 645
8efea21d
EC
646static char slot_type_char[] = {
647 [STACK_INVALID] = '?',
648 [STACK_SPILL] = 'r',
649 [STACK_MISC] = 'm',
650 [STACK_ZERO] = '0',
97e03f52 651 [STACK_DYNPTR] = 'd',
06accc87 652 [STACK_ITER] = 'i',
8efea21d
EC
653};
654
4e92024a
AS
655static void print_liveness(struct bpf_verifier_env *env,
656 enum bpf_reg_liveness live)
657{
9242b5f5 658 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
4e92024a
AS
659 verbose(env, "_");
660 if (live & REG_LIVE_READ)
661 verbose(env, "r");
662 if (live & REG_LIVE_WRITTEN)
663 verbose(env, "w");
9242b5f5
AS
664 if (live & REG_LIVE_DONE)
665 verbose(env, "D");
4e92024a
AS
666}
667
79168a66 668static int __get_spi(s32 off)
97e03f52
JK
669{
670 return (-off - 1) / BPF_REG_SIZE;
671}
672
f5b625e5
KKD
673static struct bpf_func_state *func(struct bpf_verifier_env *env,
674 const struct bpf_reg_state *reg)
675{
676 struct bpf_verifier_state *cur = env->cur_state;
677
678 return cur->frame[reg->frameno];
679}
680
97e03f52
JK
681static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
682{
f5b625e5 683 int allocated_slots = state->allocated_stack / BPF_REG_SIZE;
97e03f52 684
f5b625e5
KKD
685 /* We need to check that slots between [spi - nr_slots + 1, spi] are
686 * within [0, allocated_stack).
687 *
688 * Please note that the spi grows downwards. For example, a dynptr
689 * takes the size of two stack slots; the first slot will be at
690 * spi and the second slot will be at spi - 1.
691 */
692 return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
97e03f52
JK
693}
694
a461f5ad
AN
695static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
696 const char *obj_kind, int nr_slots)
f4d7e40a 697{
79168a66 698 int off, spi;
f4d7e40a 699
79168a66 700 if (!tnum_is_const(reg->var_off)) {
a461f5ad 701 verbose(env, "%s has to be at a constant offset\n", obj_kind);
79168a66
KKD
702 return -EINVAL;
703 }
704
705 off = reg->off + reg->var_off.value;
706 if (off % BPF_REG_SIZE) {
a461f5ad 707 verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off);
79168a66
KKD
708 return -EINVAL;
709 }
710
711 spi = __get_spi(off);
a461f5ad
AN
712 if (spi + 1 < nr_slots) {
713 verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off);
79168a66
KKD
714 return -EINVAL;
715 }
97e03f52 716
a461f5ad 717 if (!is_spi_bounds_valid(func(env, reg), spi, nr_slots))
f5b625e5
KKD
718 return -ERANGE;
719 return spi;
f4d7e40a
AS
720}
721
a461f5ad
AN
722static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
723{
724 return stack_slot_obj_get_spi(env, reg, "dynptr", BPF_DYNPTR_NR_SLOTS);
725}
726
06accc87
AN
727static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots)
728{
729 return stack_slot_obj_get_spi(env, reg, "iter", nr_slots);
730}
731
b32a5dae 732static const char *btf_type_name(const struct btf *btf, u32 id)
9e15db66 733{
22dc4a0f 734 return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
9e15db66
AS
735}
736
d54e0f6c
AN
737static const char *dynptr_type_str(enum bpf_dynptr_type type)
738{
739 switch (type) {
740 case BPF_DYNPTR_TYPE_LOCAL:
741 return "local";
742 case BPF_DYNPTR_TYPE_RINGBUF:
743 return "ringbuf";
744 case BPF_DYNPTR_TYPE_SKB:
745 return "skb";
746 case BPF_DYNPTR_TYPE_XDP:
747 return "xdp";
748 case BPF_DYNPTR_TYPE_INVALID:
749 return "<invalid>";
750 default:
751 WARN_ONCE(1, "unknown dynptr type %d\n", type);
752 return "<unknown>";
753 }
754}
755
06accc87
AN
756static const char *iter_type_str(const struct btf *btf, u32 btf_id)
757{
758 if (!btf || btf_id == 0)
759 return "<invalid>";
760
761 /* we already validated that type is valid and has conforming name */
b32a5dae 762 return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1;
06accc87
AN
763}
764
765static const char *iter_state_str(enum bpf_iter_state state)
766{
767 switch (state) {
768 case BPF_ITER_STATE_ACTIVE:
769 return "active";
770 case BPF_ITER_STATE_DRAINED:
771 return "drained";
772 case BPF_ITER_STATE_INVALID:
773 return "<invalid>";
774 default:
775 WARN_ONCE(1, "unknown iter state %d\n", state);
776 return "<unknown>";
777 }
778}
779
0f55f9ed
CL
780static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
781{
782 env->scratched_regs |= 1U << regno;
783}
784
785static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
786{
343e5375 787 env->scratched_stack_slots |= 1ULL << spi;
0f55f9ed
CL
788}
789
790static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
791{
792 return (env->scratched_regs >> regno) & 1;
793}
794
795static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
796{
797 return (env->scratched_stack_slots >> regno) & 1;
798}
799
800static bool verifier_state_scratched(const struct bpf_verifier_env *env)
801{
802 return env->scratched_regs || env->scratched_stack_slots;
803}
804
805static void mark_verifier_state_clean(struct bpf_verifier_env *env)
806{
807 env->scratched_regs = 0U;
343e5375 808 env->scratched_stack_slots = 0ULL;
0f55f9ed
CL
809}
810
811/* Used for printing the entire verifier state. */
812static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
813{
814 env->scratched_regs = ~0U;
343e5375 815 env->scratched_stack_slots = ~0ULL;
0f55f9ed
CL
816}
817
97e03f52
JK
818static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
819{
820 switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
821 case DYNPTR_TYPE_LOCAL:
822 return BPF_DYNPTR_TYPE_LOCAL;
bc34dee6
JK
823 case DYNPTR_TYPE_RINGBUF:
824 return BPF_DYNPTR_TYPE_RINGBUF;
b5964b96
JK
825 case DYNPTR_TYPE_SKB:
826 return BPF_DYNPTR_TYPE_SKB;
05421aec
JK
827 case DYNPTR_TYPE_XDP:
828 return BPF_DYNPTR_TYPE_XDP;
97e03f52
JK
829 default:
830 return BPF_DYNPTR_TYPE_INVALID;
831 }
832}
833
66e3a13e
JK
834static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type)
835{
836 switch (type) {
837 case BPF_DYNPTR_TYPE_LOCAL:
838 return DYNPTR_TYPE_LOCAL;
839 case BPF_DYNPTR_TYPE_RINGBUF:
840 return DYNPTR_TYPE_RINGBUF;
841 case BPF_DYNPTR_TYPE_SKB:
842 return DYNPTR_TYPE_SKB;
843 case BPF_DYNPTR_TYPE_XDP:
844 return DYNPTR_TYPE_XDP;
845 default:
846 return 0;
847 }
848}
849
bc34dee6
JK
850static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
851{
852 return type == BPF_DYNPTR_TYPE_RINGBUF;
853}
854
27060531
KKD
855static void __mark_dynptr_reg(struct bpf_reg_state *reg,
856 enum bpf_dynptr_type type,
f8064ab9 857 bool first_slot, int dynptr_id);
27060531
KKD
858
859static void __mark_reg_not_init(const struct bpf_verifier_env *env,
860 struct bpf_reg_state *reg);
861
f8064ab9
KKD
862static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
863 struct bpf_reg_state *sreg1,
27060531
KKD
864 struct bpf_reg_state *sreg2,
865 enum bpf_dynptr_type type)
866{
f8064ab9
KKD
867 int id = ++env->id_gen;
868
869 __mark_dynptr_reg(sreg1, type, true, id);
870 __mark_dynptr_reg(sreg2, type, false, id);
27060531
KKD
871}
872
f8064ab9
KKD
873static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
874 struct bpf_reg_state *reg,
27060531
KKD
875 enum bpf_dynptr_type type)
876{
f8064ab9 877 __mark_dynptr_reg(reg, type, true, ++env->id_gen);
27060531
KKD
878}
879
ef8fc7a0
KKD
880static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
881 struct bpf_func_state *state, int spi);
27060531 882
97e03f52 883static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
361f129f 884 enum bpf_arg_type arg_type, int insn_idx, int clone_ref_obj_id)
97e03f52
JK
885{
886 struct bpf_func_state *state = func(env, reg);
887 enum bpf_dynptr_type type;
361f129f 888 int spi, i, err;
97e03f52 889
79168a66
KKD
890 spi = dynptr_get_spi(env, reg);
891 if (spi < 0)
892 return spi;
97e03f52 893
379d4ba8
KKD
894 /* We cannot assume both spi and spi - 1 belong to the same dynptr,
895 * hence we need to call destroy_if_dynptr_stack_slot twice for both,
896 * to ensure that for the following example:
897 * [d1][d1][d2][d2]
898 * spi 3 2 1 0
899 * So marking spi = 2 should lead to destruction of both d1 and d2. In
900 * case they do belong to same dynptr, second call won't see slot_type
901 * as STACK_DYNPTR and will simply skip destruction.
902 */
903 err = destroy_if_dynptr_stack_slot(env, state, spi);
904 if (err)
905 return err;
906 err = destroy_if_dynptr_stack_slot(env, state, spi - 1);
907 if (err)
908 return err;
97e03f52
JK
909
910 for (i = 0; i < BPF_REG_SIZE; i++) {
911 state->stack[spi].slot_type[i] = STACK_DYNPTR;
912 state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
913 }
914
915 type = arg_to_dynptr_type(arg_type);
916 if (type == BPF_DYNPTR_TYPE_INVALID)
917 return -EINVAL;
918
f8064ab9 919 mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr,
27060531 920 &state->stack[spi - 1].spilled_ptr, type);
97e03f52 921
bc34dee6
JK
922 if (dynptr_type_refcounted(type)) {
923 /* The id is used to track proper releasing */
361f129f
JK
924 int id;
925
926 if (clone_ref_obj_id)
927 id = clone_ref_obj_id;
928 else
929 id = acquire_reference_state(env, insn_idx);
930
bc34dee6
JK
931 if (id < 0)
932 return id;
933
27060531
KKD
934 state->stack[spi].spilled_ptr.ref_obj_id = id;
935 state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
bc34dee6
JK
936 }
937
d6fefa11
KKD
938 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
939 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
940
97e03f52
JK
941 return 0;
942}
943
361f129f 944static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi)
97e03f52 945{
361f129f 946 int i;
97e03f52
JK
947
948 for (i = 0; i < BPF_REG_SIZE; i++) {
949 state->stack[spi].slot_type[i] = STACK_INVALID;
950 state->stack[spi - 1].slot_type[i] = STACK_INVALID;
951 }
952
27060531
KKD
953 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
954 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
d6fefa11
KKD
955
956 /* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot?
957 *
958 * While we don't allow reading STACK_INVALID, it is still possible to
959 * do <8 byte writes marking some but not all slots as STACK_MISC. Then,
960 * helpers or insns can do partial read of that part without failing,
961 * but check_stack_range_initialized, check_stack_read_var_off, and
962 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of
963 * the slot conservatively. Hence we need to prevent those liveness
964 * marking walks.
965 *
966 * This was not a problem before because STACK_INVALID is only set by
967 * default (where the default reg state has its reg->parent as NULL), or
968 * in clean_live_states after REG_LIVE_DONE (at which point
969 * mark_reg_read won't walk reg->parent chain), but not randomly during
970 * verifier state exploration (like we did above). Hence, for our case
971 * parentage chain will still be live (i.e. reg->parent may be
972 * non-NULL), while earlier reg->parent was NULL, so we need
973 * REG_LIVE_WRITTEN to screen off read marker propagation when it is
974 * done later on reads or by mark_dynptr_read as well to unnecessary
975 * mark registers in verifier state.
976 */
977 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
978 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
361f129f
JK
979}
980
981static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
982{
983 struct bpf_func_state *state = func(env, reg);
984 int spi, ref_obj_id, i;
985
986 spi = dynptr_get_spi(env, reg);
987 if (spi < 0)
988 return spi;
989
990 if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
991 invalidate_dynptr(env, state, spi);
992 return 0;
993 }
994
995 ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id;
996
997 /* If the dynptr has a ref_obj_id, then we need to invalidate
998 * two things:
999 *
1000 * 1) Any dynptrs with a matching ref_obj_id (clones)
1001 * 2) Any slices derived from this dynptr.
1002 */
1003
1004 /* Invalidate any slices associated with this dynptr */
1005 WARN_ON_ONCE(release_reference(env, ref_obj_id));
1006
1007 /* Invalidate any dynptr clones */
1008 for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) {
1009 if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id)
1010 continue;
1011
1012 /* it should always be the case that if the ref obj id
1013 * matches then the stack slot also belongs to a
1014 * dynptr
1015 */
1016 if (state->stack[i].slot_type[0] != STACK_DYNPTR) {
1017 verbose(env, "verifier internal error: misconfigured ref_obj_id\n");
1018 return -EFAULT;
1019 }
1020 if (state->stack[i].spilled_ptr.dynptr.first_slot)
1021 invalidate_dynptr(env, state, i);
1022 }
d6fefa11 1023
97e03f52
JK
1024 return 0;
1025}
1026
ef8fc7a0
KKD
1027static void __mark_reg_unknown(const struct bpf_verifier_env *env,
1028 struct bpf_reg_state *reg);
1029
dbd8d228
KKD
1030static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
1031{
1032 if (!env->allow_ptr_leaks)
1033 __mark_reg_not_init(env, reg);
1034 else
1035 __mark_reg_unknown(env, reg);
1036}
1037
ef8fc7a0
KKD
1038static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
1039 struct bpf_func_state *state, int spi)
97e03f52 1040{
f8064ab9
KKD
1041 struct bpf_func_state *fstate;
1042 struct bpf_reg_state *dreg;
1043 int i, dynptr_id;
27060531 1044
ef8fc7a0
KKD
1045 /* We always ensure that STACK_DYNPTR is never set partially,
1046 * hence just checking for slot_type[0] is enough. This is
1047 * different for STACK_SPILL, where it may be only set for
1048 * 1 byte, so code has to use is_spilled_reg.
1049 */
1050 if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
1051 return 0;
97e03f52 1052
ef8fc7a0
KKD
1053 /* Reposition spi to first slot */
1054 if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
1055 spi = spi + 1;
1056
1057 if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
1058 verbose(env, "cannot overwrite referenced dynptr\n");
1059 return -EINVAL;
1060 }
1061
1062 mark_stack_slot_scratched(env, spi);
1063 mark_stack_slot_scratched(env, spi - 1);
97e03f52 1064
ef8fc7a0 1065 /* Writing partially to one dynptr stack slot destroys both. */
97e03f52 1066 for (i = 0; i < BPF_REG_SIZE; i++) {
ef8fc7a0
KKD
1067 state->stack[spi].slot_type[i] = STACK_INVALID;
1068 state->stack[spi - 1].slot_type[i] = STACK_INVALID;
97e03f52
JK
1069 }
1070
f8064ab9
KKD
1071 dynptr_id = state->stack[spi].spilled_ptr.id;
1072 /* Invalidate any slices associated with this dynptr */
1073 bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({
1074 /* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */
1075 if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM)
1076 continue;
dbd8d228
KKD
1077 if (dreg->dynptr_id == dynptr_id)
1078 mark_reg_invalid(env, dreg);
f8064ab9 1079 }));
ef8fc7a0
KKD
1080
1081 /* Do not release reference state, we are destroying dynptr on stack,
1082 * not using some helper to release it. Just reset register.
1083 */
1084 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
1085 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);
1086
1087 /* Same reason as unmark_stack_slots_dynptr above */
1088 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1089 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;
1090
1091 return 0;
1092}
1093
7e0dac28 1094static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
97e03f52 1095{
7e0dac28
JK
1096 int spi;
1097
27060531
KKD
1098 if (reg->type == CONST_PTR_TO_DYNPTR)
1099 return false;
97e03f52 1100
7e0dac28
JK
1101 spi = dynptr_get_spi(env, reg);
1102
1103 /* -ERANGE (i.e. spi not falling into allocated stack slots) isn't an
1104 * error because this just means the stack state hasn't been updated yet.
1105 * We will do check_mem_access to check and update stack bounds later.
f5b625e5 1106 */
7e0dac28
JK
1107 if (spi < 0 && spi != -ERANGE)
1108 return false;
1109
1110 /* We don't need to check if the stack slots are marked by previous
1111 * dynptr initializations because we allow overwriting existing unreferenced
1112 * STACK_DYNPTR slots, see mark_stack_slots_dynptr which calls
1113 * destroy_if_dynptr_stack_slot to ensure dynptr objects at the slots we are
1114 * touching are completely destructed before we reinitialize them for a new
1115 * one. For referenced ones, destroy_if_dynptr_stack_slot returns an error early
1116 * instead of delaying it until the end where the user will get "Unreleased
379d4ba8
KKD
1117 * reference" error.
1118 */
97e03f52
JK
1119 return true;
1120}
1121
7e0dac28 1122static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
97e03f52
JK
1123{
1124 struct bpf_func_state *state = func(env, reg);
7e0dac28 1125 int i, spi;
97e03f52 1126
7e0dac28
JK
1127 /* This already represents first slot of initialized bpf_dynptr.
1128 *
1129 * CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to
1130 * check_func_arg_reg_off's logic, so we don't need to check its
1131 * offset and alignment.
1132 */
27060531
KKD
1133 if (reg->type == CONST_PTR_TO_DYNPTR)
1134 return true;
1135
7e0dac28 1136 spi = dynptr_get_spi(env, reg);
79168a66
KKD
1137 if (spi < 0)
1138 return false;
f5b625e5 1139 if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
97e03f52
JK
1140 return false;
1141
1142 for (i = 0; i < BPF_REG_SIZE; i++) {
1143 if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
1144 state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
1145 return false;
1146 }
1147
e9e315b4
RS
1148 return true;
1149}
1150
6b75bd3d
KKD
1151static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
1152 enum bpf_arg_type arg_type)
e9e315b4
RS
1153{
1154 struct bpf_func_state *state = func(env, reg);
1155 enum bpf_dynptr_type dynptr_type;
27060531 1156 int spi;
e9e315b4 1157
97e03f52
JK
1158 /* ARG_PTR_TO_DYNPTR takes any type of dynptr */
1159 if (arg_type == ARG_PTR_TO_DYNPTR)
1160 return true;
1161
e9e315b4 1162 dynptr_type = arg_to_dynptr_type(arg_type);
27060531
KKD
1163 if (reg->type == CONST_PTR_TO_DYNPTR) {
1164 return reg->dynptr.type == dynptr_type;
1165 } else {
79168a66
KKD
1166 spi = dynptr_get_spi(env, reg);
1167 if (spi < 0)
1168 return false;
27060531
KKD
1169 return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
1170 }
97e03f52
JK
1171}
1172
06accc87
AN
1173static void __mark_reg_known_zero(struct bpf_reg_state *reg);
1174
1175static int mark_stack_slots_iter(struct bpf_verifier_env *env,
1176 struct bpf_reg_state *reg, int insn_idx,
1177 struct btf *btf, u32 btf_id, int nr_slots)
1178{
1179 struct bpf_func_state *state = func(env, reg);
1180 int spi, i, j, id;
1181
1182 spi = iter_get_spi(env, reg, nr_slots);
1183 if (spi < 0)
1184 return spi;
1185
1186 id = acquire_reference_state(env, insn_idx);
1187 if (id < 0)
1188 return id;
1189
1190 for (i = 0; i < nr_slots; i++) {
1191 struct bpf_stack_state *slot = &state->stack[spi - i];
1192 struct bpf_reg_state *st = &slot->spilled_ptr;
1193
1194 __mark_reg_known_zero(st);
1195 st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
1196 st->live |= REG_LIVE_WRITTEN;
1197 st->ref_obj_id = i == 0 ? id : 0;
1198 st->iter.btf = btf;
1199 st->iter.btf_id = btf_id;
1200 st->iter.state = BPF_ITER_STATE_ACTIVE;
1201 st->iter.depth = 0;
1202
1203 for (j = 0; j < BPF_REG_SIZE; j++)
1204 slot->slot_type[j] = STACK_ITER;
1205
1206 mark_stack_slot_scratched(env, spi - i);
1207 }
1208
1209 return 0;
1210}
1211
1212static int unmark_stack_slots_iter(struct bpf_verifier_env *env,
1213 struct bpf_reg_state *reg, int nr_slots)
1214{
1215 struct bpf_func_state *state = func(env, reg);
1216 int spi, i, j;
1217
1218 spi = iter_get_spi(env, reg, nr_slots);
1219 if (spi < 0)
1220 return spi;
1221
1222 for (i = 0; i < nr_slots; i++) {
1223 struct bpf_stack_state *slot = &state->stack[spi - i];
1224 struct bpf_reg_state *st = &slot->spilled_ptr;
1225
1226 if (i == 0)
1227 WARN_ON_ONCE(release_reference(env, st->ref_obj_id));
1228
1229 __mark_reg_not_init(env, st);
1230
1231 /* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */
1232 st->live |= REG_LIVE_WRITTEN;
1233
1234 for (j = 0; j < BPF_REG_SIZE; j++)
1235 slot->slot_type[j] = STACK_INVALID;
1236
1237 mark_stack_slot_scratched(env, spi - i);
1238 }
1239
1240 return 0;
1241}
1242
1243static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env,
1244 struct bpf_reg_state *reg, int nr_slots)
1245{
1246 struct bpf_func_state *state = func(env, reg);
1247 int spi, i, j;
1248
1249 /* For -ERANGE (i.e. spi not falling into allocated stack slots), we
1250 * will do check_mem_access to check and update stack bounds later, so
1251 * return true for that case.
1252 */
1253 spi = iter_get_spi(env, reg, nr_slots);
1254 if (spi == -ERANGE)
1255 return true;
1256 if (spi < 0)
1257 return false;
1258
1259 for (i = 0; i < nr_slots; i++) {
1260 struct bpf_stack_state *slot = &state->stack[spi - i];
1261
1262 for (j = 0; j < BPF_REG_SIZE; j++)
1263 if (slot->slot_type[j] == STACK_ITER)
1264 return false;
1265 }
1266
1267 return true;
1268}
1269
1270static bool is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
1271 struct btf *btf, u32 btf_id, int nr_slots)
1272{
1273 struct bpf_func_state *state = func(env, reg);
1274 int spi, i, j;
1275
1276 spi = iter_get_spi(env, reg, nr_slots);
1277 if (spi < 0)
1278 return false;
1279
1280 for (i = 0; i < nr_slots; i++) {
1281 struct bpf_stack_state *slot = &state->stack[spi - i];
1282 struct bpf_reg_state *st = &slot->spilled_ptr;
1283
1284 /* only main (first) slot has ref_obj_id set */
1285 if (i == 0 && !st->ref_obj_id)
1286 return false;
1287 if (i != 0 && st->ref_obj_id)
1288 return false;
1289 if (st->iter.btf != btf || st->iter.btf_id != btf_id)
1290 return false;
1291
1292 for (j = 0; j < BPF_REG_SIZE; j++)
1293 if (slot->slot_type[j] != STACK_ITER)
1294 return false;
1295 }
1296
1297 return true;
1298}
1299
1300/* Check if given stack slot is "special":
1301 * - spilled register state (STACK_SPILL);
1302 * - dynptr state (STACK_DYNPTR);
1303 * - iter state (STACK_ITER).
1304 */
1305static bool is_stack_slot_special(const struct bpf_stack_state *stack)
1306{
1307 enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1];
1308
1309 switch (type) {
1310 case STACK_SPILL:
1311 case STACK_DYNPTR:
1312 case STACK_ITER:
1313 return true;
1314 case STACK_INVALID:
1315 case STACK_MISC:
1316 case STACK_ZERO:
1317 return false;
1318 default:
1319 WARN_ONCE(1, "unknown stack slot type %d\n", type);
1320 return true;
1321 }
1322}
1323
27113c59
MKL
1324/* The reg state of a pointer or a bounded scalar was saved when
1325 * it was spilled to the stack.
1326 */
1327static bool is_spilled_reg(const struct bpf_stack_state *stack)
1328{
1329 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
1330}
1331
407958a0
AN
1332static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack)
1333{
1334 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL &&
1335 stack->spilled_ptr.type == SCALAR_VALUE;
1336}
1337
354e8f19
MKL
1338static void scrub_spilled_slot(u8 *stype)
1339{
1340 if (*stype != STACK_INVALID)
1341 *stype = STACK_MISC;
1342}
1343
61bd5218 1344static void print_verifier_state(struct bpf_verifier_env *env,
0f55f9ed
CL
1345 const struct bpf_func_state *state,
1346 bool print_all)
17a52670 1347{
f4d7e40a 1348 const struct bpf_reg_state *reg;
17a52670
AS
1349 enum bpf_reg_type t;
1350 int i;
1351
f4d7e40a
AS
1352 if (state->frameno)
1353 verbose(env, " frame%d:", state->frameno);
17a52670 1354 for (i = 0; i < MAX_BPF_REG; i++) {
1a0dc1ac
AS
1355 reg = &state->regs[i];
1356 t = reg->type;
17a52670
AS
1357 if (t == NOT_INIT)
1358 continue;
0f55f9ed
CL
1359 if (!print_all && !reg_scratched(env, i))
1360 continue;
4e92024a
AS
1361 verbose(env, " R%d", i);
1362 print_liveness(env, reg->live);
7df5072c 1363 verbose(env, "=");
b5dc0163
AS
1364 if (t == SCALAR_VALUE && reg->precise)
1365 verbose(env, "P");
f1174f77
EC
1366 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
1367 tnum_is_const(reg->var_off)) {
1368 /* reg->off should be 0 for SCALAR_VALUE */
7df5072c 1369 verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
61bd5218 1370 verbose(env, "%lld", reg->var_off.value + reg->off);
f1174f77 1371 } else {
7df5072c
ML
1372 const char *sep = "";
1373
1374 verbose(env, "%s", reg_type_str(env, t));
5844101a 1375 if (base_type(t) == PTR_TO_BTF_ID)
b32a5dae 1376 verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id));
7df5072c
ML
1377 verbose(env, "(");
1378/*
1379 * _a stands for append, was shortened to avoid multiline statements below.
1380 * This macro is used to output a comma separated list of attributes.
1381 */
1382#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })
1383
1384 if (reg->id)
1385 verbose_a("id=%d", reg->id);
a28ace78 1386 if (reg->ref_obj_id)
7df5072c 1387 verbose_a("ref_obj_id=%d", reg->ref_obj_id);
6a3cd331
DM
1388 if (type_is_non_owning_ref(reg->type))
1389 verbose_a("%s", "non_own_ref");
f1174f77 1390 if (t != SCALAR_VALUE)
7df5072c 1391 verbose_a("off=%d", reg->off);
de8f3a83 1392 if (type_is_pkt_pointer(t))
7df5072c 1393 verbose_a("r=%d", reg->range);
c25b2ae1
HL
1394 else if (base_type(t) == CONST_PTR_TO_MAP ||
1395 base_type(t) == PTR_TO_MAP_KEY ||
1396 base_type(t) == PTR_TO_MAP_VALUE)
7df5072c
ML
1397 verbose_a("ks=%d,vs=%d",
1398 reg->map_ptr->key_size,
1399 reg->map_ptr->value_size);
7d1238f2
EC
1400 if (tnum_is_const(reg->var_off)) {
1401 /* Typically an immediate SCALAR_VALUE, but
1402 * could be a pointer whose offset is too big
1403 * for reg->off
1404 */
7df5072c 1405 verbose_a("imm=%llx", reg->var_off.value);
7d1238f2
EC
1406 } else {
1407 if (reg->smin_value != reg->umin_value &&
1408 reg->smin_value != S64_MIN)
7df5072c 1409 verbose_a("smin=%lld", (long long)reg->smin_value);
7d1238f2
EC
1410 if (reg->smax_value != reg->umax_value &&
1411 reg->smax_value != S64_MAX)
7df5072c 1412 verbose_a("smax=%lld", (long long)reg->smax_value);
7d1238f2 1413 if (reg->umin_value != 0)
7df5072c 1414 verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
7d1238f2 1415 if (reg->umax_value != U64_MAX)
7df5072c 1416 verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
7d1238f2
EC
1417 if (!tnum_is_unknown(reg->var_off)) {
1418 char tn_buf[48];
f1174f77 1419
7d1238f2 1420 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
7df5072c 1421 verbose_a("var_off=%s", tn_buf);
7d1238f2 1422 }
3f50f132
JF
1423 if (reg->s32_min_value != reg->smin_value &&
1424 reg->s32_min_value != S32_MIN)
7df5072c 1425 verbose_a("s32_min=%d", (int)(reg->s32_min_value));
3f50f132
JF
1426 if (reg->s32_max_value != reg->smax_value &&
1427 reg->s32_max_value != S32_MAX)
7df5072c 1428 verbose_a("s32_max=%d", (int)(reg->s32_max_value));
3f50f132
JF
1429 if (reg->u32_min_value != reg->umin_value &&
1430 reg->u32_min_value != U32_MIN)
7df5072c 1431 verbose_a("u32_min=%d", (int)(reg->u32_min_value));
3f50f132
JF
1432 if (reg->u32_max_value != reg->umax_value &&
1433 reg->u32_max_value != U32_MAX)
7df5072c 1434 verbose_a("u32_max=%d", (int)(reg->u32_max_value));
f1174f77 1435 }
7df5072c
ML
1436#undef verbose_a
1437
61bd5218 1438 verbose(env, ")");
f1174f77 1439 }
17a52670 1440 }
638f5b90 1441 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
8efea21d
EC
1442 char types_buf[BPF_REG_SIZE + 1];
1443 bool valid = false;
1444 int j;
1445
1446 for (j = 0; j < BPF_REG_SIZE; j++) {
1447 if (state->stack[i].slot_type[j] != STACK_INVALID)
1448 valid = true;
d54e0f6c 1449 types_buf[j] = slot_type_char[state->stack[i].slot_type[j]];
8efea21d
EC
1450 }
1451 types_buf[BPF_REG_SIZE] = 0;
1452 if (!valid)
1453 continue;
0f55f9ed
CL
1454 if (!print_all && !stack_slot_scratched(env, i))
1455 continue;
d54e0f6c
AN
1456 switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) {
1457 case STACK_SPILL:
b5dc0163
AS
1458 reg = &state->stack[i].spilled_ptr;
1459 t = reg->type;
d54e0f6c
AN
1460
1461 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
1462 print_liveness(env, reg->live);
7df5072c 1463 verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
b5dc0163
AS
1464 if (t == SCALAR_VALUE && reg->precise)
1465 verbose(env, "P");
1466 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
1467 verbose(env, "%lld", reg->var_off.value + reg->off);
d54e0f6c
AN
1468 break;
1469 case STACK_DYNPTR:
1470 i += BPF_DYNPTR_NR_SLOTS - 1;
1471 reg = &state->stack[i].spilled_ptr;
1472
1473 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
1474 print_liveness(env, reg->live);
1475 verbose(env, "=dynptr_%s", dynptr_type_str(reg->dynptr.type));
1476 if (reg->ref_obj_id)
1477 verbose(env, "(ref_id=%d)", reg->ref_obj_id);
1478 break;
06accc87
AN
1479 case STACK_ITER:
1480 /* only main slot has ref_obj_id set; skip others */
1481 reg = &state->stack[i].spilled_ptr;
1482 if (!reg->ref_obj_id)
1483 continue;
1484
1485 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
1486 print_liveness(env, reg->live);
1487 verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)",
1488 iter_type_str(reg->iter.btf, reg->iter.btf_id),
1489 reg->ref_obj_id, iter_state_str(reg->iter.state),
1490 reg->iter.depth);
1491 break;
d54e0f6c
AN
1492 case STACK_MISC:
1493 case STACK_ZERO:
1494 default:
1495 reg = &state->stack[i].spilled_ptr;
1496
1497 for (j = 0; j < BPF_REG_SIZE; j++)
1498 types_buf[j] = slot_type_char[state->stack[i].slot_type[j]];
1499 types_buf[BPF_REG_SIZE] = 0;
1500
1501 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
1502 print_liveness(env, reg->live);
8efea21d 1503 verbose(env, "=%s", types_buf);
d54e0f6c 1504 break;
b5dc0163 1505 }
17a52670 1506 }
fd978bf7
JS
1507 if (state->acquired_refs && state->refs[0].id) {
1508 verbose(env, " refs=%d", state->refs[0].id);
1509 for (i = 1; i < state->acquired_refs; i++)
1510 if (state->refs[i].id)
1511 verbose(env, ",%d", state->refs[i].id);
1512 }
bfc6bb74
AS
1513 if (state->in_callback_fn)
1514 verbose(env, " cb");
1515 if (state->in_async_callback_fn)
1516 verbose(env, " async_cb");
61bd5218 1517 verbose(env, "\n");
0f55f9ed 1518 mark_verifier_state_clean(env);
17a52670
AS
1519}
1520
2e576648
CL
1521static inline u32 vlog_alignment(u32 pos)
1522{
1523 return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
1524 BPF_LOG_MIN_ALIGNMENT) - pos - 1;
1525}
1526
1527static void print_insn_state(struct bpf_verifier_env *env,
1528 const struct bpf_func_state *state)
1529{
12166409 1530 if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
2e576648 1531 /* remove new line character */
12166409
AN
1532 bpf_vlog_reset(&env->log, env->prev_log_pos - 1);
1533 verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' ');
2e576648
CL
1534 } else {
1535 verbose(env, "%d:", env->insn_idx);
1536 }
1537 print_verifier_state(env, state, false);
17a52670
AS
1538}
1539
c69431aa
LB
1540/* copy array src of length n * size bytes to dst. dst is reallocated if it's too
1541 * small to hold src. This is different from krealloc since we don't want to preserve
1542 * the contents of dst.
1543 *
1544 * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
1545 * not be allocated.
638f5b90 1546 */
c69431aa 1547static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
638f5b90 1548{
45435d8d
KC
1549 size_t alloc_bytes;
1550 void *orig = dst;
c69431aa
LB
1551 size_t bytes;
1552
1553 if (ZERO_OR_NULL_PTR(src))
1554 goto out;
1555
1556 if (unlikely(check_mul_overflow(n, size, &bytes)))
1557 return NULL;
1558
45435d8d
KC
1559 alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes));
1560 dst = krealloc(orig, alloc_bytes, flags);
1561 if (!dst) {
1562 kfree(orig);
1563 return NULL;
c69431aa
LB
1564 }
1565
1566 memcpy(dst, src, bytes);
1567out:
1568 return dst ? dst : ZERO_SIZE_PTR;
1569}
1570
1571/* resize an array from old_n items to new_n items. the array is reallocated if it's too
1572 * small to hold new_n items. new items are zeroed out if the array grows.
1573 *
1574 * Contrary to krealloc_array, does not free arr if new_n is zero.
1575 */
1576static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
1577{
ceb35b66 1578 size_t alloc_size;
42378a9c
KC
1579 void *new_arr;
1580
c69431aa
LB
1581 if (!new_n || old_n == new_n)
1582 goto out;
1583
ceb35b66
KC
1584 alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
1585 new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
42378a9c
KC
1586 if (!new_arr) {
1587 kfree(arr);
c69431aa 1588 return NULL;
42378a9c
KC
1589 }
1590 arr = new_arr;
c69431aa
LB
1591
1592 if (new_n > old_n)
1593 memset(arr + old_n * size, 0, (new_n - old_n) * size);
1594
1595out:
1596 return arr ? arr : ZERO_SIZE_PTR;
1597}
1598
1599static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1600{
1601 dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
1602 sizeof(struct bpf_reference_state), GFP_KERNEL);
1603 if (!dst->refs)
1604 return -ENOMEM;
1605
1606 dst->acquired_refs = src->acquired_refs;
1607 return 0;
1608}
1609
1610static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
1611{
1612 size_t n = src->allocated_stack / BPF_REG_SIZE;
1613
1614 dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
1615 GFP_KERNEL);
1616 if (!dst->stack)
1617 return -ENOMEM;
1618
1619 dst->allocated_stack = src->allocated_stack;
1620 return 0;
1621}
1622
1623static int resize_reference_state(struct bpf_func_state *state, size_t n)
1624{
1625 state->refs = realloc_array(state->refs, state->acquired_refs, n,
1626 sizeof(struct bpf_reference_state));
1627 if (!state->refs)
1628 return -ENOMEM;
1629
1630 state->acquired_refs = n;
1631 return 0;
1632}
1633
1634static int grow_stack_state(struct bpf_func_state *state, int size)
1635{
1636 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
1637
1638 if (old_n >= n)
1639 return 0;
1640
1641 state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
1642 if (!state->stack)
1643 return -ENOMEM;
1644
1645 state->allocated_stack = size;
1646 return 0;
fd978bf7
JS
1647}
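/* Editor's note: sizes here are in bytes while realloc_array() counts
 * BPF_REG_SIZE (8-byte) slots. E.g. growing allocated_stack from 16 to a
 * requested size of 40 goes from 2 to 5 slots, zeroing the 3 fresh
 * bpf_stack_state entries.
 */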
1648
1649/* Acquire a pointer id from the env and update the state->refs to include
1650 * this new pointer reference.
 1651 * On success, returns a valid pointer id to associate with the register.
 1652 * On failure, returns a negative errno.
638f5b90 1653 */
fd978bf7 1654static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
638f5b90 1655{
fd978bf7
JS
1656 struct bpf_func_state *state = cur_func(env);
1657 int new_ofs = state->acquired_refs;
1658 int id, err;
1659
c69431aa 1660 err = resize_reference_state(state, state->acquired_refs + 1);
fd978bf7
JS
1661 if (err)
1662 return err;
1663 id = ++env->id_gen;
1664 state->refs[new_ofs].id = id;
1665 state->refs[new_ofs].insn_idx = insn_idx;
9d9d00ac 1666 state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0;
638f5b90 1667
fd978bf7
JS
1668 return id;
1669}
1670
1671/* release function corresponding to acquire_reference_state(). Idempotent. */
46f8bc92 1672static int release_reference_state(struct bpf_func_state *state, int ptr_id)
fd978bf7
JS
1673{
1674 int i, last_idx;
1675
fd978bf7
JS
1676 last_idx = state->acquired_refs - 1;
1677 for (i = 0; i < state->acquired_refs; i++) {
1678 if (state->refs[i].id == ptr_id) {
9d9d00ac
KKD
1679 /* Cannot release caller references in callbacks */
1680 if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
1681 return -EINVAL;
fd978bf7
JS
1682 if (last_idx && i != last_idx)
1683 memcpy(&state->refs[i], &state->refs[last_idx],
1684 sizeof(*state->refs));
1685 memset(&state->refs[last_idx], 0, sizeof(*state->refs));
1686 state->acquired_refs--;
638f5b90 1687 return 0;
638f5b90 1688 }
638f5b90 1689 }
46f8bc92 1690 return -EINVAL;
fd978bf7
JS
1691}
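/* Editor's sketch of the intended acquire/release pairing (a hypothetical
 * caller; the real callers live in the helper/kfunc handling code):
 *
 *	id = acquire_reference_state(env, insn_idx);
 *	if (id < 0)
 *		return id;
 *	regs[BPF_REG_0].id = id;
 *	regs[BPF_REG_0].ref_obj_id = id;
 *	...
 *	err = release_reference_state(cur_func(env), reg->ref_obj_id);
 */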
1692
f4d7e40a
AS
1693static void free_func_state(struct bpf_func_state *state)
1694{
5896351e
AS
1695 if (!state)
1696 return;
fd978bf7 1697 kfree(state->refs);
f4d7e40a
AS
1698 kfree(state->stack);
1699 kfree(state);
1700}
1701
b5dc0163
AS
1702static void clear_jmp_history(struct bpf_verifier_state *state)
1703{
1704 kfree(state->jmp_history);
1705 state->jmp_history = NULL;
1706 state->jmp_history_cnt = 0;
1707}
1708
1969db47
AS
1709static void free_verifier_state(struct bpf_verifier_state *state,
1710 bool free_self)
638f5b90 1711{
f4d7e40a
AS
1712 int i;
1713
1714 for (i = 0; i <= state->curframe; i++) {
1715 free_func_state(state->frame[i]);
1716 state->frame[i] = NULL;
1717 }
b5dc0163 1718 clear_jmp_history(state);
1969db47
AS
1719 if (free_self)
1720 kfree(state);
638f5b90
AS
1721}
1722
1723/* copy verifier state from src to dst growing dst stack space
1724 * when necessary to accommodate larger src stack
1725 */
f4d7e40a
AS
1726static int copy_func_state(struct bpf_func_state *dst,
1727 const struct bpf_func_state *src)
638f5b90
AS
1728{
1729 int err;
1730
fd978bf7
JS
1731 memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
1732 err = copy_reference_state(dst, src);
638f5b90
AS
1733 if (err)
1734 return err;
638f5b90
AS
1735 return copy_stack_state(dst, src);
1736}
1737
f4d7e40a
AS
1738static int copy_verifier_state(struct bpf_verifier_state *dst_state,
1739 const struct bpf_verifier_state *src)
1740{
1741 struct bpf_func_state *dst;
1742 int i, err;
1743
06ab6a50
LB
1744 dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
1745 src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
1746 GFP_USER);
1747 if (!dst_state->jmp_history)
1748 return -ENOMEM;
b5dc0163
AS
1749 dst_state->jmp_history_cnt = src->jmp_history_cnt;
1750
f4d7e40a
AS
 1751 /* if dst has more stack frames than src, free them */
1752 for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
1753 free_func_state(dst_state->frame[i]);
1754 dst_state->frame[i] = NULL;
1755 }
979d63d5 1756 dst_state->speculative = src->speculative;
9bb00b28 1757 dst_state->active_rcu_lock = src->active_rcu_lock;
f4d7e40a 1758 dst_state->curframe = src->curframe;
d0d78c1d
KKD
1759 dst_state->active_lock.ptr = src->active_lock.ptr;
1760 dst_state->active_lock.id = src->active_lock.id;
2589726d
AS
1761 dst_state->branches = src->branches;
1762 dst_state->parent = src->parent;
b5dc0163
AS
1763 dst_state->first_insn_idx = src->first_insn_idx;
1764 dst_state->last_insn_idx = src->last_insn_idx;
f4d7e40a
AS
1765 for (i = 0; i <= src->curframe; i++) {
1766 dst = dst_state->frame[i];
1767 if (!dst) {
1768 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1769 if (!dst)
1770 return -ENOMEM;
1771 dst_state->frame[i] = dst;
1772 }
1773 err = copy_func_state(dst, src->frame[i]);
1774 if (err)
1775 return err;
1776 }
1777 return 0;
1778}
1779
2589726d
AS
1780static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1781{
1782 while (st) {
1783 u32 br = --st->branches;
1784
1785 /* WARN_ON(br > 1) technically makes sense here,
1786 * but see comment in push_stack(), hence:
1787 */
1788 WARN_ONCE((int)br < 0,
1789 "BUG update_branch_counts:branches_to_explore=%d\n",
1790 br);
1791 if (br)
1792 break;
1793 st = st->parent;
1794 }
1795}
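/* Editor's sketch: e.g. with st->branches == 1 and st->parent->branches == 2,
 * one call decrements st to 0, walks up, decrements the parent to 1 and
 * stops there: only fully-explored states release their parent's count.
 */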
1796
638f5b90 1797static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
6f8a57cc 1798 int *insn_idx, bool pop_log)
638f5b90
AS
1799{
1800 struct bpf_verifier_state *cur = env->cur_state;
1801 struct bpf_verifier_stack_elem *elem, *head = env->head;
1802 int err;
17a52670
AS
1803
1804 if (env->head == NULL)
638f5b90 1805 return -ENOENT;
17a52670 1806
638f5b90
AS
1807 if (cur) {
1808 err = copy_verifier_state(cur, &head->st);
1809 if (err)
1810 return err;
1811 }
6f8a57cc
AN
1812 if (pop_log)
1813 bpf_vlog_reset(&env->log, head->log_pos);
638f5b90
AS
1814 if (insn_idx)
1815 *insn_idx = head->insn_idx;
17a52670 1816 if (prev_insn_idx)
638f5b90
AS
1817 *prev_insn_idx = head->prev_insn_idx;
1818 elem = head->next;
1969db47 1819 free_verifier_state(&head->st, false);
638f5b90 1820 kfree(head);
17a52670
AS
1821 env->head = elem;
1822 env->stack_size--;
638f5b90 1823 return 0;
17a52670
AS
1824}
1825
58e2af8b 1826static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
979d63d5
DB
1827 int insn_idx, int prev_insn_idx,
1828 bool speculative)
17a52670 1829{
638f5b90 1830 struct bpf_verifier_state *cur = env->cur_state;
58e2af8b 1831 struct bpf_verifier_stack_elem *elem;
638f5b90 1832 int err;
17a52670 1833
638f5b90 1834 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
17a52670
AS
1835 if (!elem)
1836 goto err;
1837
17a52670
AS
1838 elem->insn_idx = insn_idx;
1839 elem->prev_insn_idx = prev_insn_idx;
1840 elem->next = env->head;
12166409 1841 elem->log_pos = env->log.end_pos;
17a52670
AS
1842 env->head = elem;
1843 env->stack_size++;
1969db47
AS
1844 err = copy_verifier_state(&elem->st, cur);
1845 if (err)
1846 goto err;
979d63d5 1847 elem->st.speculative |= speculative;
b285fcb7
AS
1848 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1849 verbose(env, "The sequence of %d jumps is too complex.\n",
1850 env->stack_size);
17a52670
AS
1851 goto err;
1852 }
2589726d
AS
1853 if (elem->st.parent) {
1854 ++elem->st.parent->branches;
1855 /* WARN_ON(branches > 2) technically makes sense here,
1856 * but
1857 * 1. speculative states will bump 'branches' for non-branch
1858 * instructions
1859 * 2. is_state_visited() heuristics may decide not to create
1860 * a new state for a sequence of branches and all such current
1861 * and cloned states will be pointing to a single parent state
1862 * which might have large 'branches' count.
1863 */
1864 }
17a52670
AS
1865 return &elem->st;
1866err:
5896351e
AS
1867 free_verifier_state(env->cur_state, true);
1868 env->cur_state = NULL;
17a52670 1869 /* pop all elements and return */
6f8a57cc 1870 while (!pop_stack(env, NULL, NULL, false));
17a52670
AS
1871 return NULL;
1872}
1873
1874#define CALLER_SAVED_REGS 6
1875static const int caller_saved[CALLER_SAVED_REGS] = {
1876 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
1877};
1878
e688c3db
AS
1879/* This helper doesn't clear reg->id */
1880static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
b03c9f9f 1881{
b03c9f9f
EC
1882 reg->var_off = tnum_const(imm);
1883 reg->smin_value = (s64)imm;
1884 reg->smax_value = (s64)imm;
1885 reg->umin_value = imm;
1886 reg->umax_value = imm;
3f50f132
JF
1887
1888 reg->s32_min_value = (s32)imm;
1889 reg->s32_max_value = (s32)imm;
1890 reg->u32_min_value = (u32)imm;
1891 reg->u32_max_value = (u32)imm;
1892}
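/* Editor's worked example: ___mark_reg_known(reg, 5) sets
 * var_off == tnum_const(5) (value 5, mask 0) and collapses all four 64-bit
 * and all four 32-bit min/max bounds to 5, i.e. the register is now tracked
 * as exactly the constant 5.
 */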
1893
e688c3db
AS
1894/* Mark the unknown part of a register (variable offset or scalar value) as
1895 * known to have the value @imm.
1896 */
1897static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1898{
a73bf9f2 1899 /* Clear off and union(map_ptr, range) */
e688c3db
AS
1900 memset(((u8 *)reg) + sizeof(reg->type), 0,
1901 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
a73bf9f2
AN
1902 reg->id = 0;
1903 reg->ref_obj_id = 0;
e688c3db
AS
1904 ___mark_reg_known(reg, imm);
1905}
1906
3f50f132
JF
1907static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
1908{
1909 reg->var_off = tnum_const_subreg(reg->var_off, imm);
1910 reg->s32_min_value = (s32)imm;
1911 reg->s32_max_value = (s32)imm;
1912 reg->u32_min_value = (u32)imm;
1913 reg->u32_max_value = (u32)imm;
b03c9f9f
EC
1914}
1915
f1174f77
EC
1916/* Mark the 'variable offset' part of a register as zero. This should be
1917 * used only on registers holding a pointer type.
1918 */
1919static void __mark_reg_known_zero(struct bpf_reg_state *reg)
a9789ef9 1920{
b03c9f9f 1921 __mark_reg_known(reg, 0);
f1174f77 1922}
a9789ef9 1923
cc2b14d5
AS
1924static void __mark_reg_const_zero(struct bpf_reg_state *reg)
1925{
1926 __mark_reg_known(reg, 0);
cc2b14d5
AS
1927 reg->type = SCALAR_VALUE;
1928}
1929
61bd5218
JK
1930static void mark_reg_known_zero(struct bpf_verifier_env *env,
1931 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
1932{
1933 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 1934 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
f1174f77
EC
1935 /* Something bad happened, let's kill all regs */
1936 for (regno = 0; regno < MAX_BPF_REG; regno++)
f54c7898 1937 __mark_reg_not_init(env, regs + regno);
f1174f77
EC
1938 return;
1939 }
1940 __mark_reg_known_zero(regs + regno);
1941}
1942
27060531 1943static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
f8064ab9 1944 bool first_slot, int dynptr_id)
27060531
KKD
1945{
1946 /* reg->type has no meaning for STACK_DYNPTR, but when we set reg for
1947 * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply
1948 * set it unconditionally as it is ignored for STACK_DYNPTR anyway.
1949 */
1950 __mark_reg_known_zero(reg);
1951 reg->type = CONST_PTR_TO_DYNPTR;
f8064ab9
KKD
1952 /* Give each dynptr a unique id to uniquely associate slices to it. */
1953 reg->id = dynptr_id;
27060531
KKD
1954 reg->dynptr.type = type;
1955 reg->dynptr.first_slot = first_slot;
1956}
1957
4ddb7416
DB
1958static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
1959{
c25b2ae1 1960 if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
4ddb7416
DB
1961 const struct bpf_map *map = reg->map_ptr;
1962
1963 if (map->inner_map_meta) {
1964 reg->type = CONST_PTR_TO_MAP;
1965 reg->map_ptr = map->inner_map_meta;
3e8ce298
AS
 1966 /* transfer reg's id, which is unique for every map_lookup_elem,
 1967 * as the UID of the inner map.
1968 */
db559117 1969 if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER))
34d11a44 1970 reg->map_uid = reg->id;
4ddb7416
DB
1971 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
1972 reg->type = PTR_TO_XDP_SOCK;
1973 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
1974 map->map_type == BPF_MAP_TYPE_SOCKHASH) {
1975 reg->type = PTR_TO_SOCKET;
1976 } else {
1977 reg->type = PTR_TO_MAP_VALUE;
1978 }
c25b2ae1 1979 return;
4ddb7416 1980 }
c25b2ae1
HL
1981
1982 reg->type &= ~PTR_MAYBE_NULL;
4ddb7416
DB
1983}
1984
5d92ddc3
DM
1985static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
1986 struct btf_field_graph_root *ds_head)
1987{
1988 __mark_reg_known_zero(&regs[regno]);
1989 regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC;
1990 regs[regno].btf = ds_head->btf;
1991 regs[regno].btf_id = ds_head->value_btf_id;
1992 regs[regno].off = ds_head->node_offset;
1993}
1994
de8f3a83
DB
1995static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1996{
1997 return type_is_pkt_pointer(reg->type);
1998}
1999
2000static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
2001{
2002 return reg_is_pkt_pointer(reg) ||
2003 reg->type == PTR_TO_PACKET_END;
2004}
2005
66e3a13e
JK
2006static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg)
2007{
2008 return base_type(reg->type) == PTR_TO_MEM &&
2009 (reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP);
2010}
2011
de8f3a83
DB
2012/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
2013static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
2014 enum bpf_reg_type which)
2015{
2016 /* The register can already have a range from prior markings.
2017 * This is fine as long as it hasn't been advanced from its
2018 * origin.
2019 */
2020 return reg->type == which &&
2021 reg->id == 0 &&
2022 reg->off == 0 &&
2023 tnum_equals_const(reg->var_off, 0);
2024}
2025
3f50f132
JF
2026/* Reset the min/max bounds of a register */
2027static void __mark_reg_unbounded(struct bpf_reg_state *reg)
2028{
2029 reg->smin_value = S64_MIN;
2030 reg->smax_value = S64_MAX;
2031 reg->umin_value = 0;
2032 reg->umax_value = U64_MAX;
2033
2034 reg->s32_min_value = S32_MIN;
2035 reg->s32_max_value = S32_MAX;
2036 reg->u32_min_value = 0;
2037 reg->u32_max_value = U32_MAX;
2038}
2039
2040static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
2041{
2042 reg->smin_value = S64_MIN;
2043 reg->smax_value = S64_MAX;
2044 reg->umin_value = 0;
2045 reg->umax_value = U64_MAX;
2046}
2047
2048static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
2049{
2050 reg->s32_min_value = S32_MIN;
2051 reg->s32_max_value = S32_MAX;
2052 reg->u32_min_value = 0;
2053 reg->u32_max_value = U32_MAX;
2054}
2055
2056static void __update_reg32_bounds(struct bpf_reg_state *reg)
2057{
2058 struct tnum var32_off = tnum_subreg(reg->var_off);
2059
2060 /* min signed is max(sign bit) | min(other bits) */
2061 reg->s32_min_value = max_t(s32, reg->s32_min_value,
2062 var32_off.value | (var32_off.mask & S32_MIN));
2063 /* max signed is min(sign bit) | max(other bits) */
2064 reg->s32_max_value = min_t(s32, reg->s32_max_value,
2065 var32_off.value | (var32_off.mask & S32_MAX));
2066 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
2067 reg->u32_max_value = min(reg->u32_max_value,
2068 (u32)(var32_off.value | var32_off.mask));
2069}
2070
2071static void __update_reg64_bounds(struct bpf_reg_state *reg)
b03c9f9f
EC
2072{
2073 /* min signed is max(sign bit) | min(other bits) */
2074 reg->smin_value = max_t(s64, reg->smin_value,
2075 reg->var_off.value | (reg->var_off.mask & S64_MIN));
2076 /* max signed is min(sign bit) | max(other bits) */
2077 reg->smax_value = min_t(s64, reg->smax_value,
2078 reg->var_off.value | (reg->var_off.mask & S64_MAX));
2079 reg->umin_value = max(reg->umin_value, reg->var_off.value);
2080 reg->umax_value = min(reg->umax_value,
2081 reg->var_off.value | reg->var_off.mask);
2082}
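/* Editor's worked example: starting from unbounded limits with
 * var_off == (value 0x0, mask 0x7), the known bits alone say 0 <= r <= 7,
 * so the clamps above produce umin 0, umax 7, smin 0 and smax 7 (the sign
 * bit is known clear, hence value | (mask & S64_MIN) == 0 and
 * value | (mask & S64_MAX) == 7).
 */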
2083
3f50f132
JF
2084static void __update_reg_bounds(struct bpf_reg_state *reg)
2085{
2086 __update_reg32_bounds(reg);
2087 __update_reg64_bounds(reg);
2088}
2089
b03c9f9f 2090/* Uses signed min/max values to inform unsigned, and vice-versa */
3f50f132
JF
2091static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
2092{
2093 /* Learn sign from signed bounds.
2094 * If we cannot cross the sign boundary, then signed and unsigned bounds
2095 * are the same, so combine. This works even in the negative case, e.g.
2096 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
2097 */
2098 if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
2099 reg->s32_min_value = reg->u32_min_value =
2100 max_t(u32, reg->s32_min_value, reg->u32_min_value);
2101 reg->s32_max_value = reg->u32_max_value =
2102 min_t(u32, reg->s32_max_value, reg->u32_max_value);
2103 return;
2104 }
2105 /* Learn sign from unsigned bounds. Signed bounds cross the sign
2106 * boundary, so we must be careful.
2107 */
2108 if ((s32)reg->u32_max_value >= 0) {
2109 /* Positive. We can't learn anything from the smin, but smax
2110 * is positive, hence safe.
2111 */
2112 reg->s32_min_value = reg->u32_min_value;
2113 reg->s32_max_value = reg->u32_max_value =
2114 min_t(u32, reg->s32_max_value, reg->u32_max_value);
2115 } else if ((s32)reg->u32_min_value < 0) {
2116 /* Negative. We can't learn anything from the smax, but smin
2117 * is negative, hence safe.
2118 */
2119 reg->s32_min_value = reg->u32_min_value =
2120 max_t(u32, reg->s32_min_value, reg->u32_min_value);
2121 reg->s32_max_value = reg->u32_max_value;
2122 }
2123}
2124
2125static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
b03c9f9f
EC
2126{
2127 /* Learn sign from signed bounds.
2128 * If we cannot cross the sign boundary, then signed and unsigned bounds
2129 * are the same, so combine. This works even in the negative case, e.g.
2130 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
2131 */
2132 if (reg->smin_value >= 0 || reg->smax_value < 0) {
2133 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
2134 reg->umin_value);
2135 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
2136 reg->umax_value);
2137 return;
2138 }
2139 /* Learn sign from unsigned bounds. Signed bounds cross the sign
2140 * boundary, so we must be careful.
2141 */
2142 if ((s64)reg->umax_value >= 0) {
2143 /* Positive. We can't learn anything from the smin, but smax
2144 * is positive, hence safe.
2145 */
2146 reg->smin_value = reg->umin_value;
2147 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
2148 reg->umax_value);
2149 } else if ((s64)reg->umin_value < 0) {
2150 /* Negative. We can't learn anything from the smax, but smin
2151 * is negative, hence safe.
2152 */
2153 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
2154 reg->umin_value);
2155 reg->smax_value = reg->umax_value;
2156 }
2157}
2158
3f50f132
JF
2159static void __reg_deduce_bounds(struct bpf_reg_state *reg)
2160{
2161 __reg32_deduce_bounds(reg);
2162 __reg64_deduce_bounds(reg);
2163}
2164
b03c9f9f
EC
2165/* Attempts to improve var_off based on unsigned min/max information */
2166static void __reg_bound_offset(struct bpf_reg_state *reg)
2167{
3f50f132
JF
2168 struct tnum var64_off = tnum_intersect(reg->var_off,
2169 tnum_range(reg->umin_value,
2170 reg->umax_value));
7be14c1c
DB
2171 struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off),
2172 tnum_range(reg->u32_min_value,
2173 reg->u32_max_value));
3f50f132
JF
2174
2175 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
b03c9f9f
EC
2176}
2177
3844d153
DB
2178static void reg_bounds_sync(struct bpf_reg_state *reg)
2179{
2180 /* We might have learned new bounds from the var_off. */
2181 __update_reg_bounds(reg);
2182 /* We might have learned something about the sign bit. */
2183 __reg_deduce_bounds(reg);
2184 /* We might have learned some bits from the bounds. */
2185 __reg_bound_offset(reg);
2186 /* Intersecting with the old var_off might have improved our bounds
2187 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2188 * then new var_off is (0; 0x7f...fc) which improves our umax.
2189 */
2190 __update_reg_bounds(reg);
2191}
2192
e572ff80
DB
2193static bool __reg32_bound_s64(s32 a)
2194{
2195 return a >= 0 && a <= S32_MAX;
2196}
2197
3f50f132 2198static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
b03c9f9f 2199{
3f50f132
JF
2200 reg->umin_value = reg->u32_min_value;
2201 reg->umax_value = reg->u32_max_value;
e572ff80
DB
2202
 2203 /* Attempt to pull 32-bit signed bounds into 64-bit bounds, but they must
 2204 * be positive; otherwise set to worst-case bounds and refine later
2205 * from tnum.
3f50f132 2206 */
e572ff80
DB
2207 if (__reg32_bound_s64(reg->s32_min_value) &&
2208 __reg32_bound_s64(reg->s32_max_value)) {
3a71dc36 2209 reg->smin_value = reg->s32_min_value;
e572ff80
DB
2210 reg->smax_value = reg->s32_max_value;
2211 } else {
3a71dc36 2212 reg->smin_value = 0;
e572ff80
DB
2213 reg->smax_value = U32_MAX;
2214 }
3f50f132
JF
2215}
2216
2217static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
2218{
 2219 /* special case when the upper 32 bits of a 64-bit register are
 2220 * zeroed. Typically happens after a zext or <<32, >>32 sequence,
 2221 * allowing us to use the 32-bit bounds directly.
2222 */
2223 if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
2224 __reg_assign_32_into_64(reg);
2225 } else {
2226 /* Otherwise the best we can do is push lower 32bit known and
2227 * unknown bits into register (var_off set from jmp logic)
2228 * then learn as much as possible from the 64-bit tnum
2229 * known and unknown bits. The previous smin/smax bounds are
2230 * invalid here because of jmp32 compare so mark them unknown
2231 * so they do not impact tnum bounds calculation.
2232 */
2233 __mark_reg64_unbounded(reg);
3f50f132 2234 }
3844d153 2235 reg_bounds_sync(reg);
3f50f132
JF
2236}
2237
2238static bool __reg64_bound_s32(s64 a)
2239{
388e2c0b 2240 return a >= S32_MIN && a <= S32_MAX;
3f50f132
JF
2241}
2242
2243static bool __reg64_bound_u32(u64 a)
2244{
b9979db8 2245 return a >= U32_MIN && a <= U32_MAX;
3f50f132
JF
2246}
2247
2248static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
2249{
2250 __mark_reg32_unbounded(reg);
b0270958 2251 if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
3f50f132 2252 reg->s32_min_value = (s32)reg->smin_value;
3f50f132 2253 reg->s32_max_value = (s32)reg->smax_value;
b0270958 2254 }
10bf4e83 2255 if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
3f50f132 2256 reg->u32_min_value = (u32)reg->umin_value;
3f50f132 2257 reg->u32_max_value = (u32)reg->umax_value;
10bf4e83 2258 }
3844d153 2259 reg_bounds_sync(reg);
b03c9f9f
EC
2260}
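/* Editor's sketch: e.g. a 64-bit signed range [-1, 5] fits in s32, so the
 * s32 bounds become [-1, 5]; the matching unsigned range runs up to
 * U64_MAX and fails __reg64_bound_u32(), so the u32 bounds stay at the
 * worst case until the reg_bounds_sync() above can improve them from
 * var_off.
 */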
2261
f1174f77 2262/* Mark a register as having a completely unknown (scalar) value. */
f54c7898
DB
2263static void __mark_reg_unknown(const struct bpf_verifier_env *env,
2264 struct bpf_reg_state *reg)
f1174f77 2265{
a9c676bc 2266 /*
a73bf9f2 2267 * Clear type, off, and union(map_ptr, range) and
a9c676bc
AS
2268 * padding between 'type' and union
2269 */
2270 memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
f1174f77 2271 reg->type = SCALAR_VALUE;
a73bf9f2
AN
2272 reg->id = 0;
2273 reg->ref_obj_id = 0;
f1174f77 2274 reg->var_off = tnum_unknown;
f4d7e40a 2275 reg->frameno = 0;
be2ef816 2276 reg->precise = !env->bpf_capable;
b03c9f9f 2277 __mark_reg_unbounded(reg);
f1174f77
EC
2278}
2279
61bd5218
JK
2280static void mark_reg_unknown(struct bpf_verifier_env *env,
2281 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
2282{
2283 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 2284 verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
19ceb417
AS
2285 /* Something bad happened, let's kill all regs except FP */
2286 for (regno = 0; regno < BPF_REG_FP; regno++)
f54c7898 2287 __mark_reg_not_init(env, regs + regno);
f1174f77
EC
2288 return;
2289 }
f54c7898 2290 __mark_reg_unknown(env, regs + regno);
f1174f77
EC
2291}
2292
f54c7898
DB
2293static void __mark_reg_not_init(const struct bpf_verifier_env *env,
2294 struct bpf_reg_state *reg)
f1174f77 2295{
f54c7898 2296 __mark_reg_unknown(env, reg);
f1174f77
EC
2297 reg->type = NOT_INIT;
2298}
2299
61bd5218
JK
2300static void mark_reg_not_init(struct bpf_verifier_env *env,
2301 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
2302{
2303 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 2304 verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
19ceb417
AS
2305 /* Something bad happened, let's kill all regs except FP */
2306 for (regno = 0; regno < BPF_REG_FP; regno++)
f54c7898 2307 __mark_reg_not_init(env, regs + regno);
f1174f77
EC
2308 return;
2309 }
f54c7898 2310 __mark_reg_not_init(env, regs + regno);
a9789ef9
DB
2311}
2312
41c48f3a
AI
2313static void mark_btf_ld_reg(struct bpf_verifier_env *env,
2314 struct bpf_reg_state *regs, u32 regno,
22dc4a0f 2315 enum bpf_reg_type reg_type,
c6f1bfe8
YS
2316 struct btf *btf, u32 btf_id,
2317 enum bpf_type_flag flag)
41c48f3a
AI
2318{
2319 if (reg_type == SCALAR_VALUE) {
2320 mark_reg_unknown(env, regs, regno);
2321 return;
2322 }
2323 mark_reg_known_zero(env, regs, regno);
c6f1bfe8 2324 regs[regno].type = PTR_TO_BTF_ID | flag;
22dc4a0f 2325 regs[regno].btf = btf;
41c48f3a
AI
2326 regs[regno].btf_id = btf_id;
2327}
2328
5327ed3d 2329#define DEF_NOT_SUBREG (0)
61bd5218 2330static void init_reg_state(struct bpf_verifier_env *env,
f4d7e40a 2331 struct bpf_func_state *state)
17a52670 2332{
f4d7e40a 2333 struct bpf_reg_state *regs = state->regs;
17a52670
AS
2334 int i;
2335
dc503a8a 2336 for (i = 0; i < MAX_BPF_REG; i++) {
61bd5218 2337 mark_reg_not_init(env, regs, i);
dc503a8a 2338 regs[i].live = REG_LIVE_NONE;
679c782d 2339 regs[i].parent = NULL;
5327ed3d 2340 regs[i].subreg_def = DEF_NOT_SUBREG;
dc503a8a 2341 }
17a52670
AS
2342
2343 /* frame pointer */
f1174f77 2344 regs[BPF_REG_FP].type = PTR_TO_STACK;
61bd5218 2345 mark_reg_known_zero(env, regs, BPF_REG_FP);
f4d7e40a 2346 regs[BPF_REG_FP].frameno = state->frameno;
6760bf2d
DB
2347}
2348
f4d7e40a
AS
2349#define BPF_MAIN_FUNC (-1)
2350static void init_func_state(struct bpf_verifier_env *env,
2351 struct bpf_func_state *state,
2352 int callsite, int frameno, int subprogno)
2353{
2354 state->callsite = callsite;
2355 state->frameno = frameno;
2356 state->subprogno = subprogno;
1bfe26fb 2357 state->callback_ret_range = tnum_range(0, 0);
f4d7e40a 2358 init_reg_state(env, state);
0f55f9ed 2359 mark_verifier_state_scratched(env);
f4d7e40a
AS
2360}
2361
bfc6bb74
AS
2362/* Similar to push_stack(), but for async callbacks */
2363static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
2364 int insn_idx, int prev_insn_idx,
2365 int subprog)
2366{
2367 struct bpf_verifier_stack_elem *elem;
2368 struct bpf_func_state *frame;
2369
2370 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
2371 if (!elem)
2372 goto err;
2373
2374 elem->insn_idx = insn_idx;
2375 elem->prev_insn_idx = prev_insn_idx;
2376 elem->next = env->head;
12166409 2377 elem->log_pos = env->log.end_pos;
bfc6bb74
AS
2378 env->head = elem;
2379 env->stack_size++;
2380 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
2381 verbose(env,
2382 "The sequence of %d jumps is too complex for async cb.\n",
2383 env->stack_size);
2384 goto err;
2385 }
2386 /* Unlike push_stack() do not copy_verifier_state().
2387 * The caller state doesn't matter.
 2388 * This is an async callback; it starts with a fresh stack.
2389 * Initialize it similar to do_check_common().
2390 */
2391 elem->st.branches = 1;
2392 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
2393 if (!frame)
2394 goto err;
2395 init_func_state(env, frame,
2396 BPF_MAIN_FUNC /* callsite */,
2397 0 /* frameno within this callchain */,
2398 subprog /* subprog number within this prog */);
2399 elem->st.frame[0] = frame;
2400 return &elem->st;
2401err:
2402 free_verifier_state(env->cur_state, true);
2403 env->cur_state = NULL;
2404 /* pop all elements and return */
2405 while (!pop_stack(env, NULL, NULL, false));
2406 return NULL;
2407}
2408
2409
17a52670
AS
2410enum reg_arg_type {
2411 SRC_OP, /* register is used as source operand */
2412 DST_OP, /* register is used as destination operand */
2413 DST_OP_NO_MARK /* same as above, check only, don't mark */
2414};
2415
cc8b0b92
AS
2416static int cmp_subprogs(const void *a, const void *b)
2417{
9c8105bd
JW
2418 return ((struct bpf_subprog_info *)a)->start -
2419 ((struct bpf_subprog_info *)b)->start;
cc8b0b92
AS
2420}
2421
2422static int find_subprog(struct bpf_verifier_env *env, int off)
2423{
9c8105bd 2424 struct bpf_subprog_info *p;
cc8b0b92 2425
9c8105bd
JW
2426 p = bsearch(&off, env->subprog_info, env->subprog_cnt,
2427 sizeof(env->subprog_info[0]), cmp_subprogs);
cc8b0b92
AS
2428 if (!p)
2429 return -ENOENT;
9c8105bd 2430 return p - env->subprog_info;
cc8b0b92
AS
2431
2432}
2433
2434static int add_subprog(struct bpf_verifier_env *env, int off)
2435{
2436 int insn_cnt = env->prog->len;
2437 int ret;
2438
2439 if (off >= insn_cnt || off < 0) {
2440 verbose(env, "call to invalid destination\n");
2441 return -EINVAL;
2442 }
2443 ret = find_subprog(env, off);
2444 if (ret >= 0)
282a0f46 2445 return ret;
4cb3d99c 2446 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
cc8b0b92
AS
2447 verbose(env, "too many subprograms\n");
2448 return -E2BIG;
2449 }
e6ac2450 2450 /* determine subprog starts. The end is one before the next starts */
9c8105bd
JW
2451 env->subprog_info[env->subprog_cnt++].start = off;
2452 sort(env->subprog_info, env->subprog_cnt,
2453 sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
282a0f46 2454 return env->subprog_cnt - 1;
cc8b0b92
AS
2455}
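/* Editor's sketch: subprog_info is kept sorted by ->start so that
 * find_subprog() can bsearch(). E.g. subprog entries at insns 0, 5 and 12
 * of a 20-insn program yield starts {0, 5, 12}, and the fake 'exit'
 * subprog appended later in add_subprog_and_kfunc() contributes
 * start == 20 as the end sentinel.
 */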
2456
2357672c
KKD
2457#define MAX_KFUNC_DESCS 256
2458#define MAX_KFUNC_BTFS 256
2459
e6ac2450
MKL
2460struct bpf_kfunc_desc {
2461 struct btf_func_model func_model;
2462 u32 func_id;
2463 s32 imm;
2357672c 2464 u16 offset;
1cf3bfc6 2465 unsigned long addr;
2357672c
KKD
2466};
2467
2468struct bpf_kfunc_btf {
2469 struct btf *btf;
2470 struct module *module;
2471 u16 offset;
e6ac2450
MKL
2472};
2473
e6ac2450 2474struct bpf_kfunc_desc_tab {
1cf3bfc6
IL
2475 /* Sorted by func_id (BTF ID) and offset (fd_array offset) during
2476 * verification. JITs do lookups by bpf_insn, where func_id may not be
2477 * available, therefore at the end of verification do_misc_fixups()
2478 * sorts this by imm and offset.
2479 */
e6ac2450
MKL
2480 struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
2481 u32 nr_descs;
2482};
2483
2357672c
KKD
2484struct bpf_kfunc_btf_tab {
2485 struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
2486 u32 nr_descs;
2487};
2488
2489static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
e6ac2450
MKL
2490{
2491 const struct bpf_kfunc_desc *d0 = a;
2492 const struct bpf_kfunc_desc *d1 = b;
2493
2494 /* func_id is not greater than BTF_MAX_TYPE */
2357672c
KKD
2495 return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
2496}
2497
2498static int kfunc_btf_cmp_by_off(const void *a, const void *b)
2499{
2500 const struct bpf_kfunc_btf *d0 = a;
2501 const struct bpf_kfunc_btf *d1 = b;
2502
2503 return d0->offset - d1->offset;
e6ac2450
MKL
2504}
2505
2506static const struct bpf_kfunc_desc *
2357672c 2507find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
e6ac2450
MKL
2508{
2509 struct bpf_kfunc_desc desc = {
2510 .func_id = func_id,
2357672c 2511 .offset = offset,
e6ac2450
MKL
2512 };
2513 struct bpf_kfunc_desc_tab *tab;
2514
2515 tab = prog->aux->kfunc_tab;
2516 return bsearch(&desc, tab->descs, tab->nr_descs,
2357672c
KKD
2517 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
2518}
2519
1cf3bfc6
IL
2520int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
2521 u16 btf_fd_idx, u8 **func_addr)
2522{
2523 const struct bpf_kfunc_desc *desc;
2524
2525 desc = find_kfunc_desc(prog, func_id, btf_fd_idx);
2526 if (!desc)
2527 return -EFAULT;
2528
2529 *func_addr = (u8 *)desc->addr;
2530 return 0;
2531}
2532
2357672c 2533static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
b202d844 2534 s16 offset)
2357672c
KKD
2535{
2536 struct bpf_kfunc_btf kf_btf = { .offset = offset };
2537 struct bpf_kfunc_btf_tab *tab;
2538 struct bpf_kfunc_btf *b;
2539 struct module *mod;
2540 struct btf *btf;
2541 int btf_fd;
2542
2543 tab = env->prog->aux->kfunc_btf_tab;
2544 b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
2545 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
2546 if (!b) {
2547 if (tab->nr_descs == MAX_KFUNC_BTFS) {
2548 verbose(env, "too many different module BTFs\n");
2549 return ERR_PTR(-E2BIG);
2550 }
2551
2552 if (bpfptr_is_null(env->fd_array)) {
2553 verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
2554 return ERR_PTR(-EPROTO);
2555 }
2556
2557 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
2558 offset * sizeof(btf_fd),
2559 sizeof(btf_fd)))
2560 return ERR_PTR(-EFAULT);
2561
2562 btf = btf_get_by_fd(btf_fd);
588cd7ef
KKD
2563 if (IS_ERR(btf)) {
2564 verbose(env, "invalid module BTF fd specified\n");
2357672c 2565 return btf;
588cd7ef 2566 }
2357672c
KKD
2567
2568 if (!btf_is_module(btf)) {
2569 verbose(env, "BTF fd for kfunc is not a module BTF\n");
2570 btf_put(btf);
2571 return ERR_PTR(-EINVAL);
2572 }
2573
2574 mod = btf_try_get_module(btf);
2575 if (!mod) {
2576 btf_put(btf);
2577 return ERR_PTR(-ENXIO);
2578 }
2579
2580 b = &tab->descs[tab->nr_descs++];
2581 b->btf = btf;
2582 b->module = mod;
2583 b->offset = offset;
2584
2585 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2586 kfunc_btf_cmp_by_off, NULL);
2587 }
2357672c 2588 return b->btf;
e6ac2450
MKL
2589}
2590
2357672c
KKD
2591void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
2592{
2593 if (!tab)
2594 return;
2595
2596 while (tab->nr_descs--) {
2597 module_put(tab->descs[tab->nr_descs].module);
2598 btf_put(tab->descs[tab->nr_descs].btf);
2599 }
2600 kfree(tab);
2601}
2602
43bf0878 2603static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
2357672c 2604{
2357672c
KKD
2605 if (offset) {
2606 if (offset < 0) {
2607 /* In the future, this can be allowed to increase limit
2608 * of fd index into fd_array, interpreted as u16.
2609 */
2610 verbose(env, "negative offset disallowed for kernel module function call\n");
2611 return ERR_PTR(-EINVAL);
2612 }
2613
b202d844 2614 return __find_kfunc_desc_btf(env, offset);
2357672c
KKD
2615 }
2616 return btf_vmlinux ?: ERR_PTR(-ENOENT);
e6ac2450
MKL
2617}
2618
2357672c 2619static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
e6ac2450
MKL
2620{
2621 const struct btf_type *func, *func_proto;
2357672c 2622 struct bpf_kfunc_btf_tab *btf_tab;
e6ac2450
MKL
2623 struct bpf_kfunc_desc_tab *tab;
2624 struct bpf_prog_aux *prog_aux;
2625 struct bpf_kfunc_desc *desc;
2626 const char *func_name;
2357672c 2627 struct btf *desc_btf;
8cbf062a 2628 unsigned long call_imm;
e6ac2450
MKL
2629 unsigned long addr;
2630 int err;
2631
2632 prog_aux = env->prog->aux;
2633 tab = prog_aux->kfunc_tab;
2357672c 2634 btf_tab = prog_aux->kfunc_btf_tab;
e6ac2450
MKL
2635 if (!tab) {
2636 if (!btf_vmlinux) {
2637 verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
2638 return -ENOTSUPP;
2639 }
2640
2641 if (!env->prog->jit_requested) {
2642 verbose(env, "JIT is required for calling kernel function\n");
2643 return -ENOTSUPP;
2644 }
2645
2646 if (!bpf_jit_supports_kfunc_call()) {
2647 verbose(env, "JIT does not support calling kernel function\n");
2648 return -ENOTSUPP;
2649 }
2650
2651 if (!env->prog->gpl_compatible) {
2652 verbose(env, "cannot call kernel function from non-GPL compatible program\n");
2653 return -EINVAL;
2654 }
2655
2656 tab = kzalloc(sizeof(*tab), GFP_KERNEL);
2657 if (!tab)
2658 return -ENOMEM;
2659 prog_aux->kfunc_tab = tab;
2660 }
2661
a5d82727
KKD
2662 /* func_id == 0 is always invalid, but instead of returning an error, be
 2663 * conservative and wait until the dead code elimination pass before returning
 2664 * an error, so that invalid calls that get pruned out may still appear in BPF programs
2665 * loaded from userspace. It is also required that offset be untouched
2666 * for such calls.
2667 */
2668 if (!func_id && !offset)
2669 return 0;
2670
2357672c
KKD
2671 if (!btf_tab && offset) {
2672 btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
2673 if (!btf_tab)
2674 return -ENOMEM;
2675 prog_aux->kfunc_btf_tab = btf_tab;
2676 }
2677
43bf0878 2678 desc_btf = find_kfunc_desc_btf(env, offset);
2357672c
KKD
2679 if (IS_ERR(desc_btf)) {
2680 verbose(env, "failed to find BTF for kernel function\n");
2681 return PTR_ERR(desc_btf);
2682 }
2683
2684 if (find_kfunc_desc(env->prog, func_id, offset))
e6ac2450
MKL
2685 return 0;
2686
2687 if (tab->nr_descs == MAX_KFUNC_DESCS) {
2688 verbose(env, "too many different kernel function calls\n");
2689 return -E2BIG;
2690 }
2691
2357672c 2692 func = btf_type_by_id(desc_btf, func_id);
e6ac2450
MKL
2693 if (!func || !btf_type_is_func(func)) {
2694 verbose(env, "kernel btf_id %u is not a function\n",
2695 func_id);
2696 return -EINVAL;
2697 }
2357672c 2698 func_proto = btf_type_by_id(desc_btf, func->type);
e6ac2450
MKL
2699 if (!func_proto || !btf_type_is_func_proto(func_proto)) {
2700 verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
2701 func_id);
2702 return -EINVAL;
2703 }
2704
2357672c 2705 func_name = btf_name_by_offset(desc_btf, func->name_off);
e6ac2450
MKL
2706 addr = kallsyms_lookup_name(func_name);
2707 if (!addr) {
2708 verbose(env, "cannot find address for kernel function %s\n",
2709 func_name);
2710 return -EINVAL;
2711 }
1cf3bfc6 2712 specialize_kfunc(env, func_id, offset, &addr);
e6ac2450 2713
1cf3bfc6
IL
2714 if (bpf_jit_supports_far_kfunc_call()) {
2715 call_imm = func_id;
2716 } else {
2717 call_imm = BPF_CALL_IMM(addr);
2718 /* Check whether the relative offset overflows desc->imm */
2719 if ((unsigned long)(s32)call_imm != call_imm) {
2720 verbose(env, "address of kernel function %s is out of range\n",
2721 func_name);
2722 return -EINVAL;
2723 }
8cbf062a
HT
2724 }
2725
3d76a4d3
SF
2726 if (bpf_dev_bound_kfunc_id(func_id)) {
2727 err = bpf_dev_bound_kfunc_check(&env->log, prog_aux);
2728 if (err)
2729 return err;
2730 }
2731
e6ac2450
MKL
2732 desc = &tab->descs[tab->nr_descs++];
2733 desc->func_id = func_id;
8cbf062a 2734 desc->imm = call_imm;
2357672c 2735 desc->offset = offset;
1cf3bfc6 2736 desc->addr = addr;
2357672c 2737 err = btf_distill_func_proto(&env->log, desc_btf,
e6ac2450
MKL
2738 func_proto, func_name,
2739 &desc->func_model);
2740 if (!err)
2741 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2357672c 2742 kfunc_desc_cmp_by_id_off, NULL);
e6ac2450
MKL
2743 return err;
2744}
2745
1cf3bfc6 2746static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b)
e6ac2450
MKL
2747{
2748 const struct bpf_kfunc_desc *d0 = a;
2749 const struct bpf_kfunc_desc *d1 = b;
2750
1cf3bfc6
IL
2751 if (d0->imm != d1->imm)
2752 return d0->imm < d1->imm ? -1 : 1;
2753 if (d0->offset != d1->offset)
2754 return d0->offset < d1->offset ? -1 : 1;
e6ac2450
MKL
2755 return 0;
2756}
2757
1cf3bfc6 2758static void sort_kfunc_descs_by_imm_off(struct bpf_prog *prog)
e6ac2450
MKL
2759{
2760 struct bpf_kfunc_desc_tab *tab;
2761
2762 tab = prog->aux->kfunc_tab;
2763 if (!tab)
2764 return;
2765
2766 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
1cf3bfc6 2767 kfunc_desc_cmp_by_imm_off, NULL);
e6ac2450
MKL
2768}
2769
2770bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2771{
2772 return !!prog->aux->kfunc_tab;
2773}
2774
2775const struct btf_func_model *
2776bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2777 const struct bpf_insn *insn)
2778{
2779 const struct bpf_kfunc_desc desc = {
2780 .imm = insn->imm,
1cf3bfc6 2781 .offset = insn->off,
e6ac2450
MKL
2782 };
2783 const struct bpf_kfunc_desc *res;
2784 struct bpf_kfunc_desc_tab *tab;
2785
2786 tab = prog->aux->kfunc_tab;
2787 res = bsearch(&desc, tab->descs, tab->nr_descs,
1cf3bfc6 2788 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm_off);
e6ac2450
MKL
2789
2790 return res ? &res->func_model : NULL;
2791}
2792
2793static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
cc8b0b92 2794{
9c8105bd 2795 struct bpf_subprog_info *subprog = env->subprog_info;
cc8b0b92 2796 struct bpf_insn *insn = env->prog->insnsi;
e6ac2450 2797 int i, ret, insn_cnt = env->prog->len;
cc8b0b92 2798
f910cefa
JW
2799 /* Add entry function. */
2800 ret = add_subprog(env, 0);
e6ac2450 2801 if (ret)
f910cefa
JW
2802 return ret;
2803
e6ac2450
MKL
2804 for (i = 0; i < insn_cnt; i++, insn++) {
2805 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
2806 !bpf_pseudo_kfunc_call(insn))
cc8b0b92 2807 continue;
e6ac2450 2808
2c78ee89 2809 if (!env->bpf_capable) {
e6ac2450 2810 verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
cc8b0b92
AS
2811 return -EPERM;
2812 }
e6ac2450 2813
3990ed4c 2814 if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
e6ac2450 2815 ret = add_subprog(env, i + insn->imm + 1);
3990ed4c 2816 else
2357672c 2817 ret = add_kfunc_call(env, insn->imm, insn->off);
e6ac2450 2818
cc8b0b92
AS
2819 if (ret < 0)
2820 return ret;
2821 }
2822
4cb3d99c
JW
2823 /* Add a fake 'exit' subprog which could simplify subprog iteration
2824 * logic. 'subprog_cnt' should not be increased.
2825 */
2826 subprog[env->subprog_cnt].start = insn_cnt;
2827
06ee7115 2828 if (env->log.level & BPF_LOG_LEVEL2)
cc8b0b92 2829 for (i = 0; i < env->subprog_cnt; i++)
9c8105bd 2830 verbose(env, "func#%d @%d\n", i, subprog[i].start);
cc8b0b92 2831
e6ac2450
MKL
2832 return 0;
2833}
2834
2835static int check_subprogs(struct bpf_verifier_env *env)
2836{
2837 int i, subprog_start, subprog_end, off, cur_subprog = 0;
2838 struct bpf_subprog_info *subprog = env->subprog_info;
2839 struct bpf_insn *insn = env->prog->insnsi;
2840 int insn_cnt = env->prog->len;
2841
cc8b0b92 2842 /* now check that all jumps are within the same subprog */
4cb3d99c
JW
2843 subprog_start = subprog[cur_subprog].start;
2844 subprog_end = subprog[cur_subprog + 1].start;
cc8b0b92
AS
2845 for (i = 0; i < insn_cnt; i++) {
2846 u8 code = insn[i].code;
2847
7f6e4312 2848 if (code == (BPF_JMP | BPF_CALL) &&
df2ccc18
IL
2849 insn[i].src_reg == 0 &&
2850 insn[i].imm == BPF_FUNC_tail_call)
7f6e4312 2851 subprog[cur_subprog].has_tail_call = true;
09b28d76
AS
2852 if (BPF_CLASS(code) == BPF_LD &&
2853 (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
2854 subprog[cur_subprog].has_ld_abs = true;
092ed096 2855 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
cc8b0b92
AS
2856 goto next;
2857 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2858 goto next;
4cd58e9a
YS
2859 if (code == (BPF_JMP32 | BPF_JA))
2860 off = i + insn[i].imm + 1;
2861 else
2862 off = i + insn[i].off + 1;
cc8b0b92
AS
2863 if (off < subprog_start || off >= subprog_end) {
2864 verbose(env, "jump out of range from insn %d to %d\n", i, off);
2865 return -EINVAL;
2866 }
2867next:
2868 if (i == subprog_end - 1) {
2869 /* to avoid fall-through from one subprog into another
2870 * the last insn of the subprog should be either exit
2871 * or unconditional jump back
2872 */
2873 if (code != (BPF_JMP | BPF_EXIT) &&
4cd58e9a 2874 code != (BPF_JMP32 | BPF_JA) &&
cc8b0b92
AS
2875 code != (BPF_JMP | BPF_JA)) {
2876 verbose(env, "last insn is not an exit or jmp\n");
2877 return -EINVAL;
2878 }
2879 subprog_start = subprog_end;
4cb3d99c
JW
2880 cur_subprog++;
2881 if (cur_subprog < env->subprog_cnt)
9c8105bd 2882 subprog_end = subprog[cur_subprog + 1].start;
cc8b0b92
AS
2883 }
2884 }
2885 return 0;
2886}
2887
679c782d
EC
2888/* Parentage chain of this register (or stack slot) should take care of all
2889 * issues like callee-saved registers, stack slot allocation time, etc.
2890 */
f4d7e40a 2891static int mark_reg_read(struct bpf_verifier_env *env,
679c782d 2892 const struct bpf_reg_state *state,
5327ed3d 2893 struct bpf_reg_state *parent, u8 flag)
f4d7e40a
AS
2894{
2895 bool writes = parent == state->parent; /* Observe write marks */
06ee7115 2896 int cnt = 0;
dc503a8a
EC
2897
2898 while (parent) {
2899 /* if read wasn't screened by an earlier write ... */
679c782d 2900 if (writes && state->live & REG_LIVE_WRITTEN)
dc503a8a 2901 break;
9242b5f5
AS
2902 if (parent->live & REG_LIVE_DONE) {
2903 verbose(env, "verifier BUG type %s var_off %lld off %d\n",
c25b2ae1 2904 reg_type_str(env, parent->type),
9242b5f5
AS
2905 parent->var_off.value, parent->off);
2906 return -EFAULT;
2907 }
5327ed3d
JW
2908 /* The first condition is more likely to be true than the
2909 * second, checked it first.
2910 */
2911 if ((parent->live & REG_LIVE_READ) == flag ||
2912 parent->live & REG_LIVE_READ64)
25af32da
AS
2913 /* The parentage chain never changes and
2914 * this parent was already marked as LIVE_READ.
2915 * There is no need to keep walking the chain again and
2916 * keep re-marking all parents as LIVE_READ.
2917 * This case happens when the same register is read
2918 * multiple times without writes into it in-between.
5327ed3d
JW
2919 * Also, if parent has the stronger REG_LIVE_READ64 set,
2920 * then no need to set the weak REG_LIVE_READ32.
25af32da
AS
2921 */
2922 break;
dc503a8a 2923 /* ... then we depend on parent's value */
5327ed3d
JW
2924 parent->live |= flag;
2925 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
2926 if (flag == REG_LIVE_READ64)
2927 parent->live &= ~REG_LIVE_READ32;
dc503a8a
EC
2928 state = parent;
2929 parent = state->parent;
f4d7e40a 2930 writes = true;
06ee7115 2931 cnt++;
dc503a8a 2932 }
06ee7115
AS
2933
2934 if (env->longest_mark_read_walk < cnt)
2935 env->longest_mark_read_walk = cnt;
f4d7e40a 2936 return 0;
dc503a8a
EC
2937}
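/* Editor's sketch, mirroring the caller in check_reg_arg(): reading a
 * register walks its parentage chain, OR-ing the read flag into each
 * ancestor, stopping early when a write screens the read or an ancestor
 * already carries the (equal or stronger) flag:
 *
 *	err = mark_reg_read(env, reg, reg->parent,
 *			    rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
 */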
2938
d6fefa11
KKD
2939static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
2940{
2941 struct bpf_func_state *state = func(env, reg);
2942 int spi, ret;
2943
2944 /* For CONST_PTR_TO_DYNPTR, it must have already been done by
2945 * check_reg_arg in check_helper_call and mark_btf_func_reg_size in
2946 * check_kfunc_call.
2947 */
2948 if (reg->type == CONST_PTR_TO_DYNPTR)
2949 return 0;
79168a66
KKD
2950 spi = dynptr_get_spi(env, reg);
2951 if (spi < 0)
2952 return spi;
d6fefa11
KKD
2953 /* Caller ensures dynptr is valid and initialized, which means spi is in
2954 * bounds and spi is the first dynptr slot. Simply mark stack slot as
2955 * read.
2956 */
2957 ret = mark_reg_read(env, &state->stack[spi].spilled_ptr,
2958 state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64);
2959 if (ret)
2960 return ret;
2961 return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr,
2962 state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64);
2963}
2964
06accc87
AN
2965static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
2966 int spi, int nr_slots)
2967{
2968 struct bpf_func_state *state = func(env, reg);
2969 int err, i;
2970
2971 for (i = 0; i < nr_slots; i++) {
2972 struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr;
2973
2974 err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64);
2975 if (err)
2976 return err;
2977
2978 mark_stack_slot_scratched(env, spi - i);
2979 }
2980
2981 return 0;
2982}
2983
5327ed3d
JW
2984/* This function is supposed to be used by the following 32-bit optimization
2985 * code only. It returns TRUE if the source or destination register operates
 2986 * on 64 bits, otherwise FALSE.
2987 */
2988static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
2989 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
2990{
2991 u8 code, class, op;
2992
2993 code = insn->code;
2994 class = BPF_CLASS(code);
2995 op = BPF_OP(code);
2996 if (class == BPF_JMP) {
2997 /* BPF_EXIT for "main" will reach here. Return TRUE
2998 * conservatively.
2999 */
3000 if (op == BPF_EXIT)
3001 return true;
3002 if (op == BPF_CALL) {
3003 /* BPF to BPF call will reach here because of marking
 3004 * caller-saved clobbers with DST_OP_NO_MARK, for which we
 3005 * don't care about the register def because they are
 3006 * already marked as NOT_INIT.
3007 */
3008 if (insn->src_reg == BPF_PSEUDO_CALL)
3009 return false;
3010 /* Helper call will reach here because of arg type
3011 * check, conservatively return TRUE.
3012 */
3013 if (t == SRC_OP)
3014 return true;
3015
3016 return false;
3017 }
3018 }
3019
0845c3db
YS
3020 if (class == BPF_ALU64 && op == BPF_END && (insn->imm == 16 || insn->imm == 32))
3021 return false;
3022
5327ed3d 3023 if (class == BPF_ALU64 || class == BPF_JMP ||
5327ed3d
JW
3024 (class == BPF_ALU && op == BPF_END && insn->imm == 64))
3025 return true;
3026
3027 if (class == BPF_ALU || class == BPF_JMP32)
3028 return false;
3029
3030 if (class == BPF_LDX) {
3031 if (t != SRC_OP)
3032 return BPF_SIZE(code) == BPF_DW;
3033 /* LDX source must be ptr. */
3034 return true;
3035 }
3036
3037 if (class == BPF_STX) {
83a28819
IL
3038 /* BPF_STX (including atomic variants) has multiple source
3039 * operands, one of which is a ptr. Check whether the caller is
3040 * asking about it.
3041 */
3042 if (t == SRC_OP && reg->type != SCALAR_VALUE)
5327ed3d
JW
3043 return true;
3044 return BPF_SIZE(code) == BPF_DW;
3045 }
3046
3047 if (class == BPF_LD) {
3048 u8 mode = BPF_MODE(code);
3049
3050 /* LD_IMM64 */
3051 if (mode == BPF_IMM)
3052 return true;
3053
3054 /* Both LD_IND and LD_ABS return 32-bit data. */
3055 if (t != SRC_OP)
3056 return false;
3057
3058 /* Implicit ctx ptr. */
3059 if (regno == BPF_REG_6)
3060 return true;
3061
3062 /* Explicit source could be any width. */
3063 return true;
3064 }
3065
3066 if (class == BPF_ST)
3067 /* The only source register for BPF_ST is a ptr. */
3068 return true;
3069
3070 /* Conservatively return true at default. */
3071 return true;
3072}
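/* Editor's examples for the cases above: BPF_ALU64 BPF_ADD -> true;
 * BPF_ALU (32-bit) BPF_MOV -> false; a BPF_LDX destination with BPF_W
 * defines a 32-bit value (BPF_SIZE != BPF_DW -> false), while the BPF_LDX
 * source is always a pointer and hence 64-bit.
 */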
3073
83a28819
IL
3074/* Return the regno defined by the insn, or -1. */
3075static int insn_def_regno(const struct bpf_insn *insn)
b325fbca 3076{
83a28819
IL
3077 switch (BPF_CLASS(insn->code)) {
3078 case BPF_JMP:
3079 case BPF_JMP32:
3080 case BPF_ST:
3081 return -1;
3082 case BPF_STX:
3083 if (BPF_MODE(insn->code) == BPF_ATOMIC &&
3084 (insn->imm & BPF_FETCH)) {
3085 if (insn->imm == BPF_CMPXCHG)
3086 return BPF_REG_0;
3087 else
3088 return insn->src_reg;
3089 } else {
3090 return -1;
3091 }
3092 default:
3093 return insn->dst_reg;
3094 }
b325fbca
JW
3095}
3096
3097/* Return TRUE if INSN has defined any 32-bit value explicitly. */
3098static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
3099{
83a28819
IL
3100 int dst_reg = insn_def_regno(insn);
3101
3102 if (dst_reg == -1)
b325fbca
JW
3103 return false;
3104
83a28819 3105 return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
b325fbca
JW
3106}
3107
5327ed3d
JW
3108static void mark_insn_zext(struct bpf_verifier_env *env,
3109 struct bpf_reg_state *reg)
3110{
3111 s32 def_idx = reg->subreg_def;
3112
3113 if (def_idx == DEF_NOT_SUBREG)
3114 return;
3115
3116 env->insn_aux_data[def_idx - 1].zext_dst = true;
3117 /* The dst will be zero extended, so won't be sub-register anymore. */
3118 reg->subreg_def = DEF_NOT_SUBREG;
3119}
3120
dc503a8a 3121static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
17a52670
AS
3122 enum reg_arg_type t)
3123{
f4d7e40a
AS
3124 struct bpf_verifier_state *vstate = env->cur_state;
3125 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5327ed3d 3126 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
c342dc10 3127 struct bpf_reg_state *reg, *regs = state->regs;
5327ed3d 3128 bool rw64;
dc503a8a 3129
17a52670 3130 if (regno >= MAX_BPF_REG) {
61bd5218 3131 verbose(env, "R%d is invalid\n", regno);
17a52670
AS
3132 return -EINVAL;
3133 }
3134
0f55f9ed
CL
3135 mark_reg_scratched(env, regno);
3136
c342dc10 3137 reg = &regs[regno];
5327ed3d 3138 rw64 = is_reg64(env, insn, regno, reg, t);
17a52670
AS
3139 if (t == SRC_OP) {
3140 /* check whether register used as source operand can be read */
c342dc10 3141 if (reg->type == NOT_INIT) {
61bd5218 3142 verbose(env, "R%d !read_ok\n", regno);
17a52670
AS
3143 return -EACCES;
3144 }
679c782d 3145 /* We don't need to worry about FP liveness because it's read-only */
c342dc10
JW
3146 if (regno == BPF_REG_FP)
3147 return 0;
3148
5327ed3d
JW
3149 if (rw64)
3150 mark_insn_zext(env, reg);
3151
3152 return mark_reg_read(env, reg, reg->parent,
3153 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
17a52670
AS
3154 } else {
3155 /* check whether register used as dest operand can be written to */
3156 if (regno == BPF_REG_FP) {
61bd5218 3157 verbose(env, "frame pointer is read only\n");
17a52670
AS
3158 return -EACCES;
3159 }
c342dc10 3160 reg->live |= REG_LIVE_WRITTEN;
5327ed3d 3161 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
17a52670 3162 if (t == DST_OP)
61bd5218 3163 mark_reg_unknown(env, regs, regno);
17a52670
AS
3164 }
3165 return 0;
3166}
3167
bffdeaa8
AN
3168static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
3169{
3170 env->insn_aux_data[idx].jmp_point = true;
3171}
3172
3173static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
3174{
3175 return env->insn_aux_data[insn_idx].jmp_point;
3176}
3177
b5dc0163
AS
3178/* for any branch, call, exit record the history of jmps in the given state */
3179static int push_jmp_history(struct bpf_verifier_env *env,
3180 struct bpf_verifier_state *cur)
3181{
3182 u32 cnt = cur->jmp_history_cnt;
3183 struct bpf_idx_pair *p;
ceb35b66 3184 size_t alloc_size;
b5dc0163 3185
bffdeaa8
AN
3186 if (!is_jmp_point(env, env->insn_idx))
3187 return 0;
3188
b5dc0163 3189 cnt++;
ceb35b66
KC
3190 alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
3191 p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
b5dc0163
AS
3192 if (!p)
3193 return -ENOMEM;
3194 p[cnt - 1].idx = env->insn_idx;
3195 p[cnt - 1].prev_idx = env->prev_insn_idx;
3196 cur->jmp_history = p;
3197 cur->jmp_history_cnt = cnt;
3198 return 0;
3199}
3200
3201/* Backtrack one insn at a time. If idx is not at the top of recorded
3202 * history then previous instruction came from straight line execution.
3203 */
3204static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
3205 u32 *history)
3206{
3207 u32 cnt = *history;
3208
3209 if (cnt && st->jmp_history[cnt - 1].idx == i) {
3210 i = st->jmp_history[cnt - 1].prev_idx;
3211 (*history)--;
3212 } else {
3213 i--;
3214 }
3215 return i;
3216}
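/* Editor's worked example: with jmp_history == [{idx: 7, prev_idx: 3}] and
 * *history == 1, calling this with i == 7 returns 3 (the recorded jump)
 * and decrements *history; any other i simply returns i - 1, i.e.
 * straight-line execution.
 */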
3217
e6ac2450
MKL
3218static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
3219{
3220 const struct btf_type *func;
2357672c 3221 struct btf *desc_btf;
e6ac2450
MKL
3222
3223 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
3224 return NULL;
3225
43bf0878 3226 desc_btf = find_kfunc_desc_btf(data, insn->off);
2357672c
KKD
3227 if (IS_ERR(desc_btf))
3228 return "<error>";
3229
3230 func = btf_type_by_id(desc_btf, insn->imm);
3231 return btf_name_by_offset(desc_btf, func->name_off);
e6ac2450
MKL
3232}
3233
407958a0
AN
3234static inline void bt_init(struct backtrack_state *bt, u32 frame)
3235{
3236 bt->frame = frame;
3237}
3238
3239static inline void bt_reset(struct backtrack_state *bt)
3240{
3241 struct bpf_verifier_env *env = bt->env;
3242
3243 memset(bt, 0, sizeof(*bt));
3244 bt->env = env;
3245}
3246
3247static inline u32 bt_empty(struct backtrack_state *bt)
3248{
3249 u64 mask = 0;
3250 int i;
3251
3252 for (i = 0; i <= bt->frame; i++)
3253 mask |= bt->reg_masks[i] | bt->stack_masks[i];
3254
3255 return mask == 0;
3256}
3257
3258static inline int bt_subprog_enter(struct backtrack_state *bt)
3259{
3260 if (bt->frame == MAX_CALL_FRAMES - 1) {
3261 verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame);
3262 WARN_ONCE(1, "verifier backtracking bug");
3263 return -EFAULT;
3264 }
3265 bt->frame++;
3266 return 0;
3267}
3268
3269static inline int bt_subprog_exit(struct backtrack_state *bt)
3270{
3271 if (bt->frame == 0) {
3272 verbose(bt->env, "BUG subprog exit from frame 0\n");
3273 WARN_ONCE(1, "verifier backtracking bug");
3274 return -EFAULT;
3275 }
3276 bt->frame--;
3277 return 0;
3278}
3279
3280static inline void bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
3281{
3282 bt->reg_masks[frame] |= 1 << reg;
3283}
3284
3285static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg)
3286{
3287 bt->reg_masks[frame] &= ~(1 << reg);
3288}
3289
3290static inline void bt_set_reg(struct backtrack_state *bt, u32 reg)
3291{
3292 bt_set_frame_reg(bt, bt->frame, reg);
3293}
3294
3295static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg)
3296{
3297 bt_clear_frame_reg(bt, bt->frame, reg);
3298}
3299
3300static inline void bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
3301{
3302 bt->stack_masks[frame] |= 1ull << slot;
3303}
3304
3305static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot)
3306{
3307 bt->stack_masks[frame] &= ~(1ull << slot);
3308}
3309
3310static inline void bt_set_slot(struct backtrack_state *bt, u32 slot)
3311{
3312 bt_set_frame_slot(bt, bt->frame, slot);
3313}
3314
3315static inline void bt_clear_slot(struct backtrack_state *bt, u32 slot)
3316{
3317 bt_clear_frame_slot(bt, bt->frame, slot);
3318}
3319
3320static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame)
3321{
3322 return bt->reg_masks[frame];
3323}
3324
3325static inline u32 bt_reg_mask(struct backtrack_state *bt)
3326{
3327 return bt->reg_masks[bt->frame];
3328}
3329
3330static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame)
3331{
3332 return bt->stack_masks[frame];
3333}
3334
3335static inline u64 bt_stack_mask(struct backtrack_state *bt)
3336{
3337 return bt->stack_masks[bt->frame];
3338}
3339
3340static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
3341{
3342 return bt->reg_masks[bt->frame] & (1 << reg);
3343}
3344
3345static inline bool bt_is_slot_set(struct backtrack_state *bt, u32 slot)
3346{
3347 return bt->stack_masks[bt->frame] & (1ull << slot);
3348}
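/* Usage sketch (illustrative): bt_set_reg(bt, BPF_REG_2) sets bit 2 of
 * the current frame's register mask (mask |= 0x4); bt_set_slot(bt, 1)
 * sets bit 1 of the 64-bit stack mask, i.e. the second 8-byte slot,
 * fp-16. bt_empty() then reports whether any register or stack slot in
 * any frame is still waiting to be resolved by backtracking.
 */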
3349
3350/* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */
3351static void fmt_reg_mask(char *buf, ssize_t buf_sz, u32 reg_mask)
3352{
3353 DECLARE_BITMAP(mask, 64);
3354 bool first = true;
3355 int i, n;
3356
3357 buf[0] = '\0';
3358
3359 bitmap_from_u64(mask, reg_mask);
3360 for_each_set_bit(i, mask, 32) {
3361 n = snprintf(buf, buf_sz, "%sr%d", first ? "" : ",", i);
3362 first = false;
3363 buf += n;
3364 buf_sz -= n;
3365 if (buf_sz < 0)
3366 break;
3367 }
3368}
3369/* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */
3370static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask)
3371{
3372 DECLARE_BITMAP(mask, 64);
3373 bool first = true;
3374 int i, n;
3375
3376 buf[0] = '\0';
3377
3378 bitmap_from_u64(mask, stack_mask);
3379 for_each_set_bit(i, mask, 64) {
3380 n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8);
3381 first = false;
3382 buf += n;
3383 buf_sz -= n;
3384 if (buf_sz < 0)
3385 break;
3386 }
3387}
3388
3389/* For a given verifier state backtrack_insn() is called from the last insn to
3390 * the first insn. Its purpose is to compute a bitmask of registers and
3391 * stack slots that need precision in the parent verifier state.
3392 *
3393 * @idx is an index of the instruction we are currently processing;
3394 * @subseq_idx is an index of the subsequent instruction that:
3395 * - *would be* executed next, if jump history is viewed in forward order;
3396 * - *was* processed previously during backtracking.
3397 */
3398static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
3399 struct backtrack_state *bt)
3400{
3401 const struct bpf_insn_cbs cbs = {
3402 .cb_call = disasm_kfunc_name,
3403 .cb_print = verbose,
3404 .private_data = env,
3405 };
3406 struct bpf_insn *insn = env->prog->insnsi + idx;
3407 u8 class = BPF_CLASS(insn->code);
3408 u8 opcode = BPF_OP(insn->code);
3409 u8 mode = BPF_MODE(insn->code);
3410 u32 dreg = insn->dst_reg;
3411 u32 sreg = insn->src_reg;
3412 u32 spi, i;
3413
3414 if (insn->code == 0)
3415 return 0;
3416 if (env->log.level & BPF_LOG_LEVEL2) {
3417 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt));
3418 verbose(env, "mark_precise: frame%d: regs=%s ",
3419 bt->frame, env->tmp_str_buf);
3420 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt));
3421 verbose(env, "stack=%s before ", env->tmp_str_buf);
3422 verbose(env, "%d: ", idx);
3423 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
3424 }
3425
3426 if (class == BPF_ALU || class == BPF_ALU64) {
3427 if (!bt_is_reg_set(bt, dreg))
3428 return 0;
3429 if (opcode == BPF_MOV) {
3430 if (BPF_SRC(insn->code) == BPF_X) {
3431 /* dreg = sreg or dreg = (s8, s16, s32)sreg
3432 * dreg needs precision after this insn
3433 * sreg needs precision before this insn
3434 */
3435 bt_clear_reg(bt, dreg);
3436 bt_set_reg(bt, sreg);
3437 } else {
3438 /* dreg = K
3439 * dreg needs precision after this insn.
3440 * Corresponding register is already marked
3441 * as precise=true in this verifier state.
3442 * No further markings in parent are necessary
3443 */
3444 bt_clear_reg(bt, dreg);
3445 }
3446 } else {
3447 if (BPF_SRC(insn->code) == BPF_X) {
3448 /* dreg += sreg
3449 * both dreg and sreg need precision
3450 * before this insn
3451 */
3452 bt_set_reg(bt, sreg);
3453 } /* else dreg += K
3454 * dreg still needs precision before this insn
3455 */
3456 }
3457 } else if (class == BPF_LDX) {
3458 if (!bt_is_reg_set(bt, dreg))
3459 return 0;
3460 bt_clear_reg(bt, dreg);
3461
3462 /* scalars can only be spilled into stack w/o losing precision.
3463 * Load from any other memory can be zero extended.
3464 * The desire to keep that precision is already indicated
3465 * by 'precise' mark in corresponding register of this state.
3466 * No further tracking necessary.
3467 */
3468 if (insn->src_reg != BPF_REG_FP)
3469 return 0;
3470
3471 /* dreg = *(u64 *)[fp - off] was a fill from the stack.
3472 * that [fp - off] slot contains scalar that needs to be
3473 * tracked with precision
3474 */
3475 spi = (-insn->off - 1) / BPF_REG_SIZE;
3476 if (spi >= 64) {
3477 verbose(env, "BUG spi %d\n", spi);
3478 WARN_ONCE(1, "verifier backtracking bug");
3479 return -EFAULT;
3480 }
3481 bt_set_slot(bt, spi);
3482 } else if (class == BPF_STX || class == BPF_ST) {
3483 if (bt_is_reg_set(bt, dreg))
3484 /* stx & st shouldn't be using _scalar_ dst_reg
3485 * to access memory. It means backtracking
3486 * encountered a case of pointer subtraction.
3487 */
3488 return -ENOTSUPP;
3489 /* scalars can only be spilled into stack */
3490 if (insn->dst_reg != BPF_REG_FP)
3491 return 0;
3492 spi = (-insn->off - 1) / BPF_REG_SIZE;
3493 if (spi >= 64) {
3494 verbose(env, "BUG spi %d\n", spi);
3495 WARN_ONCE(1, "verifier backtracking bug");
3496 return -EFAULT;
3497 }
3498 if (!bt_is_slot_set(bt, spi))
3499 return 0;
3500 bt_clear_slot(bt, spi);
3501 if (class == BPF_STX)
3502 bt_set_reg(bt, sreg);
3503 } else if (class == BPF_JMP || class == BPF_JMP32) {
3504 if (bpf_pseudo_call(insn)) {
3505 int subprog_insn_idx, subprog;
3506
3507 subprog_insn_idx = idx + insn->imm + 1;
3508 subprog = find_subprog(env, subprog_insn_idx);
3509 if (subprog < 0)
3510 return -EFAULT;
3511
3512 if (subprog_is_global(env, subprog)) {
3513 /* check that jump history doesn't have any
3514 * extra instructions from subprog; the next
3515 * instruction after call to global subprog
3516 * should be literally next instruction in
3517 * caller program
3518 */
3519 WARN_ONCE(idx + 1 != subseq_idx, "verifier backtracking bug");
3520 /* r1-r5 are invalidated after subprog call,
3521 * so for global func call it shouldn't be set
3522 * anymore
3523 */
3524 if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
3525 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3526 WARN_ONCE(1, "verifier backtracking bug");
3527 return -EFAULT;
3528 }
3529 /* global subprog always sets R0 */
3530 bt_clear_reg(bt, BPF_REG_0);
3531 return 0;
3532 } else {
3533 /* static subprog call instruction, which
3534 * means that we are exiting current subprog,
3535 * so only r1-r5 could be still requested as
3536 * precise, r0 and r6-r10 or any stack slot in
3537 * the current frame should be zero by now
3538 */
3539 if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
3540 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3541 WARN_ONCE(1, "verifier backtracking bug");
3542 return -EFAULT;
3543 }
3544 /* we don't track register spills perfectly,
3545 * so fall back to force-precise instead of failing */
3546 if (bt_stack_mask(bt) != 0)
3547 return -ENOTSUPP;
3548 /* propagate r1-r5 to the caller */
3549 for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
3550 if (bt_is_reg_set(bt, i)) {
3551 bt_clear_reg(bt, i);
3552 bt_set_frame_reg(bt, bt->frame - 1, i);
3553 }
3554 }
3555 if (bt_subprog_exit(bt))
3556 return -EFAULT;
3557 return 0;
3558 }
3559 } else if ((bpf_helper_call(insn) &&
3560 is_callback_calling_function(insn->imm) &&
3561 !is_async_callback_calling_function(insn->imm)) ||
3562 (bpf_pseudo_kfunc_call(insn) && is_callback_calling_kfunc(insn->imm))) {
3563 /* callback-calling helper or kfunc call, which means
3564 * we are exiting from subprog, but unlike the subprog
3565 * call handling above, we shouldn't propagate
3566 * precision of r1-r5 (if any requested), as they are
3567 * not actually arguments passed directly to callback
3568 * subprogs
3569 */
3570 if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
3571 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3572 WARN_ONCE(1, "verifier backtracking bug");
3573 return -EFAULT;
3574 }
3575 if (bt_stack_mask(bt) != 0)
3576 return -ENOTSUPP;
3577 /* clear r1-r5 in callback subprog's mask */
3578 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
3579 bt_clear_reg(bt, i);
3580 if (bt_subprog_exit(bt))
3581 return -EFAULT;
3582 return 0;
3583 } else if (opcode == BPF_CALL) {
3584 /* kfunc with imm==0 is invalid and fixup_kfunc_call will
3585 * catch this error later. Make backtracking conservative
3586 * with ENOTSUPP.
3587 */
3588 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
3589 return -ENOTSUPP;
3590 /* regular helper call sets R0 */
3591 bt_clear_reg(bt, BPF_REG_0);
3592 if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
3593 /* if backtracing was looking for registers R1-R5
3594 * they should have been found already.
3595 */
3596 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3597 WARN_ONCE(1, "verifier backtracking bug");
3598 return -EFAULT;
3599 }
3600 } else if (opcode == BPF_EXIT) {
3601 bool r0_precise;
3602
3603 if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
3604 /* if backtracing was looking for registers R1-R5
3605 * they should have been found already.
3606 */
3607 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3608 WARN_ONCE(1, "verifier backtracking bug");
3609 return -EFAULT;
3610 }
3611
3612 /* BPF_EXIT in subprog or callback always returns
3613 * right after the call instruction, so by checking
3614 * whether the instruction at subseq_idx-1 is a subprog
3615 * call or not we can distinguish actual exit from
3616 * *subprog* from exit from *callback*. In the former
3617 * case, we need to propagate r0 precision, if
3618 * necessary. In the latter we never do that.
3619 */
3620 r0_precise = subseq_idx - 1 >= 0 &&
3621 bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) &&
3622 bt_is_reg_set(bt, BPF_REG_0);
3623
3624 bt_clear_reg(bt, BPF_REG_0);
3625 if (bt_subprog_enter(bt))
3626 return -EFAULT;
3627
3628 if (r0_precise)
3629 bt_set_reg(bt, BPF_REG_0);
3630 /* r6-r9 and stack slots will stay set in caller frame
3631 * bitmasks until we return back from callee(s)
3632 */
3633 return 0;
3634 } else if (BPF_SRC(insn->code) == BPF_X) {
3635 if (!bt_is_reg_set(bt, dreg) && !bt_is_reg_set(bt, sreg))
3636 return 0;
3637 /* dreg <cond> sreg
3638 * Both dreg and sreg need precision before
3639 * this insn. If only sreg was marked precise
3640 * before it would be equally necessary to
3641 * propagate it to dreg.
3642 */
3643 bt_set_reg(bt, dreg);
3644 bt_set_reg(bt, sreg);
3645 /* else dreg <cond> K
3646 * Only dreg still needs precision before
3647 * this insn, so for the K-based conditional
3648 * there is nothing new to be marked.
3649 */
3650 }
3651 } else if (class == BPF_LD) {
3652 if (!bt_is_reg_set(bt, dreg))
3653 return 0;
3654 bt_clear_reg(bt, dreg);
3655 /* It's ld_imm64 or ld_abs or ld_ind.
3656 * For ld_imm64 no further tracking of precision
3657 * into parent is necessary
3658 */
3659 if (mode == BPF_IND || mode == BPF_ABS)
3660 /* to be analyzed */
3661 return -ENOTSUPP;
3662 }
3663 return 0;
3664}
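/* Illustrative walk-through on a hypothetical program:
 * 10: r6 = 42
 * 11: r7 = r6
 * 12: r7 += 8
 * 13: r1 = r10
 * 14: r1 += r7 <-- r7 is requested as precise here
 * Backtracking from insn 14: "r7 += 8" keeps r7 in the mask (dreg += K);
 * "r7 = r6" clears r7 and sets r6 (dreg = sreg); "r6 = 42" clears r6
 * (dreg = K), the mask becomes empty and backtracking stops.
 */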
3665
3666/* the scalar precision tracking algorithm:
3667 * . at the start all registers have precise=false.
3668 * . scalar ranges are tracked as normal through alu and jmp insns.
3669 * . once precise value of the scalar register is used in:
3670 * . ptr + scalar alu
3671 * . if (scalar cond K|scalar)
3672 * . helper_call(.., scalar, ...) where ARG_CONST is expected
3673 * backtrack through the verifier states and mark as precise all
3674 * registers and stack slots with spilled constants that these
3675 * scalar registers were derived from.
3676 * . during state pruning two registers (or spilled stack slots)
3677 * are equivalent if both are not precise.
3678 *
3679 * Note the verifier cannot simply walk register parentage chain,
3680 * since many different registers and stack slots could have been
3681 * used to compute single precise scalar.
3682 *
3683 * The approach of starting with precise=true for all registers and then
3684 * backtrack to mark a register as not precise when the verifier detects
3685 * that program doesn't care about specific value (e.g., when helper
3686 * takes register as ARG_ANYTHING parameter) is not safe.
3687 *
3688 * It's ok to walk single parentage chain of the verifier states.
3689 * It's possible that this backtracking will go all the way till 1st insn.
3690 * All other branches will be explored for needing precision later.
3691 *
3692 * The backtracking needs to deal with cases like:
3693 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
3694 * r9 -= r8
3695 * r5 = r9
3696 * if r5 > 0x79f goto pc+7
3697 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
3698 * r5 += 1
3699 * ...
3700 * call bpf_perf_event_output#25
3701 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
3702 *
3703 * and this case:
3704 * r6 = 1
3705 * call foo // uses callee's r6 inside to compute r0
3706 * r0 += r6
3707 * if r0 == 0 goto
3708 *
3709 * to track the above, reg_mask/stack_mask need to be independent for each frame.
3710 *
3711 * Also if parent's curframe > frame where backtracking started,
3712 * the verifier needs to mark registers in both frames, otherwise callees
3713 * may incorrectly prune callers. This is similar to
3714 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
3715 *
3716 * For now backtracking falls back into conservative marking.
3717 */
3718static void mark_all_scalars_precise(struct bpf_verifier_env *env,
3719 struct bpf_verifier_state *st)
3720{
3721 struct bpf_func_state *func;
3722 struct bpf_reg_state *reg;
3723 int i, j;
3724
3725 if (env->log.level & BPF_LOG_LEVEL2) {
3726 verbose(env, "mark_precise: frame%d: falling back to forcing all scalars precise\n",
3727 st->curframe);
3728 }
3729
3730 /* big hammer: mark all scalars precise in this path.
3731 * pop_stack may still get !precise scalars.
3732 * We also skip current state and go straight to first parent state,
3733 * because precision markings in current non-checkpointed state are
3734 * not needed. See why in the comment in __mark_chain_precision below.
3735 */
3736 for (st = st->parent; st; st = st->parent) {
3737 for (i = 0; i <= st->curframe; i++) {
3738 func = st->frame[i];
3739 for (j = 0; j < BPF_REG_FP; j++) {
3740 reg = &func->regs[j];
3741 if (reg->type != SCALAR_VALUE || reg->precise)
3742 continue;
3743 reg->precise = true;
3744 if (env->log.level & BPF_LOG_LEVEL2) {
3745 verbose(env, "force_precise: frame%d: forcing r%d to be precise\n",
3746 i, j);
3747 }
3748 }
3749 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
3750 if (!is_spilled_reg(&func->stack[j]))
3751 continue;
3752 reg = &func->stack[j].spilled_ptr;
3753 if (reg->type != SCALAR_VALUE || reg->precise)
3754 continue;
3755 reg->precise = true;
3756 if (env->log.level & BPF_LOG_LEVEL2) {
3757 verbose(env, "force_precise: frame%d: forcing fp%d to be precise\n",
3758 i, -(j + 1) * 8);
3759 }
3760 }
3761 }
3762 }
3763}
3764
3765static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
3766{
3767 struct bpf_func_state *func;
3768 struct bpf_reg_state *reg;
3769 int i, j;
3770
3771 for (i = 0; i <= st->curframe; i++) {
3772 func = st->frame[i];
3773 for (j = 0; j < BPF_REG_FP; j++) {
3774 reg = &func->regs[j];
3775 if (reg->type != SCALAR_VALUE)
3776 continue;
3777 reg->precise = false;
3778 }
3779 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
3780 if (!is_spilled_reg(&func->stack[j]))
3781 continue;
3782 reg = &func->stack[j].spilled_ptr;
3783 if (reg->type != SCALAR_VALUE)
3784 continue;
3785 reg->precise = false;
3786 }
3787 }
3788}
3789
3790static bool idset_contains(struct bpf_idset *s, u32 id)
3791{
3792 u32 i;
3793
3794 for (i = 0; i < s->count; ++i)
3795 if (s->ids[i] == id)
3796 return true;
3797
3798 return false;
3799}
3800
3801static int idset_push(struct bpf_idset *s, u32 id)
3802{
3803 if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids)))
3804 return -EFAULT;
3805 s->ids[s->count++] = id;
3806 return 0;
3807}
3808
3809static void idset_reset(struct bpf_idset *s)
3810{
3811 s->count = 0;
3812}
3813
3814/* Collect a set of IDs for all registers currently marked as precise in env->bt.
3815 * Mark all registers with these IDs as precise.
3816 */
3817static int mark_precise_scalar_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
3818{
3819 struct bpf_idset *precise_ids = &env->idset_scratch;
3820 struct backtrack_state *bt = &env->bt;
3821 struct bpf_func_state *func;
3822 struct bpf_reg_state *reg;
3823 DECLARE_BITMAP(mask, 64);
3824 int i, fr;
3825
3826 idset_reset(precise_ids);
3827
3828 for (fr = bt->frame; fr >= 0; fr--) {
3829 func = st->frame[fr];
3830
3831 bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr));
3832 for_each_set_bit(i, mask, 32) {
3833 reg = &func->regs[i];
3834 if (!reg->id || reg->type != SCALAR_VALUE)
3835 continue;
3836 if (idset_push(precise_ids, reg->id))
3837 return -EFAULT;
3838 }
3839
3840 bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
3841 for_each_set_bit(i, mask, 64) {
3842 if (i >= func->allocated_stack / BPF_REG_SIZE)
3843 break;
3844 if (!is_spilled_scalar_reg(&func->stack[i]))
3845 continue;
3846 reg = &func->stack[i].spilled_ptr;
3847 if (!reg->id)
3848 continue;
3849 if (idset_push(precise_ids, reg->id))
3850 return -EFAULT;
3851 }
3852 }
3853
3854 for (fr = 0; fr <= st->curframe; ++fr) {
3855 func = st->frame[fr];
3856
3857 for (i = BPF_REG_0; i < BPF_REG_10; ++i) {
3858 reg = &func->regs[i];
3859 if (!reg->id)
3860 continue;
3861 if (!idset_contains(precise_ids, reg->id))
3862 continue;
3863 bt_set_frame_reg(bt, fr, i);
3864 }
3865 for (i = 0; i < func->allocated_stack / BPF_REG_SIZE; ++i) {
3866 if (!is_spilled_scalar_reg(&func->stack[i]))
3867 continue;
3868 reg = &func->stack[i].spilled_ptr;
3869 if (!reg->id)
3870 continue;
3871 if (!idset_contains(precise_ids, reg->id))
3872 continue;
3873 bt_set_frame_slot(bt, fr, i);
3874 }
3875 }
3876
3877 return 0;
3878}
3879
3880/*
3881 * __mark_chain_precision() backtracks BPF program instruction sequence and
3882 * chain of verifier states making sure that register *regno* (if regno >= 0)
3883 * and/or the stack slots requested via env->bt are marked as precisely tracked
3884 * SCALARS, as well as any other registers and slots that contribute to
3885 * a tracked state of given registers/stack slots, depending on specific BPF
3886 * assembly instructions (see backtrack_insn() for exact instruction handling
3887 * logic). This backtracking relies on recorded jmp_history and is able to
3888 * traverse entire chain of parent states. This process ends only when all the
3889 * necessary registers/slots and their transitive dependencies are marked as
3890 * precise.
3891 *
3892 * One important and subtle aspect is that precise marks *do not matter* in
3893 * the currently verified state (current state). It is important to understand
3894 * why this is the case.
3895 *
3896 * First, note that current state is the state that is not yet "checkpointed",
3897 * i.e., it is not yet put into env->explored_states, and it has no children
3898 * states as well. It's ephemeral, and can end up either a) being discarded if
3899 * compatible explored state is found at some point or BPF_EXIT instruction is
3900 * reached or b) checkpointed and put into env->explored_states, branching out
3901 * into one or more children states.
3902 *
3903 * In the former case, precise markings in current state are completely
3904 * ignored by state comparison code (see regsafe() for details). Only
3905 * checkpointed ("old") state precise markings are important, and if old
3906 * state's register/slot is precise, regsafe() assumes current state's
3907 * register/slot as precise and checks value ranges exactly and precisely. If
3908 * states turn out to be compatible, current state's necessary precise
3909 * markings and any required parent states' precise markings are enforced
3910 * after the fact with propagate_precision() logic, after the fact. But it's
3911 * important to realize that in this case, even after marking current state
3912 * registers/slots as precise, we immediately discard current state. So what
3913 * actually matters is any of the precise markings propagated into current
3914 * state's parent states, which are always checkpointed (due to b) case above).
3915 * As such, for scenario a) it doesn't matter if current state has precise
3916 * markings set or not.
3917 *
3918 * Now, for the scenario b), checkpointing and forking into child(ren)
3919 * state(s). Note that before current state gets to checkpointing step, any
3920 * processed instruction always assumes precise SCALAR register/slot
3921 * knowledge: if precise value or range is useful to prune jump branch, BPF
3922 * verifier takes this opportunity enthusiastically. Similarly, when
3923 * register's value is used to calculate offset or memory address, exact
3924 * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
3925 * what we mentioned above about state comparison ignoring precise markings,
3926 * the BPF verifier ignores and also assumes precise
3927 * markings *at will* during the instruction verification process. But as the verifier
3928 * assumes precision, it also propagates any precision dependencies across
3929 * parent states, which are not yet finalized, so can be further restricted
3930 * based on new knowledge gained from restrictions enforced by their children
3931 * states. This is so that once those parent states are finalized, i.e., when
3932 * they have no more active children state, state comparison logic in
3933 * is_state_visited() would enforce strict and precise SCALAR ranges, if
3934 * required for correctness.
3935 *
3936 * To build a bit more intuition, note also that once a state is checkpointed,
3937 * the path we took to get to that state is not important. This is a crucial
3938 * property for state pruning. When a state is checkpointed and finalized at
3939 * some instruction index, it can be correctly and safely used to "short
3940 * circuit" any *compatible* state that reaches exactly the same instruction
3941 * index. I.e., if we jumped to that instruction from a completely different
3942 * code path than original finalized state was derived from, it doesn't
3943 * matter, current state can be discarded because from that instruction
3944 * forward having a compatible state will ensure we will safely reach the
3945 * exit. States describe preconditions for further exploration, but completely
3946 * forget the history of how we got here.
3947 *
3948 * This also means that even if we needed precise SCALAR range to get to
3949 * finalized state, but from that point forward *that same* SCALAR register is
3950 * never used in a precise context (i.e., its precise value is not needed for
3951 * correctness), it's correct and safe to mark such register as "imprecise"
3952 * (i.e., precise marking set to false). This is what we rely on when we do
3953 * not set precise marking in current state. If no child state requires
3954 * precision for any given SCALAR register, it's safe to dictate that it can
3955 * be imprecise. If any child state does require this register to be precise,
3956 * we'll mark it precise later retroactively during precise markings
3957 * propagation from child state to parent states.
3958 *
3959 * Skipping precise marking setting in current state is a mild version of
3960 * relying on the above observation. But we can utilize this property even
3961 * more aggressively by proactively forgetting any precise marking in the
3962 * current state (which we inherited from the parent state), right before we
3963 * checkpoint it and branch off into new child state. This is done by
3964 * mark_all_scalars_imprecise() to hopefully get more permissive and generic
3965 * finalized states which help in short circuiting more future states.
3966 */
3967static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
3968{
3969 struct backtrack_state *bt = &env->bt;
3970 struct bpf_verifier_state *st = env->cur_state;
3971 int first_idx = st->first_insn_idx;
3972 int last_idx = env->insn_idx;
3973 int subseq_idx = -1;
3974 struct bpf_func_state *func;
3975 struct bpf_reg_state *reg;
3976 bool skip_first = true;
3977 int i, fr, err;
3978
3979 if (!env->bpf_capable)
3980 return 0;
3981
3982 /* set frame number from which we are starting to backtrack */
3983 bt_init(bt, env->cur_state->curframe);
3984
3985 /* Do sanity checks against current state of register and/or stack
3986 * slot, but don't set precise flag in current state, as precision
3987 * tracking in the current state is unnecessary.
3988 */
3989 func = st->frame[bt->frame];
3990 if (regno >= 0) {
3991 reg = &func->regs[regno];
3992 if (reg->type != SCALAR_VALUE) {
3993 WARN_ONCE(1, "backtracing misuse");
3994 return -EFAULT;
3995 }
3996 bt_set_reg(bt, regno);
3997 }
3998
3999 if (bt_empty(bt))
4000 return 0;
4001
4002 for (;;) {
4003 DECLARE_BITMAP(mask, 64);
4004 u32 history = st->jmp_history_cnt;
4005
4006 if (env->log.level & BPF_LOG_LEVEL2) {
4007 verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d\n",
4008 bt->frame, last_idx, first_idx, subseq_idx);
4009 }
4010
4011 /* If some register with scalar ID is marked as precise,
4012 * make sure that all registers sharing this ID are also precise.
4013 * This is needed to estimate effect of find_equal_scalars().
4014 * Do this at the last instruction of each state,
4015 * bpf_reg_state::id fields are valid for these instructions.
4016 *
4017 * Allows to track precision in situation like below:
4018 *
4019 * r2 = unknown value
4020 * ...
4021 * --- state #0 ---
4022 * ...
4023 * r1 = r2 // r1 and r2 now share the same ID
4024 * ...
4025 * --- state #1 {r1.id = A, r2.id = A} ---
4026 * ...
4027 * if (r2 > 10) goto exit; // find_equal_scalars() assigns range to r1
4028 * ...
4029 * --- state #2 {r1.id = A, r2.id = A} ---
4030 * r3 = r10
4031 * r3 += r1 // need to mark both r1 and r2
4032 */
4033 if (mark_precise_scalar_ids(env, st))
4034 return -EFAULT;
4035
4036 if (last_idx < 0) {
4037 /* we are at the entry into subprog, which
4038 * is expected for global funcs, but only if
4039 * requested precise registers are R1-R5
4040 * (which are global func's input arguments)
4041 */
4042 if (st->curframe == 0 &&
4043 st->frame[0]->subprogno > 0 &&
4044 st->frame[0]->callsite == BPF_MAIN_FUNC &&
4045 bt_stack_mask(bt) == 0 &&
4046 (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) == 0) {
4047 bitmap_from_u64(mask, bt_reg_mask(bt));
4048 for_each_set_bit(i, mask, 32) {
4049 reg = &st->frame[0]->regs[i];
4050 if (reg->type != SCALAR_VALUE) {
4051 bt_clear_reg(bt, i);
4052 continue;
4053 }
4054 reg->precise = true;
4055 }
4056 return 0;
4057 }
4058
4059 verbose(env, "BUG backtracking func entry subprog %d reg_mask %x stack_mask %llx\n",
4060 st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt));
4061 WARN_ONCE(1, "verifier backtracking bug");
4062 return -EFAULT;
4063 }
4064
4065 for (i = last_idx;;) {
4066 if (skip_first) {
4067 err = 0;
4068 skip_first = false;
4069 } else {
4070 err = backtrack_insn(env, i, subseq_idx, bt);
4071 }
4072 if (err == -ENOTSUPP) {
4073 mark_all_scalars_precise(env, env->cur_state);
4074 bt_reset(bt);
4075 return 0;
4076 } else if (err) {
4077 return err;
4078 }
4079 if (bt_empty(bt))
4080 /* Found assignment(s) into tracked register in this state.
4081 * Since this state is already marked, just return.
4082 * Nothing to be tracked further in the parent state.
4083 */
4084 return 0;
4085 if (i == first_idx)
4086 break;
4087 subseq_idx = i;
4088 i = get_prev_insn_idx(st, i, &history);
4089 if (i >= env->prog->len) {
4090 /* This can happen if backtracking reached insn 0
4091 * and there are still reg_mask or stack_mask
4092 * to backtrack.
4093 * It means the backtracking missed the spot where a
4094 * particular register was initialized with a constant.
4095 */
4096 verbose(env, "BUG backtracking idx %d\n", i);
4097 WARN_ONCE(1, "verifier backtracking bug");
4098 return -EFAULT;
4099 }
4100 }
4101 st = st->parent;
4102 if (!st)
4103 break;
4104
4105 for (fr = bt->frame; fr >= 0; fr--) {
4106 func = st->frame[fr];
4107 bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr));
4108 for_each_set_bit(i, mask, 32) {
4109 reg = &func->regs[i];
4110 if (reg->type != SCALAR_VALUE) {
4111 bt_clear_frame_reg(bt, fr, i);
4112 continue;
4113 }
4114 if (reg->precise)
4115 bt_clear_frame_reg(bt, fr, i);
4116 else
4117 reg->precise = true;
4118 }
4119
4120 bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
4121 for_each_set_bit(i, mask, 64) {
4122 if (i >= func->allocated_stack / BPF_REG_SIZE) {
4123 /* the sequence of instructions:
4124 * 2: (bf) r3 = r10
4125 * 3: (7b) *(u64 *)(r3 -8) = r0
4126 * 4: (79) r4 = *(u64 *)(r10 -8)
4127 * doesn't contain jmps. It's backtracked
4128 * as a single block.
4129 * During backtracking insn 3 is not recognized as
4130 * stack access, so at the end of backtracking
4131 * stack slot fp-8 is still marked in stack_mask.
4132 * However the parent state may not have accessed
4133 * fp-8 and it's "unallocated" stack space.
4134 * In such a case fall back to conservative marking.
4135 */
4136 mark_all_scalars_precise(env, env->cur_state);
4137 bt_reset(bt);
4138 return 0;
4139 }
4140
4141 if (!is_spilled_scalar_reg(&func->stack[i])) {
4142 bt_clear_frame_slot(bt, fr, i);
4143 continue;
4144 }
4145 reg = &func->stack[i].spilled_ptr;
4146 if (reg->precise)
4147 bt_clear_frame_slot(bt, fr, i);
4148 else
4149 reg->precise = true;
4150 }
4151 if (env->log.level & BPF_LOG_LEVEL2) {
4152 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
4153 bt_frame_reg_mask(bt, fr));
4154 verbose(env, "mark_precise: frame%d: parent state regs=%s ",
4155 fr, env->tmp_str_buf);
4156 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN,
4157 bt_frame_stack_mask(bt, fr));
4158 verbose(env, "stack=%s: ", env->tmp_str_buf);
4159 print_verifier_state(env, func, true);
4160 }
4161 }
4162
4163 if (bt_empty(bt))
4164 return 0;
4165
4166 subseq_idx = first_idx;
4167 last_idx = st->last_insn_idx;
4168 first_idx = st->first_insn_idx;
4169 }
4170
4171 /* if we still have requested precise regs or slots, we missed
4172 * something (e.g., stack access through non-r10 register), so
4173 * fall back to marking all precise
4174 */
4175 if (!bt_empty(bt)) {
4176 mark_all_scalars_precise(env, env->cur_state);
4177 bt_reset(bt);
4178 }
4179
4180 return 0;
4181}
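/* Illustrative log fragment for the walk-through above, constructed from
 * the format strings in this file (not captured from a real run), with
 * log level 2:
 * mark_precise: frame0: last_idx 14 first_idx 10 subseq_idx -1
 * mark_precise: frame0: regs=r7 stack= before 12: (07) r7 += 8
 * mark_precise: frame0: regs=r7 stack= before 11: (bf) r7 = r6
 * mark_precise: frame0: regs=r6 stack= before 10: (b7) r6 = 42
 */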
4182
4183int mark_chain_precision(struct bpf_verifier_env *env, int regno)
4184{
4185 return __mark_chain_precision(env, regno);
4186}
4187
4188/* mark_chain_precision_batch() assumes that env->bt is set in the caller to
4189 * desired reg and stack masks across all relevant frames
4190 */
4191static int mark_chain_precision_batch(struct bpf_verifier_env *env)
4192{
4193 return __mark_chain_precision(env, -1);
4194}
4195
4196static bool is_spillable_regtype(enum bpf_reg_type type)
4197{
4198 switch (base_type(type)) {
4199 case PTR_TO_MAP_VALUE:
4200 case PTR_TO_STACK:
4201 case PTR_TO_CTX:
4202 case PTR_TO_PACKET:
4203 case PTR_TO_PACKET_META:
4204 case PTR_TO_PACKET_END:
4205 case PTR_TO_FLOW_KEYS:
4206 case CONST_PTR_TO_MAP:
4207 case PTR_TO_SOCKET:
4208 case PTR_TO_SOCK_COMMON:
4209 case PTR_TO_TCP_SOCK:
4210 case PTR_TO_XDP_SOCK:
4211 case PTR_TO_BTF_ID:
4212 case PTR_TO_BUF:
4213 case PTR_TO_MEM:
4214 case PTR_TO_FUNC:
4215 case PTR_TO_MAP_KEY:
4216 return true;
4217 default:
4218 return false;
4219 }
4220}
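/* Note (illustrative, assuming the usual type-flag encoding): a register
 * typed PTR_TO_MAP_VALUE | PTR_MAYBE_NULL is still spillable, because
 * base_type() strips the modifier flags before the switch above.
 */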
4221
4222/* Does this register contain a constant zero? */
4223static bool register_is_null(struct bpf_reg_state *reg)
4224{
4225 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
4226}
4227
4228static bool register_is_const(struct bpf_reg_state *reg)
4229{
4230 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
4231}
4232
4233static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
4234{
4235 return tnum_is_unknown(reg->var_off) &&
4236 reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
4237 reg->umin_value == 0 && reg->umax_value == U64_MAX &&
4238 reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
4239 reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
4240}
4241
4242static bool register_is_bounded(struct bpf_reg_state *reg)
4243{
4244 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
4245}
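/* Illustrative examples: after "r1 = 42", register_is_const() is true
 * (var_off is a constant tnum); after "r1 &= 0xff" the register is
 * bounded (umax_value == 255) but no longer const; a freshly unknown
 * scalar with full 64-bit and 32-bit ranges is the only case
 * __is_scalar_unbounded() accepts.
 */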
4246
4247static bool __is_pointer_value(bool allow_ptr_leaks,
4248 const struct bpf_reg_state *reg)
4249{
4250 if (allow_ptr_leaks)
4251 return false;
4252
4253 return reg->type != SCALAR_VALUE;
4254}
4255
4256/* Copy src state preserving dst->parent and dst->live fields */
4257static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
4258{
4259 struct bpf_reg_state *parent = dst->parent;
4260 enum bpf_reg_liveness live = dst->live;
4261
4262 *dst = *src;
4263 dst->parent = parent;
4264 dst->live = live;
4265}
4266
4267static void save_register_state(struct bpf_func_state *state,
4268 int spi, struct bpf_reg_state *reg,
4269 int size)
4270{
4271 int i;
4272
4273 copy_register_state(&state->stack[spi].spilled_ptr, reg);
4274 if (size == BPF_REG_SIZE)
4275 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
4276
4277 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
4278 state->stack[spi].slot_type[i - 1] = STACK_SPILL;
4279
4280 /* size < 8 bytes spill */
4281 for (; i; i--)
4282 scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
4283}
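/* Worked example: spilling a bounded scalar with size == 4 to fp-8
 * copies the register into stack[spi].spilled_ptr, marks slot_type
 * bytes [7..4] STACK_SPILL and scrubs bytes [3..0]; REG_LIVE_WRITTEN is
 * only set for a full BPF_REG_SIZE (8-byte) spill.
 */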
4284
4285static bool is_bpf_st_mem(struct bpf_insn *insn)
4286{
4287 return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
4288}
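/* For example, "*(u64 *)(r10 - 8) = 42" is encoded as
 * BPF_ST | BPF_MEM | BPF_DW with an immediate operand and matches here;
 * "*(u64 *)(r10 - 8) = r1" is class BPF_STX and does not.
 */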
4289
4290/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
4291 * stack boundary and alignment are checked in check_mem_access()
4292 */
4293static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
4294 /* stack frame we're writing to */
4295 struct bpf_func_state *state,
4296 int off, int size, int value_regno,
4297 int insn_idx)
4298{
4299 struct bpf_func_state *cur; /* state of the current function */
4300 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
4301 struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
4302 struct bpf_reg_state *reg = NULL;
4303 u32 dst_reg = insn->dst_reg;
4304
4305 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
4306 if (err)
4307 return err;
4308 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
4309 * so it's an aligned access and [off, off + size) is within stack limits
4310 */
4311 if (!env->allow_ptr_leaks &&
4312 state->stack[spi].slot_type[0] == STACK_SPILL &&
4313 size != BPF_REG_SIZE) {
4314 verbose(env, "attempt to corrupt spilled pointer on stack\n");
4315 return -EACCES;
4316 }
4317
4318 cur = env->cur_state->frame[env->cur_state->curframe];
4319 if (value_regno >= 0)
4320 reg = &cur->regs[value_regno];
4321 if (!env->bypass_spec_v4) {
4322 bool sanitize = reg && is_spillable_regtype(reg->type);
4323
4324 for (i = 0; i < size; i++) {
4325 u8 type = state->stack[spi].slot_type[i];
4326
4327 if (type != STACK_MISC && type != STACK_ZERO) {
4328 sanitize = true;
4329 break;
4330 }
4331 }
4332
4333 if (sanitize)
4334 env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
4335 }
4336
4337 err = destroy_if_dynptr_stack_slot(env, state, spi);
4338 if (err)
4339 return err;
4340
4341 mark_stack_slot_scratched(env, spi);
4342 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
4343 !register_is_null(reg) && env->bpf_capable) {
4344 if (dst_reg != BPF_REG_FP) {
4345 /* The backtracking logic can only recognize explicit
4346 * stack slot address like [fp - 8]. Any other spill of a
4347 * scalar via a different register has to be conservative.
4348 * Backtrack from here and mark all registers as precise
4349 * that contributed into 'reg' being a constant.
4350 */
4351 err = mark_chain_precision(env, value_regno);
4352 if (err)
4353 return err;
4354 }
4355 save_register_state(state, spi, reg, size);
4356 /* Break the relation on a narrowing spill. */
4357 if (fls64(reg->umax_value) > BITS_PER_BYTE * size)
4358 state->stack[spi].spilled_ptr.id = 0;
4359 } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
4360 insn->imm != 0 && env->bpf_capable) {
4361 struct bpf_reg_state fake_reg = {};
4362
4363 __mark_reg_known(&fake_reg, (u32)insn->imm);
4364 fake_reg.type = SCALAR_VALUE;
4365 save_register_state(state, spi, &fake_reg, size);
4366 } else if (reg && is_spillable_regtype(reg->type)) {
4367 /* register containing pointer is being spilled into stack */
4368 if (size != BPF_REG_SIZE) {
4369 verbose_linfo(env, insn_idx, "; ");
4370 verbose(env, "invalid size of register spill\n");
4371 return -EACCES;
4372 }
4373 if (state != cur && reg->type == PTR_TO_STACK) {
4374 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
4375 return -EINVAL;
4376 }
4377 save_register_state(state, spi, reg, size);
4378 } else {
4379 u8 type = STACK_MISC;
4380
4381 /* regular write of data into stack destroys any spilled ptr */
4382 state->stack[spi].spilled_ptr.type = NOT_INIT;
4383 /* Mark slots as STACK_MISC if they belonged to spilled ptr/dynptr/iter. */
4384 if (is_stack_slot_special(&state->stack[spi]))
4385 for (i = 0; i < BPF_REG_SIZE; i++)
4386 scrub_spilled_slot(&state->stack[spi].slot_type[i]);
4387
4388 /* only mark the slot as written if all 8 bytes were written
4389 * otherwise read propagation may incorrectly stop too soon
4390 * when stack slots are partially written.
4391 * This heuristic means that read propagation will be
4392 * conservative, since it will add reg_live_read marks
4393 * to stack slots all the way to first state when programs
4394 * writes+reads less than 8 bytes
4395 */
4396 if (size == BPF_REG_SIZE)
4397 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
4398
4399 /* when we zero initialize stack slots mark them as such */
4400 if ((reg && register_is_null(reg)) ||
4401 (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
4402 /* backtracking doesn't work for STACK_ZERO yet. */
4403 err = mark_chain_precision(env, value_regno);
4404 if (err)
4405 return err;
4406 type = STACK_ZERO;
4407 }
4408
4409 /* Mark slots affected by this stack write. */
4410 for (i = 0; i < size; i++)
4411 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
4412 type;
4413 }
4414 return 0;
4415}
4416
4417/* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
4418 * known to contain a variable offset.
4419 * This function checks whether the write is permitted and conservatively
4420 * tracks the effects of the write, considering that each stack slot in the
4421 * dynamic range is potentially written to.
4422 *
4423 * 'off' includes 'regno->off'.
4424 * 'value_regno' can be -1, meaning that an unknown value is being written to
4425 * the stack.
4426 *
4427 * Spilled pointers in range are not marked as written because we don't know
4428 * what's going to be actually written. This means that read propagation for
4429 * future reads cannot be terminated by this write.
4430 *
4431 * For privileged programs, uninitialized stack slots are considered
4432 * initialized by this write (even though we don't know exactly what offsets
4433 * are going to be written to). The idea is that we don't want the verifier to
4434 * reject future reads that access slots written to through variable offsets.
4435 */
4436static int check_stack_write_var_off(struct bpf_verifier_env *env,
4437 /* func where register points to */
4438 struct bpf_func_state *state,
4439 int ptr_regno, int off, int size,
4440 int value_regno, int insn_idx)
4441{
4442 struct bpf_func_state *cur; /* state of the current function */
4443 int min_off, max_off;
4444 int i, err;
4445 struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
4446 struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
4447 bool writing_zero = false;
4448 /* set if the fact that we're writing a zero is used to let any
4449 * stack slots remain STACK_ZERO
4450 */
4451 bool zero_used = false;
4452
4453 cur = env->cur_state->frame[env->cur_state->curframe];
4454 ptr_reg = &cur->regs[ptr_regno];
4455 min_off = ptr_reg->smin_value + off;
4456 max_off = ptr_reg->smax_value + off + size;
4457 if (value_regno >= 0)
4458 value_reg = &cur->regs[value_regno];
4459 if ((value_reg && register_is_null(value_reg)) ||
4460 (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
4461 writing_zero = true;
4462
4463 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
4464 if (err)
4465 return err;
4466
4467 for (i = min_off; i < max_off; i++) {
4468 int spi;
4469
4470 spi = __get_spi(i);
4471 err = destroy_if_dynptr_stack_slot(env, state, spi);
4472 if (err)
4473 return err;
4474 }
4475
4476 /* Variable offset writes destroy any spilled pointers in range. */
4477 for (i = min_off; i < max_off; i++) {
4478 u8 new_type, *stype;
4479 int slot, spi;
4480
4481 slot = -i - 1;
4482 spi = slot / BPF_REG_SIZE;
4483 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
4484 mark_stack_slot_scratched(env, spi);
4485
4486 if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) {
4487 /* Reject the write if range we may write to has not
4488 * been initialized beforehand. If we didn't reject
4489 * here, the ptr status would be erased below (even
4490 * though not all slots are actually overwritten),
4491 * possibly opening the door to leaks.
4492 *
4493 * We do however catch STACK_INVALID case below, and
4494 * only allow reading possibly uninitialized memory
4495 * later for CAP_PERFMON, as the write may not happen to
4496 * that slot.
4497 */
4498 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
4499 insn_idx, i);
4500 return -EINVAL;
4501 }
4502
4503 /* Erase all spilled pointers. */
4504 state->stack[spi].spilled_ptr.type = NOT_INIT;
4505
4506 /* Update the slot type. */
4507 new_type = STACK_MISC;
4508 if (writing_zero && *stype == STACK_ZERO) {
4509 new_type = STACK_ZERO;
4510 zero_used = true;
4511 }
4512 /* If the slot is STACK_INVALID, we check whether it's OK to
4513 * pretend that it will be initialized by this write. The slot
4514 * might not actually be written to, and so if we mark it as
4515 * initialized future reads might leak uninitialized memory.
4516 * For privileged programs, we will accept such reads to slots
4517 * that may or may not be written because, if we were to reject
4518 * them, the error would be too confusing.
4519 */
4520 if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
4521 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
4522 insn_idx, i);
4523 return -EINVAL;
4524 }
4525 *stype = new_type;
4526 }
4527 if (zero_used) {
4528 /* backtracking doesn't work for STACK_ZERO yet. */
4529 err = mark_chain_precision(env, value_regno);
4530 if (err)
4531 return err;
4532 }
4533 return 0;
4534}
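/* Worked example (hypothetical program): with r1 known to lie in
 * [-16, -8], the sequence
 * r2 = r10
 * r2 += r1
 * *(u64 *)(r2 + 0) = 0
 * gives min_off = -16 and max_off = 0, so slots fp-16..fp-1 are in
 * range: any spilled pointers there are erased and each byte becomes
 * STACK_MISC, or stays STACK_ZERO since a zero is being written.
 */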
4535
4536/* When register 'dst_regno' is assigned some values from stack[min_off,
4537 * max_off), we set the register's type according to the types of the
4538 * respective stack slots. If all the stack values are known to be zeros, then
4539 * so is the destination reg. Otherwise, the register is considered to be
4540 * SCALAR. This function does not deal with register filling; the caller must
4541 * ensure that all spilled registers in the stack range have been marked as
4542 * read.
4543 */
4544static void mark_reg_stack_read(struct bpf_verifier_env *env,
4545 /* func where src register points to */
4546 struct bpf_func_state *ptr_state,
4547 int min_off, int max_off, int dst_regno)
4548{
4549 struct bpf_verifier_state *vstate = env->cur_state;
4550 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4551 int i, slot, spi;
4552 u8 *stype;
4553 int zeros = 0;
4554
4555 for (i = min_off; i < max_off; i++) {
4556 slot = -i - 1;
4557 spi = slot / BPF_REG_SIZE;
4558 mark_stack_slot_scratched(env, spi);
4559 stype = ptr_state->stack[spi].slot_type;
4560 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
4561 break;
4562 zeros++;
4563 }
4564 if (zeros == max_off - min_off) {
4565 /* any access_size read into register is zero extended,
4566 * so the whole register == const_zero
4567 */
4568 __mark_reg_const_zero(&state->regs[dst_regno]);
4569 /* backtracking doesn't support STACK_ZERO yet,
4570 * so mark it precise here, so that later
4571 * backtracking can stop here.
4572 * Backtracking may not need this if this register
4573 * doesn't participate in pointer adjustment.
4574 * Forward propagation of precise flag is not
4575 * necessary either. This mark is only to stop
4576 * backtracking. Any register that contributed
4577 * to const 0 was marked precise before spill.
4578 */
4579 state->regs[dst_regno].precise = true;
4580 } else {
4581 /* have read misc data from the stack */
4582 mark_reg_unknown(env, state->regs, dst_regno);
4583 }
4584 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4585}
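/* Illustrative example: reading 8 bytes from fp-8 where all eight
 * slot_type bytes are STACK_ZERO makes zeros == max_off - min_off, so
 * the destination register becomes a precise constant zero; if even one
 * byte is STACK_MISC, the register is marked as an unknown scalar.
 */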
4586
4587/* Read the stack at 'off' and put the results into the register indicated by
4588 * 'dst_regno'. It handles reg filling if the addressed stack slot is a
4589 * spilled reg.
4590 *
4591 * 'dst_regno' can be -1, meaning that the read value is not going to a
4592 * register.
4593 *
4594 * The access is assumed to be within the current stack bounds.
4595 */
4596static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
4597 /* func where src register points to */
4598 struct bpf_func_state *reg_state,
4599 int off, int size, int dst_regno)
4600{
4601 struct bpf_verifier_state *vstate = env->cur_state;
4602 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4603 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
4604 struct bpf_reg_state *reg;
4605 u8 *stype, type;
4606
4607 stype = reg_state->stack[spi].slot_type;
4608 reg = &reg_state->stack[spi].spilled_ptr;
4609
4610 mark_stack_slot_scratched(env, spi);
4611
4612 if (is_spilled_reg(&reg_state->stack[spi])) {
4613 u8 spill_size = 1;
4614
4615 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
4616 spill_size++;
4617
4618 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
4619 if (reg->type != SCALAR_VALUE) {
4620 verbose_linfo(env, env->insn_idx, "; ");
4621 verbose(env, "invalid size of register fill\n");
4622 return -EACCES;
4623 }
4624
4625 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4626 if (dst_regno < 0)
4627 return 0;
4628
4629 if (!(off % BPF_REG_SIZE) && size == spill_size) {
4630 /* The earlier check_reg_arg() has decided the
4631 * subreg_def for this insn. Save it first.
4632 */
4633 s32 subreg_def = state->regs[dst_regno].subreg_def;
4634
4635 copy_register_state(&state->regs[dst_regno], reg);
4636 state->regs[dst_regno].subreg_def = subreg_def;
4637 } else {
4638 for (i = 0; i < size; i++) {
4639 type = stype[(slot - i) % BPF_REG_SIZE];
4640 if (type == STACK_SPILL)
4641 continue;
4642 if (type == STACK_MISC)
4643 continue;
4644 if (type == STACK_INVALID && env->allow_uninit_stack)
4645 continue;
4646 verbose(env, "invalid read from stack off %d+%d size %d\n",
4647 off, i, size);
4648 return -EACCES;
4649 }
4650 mark_reg_unknown(env, state->regs, dst_regno);
4651 }
4652 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4653 return 0;
4654 }
4655
4656 if (dst_regno >= 0) {
4657 /* restore register state from stack */
4658 copy_register_state(&state->regs[dst_regno], reg);
4659 /* mark reg as written since spilled pointer state likely
4660 * has its liveness marks cleared by is_state_visited()
4661 * which resets stack/reg liveness for state transitions
4662 */
4663 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4664 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
4665 /* If dst_regno==-1, the caller is asking us whether
4666 * it is acceptable to use this value as a SCALAR_VALUE
4667 * (e.g. for XADD).
4668 * We must not allow unprivileged callers to do that
4669 * with spilled pointers.
4670 */
4671 verbose(env, "leaking pointer from stack off %d\n",
4672 off);
4673 return -EACCES;
4674 }
4675 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4676 } else {
4677 for (i = 0; i < size; i++) {
4678 type = stype[(slot - i) % BPF_REG_SIZE];
4679 if (type == STACK_MISC)
4680 continue;
4681 if (type == STACK_ZERO)
4682 continue;
4683 if (type == STACK_INVALID && env->allow_uninit_stack)
4684 continue;
4685 verbose(env, "invalid read from stack off %d+%d size %d\n",
4686 off, i, size);
4687 return -EACCES;
4688 }
4689 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4690 if (dst_regno >= 0)
4691 mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
17a52670 4692 }
4693 return 0;
4694}
4695
4696enum bpf_access_src {
4697 ACCESS_DIRECT = 1, /* the access is performed by an instruction */
4698 ACCESS_HELPER = 2, /* the access is performed by a helper */
4699};
4700
4701static int check_stack_range_initialized(struct bpf_verifier_env *env,
4702 int regno, int off, int access_size,
4703 bool zero_size_allowed,
4704 enum bpf_access_src type,
4705 struct bpf_call_arg_meta *meta);
4706
4707static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
4708{
4709 return cur_regs(env) + regno;
4710}
4711
4712/* Read the stack at 'ptr_regno + off' and put the result into the register
4713 * 'dst_regno'.
4714 * 'off' includes the pointer register's fixed offset(i.e. 'ptr_regno.off'),
4715 * but not its variable offset.
4716 * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
4717 *
4718 * As opposed to check_stack_read_fixed_off, this function doesn't deal with
4719 * filling registers (i.e. reads of spilled register cannot be detected when
4720 * the offset is not fixed). We conservatively mark 'dst_regno' as containing
4721 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
4722 * offset; for a fixed offset check_stack_read_fixed_off should be used
4723 * instead.
4724 */
4725static int check_stack_read_var_off(struct bpf_verifier_env *env,
4726 int ptr_regno, int off, int size, int dst_regno)
4727{
4728 /* The state of the source register. */
4729 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
4730 struct bpf_func_state *ptr_state = func(env, reg);
4731 int err;
4732 int min_off, max_off;
4733
4734 /* Note that we pass a NULL meta, so raw access will not be permitted.
4735 */
4736 err = check_stack_range_initialized(env, ptr_regno, off, size,
4737 false, ACCESS_DIRECT, NULL);
4738 if (err)
4739 return err;
4740
4741 min_off = reg->smin_value + off;
4742 max_off = reg->smax_value + off;
4743 mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
4744 return 0;
4745}
4746
4747/* check_stack_read dispatches to check_stack_read_fixed_off or
4748 * check_stack_read_var_off.
4749 *
4750 * The caller must ensure that the offset falls within the allocated stack
4751 * bounds.
4752 *
4753 * 'dst_regno' is a register which will receive the value from the stack. It
4754 * can be -1, meaning that the read value is not going to a register.
4755 */
4756static int check_stack_read(struct bpf_verifier_env *env,
4757 int ptr_regno, int off, int size,
4758 int dst_regno)
4759{
4760 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
4761 struct bpf_func_state *state = func(env, reg);
4762 int err;
4763 /* Some accesses are only permitted with a static offset. */
4764 bool var_off = !tnum_is_const(reg->var_off);
4765
4766 /* The offset is required to be static when reads don't go to a
4767 * register, in order to not leak pointers (see
4768 * check_stack_read_fixed_off).
4769 */
4770 if (dst_regno < 0 && var_off) {
4771 char tn_buf[48];
4772
4773 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4774 verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
4775 tn_buf, off, size);
4776 return -EACCES;
4777 }
4778 /* Variable offset is prohibited for unprivileged mode for simplicity
4779 * since it requires corresponding support in Spectre masking for stack
4780 * ALU. See also retrieve_ptr_limit(). The check in
4781 * check_stack_access_for_ptr_arithmetic() called by
4782 * adjust_ptr_min_max_vals() prevents users from creating stack pointers
4783 * with variable offsets, therefore no check is required here. Further,
4784 * just checking it here would be insufficient as speculative stack
4785 * writes could still lead to unsafe speculative behaviour.
4786 */
4787 if (!var_off) {
4788 off += reg->var_off.value;
4789 err = check_stack_read_fixed_off(env, state, off, size,
4790 dst_regno);
4791 } else {
4792 /* Variable offset stack reads need more conservative handling
4793 * than fixed offset ones. Note that dst_regno >= 0 on this
4794 * branch.
4795 */
4796 err = check_stack_read_var_off(env, ptr_regno, off, size,
4797 dst_regno);
4798 }
4799 return err;
4800}
4801

/* check_stack_write dispatches to check_stack_write_fixed_off or
 * check_stack_write_var_off.
 *
 * 'ptr_regno' is the register used as a pointer into the stack.
 * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
 * 'value_regno' is the register whose value we're writing to the stack. It can
 * be -1, meaning that we're not writing from a register.
 *
 * The caller must ensure that the offset falls within the maximum stack size.
 */
static int check_stack_write(struct bpf_verifier_env *env,
			     int ptr_regno, int off, int size,
			     int value_regno, int insn_idx)
{
	struct bpf_reg_state *reg = reg_state(env, ptr_regno);
	struct bpf_func_state *state = func(env, reg);
	int err;

	if (tnum_is_const(reg->var_off)) {
		off += reg->var_off.value;
		err = check_stack_write_fixed_off(env, state, off, size,
						  value_regno, insn_idx);
	} else {
		/* Variable offset stack writes need more conservative handling
		 * than fixed offset ones.
		 */
		err = check_stack_write_var_off(env, state,
						ptr_regno, off, size,
						value_regno, insn_idx);
	}
	return err;
}

static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
				 int off, int size, enum bpf_access_type type)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_map *map = regs[regno].map_ptr;
	u32 cap = bpf_map_flags_to_cap(map);

	if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
		verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}

	if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
		verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}

	return 0;
}

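/* Illustrative example only: a map created from user space with the
 * BPF_F_RDONLY_PROG flag loses BPF_MAP_CAN_WRITE in its capability mask,
 * so any BPF_WRITE against its values is rejected by the check above.
 */
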
/* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
static int __check_mem_access(struct bpf_verifier_env *env, int regno,
			      int off, int size, u32 mem_size,
			      bool zero_size_allowed)
{
	bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
	struct bpf_reg_state *reg;

	if (off >= 0 && size_ok && (u64)off + size <= mem_size)
		return 0;

	reg = &cur_regs(env)[regno];
	switch (reg->type) {
	case PTR_TO_MAP_KEY:
		verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
			mem_size, off, size);
		break;
	case PTR_TO_MAP_VALUE:
		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
			mem_size, off, size);
		break;
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET_END:
		verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
			off, size, regno, reg->id, off, mem_size);
		break;
	case PTR_TO_MEM:
	default:
		verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
			mem_size, off, size);
	}

	return -EACCES;
}

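/* Illustrative example only: with mem_size = 64, off = 60 and size = 8 the
 * check above fails because (u64)off + size = 68 > 64; off = 56 with
 * size = 8 exactly fits and passes.
 */
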
/* check read/write into a memory region with possible variable offset */
static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
				   int off, int size, u32 mem_size,
				   bool zero_size_allowed)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg = &state->regs[regno];
	int err;

	/* We may have adjusted the register pointing to memory region, so we
	 * need to try adding each of min_value and max_value to off
	 * to make sure our theoretical access will be safe.
	 *
	 * The minimum value is only important with signed
	 * comparisons where we can't assume the floor of a
	 * value is 0. If we are using signed variables for our
	 * indexes we need to make sure that whatever we use
	 * will have a set floor within our range.
	 */
	if (reg->smin_value < 0 &&
	    (reg->smin_value == S64_MIN ||
	     (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
	     reg->smin_value + off < 0)) {
		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
			regno);
		return -EACCES;
	}
	err = __check_mem_access(env, regno, reg->smin_value + off, size,
				 mem_size, zero_size_allowed);
	if (err) {
		verbose(env, "R%d min value is outside of the allowed memory range\n",
			regno);
		return err;
	}

	/* If we haven't set a max value then we need to bail since we can't be
	 * sure we won't do bad things.
	 * If reg->umax_value + off could overflow, treat that as unbounded too.
	 */
	if (reg->umax_value >= BPF_MAX_VAR_OFF) {
		verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
			regno);
		return -EACCES;
	}
	err = __check_mem_access(env, regno, reg->umax_value + off, size,
				 mem_size, zero_size_allowed);
	if (err) {
		verbose(env, "R%d max value is outside of the allowed memory range\n",
			regno);
		return err;
	}

	return 0;
}

static int __check_ptr_off_reg(struct bpf_verifier_env *env,
			       const struct bpf_reg_state *reg, int regno,
			       bool fixed_off_ok)
{
	/* Access to this pointer-typed register or passing it to a helper
	 * is only allowed in its original, unmodified form.
	 */

	if (reg->off < 0) {
		verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
			reg_type_str(env, reg->type), regno, reg->off);
		return -EACCES;
	}

	if (!fixed_off_ok && reg->off) {
		verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
			reg_type_str(env, reg->type), regno, reg->off);
		return -EACCES;
	}

	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "variable %s access var_off=%s disallowed\n",
			reg_type_str(env, reg->type), tn_buf);
		return -EACCES;
	}

	return 0;
}

int check_ptr_off_reg(struct bpf_verifier_env *env,
		      const struct bpf_reg_state *reg, int regno)
{
	return __check_ptr_off_reg(env, reg, regno, false);
}

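/* Illustrative example only: given a PTR_TO_CTX in r1, the sequence
 *
 *	r1 += 8
 *	r2 = *(u32 *)(r1 + 0)
 *
 * is rejected by the reg->off check above ("dereference of modified ...
 * ptr"), since such pointers must be used in their original form.
 */
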
static int map_kptr_match_type(struct bpf_verifier_env *env,
			       struct btf_field *kptr_field,
			       struct bpf_reg_state *reg, u32 regno)
{
	const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id);
	int perm_flags;
	const char *reg_name = "";

	if (btf_is_kernel(reg->btf)) {
		perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU;

		/* Only unreferenced case accepts untrusted pointers */
		if (kptr_field->type == BPF_KPTR_UNREF)
			perm_flags |= PTR_UNTRUSTED;
	} else {
		perm_flags = PTR_MAYBE_NULL | MEM_ALLOC;
	}

	if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
		goto bad_type;

	/* We need to verify reg->type and reg->btf, before accessing reg->btf */
	reg_name = btf_type_name(reg->btf, reg->btf_id);

	/* For ref_ptr case, release function check should ensure we get one
	 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the
	 * normal store of unreferenced kptr, we must ensure var_off is zero.
	 * Since ref_ptr cannot be accessed directly by BPF insns, checks for
	 * reg->off and reg->ref_obj_id are not needed here.
	 */
	if (__check_ptr_off_reg(env, reg, regno, true))
		return -EACCES;

	/* A full type match is needed, as BTF can be vmlinux, module or prog BTF, and
	 * we also need to take into account the reg->off.
	 *
	 * We want to support cases like:
	 *
	 * struct foo {
	 *         struct bar br;
	 *         struct baz bz;
	 * };
	 *
	 * struct foo *v;
	 * v = func();	      // PTR_TO_BTF_ID
	 * val->foo = v;      // reg->off is zero, btf and btf_id match type
	 * val->bar = &v->br; // reg->off is still zero, but we need to retry with
	 *		      // first member type of struct after comparison fails
	 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
	 *		      // to match type
	 *
	 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off
	 * is zero. We must also ensure that btf_struct_ids_match does not walk
	 * the struct to match type against first member of struct, i.e. reject
	 * second case from above. Hence, when type is BPF_KPTR_REF, we set
	 * strict mode to true for type match.
	 */
	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
				  kptr_field->kptr.btf, kptr_field->kptr.btf_id,
				  kptr_field->type == BPF_KPTR_REF))
		goto bad_type;
	return 0;
bad_type:
	verbose(env, "invalid kptr access, R%d type=%s%s ", regno,
		reg_type_str(env, reg->type), reg_name);
	verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
	if (kptr_field->type == BPF_KPTR_UNREF)
		verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED),
			targ_name);
	else
		verbose(env, "\n");
	return -EINVAL;
}

/* Non-sleepable programs, and sleepable programs with an explicit
 * bpf_rcu_read_lock(), can dereference RCU-protected pointers and the
 * result is PTR_TRUSTED.
 */
static bool in_rcu_cs(struct bpf_verifier_env *env)
{
	return env->cur_state->active_rcu_lock || !env->prog->aux->sleepable;
}

/* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
BTF_SET_START(rcu_protected_types)
BTF_ID(struct, prog_test_ref_kfunc)
BTF_ID(struct, cgroup)
BTF_ID(struct, bpf_cpumask)
BTF_ID(struct, task_struct)
BTF_SET_END(rcu_protected_types)

static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
{
	if (!btf_is_kernel(btf))
		return false;
	return btf_id_set_contains(&rcu_protected_types, btf_id);
}

static bool rcu_safe_kptr(const struct btf_field *field)
{
	const struct btf_field_kptr *kptr = &field->kptr;

	return field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id);
}

static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
				 int value_regno, int insn_idx,
				 struct btf_field *kptr_field)
{
	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
	int class = BPF_CLASS(insn->code);
	struct bpf_reg_state *val_reg;

	/* Things we already checked for in check_map_access and caller:
	 * - Reject cases where variable offset may touch kptr
	 * - size of access (must be BPF_DW)
	 * - tnum_is_const(reg->var_off)
	 * - kptr_field->offset == off + reg->var_off.value
	 */
	/* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */
	if (BPF_MODE(insn->code) != BPF_MEM) {
		verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n");
		return -EACCES;
	}

	/* We only allow loading referenced kptr, since it will be marked as
	 * untrusted, similar to unreferenced kptr.
	 */
	if (class != BPF_LDX && kptr_field->type == BPF_KPTR_REF) {
		verbose(env, "store to referenced kptr disallowed\n");
		return -EACCES;
	}

	if (class == BPF_LDX) {
		val_reg = reg_state(env, value_regno);
		/* We can simply mark the value_regno receiving the pointer
		 * value from map as PTR_TO_BTF_ID, with the correct type.
		 */
		mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf,
				kptr_field->kptr.btf_id,
				rcu_safe_kptr(kptr_field) && in_rcu_cs(env) ?
				PTR_MAYBE_NULL | MEM_RCU :
				PTR_MAYBE_NULL | PTR_UNTRUSTED);
		/* For mark_ptr_or_null_reg */
		val_reg->id = ++env->id_gen;
	} else if (class == BPF_STX) {
		val_reg = reg_state(env, value_regno);
		if (!register_is_null(val_reg) &&
		    map_kptr_match_type(env, kptr_field, val_reg, value_regno))
			return -EACCES;
	} else if (class == BPF_ST) {
		if (insn->imm) {
			verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n",
				kptr_field->offset);
			return -EACCES;
		}
	} else {
		verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n");
		return -EACCES;
	}
	return 0;
}

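/* Illustrative example only: in BPF C, for a map value declared as
 *
 *	struct map_value { struct task_struct __kptr *task; };
 *
 * the load "p = v->task;" is the BPF_LDX case above; inside an RCU
 * critical section the loaded register is marked
 * PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU, otherwise it is
 * PTR_MAYBE_NULL | PTR_UNTRUSTED.
 */
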
/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
			    int off, int size, bool zero_size_allowed,
			    enum bpf_access_src src)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg = &state->regs[regno];
	struct bpf_map *map = reg->map_ptr;
	struct btf_record *rec;
	int err, i;

	err = check_mem_region_access(env, regno, off, size, map->value_size,
				      zero_size_allowed);
	if (err)
		return err;

	if (IS_ERR_OR_NULL(map->record))
		return 0;
	rec = map->record;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_field *field = &rec->fields[i];
		u32 p = field->offset;

		/* If any part of a field can be touched by load/store, reject
		 * this program. To check that [x1, x2) overlaps with [y1, y2),
		 * it is sufficient to check x1 < y2 && y1 < x2.
		 */
		if (reg->smin_value + off < p + btf_field_type_size(field->type) &&
		    p < reg->umax_value + off + size) {
			switch (field->type) {
			case BPF_KPTR_UNREF:
			case BPF_KPTR_REF:
				if (src != ACCESS_DIRECT) {
					verbose(env, "kptr cannot be accessed indirectly by helper\n");
					return -EACCES;
				}
				if (!tnum_is_const(reg->var_off)) {
					verbose(env, "kptr access cannot have variable offset\n");
					return -EACCES;
				}
				if (p != off + reg->var_off.value) {
					verbose(env, "kptr access misaligned expected=%u off=%llu\n",
						p, off + reg->var_off.value);
					return -EACCES;
				}
				if (size != bpf_size_to_bytes(BPF_DW)) {
					verbose(env, "kptr access size must be BPF_DW\n");
					return -EACCES;
				}
				break;
			default:
				verbose(env, "%s cannot be accessed directly by load/store\n",
					btf_field_type_name(field->type));
				return -EACCES;
			}
		}
	}
	return 0;
}

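/* Illustrative example only: for an 8-byte kptr field at p = 16 and an
 * access covering [off + smin, off + umax + size) = [8, 24), the overlap
 * test above holds (8 < 24 && 16 < 24), so the access must pass the exact
 * offset and BPF_DW size checks or the program is rejected.
 */
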
#define MAX_PACKET_OFF 0xffff

static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
				       const struct bpf_call_arg_meta *meta,
				       enum bpf_access_type t)
{
	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);

	switch (prog_type) {
	/* Program types only with direct read access go here! */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_CGROUP_SKB:
		if (t == BPF_WRITE)
			return false;
		fallthrough;

	/* Program types with direct read + write access go here! */
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_SK_MSG:
		if (meta)
			return meta->pkt_access;

		env->seen_direct_write = true;
		return true;

	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		if (t == BPF_WRITE)
			env->seen_direct_write = true;

		return true;

	default:
		return false;
	}
}

5254
f1174f77 5255static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 5256 int size, bool zero_size_allowed)
f1174f77 5257{
638f5b90 5258 struct bpf_reg_state *regs = cur_regs(env);
f1174f77
EC
5259 struct bpf_reg_state *reg = &regs[regno];
5260 int err;
5261
5262 /* We may have added a variable offset to the packet pointer; but any
5263 * reg->range we have comes after that. We are only checking the fixed
5264 * offset.
5265 */
5266
5267 /* We don't allow negative numbers, because we aren't tracking enough
5268 * detail to prove they're safe.
5269 */
b03c9f9f 5270 if (reg->smin_value < 0) {
61bd5218 5271 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
f1174f77
EC
5272 regno);
5273 return -EACCES;
5274 }
6d94e741
AS
5275
5276 err = reg->range < 0 ? -EINVAL :
5277 __check_mem_access(env, regno, off, size, reg->range,
457f4436 5278 zero_size_allowed);
f1174f77 5279 if (err) {
61bd5218 5280 verbose(env, "R%d offset is outside of the packet\n", regno);
f1174f77
EC
5281 return err;
5282 }
e647815a 5283
457f4436 5284 /* __check_mem_access has made sure "off + size - 1" is within u16.
e647815a
JW
5285 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
5286 * otherwise find_good_pkt_pointers would have refused to set range info
457f4436 5287 * that __check_mem_access would have rejected this pkt access.
e647815a
JW
5288 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
5289 */
5290 env->prog->aux->max_pkt_offset =
5291 max_t(u32, env->prog->aux->max_pkt_offset,
5292 off + reg->umax_value + size - 1);
5293
f1174f77
EC
5294 return err;
5295}
5296
/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
			    struct btf **btf, u32 *btf_id)
{
	struct bpf_insn_access_aux info = {
		.reg_type = *reg_type,
		.log = &env->log,
	};

	if (env->ops->is_valid_access &&
	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
		/* A non zero info.ctx_field_size indicates that this field is a
		 * candidate for later verifier transformation to load the whole
		 * field and then apply a mask when accessed with a narrower
		 * access than actual ctx access size. A zero info.ctx_field_size
		 * will only allow for whole field access and rejects any other
		 * type of narrower access.
		 */
		*reg_type = info.reg_type;

		if (base_type(*reg_type) == PTR_TO_BTF_ID) {
			*btf = info.btf;
			*btf_id = info.btf_id;
		} else {
			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
		}
		/* remember the offset of last byte accessed in ctx */
		if (env->prog->aux->max_ctx_offset < off + size)
			env->prog->aux->max_ctx_offset = off + size;
		return 0;
	}

	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
	return -EACCES;
}

static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
				  int size)
{
	if (size < 0 || off < 0 ||
	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
		verbose(env, "invalid access to flow keys off=%d size=%d\n",
			off, size);
		return -EACCES;
	}
	return 0;
}

static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
			     u32 regno, int off, int size,
			     enum bpf_access_type t)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
	struct bpf_insn_access_aux info = {};
	bool valid;

	if (reg->smin_value < 0) {
		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
			regno);
		return -EACCES;
	}

	switch (reg->type) {
	case PTR_TO_SOCK_COMMON:
		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
		break;
	case PTR_TO_SOCKET:
		valid = bpf_sock_is_valid_access(off, size, t, &info);
		break;
	case PTR_TO_TCP_SOCK:
		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
		break;
	case PTR_TO_XDP_SOCK:
		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
		break;
	default:
		valid = false;
	}

	if (valid) {
		env->insn_aux_data[insn_idx].ctx_field_size =
			info.ctx_field_size;
		return 0;
	}

	verbose(env, "R%d invalid %s access off=%d size=%d\n",
		regno, reg_type_str(env, reg->type), off, size);

	return -EACCES;
}

static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
{
	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
}

static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return reg->type == PTR_TO_CTX;
}

static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return type_is_sk_pointer(reg->type);
}

static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	return type_is_pkt_pointer(reg->type);
}

static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
{
	const struct bpf_reg_state *reg = reg_state(env, regno);

	/* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
	return reg->type == PTR_TO_FLOW_KEYS;
}

static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
#ifdef CONFIG_NET
	[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
	[PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	[PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
#endif
	[CONST_PTR_TO_MAP] = btf_bpf_map_id,
};

static bool is_trusted_reg(const struct bpf_reg_state *reg)
{
	/* A referenced register is always trusted. */
	if (reg->ref_obj_id)
		return true;

	/* Types listed in the reg2btf_ids are always trusted */
	if (reg2btf_ids[base_type(reg->type)])
		return true;

	/* If a register is not referenced, it is trusted if it has the
	 * MEM_ALLOC or PTR_TRUSTED type modifiers, and no others. Some of the
	 * other type modifiers may be safe, but we elect to take an opt-in
	 * approach here as some (e.g. PTR_UNTRUSTED and PTR_MAYBE_NULL) are
	 * not.
	 *
	 * Eventually, we should make PTR_TRUSTED the single source of truth
	 * for whether a register is trusted.
	 */
	return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS &&
	       !bpf_type_has_unsafe_modifiers(reg->type);
}

static bool is_rcu_reg(const struct bpf_reg_state *reg)
{
	return reg->type & MEM_RCU;
}

static void clear_trusted_flags(enum bpf_type_flag *flag)
{
	*flag &= ~(BPF_REG_TRUSTED_MODIFIERS | MEM_RCU);
}

static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg,
				   int off, int size, bool strict)
{
	struct tnum reg_off;
	int ip_align;

	/* Byte size accesses are always allowed. */
	if (!strict || size == 1)
		return 0;

	/* For platforms that do not have a Kconfig enabling
	 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
	 * NET_IP_ALIGN is universally set to '2'. And on platforms
	 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
	 * to this code only in strict mode where we want to emulate
	 * the NET_IP_ALIGN==2 checking. Therefore use an
	 * unconditional IP align value of '2'.
	 */
	ip_align = 2;

	reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
	if (!tnum_is_aligned(reg_off, size)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env,
			"misaligned packet access off %d+%s+%d+%d size %d\n",
			ip_align, tn_buf, reg->off, off, size);
		return -EACCES;
	}

	return 0;
}

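/* Illustrative example only: with ip_align = 2, reg->off = 0, off = 0 and
 * a constant var_off of 0, a 4-byte access has reg_off = 2, which is not
 * 4-byte aligned, so strict mode rejects it; off = 2 makes reg_off = 4
 * and the access is allowed.
 */
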
static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
				       const struct bpf_reg_state *reg,
				       const char *pointer_desc,
				       int off, int size, bool strict)
{
	struct tnum reg_off;

	/* Byte size accesses are always allowed. */
	if (!strict || size == 1)
		return 0;

	reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
	if (!tnum_is_aligned(reg_off, size)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
			pointer_desc, tn_buf, reg->off, off, size);
		return -EACCES;
	}

	return 0;
}

static int check_ptr_alignment(struct bpf_verifier_env *env,
			       const struct bpf_reg_state *reg, int off,
			       int size, bool strict_alignment_once)
{
	bool strict = env->strict_alignment || strict_alignment_once;
	const char *pointer_desc = "";

	switch (reg->type) {
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
		/* Special case, because of NET_IP_ALIGN. Given metadata sits
		 * right in front, treat it the very same way.
		 */
		return check_pkt_ptr_alignment(env, reg, off, size, strict);
	case PTR_TO_FLOW_KEYS:
		pointer_desc = "flow keys ";
		break;
	case PTR_TO_MAP_KEY:
		pointer_desc = "key ";
		break;
	case PTR_TO_MAP_VALUE:
		pointer_desc = "value ";
		break;
	case PTR_TO_CTX:
		pointer_desc = "context ";
		break;
	case PTR_TO_STACK:
		pointer_desc = "stack ";
		/* The stack spill tracking logic in check_stack_write_fixed_off()
		 * and check_stack_read_fixed_off() relies on stack accesses being
		 * aligned.
		 */
		strict = true;
		break;
	case PTR_TO_SOCKET:
		pointer_desc = "sock ";
		break;
	case PTR_TO_SOCK_COMMON:
		pointer_desc = "sock_common ";
		break;
	case PTR_TO_TCP_SOCK:
		pointer_desc = "tcp_sock ";
		break;
	case PTR_TO_XDP_SOCK:
		pointer_desc = "xdp_sock ";
		break;
	default:
		break;
	}
	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
					   strict);
}

static int update_stack_depth(struct bpf_verifier_env *env,
			      const struct bpf_func_state *func,
			      int off)
{
	u16 stack = env->subprog_info[func->subprogno].stack_depth;

	if (stack >= -off)
		return 0;

	/* update known max for given subprogram */
	env->subprog_info[func->subprogno].stack_depth = -off;
	return 0;
}

/* starting from main bpf function walk all instructions of the function
 * and recursively walk all callees that given function can call.
 * Ignore jump and exit insns.
 * Since recursion is prevented by check_cfg() this algorithm
 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
 */
static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx)
{
	struct bpf_subprog_info *subprog = env->subprog_info;
	struct bpf_insn *insn = env->prog->insnsi;
	int depth = 0, frame = 0, i, subprog_end;
	bool tail_call_reachable = false;
	int ret_insn[MAX_CALL_FRAMES];
	int ret_prog[MAX_CALL_FRAMES];
	int j;

	i = subprog[idx].start;
process_func:
	/* protect against potential stack overflow that might happen when
	 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
	 * depth for such case down to 256 so that the worst case scenario
	 * would result in 8k stack size (32 which is tailcall limit * 256 =
	 * 8k).
	 *
	 * To get the idea what might happen, see an example:
	 * func1 -> sub rsp, 128
	 * subfunc1 -> sub rsp, 256
	 * tailcall1 -> add rsp, 256
	 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
	 * subfunc2 -> sub rsp, 64
	 * subfunc22 -> sub rsp, 128
	 * tailcall2 -> add rsp, 128
	 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
	 *
	 * tailcall will unwind the current stack frame but it will not get rid
	 * of caller's stack as shown on the example above.
	 */
	if (idx && subprog[idx].has_tail_call && depth >= 256) {
		verbose(env,
			"tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
			depth);
		return -EACCES;
	}
	/* round up to 32-bytes, since this is granularity
	 * of interpreter stack size
	 */
	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
	if (depth > MAX_BPF_STACK) {
		verbose(env, "combined stack size of %d calls is %d. Too large\n",
			frame + 1, depth);
		return -EACCES;
	}
continue_func:
	subprog_end = subprog[idx + 1].start;
	for (; i < subprog_end; i++) {
		int next_insn, sidx;

		if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
			continue;
		/* remember insn and function to return to */
		ret_insn[frame] = i + 1;
		ret_prog[frame] = idx;

		/* find the callee */
		next_insn = i + insn[i].imm + 1;
		sidx = find_subprog(env, next_insn);
		if (sidx < 0) {
			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
				  next_insn);
			return -EFAULT;
		}
		if (subprog[sidx].is_async_cb) {
			if (subprog[sidx].has_tail_call) {
				verbose(env, "verifier bug. subprog has tail_call and async cb\n");
				return -EFAULT;
			}
			/* async callbacks don't increase bpf prog stack size unless called directly */
			if (!bpf_pseudo_call(insn + i))
				continue;
		}
		i = next_insn;
		idx = sidx;

		if (subprog[idx].has_tail_call)
			tail_call_reachable = true;

		frame++;
		if (frame >= MAX_CALL_FRAMES) {
			verbose(env, "the call stack of %d frames is too deep !\n",
				frame);
			return -E2BIG;
		}
		goto process_func;
	}
	/* if tail call got detected across bpf2bpf calls then mark each of the
	 * currently present subprog frames as tail call reachable subprogs;
	 * this info will be utilized by JIT so that we will be preserving the
	 * tail call counter throughout bpf2bpf calls combined with tailcalls
	 */
	if (tail_call_reachable)
		for (j = 0; j < frame; j++)
			subprog[ret_prog[j]].tail_call_reachable = true;
	if (subprog[0].tail_call_reachable)
		env->prog->aux->tail_call_reachable = true;

	/* end of for() loop means the last insn of the 'subprog'
	 * was reached. Doesn't matter whether it was JA or EXIT
	 */
	if (frame == 0)
		return 0;
	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
	frame--;
	i = ret_insn[frame];
	idx = ret_prog[frame];
	goto continue_func;
}

static int check_max_stack_depth(struct bpf_verifier_env *env)
{
	struct bpf_subprog_info *si = env->subprog_info;
	int ret;

	for (int i = 0; i < env->subprog_cnt; i++) {
		if (!i || si[i].is_async_cb) {
			ret = check_max_stack_depth_subprog(env, i);
			if (ret < 0)
				return ret;
		}
		continue;
	}
	return 0;
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
static int get_callee_stack_depth(struct bpf_verifier_env *env,
				  const struct bpf_insn *insn, int idx)
{
	int start = idx + insn->imm + 1, subprog;

	subprog = find_subprog(env, start);
	if (subprog < 0) {
		WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
			  start);
		return -EFAULT;
	}
	return env->subprog_info[subprog].stack_depth;
}
#endif

static int __check_buffer_access(struct bpf_verifier_env *env,
				 const char *buf_info,
				 const struct bpf_reg_state *reg,
				 int regno, int off, int size)
{
	if (off < 0) {
		verbose(env,
			"R%d invalid %s buffer access: off=%d, size=%d\n",
			regno, buf_info, off, size);
		return -EACCES;
	}
	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env,
			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
			regno, off, tn_buf);
		return -EACCES;
	}

	return 0;
}

static int check_tp_buffer_access(struct bpf_verifier_env *env,
				  const struct bpf_reg_state *reg,
				  int regno, int off, int size)
{
	int err;

	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
	if (err)
		return err;

	if (off + size > env->prog->aux->max_tp_access)
		env->prog->aux->max_tp_access = off + size;

	return 0;
}

static int check_buffer_access(struct bpf_verifier_env *env,
			       const struct bpf_reg_state *reg,
			       int regno, int off, int size,
			       bool zero_size_allowed,
			       u32 *max_access)
{
	const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
	int err;

	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
	if (err)
		return err;

	if (off + size > *max_access)
		*max_access = off + size;

	return 0;
}

/* BPF architecture zero extends alu32 ops into 64-bit registers */
static void zext_32_to_64(struct bpf_reg_state *reg)
{
	reg->var_off = tnum_subreg(reg->var_off);
	__reg_assign_32_into_64(reg);
}

/* truncate register to smaller size (in bytes)
 * must be called with size < BPF_REG_SIZE
 */
static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
{
	u64 mask;

	/* clear high bits in bit representation */
	reg->var_off = tnum_cast(reg->var_off, size);

	/* fix arithmetic bounds */
	mask = ((u64)1 << (size * 8)) - 1;
	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
		reg->umin_value &= mask;
		reg->umax_value &= mask;
	} else {
		reg->umin_value = 0;
		reg->umax_value = mask;
	}
	reg->smin_value = reg->umin_value;
	reg->smax_value = reg->umax_value;

	/* If size is smaller than 32bit register the 32bit register
	 * values are also truncated so we push 64-bit bounds into
	 * 32-bit bounds. Above were truncated < 32-bits already.
	 */
	if (size >= 4)
		return;
	__reg_combine_64_into_32(reg);
}

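/* Illustrative example only: truncating to size = 2 gives mask = 0xffff.
 * Bounds [0xfffe, 0x10003] disagree in the bits above the mask, so they
 * collapse to [0, 0xffff]; bounds [0x10003, 0x10008] agree in those bits,
 * so they simply become [3, 8].
 */
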
static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
{
	if (size == 1) {
		reg->smin_value = reg->s32_min_value = S8_MIN;
		reg->smax_value = reg->s32_max_value = S8_MAX;
	} else if (size == 2) {
		reg->smin_value = reg->s32_min_value = S16_MIN;
		reg->smax_value = reg->s32_max_value = S16_MAX;
	} else {
		/* size == 4 */
		reg->smin_value = reg->s32_min_value = S32_MIN;
		reg->smax_value = reg->s32_max_value = S32_MAX;
	}
	reg->umin_value = reg->u32_min_value = 0;
	reg->umax_value = U64_MAX;
	reg->u32_max_value = U32_MAX;
	reg->var_off = tnum_unknown;
}

static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
{
	s64 init_s64_max, init_s64_min, s64_max, s64_min, u64_cval;
	u64 top_smax_value, top_smin_value;
	u64 num_bits = size * 8;

	if (tnum_is_const(reg->var_off)) {
		u64_cval = reg->var_off.value;
		if (size == 1)
			reg->var_off = tnum_const((s8)u64_cval);
		else if (size == 2)
			reg->var_off = tnum_const((s16)u64_cval);
		else
			/* size == 4 */
			reg->var_off = tnum_const((s32)u64_cval);

		u64_cval = reg->var_off.value;
		reg->smax_value = reg->smin_value = u64_cval;
		reg->umax_value = reg->umin_value = u64_cval;
		reg->s32_max_value = reg->s32_min_value = u64_cval;
		reg->u32_max_value = reg->u32_min_value = u64_cval;
		return;
	}

	top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits;
	top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits;

	if (top_smax_value != top_smin_value)
		goto out;

	/* find the s64_max and s64_min after sign extension */
	if (size == 1) {
		init_s64_max = (s8)reg->smax_value;
		init_s64_min = (s8)reg->smin_value;
	} else if (size == 2) {
		init_s64_max = (s16)reg->smax_value;
		init_s64_min = (s16)reg->smin_value;
	} else {
		init_s64_max = (s32)reg->smax_value;
		init_s64_min = (s32)reg->smin_value;
	}

	s64_max = max(init_s64_max, init_s64_min);
	s64_min = min(init_s64_max, init_s64_min);

	/* both of s64_max/s64_min positive or negative */
	if ((s64_max >= 0) == (s64_min >= 0)) {
		reg->smin_value = reg->s32_min_value = s64_min;
		reg->smax_value = reg->s32_max_value = s64_max;
		reg->umin_value = reg->u32_min_value = s64_min;
		reg->umax_value = reg->u32_max_value = s64_max;
		reg->var_off = tnum_range(s64_min, s64_max);
		return;
	}

out:
	set_sext64_default_val(reg, size);
}

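/* Illustrative example only: sign-extending a known constant 0x80 from
 * size = 1 yields (s8)0x80 = -128, so all bounds become -128. For the
 * range [0x7f, 0x80] with size = 1, (s8)0x7f = 127 and (s8)0x80 = -128
 * straddle zero, so the conservative defaults from
 * set_sext64_default_val() are used instead.
 */
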
static void set_sext32_default_val(struct bpf_reg_state *reg, int size)
{
	if (size == 1) {
		reg->s32_min_value = S8_MIN;
		reg->s32_max_value = S8_MAX;
	} else {
		/* size == 2 */
		reg->s32_min_value = S16_MIN;
		reg->s32_max_value = S16_MAX;
	}
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}

static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size)
{
	s32 init_s32_max, init_s32_min, s32_max, s32_min, u32_val;
	u32 top_smax_value, top_smin_value;
	u32 num_bits = size * 8;

	if (tnum_is_const(reg->var_off)) {
		u32_val = reg->var_off.value;
		if (size == 1)
			reg->var_off = tnum_const((s8)u32_val);
		else
			reg->var_off = tnum_const((s16)u32_val);

		u32_val = reg->var_off.value;
		reg->s32_min_value = reg->s32_max_value = u32_val;
		reg->u32_min_value = reg->u32_max_value = u32_val;
		return;
	}

	top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits;
	top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits;

	if (top_smax_value != top_smin_value)
		goto out;

	/* find the s32_max and s32_min after sign extension */
	if (size == 1) {
		init_s32_max = (s8)reg->s32_max_value;
		init_s32_min = (s8)reg->s32_min_value;
	} else {
		/* size == 2 */
		init_s32_max = (s16)reg->s32_max_value;
		init_s32_min = (s16)reg->s32_min_value;
	}
	s32_max = max(init_s32_max, init_s32_min);
	s32_min = min(init_s32_max, init_s32_min);

	if ((s32_min >= 0) == (s32_max >= 0)) {
		reg->s32_min_value = s32_min;
		reg->s32_max_value = s32_max;
		reg->u32_min_value = (u32)s32_min;
		reg->u32_max_value = (u32)s32_max;
		return;
	}

out:
	set_sext32_default_val(reg, size);
}

static bool bpf_map_is_rdonly(const struct bpf_map *map)
{
	/* A map is considered read-only if the following conditions are true:
	 *
	 * 1) BPF program side cannot change any of the map content. The
	 *    BPF_F_RDONLY_PROG flag is throughout the lifetime of a map
	 *    and was set at map creation time.
	 * 2) The map value(s) have been initialized from user space by a
	 *    loader and then "frozen", such that no new map update/delete
	 *    operations from syscall side are possible for the rest of
	 *    the map's lifetime from that point onwards.
	 * 3) Any parallel/pending map update/delete operations from syscall
	 *    side have been completed. Only after that point, it's safe to
	 *    assume that map value(s) are immutable.
	 */
	return (map->map_flags & BPF_F_RDONLY_PROG) &&
	       READ_ONCE(map->frozen) &&
	       !bpf_map_write_active(map);
}

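/* Illustrative example only: a loader typically makes a map eligible for
 * the checks above by creating it with BPF_F_RDONLY_PROG in map_flags,
 * populating it via BPF_MAP_UPDATE_ELEM, and then issuing the
 * BPF_MAP_FREEZE command so that map->frozen becomes set.
 */
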
static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
			       bool is_ldsx)
{
	void *ptr;
	u64 addr;
	int err;

	err = map->ops->map_direct_value_addr(map, &addr, off);
	if (err)
		return err;
	ptr = (void *)(long)addr + off;

	switch (size) {
	case sizeof(u8):
		*val = is_ldsx ? (s64)*(s8 *)ptr : (u64)*(u8 *)ptr;
		break;
	case sizeof(u16):
		*val = is_ldsx ? (s64)*(s16 *)ptr : (u64)*(u16 *)ptr;
		break;
	case sizeof(u32):
		*val = is_ldsx ? (s64)*(s32 *)ptr : (u64)*(u32 *)ptr;
		break;
	case sizeof(u64):
		*val = *(u64 *)ptr;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

#define BTF_TYPE_SAFE_RCU(__type)  __PASTE(__type, __safe_rcu)
#define BTF_TYPE_SAFE_RCU_OR_NULL(__type)  __PASTE(__type, __safe_rcu_or_null)
#define BTF_TYPE_SAFE_TRUSTED(__type)  __PASTE(__type, __safe_trusted)

/*
 * Allow list a few fields as RCU trusted or fully trusted.
 * This logic doesn't allow mixed tagging and will be removed once GCC supports
 * btf_type_tag.
 */

/* RCU trusted: these fields are trusted in RCU CS and never NULL */
BTF_TYPE_SAFE_RCU(struct task_struct) {
	const cpumask_t *cpus_ptr;
	struct css_set __rcu *cgroups;
	struct task_struct __rcu *real_parent;
	struct task_struct *group_leader;
};

BTF_TYPE_SAFE_RCU(struct cgroup) {
	/* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */
	struct kernfs_node *kn;
};

BTF_TYPE_SAFE_RCU(struct css_set) {
	struct cgroup *dfl_cgrp;
};

/* RCU trusted: these fields are trusted in RCU CS and can be NULL */
BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct) {
	struct file __rcu *exe_file;
};

/* skb->sk, req->sk are not RCU protected, but we mark them as such
 * because bpf prog accessible sockets are SOCK_RCU_FREE.
 */
BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff) {
	struct sock *sk;
};

BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock) {
	struct sock *sk;
};

/* full trusted: these fields are trusted even outside of RCU CS and never NULL */
BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) {
	struct seq_file *seq;
};

BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) {
	struct bpf_iter_meta *meta;
	struct task_struct *task;
};

BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) {
	struct file *file;
};

BTF_TYPE_SAFE_TRUSTED(struct file) {
	struct inode *f_inode;
};

BTF_TYPE_SAFE_TRUSTED(struct dentry) {
	/* no negative dentry-s in places where bpf can see it */
	struct inode *d_inode;
};

BTF_TYPE_SAFE_TRUSTED(struct socket) {
	struct sock *sk;
};

static bool type_is_rcu(struct bpf_verifier_env *env,
			struct bpf_reg_state *reg,
			const char *field_name, u32 btf_id)
{
	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct task_struct));
	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct cgroup));
	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct css_set));

	return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu");
}

static bool type_is_rcu_or_null(struct bpf_verifier_env *env,
				struct bpf_reg_state *reg,
				const char *field_name, u32 btf_id)
{
	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct));
	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff));
	BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock));

	return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null");
}

static bool type_is_trusted(struct bpf_verifier_env *env,
			    struct bpf_reg_state *reg,
			    const char *field_name, u32 btf_id)
{
	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta));
	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task));
	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm));
	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file));
	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry));
	BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct socket));

	return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted");
}

static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
				   struct bpf_reg_state *regs,
				   int regno, int off, int size,
				   enum bpf_access_type atype,
				   int value_regno)
{
	struct bpf_reg_state *reg = regs + regno;
	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
	const char *field_name = NULL;
	enum bpf_type_flag flag = 0;
	u32 btf_id = 0;
	int ret;

	if (!env->allow_ptr_leaks) {
		verbose(env,
			"'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
			tname);
		return -EPERM;
	}
	if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) {
		verbose(env,
			"Cannot access kernel 'struct %s' from non-GPL compatible program\n",
			tname);
		return -EINVAL;
	}
	if (off < 0) {
		verbose(env,
			"R%d is ptr_%s invalid negative access: off=%d\n",
			regno, tname, off);
		return -EACCES;
	}
	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env,
			"R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
			regno, tname, off, tn_buf);
		return -EACCES;
	}

	if (reg->type & MEM_USER) {
		verbose(env,
			"R%d is ptr_%s access user memory: off=%d\n",
			regno, tname, off);
		return -EACCES;
	}

	if (reg->type & MEM_PERCPU) {
		verbose(env,
			"R%d is ptr_%s access percpu memory: off=%d\n",
			regno, tname, off);
		return -EACCES;
	}

	if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) {
		if (!btf_is_kernel(reg->btf)) {
			verbose(env, "verifier internal error: reg->btf must be kernel btf\n");
			return -EFAULT;
		}
		ret = env->ops->btf_struct_access(&env->log, reg, off, size);
	} else {
		/* Writes are permitted with default btf_struct_access for
		 * program allocated objects (which always have ref_obj_id > 0),
		 * but not for untrusted PTR_TO_BTF_ID | MEM_ALLOC.
		 */
		if (atype != BPF_READ && !type_is_ptr_alloc_obj(reg->type)) {
			verbose(env, "only read is supported\n");
			return -EACCES;
		}

		if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) &&
		    !reg->ref_obj_id) {
			verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
			return -EFAULT;
		}

		ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name);
	}

	if (ret < 0)
		return ret;

	if (ret != PTR_TO_BTF_ID) {
		/* just mark; */

	} else if (type_flag(reg->type) & PTR_UNTRUSTED) {
		/* If this is an untrusted pointer, all pointers formed by walking it
		 * also inherit the untrusted flag.
		 */
		flag = PTR_UNTRUSTED;

	} else if (is_trusted_reg(reg) || is_rcu_reg(reg)) {
		/* By default any pointer obtained from walking a trusted pointer is no
		 * longer trusted, unless the field being accessed has explicitly been
		 * marked as inheriting its parent's state of trust (either full or RCU).
		 * For example:
		 * 'cgroups' pointer is untrusted if task->cgroups dereference
		 * happened in a sleepable program outside of bpf_rcu_read_lock()
		 * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU).
		 * Note bpf_rcu_read_unlock() converts MEM_RCU pointers to PTR_UNTRUSTED.
		 *
		 * A regular RCU-protected pointer with __rcu tag can also be deemed
		 * trusted if we are in an RCU CS. Such pointer can be NULL.
		 */
		if (type_is_trusted(env, reg, field_name, btf_id)) {
			flag |= PTR_TRUSTED;
		} else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) {
			if (type_is_rcu(env, reg, field_name, btf_id)) {
				/* ignore __rcu tag and mark it MEM_RCU */
				flag |= MEM_RCU;
			} else if (flag & MEM_RCU ||
				   type_is_rcu_or_null(env, reg, field_name, btf_id)) {
				/* __rcu tagged pointers can be NULL */
				flag |= MEM_RCU | PTR_MAYBE_NULL;

				/* We always trust them */
				if (type_is_rcu_or_null(env, reg, field_name, btf_id) &&
				    flag & PTR_UNTRUSTED)
					flag &= ~PTR_UNTRUSTED;
			} else if (flag & (MEM_PERCPU | MEM_USER)) {
				/* keep as-is */
			} else {
				/* walking unknown pointers yields old deprecated PTR_TO_BTF_ID */
				clear_trusted_flags(&flag);
			}
		} else {
			/*
			 * If not in RCU CS or MEM_RCU pointer can be NULL then
			 * aggressively mark as untrusted otherwise such
			 * pointers will be plain PTR_TO_BTF_ID without flags
			 * and will be allowed to be passed into helpers for
			 * compat reasons.
			 */
			flag = PTR_UNTRUSTED;
		}
	} else {
		/* Old compat. Deprecated */
		clear_trusted_flags(&flag);
	}

	if (atype == BPF_READ && value_regno >= 0)
		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);

	return 0;
}

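/* Illustrative example only: for a trusted "struct task_struct *" in r1,
 * the load r2 = r1->cpus_ptr matches BTF_TYPE_SAFE_RCU(struct task_struct)
 * above, so inside an RCU critical section r2 is marked MEM_RCU rather
 * than being downgraded to PTR_UNTRUSTED.
 */
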
static int check_ptr_to_map_access(struct bpf_verifier_env *env,
				   struct bpf_reg_state *regs,
				   int regno, int off, int size,
				   enum bpf_access_type atype,
				   int value_regno)
{
	struct bpf_reg_state *reg = regs + regno;
	struct bpf_map *map = reg->map_ptr;
	struct bpf_reg_state map_reg;
	enum bpf_type_flag flag = 0;
	const struct btf_type *t;
	const char *tname;
	u32 btf_id;
	int ret;

	if (!btf_vmlinux) {
		verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
		return -ENOTSUPP;
	}

	if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
		verbose(env, "map_ptr access not supported for map type %d\n",
			map->map_type);
		return -ENOTSUPP;
	}

	t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
	tname = btf_name_by_offset(btf_vmlinux, t->name_off);

	if (!env->allow_ptr_leaks) {
		verbose(env,
			"'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
			tname);
		return -EPERM;
	}

	if (off < 0) {
		verbose(env, "R%d is %s invalid negative access: off=%d\n",
			regno, tname, off);
		return -EACCES;
	}

	if (atype != BPF_READ) {
		verbose(env, "only read from %s is supported\n", tname);
		return -EACCES;
	}

	/* Simulate access to a PTR_TO_BTF_ID */
	memset(&map_reg, 0, sizeof(map_reg));
	mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0);
	ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL);
	if (ret < 0)
		return ret;

	if (value_regno >= 0)
		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);

	return 0;
}

/* Check that the stack access at the given offset is within bounds. The
 * maximum valid offset is -1.
 *
 * The minimum valid offset is -MAX_BPF_STACK for writes, and
 * -state->allocated_stack for reads.
 */
static int check_stack_slot_within_bounds(int off,
					  struct bpf_func_state *state,
					  enum bpf_access_type t)
{
	int min_valid_off;

	if (t == BPF_WRITE)
		min_valid_off = -MAX_BPF_STACK;
	else
		min_valid_off = -state->allocated_stack;

	if (off < min_valid_off || off > -1)
		return -EACCES;
	return 0;
}

6367/* Check that the stack access at 'regno + off' falls within the maximum stack
6368 * bounds.
6369 *
6370 * 'off' includes `regno->offset`, but not its dynamic part (if any).
6371 */
6372static int check_stack_access_within_bounds(
6373 struct bpf_verifier_env *env,
6374 int regno, int off, int access_size,
61df10c7 6375 enum bpf_access_src src, enum bpf_access_type type)
01f810ac
AM
6376{
6377 struct bpf_reg_state *regs = cur_regs(env);
6378 struct bpf_reg_state *reg = regs + regno;
6379 struct bpf_func_state *state = func(env, reg);
6380 int min_off, max_off;
6381 int err;
6382 char *err_extra;
6383
6384 if (src == ACCESS_HELPER)
6385 /* We don't know if helpers are reading or writing (or both). */
6386 err_extra = " indirect access to";
6387 else if (type == BPF_READ)
6388 err_extra = " read from";
6389 else
6390 err_extra = " write to";
6391
6392 if (tnum_is_const(reg->var_off)) {
6393 min_off = reg->var_off.value + off;
6394 if (access_size > 0)
6395 max_off = min_off + access_size - 1;
6396 else
6397 max_off = min_off;
6398 } else {
6399 if (reg->smax_value >= BPF_MAX_VAR_OFF ||
6400 reg->smin_value <= -BPF_MAX_VAR_OFF) {
6401 verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
6402 err_extra, regno);
6403 return -EACCES;
6404 }
6405 min_off = reg->smin_value + off;
6406 if (access_size > 0)
6407 max_off = reg->smax_value + off + access_size - 1;
6408 else
6409 max_off = min_off;
6410 }
6411
6412 err = check_stack_slot_within_bounds(min_off, state, type);
6413 if (!err)
6414 err = check_stack_slot_within_bounds(max_off, state, type);
6415
6416 if (err) {
6417 if (tnum_is_const(reg->var_off)) {
6418 verbose(env, "invalid%s stack R%d off=%d size=%d\n",
6419 err_extra, regno, off, access_size);
6420 } else {
6421 char tn_buf[48];
6422
6423 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6424 verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
6425 err_extra, regno, tn_buf, access_size);
6426 }
6427 }
6428 return err;
6429}
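/* Worked example (hypothetical register state): for off == -16,
 * access_size == 8 and a variable offset with smin_value == -8 and
 * smax_value == -4, the checked window is min_off == -24 through
 * max_off == -13, and both endpoints must individually pass
 * check_stack_slot_within_bounds().
 */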
/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register whose value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
			    int off, int bpf_size, enum bpf_access_type t,
			    int value_regno, bool strict_alignment_once, bool is_ldsx)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = regs + regno;
	struct bpf_func_state *state;
	int size, err = 0;

	size = bpf_size_to_bytes(bpf_size);
	if (size < 0)
		return size;

	/* alignment checks will add in reg->off themselves */
	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
	if (err)
		return err;

	/* for access checks, reg->off is just part of off */
	off += reg->off;

	if (reg->type == PTR_TO_MAP_KEY) {
		if (t == BPF_WRITE) {
			verbose(env, "write to change key R%d not allowed\n", regno);
			return -EACCES;
		}

		err = check_mem_region_access(env, regno, off, size,
					      reg->map_ptr->key_size, false);
		if (err)
			return err;
		if (value_regno >= 0)
			mark_reg_unknown(env, regs, value_regno);
	} else if (reg->type == PTR_TO_MAP_VALUE) {
		struct btf_field *kptr_field = NULL;

		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into map\n", value_regno);
			return -EACCES;
		}
		err = check_map_access_type(env, regno, off, size, t);
		if (err)
			return err;
		err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
		if (err)
			return err;
		if (tnum_is_const(reg->var_off))
			kptr_field = btf_record_find(reg->map_ptr->record,
						     off + reg->var_off.value, BPF_KPTR);
		if (kptr_field) {
			err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field);
		} else if (t == BPF_READ && value_regno >= 0) {
			struct bpf_map *map = reg->map_ptr;

			/* if map is read-only, track its contents as scalars */
			if (tnum_is_const(reg->var_off) &&
			    bpf_map_is_rdonly(map) &&
			    map->ops->map_direct_value_addr) {
				int map_off = off + reg->var_off.value;
				u64 val = 0;

				err = bpf_map_direct_read(map, map_off, size,
							  &val, is_ldsx);
				if (err)
					return err;

				regs[value_regno].type = SCALAR_VALUE;
				__mark_reg_known(&regs[value_regno], val);
			} else {
				mark_reg_unknown(env, regs, value_regno);
			}
		}
	} else if (base_type(reg->type) == PTR_TO_MEM) {
		bool rdonly_mem = type_is_rdonly_mem(reg->type);

		if (type_may_be_null(reg->type)) {
			verbose(env, "R%d invalid mem access '%s'\n", regno,
				reg_type_str(env, reg->type));
			return -EACCES;
		}

		if (t == BPF_WRITE && rdonly_mem) {
			verbose(env, "R%d cannot write into %s\n",
				regno, reg_type_str(env, reg->type));
			return -EACCES;
		}

		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into mem\n", value_regno);
			return -EACCES;
		}

		err = check_mem_region_access(env, regno, off, size,
					      reg->mem_size, false);
		if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
			mark_reg_unknown(env, regs, value_regno);
	} else if (reg->type == PTR_TO_CTX) {
		enum bpf_reg_type reg_type = SCALAR_VALUE;
		struct btf *btf = NULL;
		u32 btf_id = 0;

		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}

		err = check_ptr_off_reg(env, reg, regno);
		if (err < 0)
			return err;

		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
				       &btf_id);
		if (err)
			verbose_linfo(env, insn_idx, "; ");
		if (!err && t == BPF_READ && value_regno >= 0) {
			/* ctx access returns either a scalar, or a
			 * PTR_TO_PACKET[_META,_END]. In the latter
			 * case, we know the offset is zero.
			 */
			if (reg_type == SCALAR_VALUE) {
				mark_reg_unknown(env, regs, value_regno);
			} else {
				mark_reg_known_zero(env, regs,
						    value_regno);
				if (type_may_be_null(reg_type))
					regs[value_regno].id = ++env->id_gen;
				/* A load of a ctx field could have a different
				 * actual load size than the one encoded in the
				 * insn. When the dst is PTR, it is for sure not
				 * a sub-register.
				 */
				regs[value_regno].subreg_def = DEF_NOT_SUBREG;
				if (base_type(reg_type) == PTR_TO_BTF_ID) {
					regs[value_regno].btf = btf;
					regs[value_regno].btf_id = btf_id;
				}
			}
			regs[value_regno].type = reg_type;
		}

	} else if (reg->type == PTR_TO_STACK) {
		/* Basic bounds checks. */
		err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
		if (err)
			return err;

		state = func(env, reg);
		err = update_stack_depth(env, state, off);
		if (err)
			return err;

		if (t == BPF_READ)
			err = check_stack_read(env, regno, off, size,
					       value_regno);
		else
			err = check_stack_write(env, regno, off, size,
						value_regno, insn_idx);
	} else if (reg_is_pkt_pointer(reg)) {
		if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
			verbose(env, "cannot write into packet\n");
			return -EACCES;
		}
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into packet\n",
				value_regno);
			return -EACCES;
		}
		err = check_packet_access(env, regno, off, size, false);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown(env, regs, value_regno);
	} else if (reg->type == PTR_TO_FLOW_KEYS) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into flow keys\n",
				value_regno);
			return -EACCES;
		}

		err = check_flow_keys_access(env, off, size);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown(env, regs, value_regno);
	} else if (type_is_sk_pointer(reg->type)) {
		if (t == BPF_WRITE) {
			verbose(env, "R%d cannot write into %s\n",
				regno, reg_type_str(env, reg->type));
			return -EACCES;
		}
		err = check_sock_access(env, insn_idx, regno, off, size, t);
		if (!err && value_regno >= 0)
			mark_reg_unknown(env, regs, value_regno);
	} else if (reg->type == PTR_TO_TP_BUFFER) {
		err = check_tp_buffer_access(env, reg, regno, off, size);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown(env, regs, value_regno);
	} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
		   !type_may_be_null(reg->type)) {
		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
					      value_regno);
	} else if (reg->type == CONST_PTR_TO_MAP) {
		err = check_ptr_to_map_access(env, regs, regno, off, size, t,
					      value_regno);
	} else if (base_type(reg->type) == PTR_TO_BUF) {
		bool rdonly_mem = type_is_rdonly_mem(reg->type);
		u32 *max_access;

		if (rdonly_mem) {
			if (t == BPF_WRITE) {
				verbose(env, "R%d cannot write into %s\n",
					regno, reg_type_str(env, reg->type));
				return -EACCES;
			}
			max_access = &env->prog->aux->max_rdonly_access;
		} else {
			max_access = &env->prog->aux->max_rdwr_access;
		}

		err = check_buffer_access(env, reg, regno, off, size, false,
					  max_access);

		if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
			mark_reg_unknown(env, regs, value_regno);
	} else {
		verbose(env, "R%d invalid mem access '%s'\n", regno,
			reg_type_str(env, reg->type));
		return -EACCES;
	}

	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
	    regs[value_regno].type == SCALAR_VALUE) {
		if (!is_ldsx)
			/* b/h/w load zero-extends, mark upper bits as known 0 */
			coerce_reg_to_size(&regs[value_regno], size);
		else
			coerce_reg_to_size_sx(&regs[value_regno], size);
	}
	return err;
}
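/* Example of the read-only map specialization above, from the BPF side
 * (a sketch): loads at a constant offset from a frozen map value are
 * tracked as known scalars instead of unknowns, so the verifier can
 * prune branches on them statically.
 *
 *	const volatile u32 cfg = 42;	// placed in frozen .rodata
 *
 *	SEC("xdp")
 *	int prog(struct xdp_md *ctx)
 *	{
 *		if (cfg == 42)		// provable via bpf_map_direct_read()
 *			return XDP_PASS;
 *		return XDP_DROP;	// this branch can be pruned
 *	}
 */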
6678
91c960b0 6679static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
17a52670 6680{
5ffa2550 6681 int load_reg;
17a52670
AS
6682 int err;
6683
5ca419f2
BJ
6684 switch (insn->imm) {
6685 case BPF_ADD:
6686 case BPF_ADD | BPF_FETCH:
981f94c3
BJ
6687 case BPF_AND:
6688 case BPF_AND | BPF_FETCH:
6689 case BPF_OR:
6690 case BPF_OR | BPF_FETCH:
6691 case BPF_XOR:
6692 case BPF_XOR | BPF_FETCH:
5ffa2550
BJ
6693 case BPF_XCHG:
6694 case BPF_CMPXCHG:
5ca419f2
BJ
6695 break;
6696 default:
91c960b0
BJ
6697 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
6698 return -EINVAL;
6699 }
6700
6701 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
6702 verbose(env, "invalid atomic operand size\n");
17a52670
AS
6703 return -EINVAL;
6704 }
6705
6706 /* check src1 operand */
dc503a8a 6707 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
6708 if (err)
6709 return err;
6710
6711 /* check src2 operand */
dc503a8a 6712 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
6713 if (err)
6714 return err;
6715
5ffa2550
BJ
6716 if (insn->imm == BPF_CMPXCHG) {
6717 /* Check comparison of R0 with memory location */
a82fe085
DB
6718 const u32 aux_reg = BPF_REG_0;
6719
6720 err = check_reg_arg(env, aux_reg, SRC_OP);
5ffa2550
BJ
6721 if (err)
6722 return err;
a82fe085
DB
6723
6724 if (is_pointer_value(env, aux_reg)) {
6725 verbose(env, "R%d leaks addr into mem\n", aux_reg);
6726 return -EACCES;
6727 }
5ffa2550
BJ
6728 }
6729
6bdf6abc 6730 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 6731 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
6bdf6abc
DB
6732 return -EACCES;
6733 }
6734
ca369602 6735 if (is_ctx_reg(env, insn->dst_reg) ||
4b5defde 6736 is_pkt_reg(env, insn->dst_reg) ||
46f8bc92
MKL
6737 is_flow_key_reg(env, insn->dst_reg) ||
6738 is_sk_reg(env, insn->dst_reg)) {
91c960b0 6739 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
2a159c6f 6740 insn->dst_reg,
c25b2ae1 6741 reg_type_str(env, reg_state(env, insn->dst_reg)->type));
f37a8cb8
DB
6742 return -EACCES;
6743 }
6744
37086bfd
BJ
6745 if (insn->imm & BPF_FETCH) {
6746 if (insn->imm == BPF_CMPXCHG)
6747 load_reg = BPF_REG_0;
6748 else
6749 load_reg = insn->src_reg;
6750
6751 /* check and record load of old value */
6752 err = check_reg_arg(env, load_reg, DST_OP);
6753 if (err)
6754 return err;
6755 } else {
6756 /* This instruction accesses a memory location but doesn't
6757 * actually load it into a register.
6758 */
6759 load_reg = -1;
6760 }
6761
7d3baf0a
DB
6762 /* Check whether we can read the memory, with second call for fetch
6763 * case to simulate the register fill.
6764 */
31fd8581 6765 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1f9a1ea8 6766 BPF_SIZE(insn->code), BPF_READ, -1, true, false);
7d3baf0a
DB
6767 if (!err && load_reg >= 0)
6768 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
6769 BPF_SIZE(insn->code), BPF_READ, load_reg,
1f9a1ea8 6770 true, false);
17a52670
AS
6771 if (err)
6772 return err;
6773
7d3baf0a 6774 /* Check whether we can write into the same memory. */
5ca419f2 6775 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
1f9a1ea8 6776 BPF_SIZE(insn->code), BPF_WRITE, -1, true, false);
5ca419f2
BJ
6777 if (err)
6778 return err;
6779
5ca419f2 6780 return 0;
17a52670
AS
6781}
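/* Instruction forms accepted above, using the BPF_ATOMIC_OP() macro
 * from include/linux/filter.h (illustrative only):
 *
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_1, -8)
 *		// *(u64 *)(r10 - 8) += r1, no value fetched back
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8)
 *		// r1 = atomic_fetch_add((u64 *)(r10 - 8), r1)
 *	BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8)
 *		// r0 = atomic_cmpxchg((u64 *)(r10 - 8), r0, r1)
 */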
6782
01f810ac
AM
6783/* When register 'regno' is used to read the stack (either directly or through
6784 * a helper function) make sure that it's within stack boundary and, depending
6785 * on the access type, that all elements of the stack are initialized.
6786 *
6787 * 'off' includes 'regno->off', but not its dynamic part (if any).
6788 *
6789 * All registers that have been spilled on the stack in the slots within the
6790 * read offsets are marked as read.
6791 */
6792static int check_stack_range_initialized(
6793 struct bpf_verifier_env *env, int regno, int off,
6794 int access_size, bool zero_size_allowed,
61df10c7 6795 enum bpf_access_src type, struct bpf_call_arg_meta *meta)
2011fccf
AI
6796{
6797 struct bpf_reg_state *reg = reg_state(env, regno);
01f810ac
AM
6798 struct bpf_func_state *state = func(env, reg);
6799 int err, min_off, max_off, i, j, slot, spi;
6800 char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
6801 enum bpf_access_type bounds_check_type;
6802 /* Some accesses can write anything into the stack, others are
6803 * read-only.
6804 */
6805 bool clobber = false;
2011fccf 6806
01f810ac
AM
6807 if (access_size == 0 && !zero_size_allowed) {
6808 verbose(env, "invalid zero-sized read\n");
2011fccf
AI
6809 return -EACCES;
6810 }
2011fccf 6811
01f810ac
AM
6812 if (type == ACCESS_HELPER) {
6813 /* The bounds checks for writes are more permissive than for
6814 * reads. However, if raw_mode is not set, we'll do extra
6815 * checks below.
6816 */
6817 bounds_check_type = BPF_WRITE;
6818 clobber = true;
6819 } else {
6820 bounds_check_type = BPF_READ;
6821 }
6822 err = check_stack_access_within_bounds(env, regno, off, access_size,
6823 type, bounds_check_type);
6824 if (err)
6825 return err;
6826
17a52670 6827
2011fccf 6828 if (tnum_is_const(reg->var_off)) {
01f810ac 6829 min_off = max_off = reg->var_off.value + off;
2011fccf 6830 } else {
088ec26d
AI
6831 /* Variable offset is prohibited for unprivileged mode for
6832 * simplicity since it requires corresponding support in
6833 * Spectre masking for stack ALU.
6834 * See also retrieve_ptr_limit().
6835 */
2c78ee89 6836 if (!env->bypass_spec_v1) {
088ec26d 6837 char tn_buf[48];
f1174f77 6838
088ec26d 6839 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
01f810ac
AM
6840 verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
6841 regno, err_extra, tn_buf);
088ec26d
AI
6842 return -EACCES;
6843 }
f2bcd05e
AI
6844 /* Only initialized buffer on stack is allowed to be accessed
6845 * with variable offset. With uninitialized buffer it's hard to
6846 * guarantee that whole memory is marked as initialized on
6847 * helper return since specific bounds are unknown what may
6848 * cause uninitialized stack leaking.
6849 */
6850 if (meta && meta->raw_mode)
6851 meta = NULL;
6852
01f810ac
AM
6853 min_off = reg->smin_value + off;
6854 max_off = reg->smax_value + off;
17a52670
AS
6855 }
6856
435faee1 6857 if (meta && meta->raw_mode) {
ef8fc7a0
KKD
6858 /* Ensure we won't be overwriting dynptrs when simulating byte
6859 * by byte access in check_helper_call using meta.access_size.
6860 * This would be a problem if we have a helper in the future
6861 * which takes:
6862 *
6863 * helper(uninit_mem, len, dynptr)
6864 *
6865 * Now, uninint_mem may overlap with dynptr pointer. Hence, it
6866 * may end up writing to dynptr itself when touching memory from
6867 * arg 1. This can be relaxed on a case by case basis for known
6868 * safe cases, but reject due to the possibilitiy of aliasing by
6869 * default.
6870 */
6871 for (i = min_off; i < max_off + access_size; i++) {
6872 int stack_off = -i - 1;
6873
6874 spi = __get_spi(i);
6875 /* raw_mode may write past allocated_stack */
6876 if (state->allocated_stack <= stack_off)
6877 continue;
6878 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) {
6879 verbose(env, "potential write to dynptr at off=%d disallowed\n", i);
6880 return -EACCES;
6881 }
6882 }
435faee1
DB
6883 meta->access_size = access_size;
6884 meta->regno = regno;
6885 return 0;
6886 }
6887
2011fccf 6888 for (i = min_off; i < max_off + access_size; i++) {
cc2b14d5
AS
6889 u8 *stype;
6890
2011fccf 6891 slot = -i - 1;
638f5b90 6892 spi = slot / BPF_REG_SIZE;
cc2b14d5
AS
6893 if (state->allocated_stack <= slot)
6894 goto err;
6895 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
6896 if (*stype == STACK_MISC)
6897 goto mark;
6715df8d
EZ
6898 if ((*stype == STACK_ZERO) ||
6899 (*stype == STACK_INVALID && env->allow_uninit_stack)) {
01f810ac
AM
6900 if (clobber) {
6901 /* helper can write anything into the stack */
6902 *stype = STACK_MISC;
6903 }
cc2b14d5 6904 goto mark;
17a52670 6905 }
1d68f22b 6906
27113c59 6907 if (is_spilled_reg(&state->stack[spi]) &&
cd17d38f
YS
6908 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
6909 env->allow_ptr_leaks)) {
01f810ac
AM
6910 if (clobber) {
6911 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
6912 for (j = 0; j < BPF_REG_SIZE; j++)
354e8f19 6913 scrub_spilled_slot(&state->stack[spi].slot_type[j]);
01f810ac 6914 }
f7cf25b2
AS
6915 goto mark;
6916 }
6917
cc2b14d5 6918err:
2011fccf 6919 if (tnum_is_const(reg->var_off)) {
01f810ac
AM
6920 verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
6921 err_extra, regno, min_off, i - min_off, access_size);
2011fccf
AI
6922 } else {
6923 char tn_buf[48];
6924
6925 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
01f810ac
AM
6926 verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
6927 err_extra, regno, tn_buf, i - min_off, access_size);
2011fccf 6928 }
cc2b14d5
AS
6929 return -EACCES;
6930mark:
6931 /* reading any byte out of 8-byte 'spill_slot' will cause
6932 * the whole slot to be marked as 'read'
6933 */
679c782d 6934 mark_reg_read(env, &state->stack[spi].spilled_ptr,
5327ed3d
JW
6935 state->stack[spi].spilled_ptr.parent,
6936 REG_LIVE_READ64);
261f4664
KKD
6937 /* We do not set REG_LIVE_WRITTEN for stack slot, as we can not
6938 * be sure that whether stack slot is written to or not. Hence,
6939 * we must still conservatively propagate reads upwards even if
6940 * helper may write to the entire memory range.
6941 */
17a52670 6942 }
2011fccf 6943 return update_stack_depth(env, state, min_off);
17a52670
AS
6944}
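/* BPF-side consequence of the rule above (sketch; map and helpers are
 * only examples): a helper that writes the buffer (raw_mode) accepts
 * uninitialized stack, while a helper that reads it does not.
 *
 *	char comm[16];			// uninitialized stack
 *	bpf_get_current_comm(comm, 16);	// ok: helper only writes comm
 *
 *	u32 key;			// still uninitialized
 *	bpf_map_lookup_elem(&m, &key);	// rejected: "invalid indirect
 *					// read from stack"
 */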
static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
				   int access_size, bool zero_size_allowed,
				   struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	u32 *max_access;

	switch (base_type(reg->type)) {
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
		return check_packet_access(env, regno, reg->off, access_size,
					   zero_size_allowed);
	case PTR_TO_MAP_KEY:
		if (meta && meta->raw_mode) {
			verbose(env, "R%d cannot write into %s\n", regno,
				reg_type_str(env, reg->type));
			return -EACCES;
		}
		return check_mem_region_access(env, regno, reg->off, access_size,
					       reg->map_ptr->key_size, false);
	case PTR_TO_MAP_VALUE:
		if (check_map_access_type(env, regno, reg->off, access_size,
					  meta && meta->raw_mode ? BPF_WRITE :
					  BPF_READ))
			return -EACCES;
		return check_map_access(env, regno, reg->off, access_size,
					zero_size_allowed, ACCESS_HELPER);
	case PTR_TO_MEM:
		if (type_is_rdonly_mem(reg->type)) {
			if (meta && meta->raw_mode) {
				verbose(env, "R%d cannot write into %s\n", regno,
					reg_type_str(env, reg->type));
				return -EACCES;
			}
		}
		return check_mem_region_access(env, regno, reg->off,
					       access_size, reg->mem_size,
					       zero_size_allowed);
	case PTR_TO_BUF:
		if (type_is_rdonly_mem(reg->type)) {
			if (meta && meta->raw_mode) {
				verbose(env, "R%d cannot write into %s\n", regno,
					reg_type_str(env, reg->type));
				return -EACCES;
			}

			max_access = &env->prog->aux->max_rdonly_access;
		} else {
			max_access = &env->prog->aux->max_rdwr_access;
		}
		return check_buffer_access(env, reg, regno, reg->off,
					   access_size, zero_size_allowed,
					   max_access);
	case PTR_TO_STACK:
		return check_stack_range_initialized(
				env,
				regno, reg->off, access_size,
				zero_size_allowed, ACCESS_HELPER, meta);
	case PTR_TO_BTF_ID:
		return check_ptr_to_btf_access(env, regs, regno, reg->off,
					       access_size, BPF_READ, -1);
	case PTR_TO_CTX:
		/* in case the function doesn't know how to access the context
		 * (because we are in a program of type SYSCALL, for example),
		 * we cannot statically check its size.
		 * Dynamically check it now.
		 */
		if (!env->ops->convert_ctx_access) {
			enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
			int offset = access_size - 1;

			/* Allow zero-byte read from PTR_TO_CTX */
			if (access_size == 0)
				return zero_size_allowed ? 0 : -EACCES;

			return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
						atype, -1, false, false);
		}

		fallthrough;
	default: /* scalar_value or invalid ptr */
		/* Allow zero-byte read from NULL, regardless of pointer type */
		if (zero_size_allowed && access_size == 0 &&
		    register_is_null(reg))
			return 0;

		verbose(env, "R%d type=%s ", regno,
			reg_type_str(env, reg->type));
		verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
		return -EACCES;
	}
}

static int check_mem_size_reg(struct bpf_verifier_env *env,
			      struct bpf_reg_state *reg, u32 regno,
			      bool zero_size_allowed,
			      struct bpf_call_arg_meta *meta)
{
	int err;

	/* This is used to refine r0 return value bounds for helpers
	 * that enforce this value as an upper bound on return values.
	 * See do_refine_retval_range() for helpers that can refine
	 * the return value. The helper's C type for the size is u32, so
	 * we pull the register bound from umax_value; if it is negative,
	 * the verifier errors out. Only upper bounds can be learned
	 * because the retval is an int type and negative retvals are
	 * allowed.
	 */
	meta->msize_max_value = reg->umax_value;

	/* The register is SCALAR_VALUE; the access check
	 * happens using its boundaries.
	 */
	if (!tnum_is_const(reg->var_off))
		/* For unprivileged variable accesses, disable raw
		 * mode so that the program is required to
		 * initialize all the memory that the helper could
		 * just partially fill up.
		 */
		meta = NULL;

	if (reg->smin_value < 0) {
		verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
			regno);
		return -EACCES;
	}

	if (reg->umin_value == 0) {
		err = check_helper_mem_access(env, regno - 1, 0,
					      zero_size_allowed,
					      meta);
		if (err)
			return err;
	}

	if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
		verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
			regno);
		return -EACCES;
	}
	err = check_helper_mem_access(env, regno - 1,
				      reg->umax_value,
				      zero_size_allowed, meta);
	if (!err)
		err = mark_chain_precision(env, regno);
	return err;
}
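/* The bounding pattern this enforces on the BPF side (sketch): a
 * variable size must carry a provable upper bound before reaching a
 * helper's size argument, otherwise the "unbounded memory access"
 * error above fires.
 *
 *	char buf[64] = {};
 *	u32 len = get_len();			// unknown scalar (hypothetical)
 *	if (len > sizeof(buf))			// establishes umax_value <= 64
 *		return 0;
 *	bpf_probe_read_kernel(buf, len, src);	// accepted
 */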
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
		  u32 regno, u32 mem_size)
{
	bool may_be_null = type_may_be_null(reg->type);
	struct bpf_reg_state saved_reg;
	struct bpf_call_arg_meta meta;
	int err;

	if (register_is_null(reg))
		return 0;

	memset(&meta, 0, sizeof(meta));
	/* Assuming that the register contains a value, check if the memory
	 * access is safe. Temporarily save and restore the register's state as
	 * the conversion shouldn't be visible to a caller.
	 */
	if (may_be_null) {
		saved_reg = *reg;
		mark_ptr_not_null_reg(reg);
	}

	err = check_helper_mem_access(env, regno, mem_size, true, &meta);
	/* Check access for BPF_WRITE */
	meta.raw_mode = true;
	err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);

	if (may_be_null)
		*reg = saved_reg;

	return err;
}

static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				    u32 regno)
{
	struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
	bool may_be_null = type_may_be_null(mem_reg->type);
	struct bpf_reg_state saved_reg;
	struct bpf_call_arg_meta meta;
	int err;

	WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);

	memset(&meta, 0, sizeof(meta));

	if (may_be_null) {
		saved_reg = *mem_reg;
		mark_ptr_not_null_reg(mem_reg);
	}

	err = check_mem_size_reg(env, reg, regno, true, &meta);
	/* Check access for BPF_WRITE */
	meta.raw_mode = true;
	err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);

	if (may_be_null)
		*mem_reg = saved_reg;
	return err;
}

/* Implementation details:
 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL.
 * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL.
 * Two bpf_map_lookups (even with the same key) will have different reg->id.
 * Two separate bpf_obj_new will also have different reg->id.
 * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier
 * clears reg->id after value_or_null->value transition, since the verifier only
 * cares about the range of access to valid map value pointer and doesn't care
 * about actual address of the map element.
 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
 * reg->id > 0 after value_or_null->value transition. By doing so
 * two bpf_map_lookups will be considered two different pointers that
 * point to different bpf_spin_locks. Likewise for pointers to allocated objects
 * returned from bpf_obj_new.
 * The verifier allows taking only one bpf_spin_lock at a time to avoid
 * dead-locks.
 * Since only one bpf_spin_lock is allowed the checks are simpler than
 * reg_is_refcounted() logic. The verifier needs to remember only
 * one spin_lock instead of array of acquired_refs.
 * cur_state->active_lock remembers which map value element or allocated
 * object got locked and clears it after bpf_spin_unlock.
 */
static int process_spin_lock(struct bpf_verifier_env *env, int regno,
			     bool is_lock)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	struct bpf_verifier_state *cur = env->cur_state;
	bool is_const = tnum_is_const(reg->var_off);
	u64 val = reg->var_off.value;
	struct bpf_map *map = NULL;
	struct btf *btf = NULL;
	struct btf_record *rec;

	if (!is_const) {
		verbose(env,
			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
			regno);
		return -EINVAL;
	}
	if (reg->type == PTR_TO_MAP_VALUE) {
		map = reg->map_ptr;
		if (!map->btf) {
			verbose(env,
				"map '%s' has to have BTF in order to use bpf_spin_lock\n",
				map->name);
			return -EINVAL;
		}
	} else {
		btf = reg->btf;
	}

	rec = reg_btf_record(reg);
	if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) {
		verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local",
			map ? map->name : "kptr");
		return -EINVAL;
	}
	if (rec->spin_lock_off != val + reg->off) {
		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n",
			val + reg->off, rec->spin_lock_off);
		return -EINVAL;
	}
	if (is_lock) {
		if (cur->active_lock.ptr) {
			verbose(env,
				"Locking two bpf_spin_locks are not allowed\n");
			return -EINVAL;
		}
		if (map)
			cur->active_lock.ptr = map;
		else
			cur->active_lock.ptr = btf;
		cur->active_lock.id = reg->id;
	} else {
		void *ptr;

		if (map)
			ptr = map;
		else
			ptr = btf;

		if (!cur->active_lock.ptr) {
			verbose(env, "bpf_spin_unlock without taking a lock\n");
			return -EINVAL;
		}
		if (cur->active_lock.ptr != ptr ||
		    cur->active_lock.id != reg->id) {
			verbose(env, "bpf_spin_unlock of different lock\n");
			return -EINVAL;
		}

		invalidate_non_owning_refs(env);

		cur->active_lock.ptr = NULL;
		cur->active_lock.id = 0;
	}
	return 0;
}
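/* BPF-side shape this validates (sketch; the map and value layout are
 * hypothetical):
 *
 *	struct val {
 *		int data;
 *		struct bpf_spin_lock lock;	// offset recorded in btf_record
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&m, &key);
 *	if (!v)
 *		return 0;
 *	bpf_spin_lock(&v->lock);	// active_lock = {m, v's reg->id}
 *	v->data = 1;
 *	bpf_spin_unlock(&v->lock);	// active_lock cleared
 */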
static int process_timer_func(struct bpf_verifier_env *env, int regno,
			      struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	bool is_const = tnum_is_const(reg->var_off);
	struct bpf_map *map = reg->map_ptr;
	u64 val = reg->var_off.value;

	if (!is_const) {
		verbose(env,
			"R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
			regno);
		return -EINVAL;
	}
	if (!map->btf) {
		verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
			map->name);
		return -EINVAL;
	}
	if (!btf_record_has_field(map->record, BPF_TIMER)) {
		verbose(env, "map '%s' has no valid bpf_timer\n", map->name);
		return -EINVAL;
	}
	if (map->record->timer_off != val + reg->off) {
		verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
			val + reg->off, map->record->timer_off);
		return -EINVAL;
	}
	if (meta->map_ptr) {
		verbose(env, "verifier bug. Two map pointers in a timer helper\n");
		return -EFAULT;
	}
	meta->map_uid = reg->map_uid;
	meta->map_ptr = map;
	return 0;
}
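/* Map value layout this expects (sketch):
 *
 *	struct elem {
 *		struct bpf_timer t;	// must sit at record->timer_off
 *	};
 *
 *	struct elem *e = bpf_map_lookup_elem(&m, &key);
 *	if (e)
 *		bpf_timer_init(&e->t, &m, CLOCK_MONOTONIC);
 */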
static int process_kptr_func(struct bpf_verifier_env *env, int regno,
			     struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	struct bpf_map *map_ptr = reg->map_ptr;
	struct btf_field *kptr_field;
	u32 kptr_off;

	if (!tnum_is_const(reg->var_off)) {
		verbose(env,
			"R%d doesn't have constant offset. kptr has to be at the constant offset\n",
			regno);
		return -EINVAL;
	}
	if (!map_ptr->btf) {
		verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
			map_ptr->name);
		return -EINVAL;
	}
	if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) {
		verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
		return -EINVAL;
	}

	meta->map_ptr = map_ptr;
	kptr_off = reg->off + reg->var_off.value;
	kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR);
	if (!kptr_field) {
		verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
		return -EACCES;
	}
	if (kptr_field->type != BPF_KPTR_REF) {
		verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
		return -EACCES;
	}
	meta->kptr_field = kptr_field;
	return 0;
}
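/* bpf_kptr_xchg() usage this validates (sketch; the pointee type is
 * hypothetical):
 *
 *	struct map_val {
 *		struct node __kptr *ptr;	// BPF_KPTR_REF field
 *	};
 *
 *	old = bpf_kptr_xchg(&v->ptr, new);	// &v->ptr must land exactly
 *						// on the recorded kptr offset
 *	// 'old' is now owned by the program and must be released
 */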
/* There are two register types representing a bpf_dynptr, one is PTR_TO_STACK
 * which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR.
 *
 * In both cases we deal with the first 8 bytes, but need to mark the next 8
 * bytes as STACK_DYNPTR in case of PTR_TO_STACK. In case of
 * CONST_PTR_TO_DYNPTR, we are guaranteed to get the beginning of the object.
 *
 * Mutability of bpf_dynptr is at two levels, one is at the level of struct
 * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct
 * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can
 * mutate the view of the dynptr and also possibly destroy it. In the latter
 * case, it cannot mutate the bpf_dynptr itself but it can still mutate the
 * memory that dynptr points to.
 *
 * The verifier will keep track of both levels of mutation (bpf_dynptr's in
 * reg->type and the memory's in reg->dynptr.type), but there is no support for
 * readonly dynptr view yet, hence only the first case is tracked and checked.
 *
 * This is consistent with how C applies the const modifier to a struct object,
 * where the pointer itself inside bpf_dynptr becomes const but not what it
 * points to.
 *
 * Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument
 * type, and declare it as 'const struct bpf_dynptr *' in their prototype.
 */
static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx,
			       enum bpf_arg_type arg_type, int clone_ref_obj_id)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	int err;

	/* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an
	 * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*):
	 */
	if ((arg_type & (MEM_UNINIT | MEM_RDONLY)) == (MEM_UNINIT | MEM_RDONLY)) {
		verbose(env, "verifier internal error: misconfigured dynptr helper type flags\n");
		return -EFAULT;
	}

	/* MEM_UNINIT - Points to memory that is an appropriate candidate for
	 *		constructing a mutable bpf_dynptr object.
	 *
	 *		Currently, this is only possible with PTR_TO_STACK
	 *		pointing to a region of at least 16 bytes which doesn't
	 *		contain an existing bpf_dynptr.
	 *
	 * MEM_RDONLY - Points to an initialized bpf_dynptr that will not be
	 *		mutated or destroyed. However, the memory it points to
	 *		may be mutated.
	 *
	 * None       - Points to an initialized dynptr that can be mutated and
	 *		destroyed, including mutation of the memory it points
	 *		to.
	 */
	if (arg_type & MEM_UNINIT) {
		int i;

		if (!is_dynptr_reg_valid_uninit(env, reg)) {
			verbose(env, "Dynptr has to be an uninitialized dynptr\n");
			return -EINVAL;
		}

		/* we write BPF_DW bits (8 bytes) at a time */
		for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
			err = check_mem_access(env, insn_idx, regno,
					       i, BPF_DW, BPF_WRITE, -1, false, false);
			if (err)
				return err;
		}

		err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, clone_ref_obj_id);
	} else /* MEM_RDONLY and None case from above */ {
		/* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
		if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) {
			verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n");
			return -EINVAL;
		}

		if (!is_dynptr_reg_valid_init(env, reg)) {
			verbose(env,
				"Expected an initialized dynptr as arg #%d\n",
				regno);
			return -EINVAL;
		}

		/* Fold modifiers (in this case, MEM_RDONLY) when checking expected type */
		if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) {
			verbose(env,
				"Expected a dynptr of type %s as arg #%d\n",
				dynptr_type_str(arg_to_dynptr_type(arg_type)), regno);
			return -EINVAL;
		}

		err = mark_dynptr_read(env, reg);
	}
	return err;
}
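/* Calling convention this implements, from the BPF side (sketch; the
 * ringbuf map 'rb' is hypothetical):
 *
 *	struct bpf_dynptr dp;				// 16 bytes of stack
 *	bpf_ringbuf_reserve_dynptr(&rb, 8, 0, &dp);	// MEM_UNINIT arg: the
 *							// slots become STACK_DYNPTR
 *	bpf_dynptr_write(&dp, 0, &val, sizeof(val), 0);	// initialized, mutable
 *	bpf_ringbuf_submit_dynptr(&dp, 0);		// OBJ_RELEASE: slots freed
 */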
static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi)
{
	struct bpf_func_state *state = func(env, reg);

	return state->stack[spi].spilled_ptr.ref_obj_id;
}

static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
}

static bool is_iter_new_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_ITER_NEW;
}

static bool is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_ITER_NEXT;
}

static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_ITER_DESTROY;
}

static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg)
{
	/* btf_check_iter_kfuncs() guarantees that the first argument of any
	 * iter kfunc is an iter state pointer
	 */
	return arg == 0 && is_iter_kfunc(meta);
}

static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx,
			    struct bpf_kfunc_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	const struct btf_type *t;
	const struct btf_param *arg;
	int spi, err, i, nr_slots;
	u32 btf_id;

	/* btf_check_iter_kfuncs() ensures we don't need to validate anything here */
	arg = &btf_params(meta->func_proto)[0];
	t = btf_type_skip_modifiers(meta->btf, arg->type, NULL);	/* PTR */
	t = btf_type_skip_modifiers(meta->btf, t->type, &btf_id);	/* STRUCT */
	nr_slots = t->size / BPF_REG_SIZE;

	if (is_iter_new_kfunc(meta)) {
		/* bpf_iter_<type>_new() expects pointer to uninit iter state */
		if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) {
			verbose(env, "expected uninitialized iter_%s as arg #%d\n",
				iter_type_str(meta->btf, btf_id), regno);
			return -EINVAL;
		}

		for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) {
			err = check_mem_access(env, insn_idx, regno,
					       i, BPF_DW, BPF_WRITE, -1, false, false);
			if (err)
				return err;
		}

		err = mark_stack_slots_iter(env, reg, insn_idx, meta->btf, btf_id, nr_slots);
		if (err)
			return err;
	} else {
		/* iter_next() or iter_destroy() expect initialized iter state */
		if (!is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots)) {
			verbose(env, "expected an initialized iter_%s as arg #%d\n",
				iter_type_str(meta->btf, btf_id), regno);
			return -EINVAL;
		}

		spi = iter_get_spi(env, reg, nr_slots);
		if (spi < 0)
			return spi;

		err = mark_iter_read(env, reg, spi, nr_slots);
		if (err)
			return err;

		/* remember meta->iter info for process_iter_next_call() */
		meta->iter.spi = spi;
		meta->iter.frameno = reg->frameno;
		meta->ref_obj_id = iter_ref_obj_id(env, reg, spi);

		if (is_iter_destroy_kfunc(meta)) {
			err = unmark_stack_slots_iter(env, reg, nr_slots);
			if (err)
				return err;
		}
	}

	return 0;
}

/* process_iter_next_call() is called when the verifier gets to an iterator's
 * next "method" (e.g., bpf_iter_num_next() for the numbers iterator) call.
 * We'll refer to it as just "iter_next()" in comments below.
 *
 * The BPF verifier relies on a crucial contract for any iter_next()
 * implementation: it should *eventually* return NULL, and once that happens
 * it should keep returning NULL. That is, once the iterator exhausts elements
 * to iterate, it should never reset or spuriously return new elements.
 *
 * With the assumption of such a contract, process_iter_next_call() simulates
 * a fork in the verifier state to validate loop logic correctness and safety
 * without having to simulate an infinite amount of iterations.
 *
 * In the current state, we first assume that iter_next() returned NULL and
 * the iterator state is set to DRAINED (BPF_ITER_STATE_DRAINED). In such
 * conditions we should not form an infinite loop and should eventually reach
 * exit.
 *
 * Besides that, we also fork the current state and enqueue it for later
 * verification. In the forked state we keep the iterator state as ACTIVE
 * (BPF_ITER_STATE_ACTIVE) and assume a non-NULL return from iter_next(). We
 * also bump the iteration depth to prevent erroneous infinite loop detection
 * later on (see the iter_active_depths_differ() comment for details). In this
 * state we assume that we'll eventually loop back to another iter_next()
 * call (it could be in exactly the same location or in some other
 * instruction, it doesn't matter, we don't make any unnecessary assumptions
 * about this, everything revolves around the iterator state in a stack slot,
 * not which instruction is calling iter_next()). When that happens, we either
 * will come to iter_next() with an equivalent state and can conclude that the
 * next iteration will proceed in exactly the same way as we just verified, so
 * it's safe to assume that the loop converges. If not, we'll go on another
 * iteration simulation with a different input state, until all possible
 * starting states are validated or we reach the maximum number of
 * instructions limit.
 *
 * This way, we will either exhaustively discover all possible input states
 * that the iterator loop can start with and eventually converge, or we'll
 * effectively regress into bounded loop simulation logic and either reach the
 * maximum number of instructions if the loop is not provably convergent, or
 * there is some statically known limit on the number of iterations (e.g., if
 * there is an explicit `if n > 100 then break;` statement somewhere in the
 * loop).
 *
 * One very subtle but very important aspect is that we *always* simulate the
 * NULL condition first (as the current state) before we simulate the non-NULL
 * case. This has to do with intricacies of scalar precision tracking. By
 * simulating the "exit condition" of iter_next() returning NULL first, we
 * make sure all the relevant precision marks *that will be set **after** we
 * exit the iterator loop* are propagated backwards to the common parent state
 * of the NULL and non-NULL branches. Thanks to that, state equivalence checks
 * done later in the forked state, when reaching iter_next() for the ACTIVE
 * iterator, can assume that precision marks are finalized and won't change.
 * Simulating another ACTIVE iterator iteration won't change them (because
 * given the same input states we'll end up with exactly the same output
 * states which we are currently comparing; and verification after the loop
 * already propagated back what needs to be **additionally** tracked as
 * precise). It's subtle, grok precision tracking for more intuitive
 * understanding.
 */
static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
				  struct bpf_kfunc_call_arg_meta *meta)
{
	struct bpf_verifier_state *cur_st = env->cur_state, *queued_st;
	struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr;
	struct bpf_reg_state *cur_iter, *queued_iter;
	int iter_frameno = meta->iter.frameno;
	int iter_spi = meta->iter.spi;

	BTF_TYPE_EMIT(struct bpf_iter);

	cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr;

	if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE &&
	    cur_iter->iter.state != BPF_ITER_STATE_DRAINED) {
		verbose(env, "verifier internal error: unexpected iterator state %d (%s)\n",
			cur_iter->iter.state, iter_state_str(cur_iter->iter.state));
		return -EFAULT;
	}

	if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) {
		/* branch out active iter state */
		queued_st = push_stack(env, insn_idx + 1, insn_idx, false);
		if (!queued_st)
			return -ENOMEM;

		queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
		queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
		queued_iter->iter.depth++;

		queued_fr = queued_st->frame[queued_st->curframe];
		mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]);
	}

	/* mark current iter state as drained (assume a NULL return), but keep
	 * the depth unchanged
	 */
	cur_iter->iter.state = BPF_ITER_STATE_DRAINED;
	__mark_reg_const_zero(&cur_fr->regs[BPF_REG_0]);

	return 0;
}
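/* The loop shape being simulated, for the numbers iterator (sketch):
 *
 *	struct bpf_iter_num it;
 *	int *v;
 *
 *	bpf_iter_num_new(&it, 0, 100);
 *	while ((v = bpf_iter_num_next(&it))) {	// verifier forks here: NULL
 *		...				// branch is walked first, then
 *	}					// the ACTIVE branch with depth++
 *	bpf_iter_num_destroy(&it);
 */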
7623
90133415
DB
7624static bool arg_type_is_mem_size(enum bpf_arg_type type)
7625{
7626 return type == ARG_CONST_SIZE ||
7627 type == ARG_CONST_SIZE_OR_ZERO;
7628}
7629
8f14852e
KKD
7630static bool arg_type_is_release(enum bpf_arg_type type)
7631{
7632 return type & OBJ_RELEASE;
7633}
7634
97e03f52
JK
7635static bool arg_type_is_dynptr(enum bpf_arg_type type)
7636{
7637 return base_type(type) == ARG_PTR_TO_DYNPTR;
7638}
7639
57c3bb72
AI
7640static int int_ptr_type_to_size(enum bpf_arg_type type)
7641{
7642 if (type == ARG_PTR_TO_INT)
7643 return sizeof(u32);
7644 else if (type == ARG_PTR_TO_LONG)
7645 return sizeof(u64);
7646
7647 return -EINVAL;
7648}
7649
912f442c
LB
7650static int resolve_map_arg_type(struct bpf_verifier_env *env,
7651 const struct bpf_call_arg_meta *meta,
7652 enum bpf_arg_type *arg_type)
7653{
7654 if (!meta->map_ptr) {
7655 /* kernel subsystem misconfigured verifier */
7656 verbose(env, "invalid map_ptr to access map->type\n");
7657 return -EACCES;
7658 }
7659
7660 switch (meta->map_ptr->map_type) {
7661 case BPF_MAP_TYPE_SOCKMAP:
7662 case BPF_MAP_TYPE_SOCKHASH:
7663 if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
6550f2dd 7664 *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
912f442c
LB
7665 } else {
7666 verbose(env, "invalid arg_type for sockmap/sockhash\n");
7667 return -EINVAL;
7668 }
7669 break;
9330986c
JK
7670 case BPF_MAP_TYPE_BLOOM_FILTER:
7671 if (meta->func_id == BPF_FUNC_map_peek_elem)
7672 *arg_type = ARG_PTR_TO_MAP_VALUE;
7673 break;
912f442c
LB
7674 default:
7675 break;
7676 }
7677 return 0;
7678}
7679
f79e7ea5
LB
7680struct bpf_reg_types {
7681 const enum bpf_reg_type types[10];
1df8f55a 7682 u32 *btf_id;
f79e7ea5
LB
7683};
7684
f79e7ea5
LB
7685static const struct bpf_reg_types sock_types = {
7686 .types = {
7687 PTR_TO_SOCK_COMMON,
7688 PTR_TO_SOCKET,
7689 PTR_TO_TCP_SOCK,
7690 PTR_TO_XDP_SOCK,
7691 },
7692};
7693
49a2a4d4 7694#ifdef CONFIG_NET
1df8f55a
MKL
7695static const struct bpf_reg_types btf_id_sock_common_types = {
7696 .types = {
7697 PTR_TO_SOCK_COMMON,
7698 PTR_TO_SOCKET,
7699 PTR_TO_TCP_SOCK,
7700 PTR_TO_XDP_SOCK,
7701 PTR_TO_BTF_ID,
3f00c523 7702 PTR_TO_BTF_ID | PTR_TRUSTED,
1df8f55a
MKL
7703 },
7704 .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
7705};
49a2a4d4 7706#endif
1df8f55a 7707
f79e7ea5
LB
7708static const struct bpf_reg_types mem_types = {
7709 .types = {
7710 PTR_TO_STACK,
7711 PTR_TO_PACKET,
7712 PTR_TO_PACKET_META,
69c087ba 7713 PTR_TO_MAP_KEY,
f79e7ea5
LB
7714 PTR_TO_MAP_VALUE,
7715 PTR_TO_MEM,
894f2a8b 7716 PTR_TO_MEM | MEM_RINGBUF,
20b2aff4 7717 PTR_TO_BUF,
3e30be42 7718 PTR_TO_BTF_ID | PTR_TRUSTED,
f79e7ea5
LB
7719 },
7720};
7721
7722static const struct bpf_reg_types int_ptr_types = {
7723 .types = {
7724 PTR_TO_STACK,
7725 PTR_TO_PACKET,
7726 PTR_TO_PACKET_META,
69c087ba 7727 PTR_TO_MAP_KEY,
f79e7ea5
LB
7728 PTR_TO_MAP_VALUE,
7729 },
7730};
7731
4e814da0
KKD
7732static const struct bpf_reg_types spin_lock_types = {
7733 .types = {
7734 PTR_TO_MAP_VALUE,
7735 PTR_TO_BTF_ID | MEM_ALLOC,
7736 }
7737};
7738
f79e7ea5
LB
7739static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
7740static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
7741static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
894f2a8b 7742static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } };
f79e7ea5 7743static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
3f00c523
DV
7744static const struct bpf_reg_types btf_ptr_types = {
7745 .types = {
7746 PTR_TO_BTF_ID,
7747 PTR_TO_BTF_ID | PTR_TRUSTED,
fca1aa75 7748 PTR_TO_BTF_ID | MEM_RCU,
3f00c523
DV
7749 },
7750};
7751static const struct bpf_reg_types percpu_btf_ptr_types = {
7752 .types = {
7753 PTR_TO_BTF_ID | MEM_PERCPU,
7754 PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED,
7755 }
7756};
69c087ba
YS
7757static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
7758static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
fff13c4b 7759static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
b00628b1 7760static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
c0a5a21c 7761static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
20571567
DV
7762static const struct bpf_reg_types dynptr_types = {
7763 .types = {
7764 PTR_TO_STACK,
27060531 7765 CONST_PTR_TO_DYNPTR,
20571567
DV
7766 }
7767};
f79e7ea5 7768
0789e13b 7769static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
d1673304
DM
7770 [ARG_PTR_TO_MAP_KEY] = &mem_types,
7771 [ARG_PTR_TO_MAP_VALUE] = &mem_types,
f79e7ea5
LB
7772 [ARG_CONST_SIZE] = &scalar_types,
7773 [ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
7774 [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
7775 [ARG_CONST_MAP_PTR] = &const_map_ptr_types,
7776 [ARG_PTR_TO_CTX] = &context_types,
f79e7ea5 7777 [ARG_PTR_TO_SOCK_COMMON] = &sock_types,
49a2a4d4 7778#ifdef CONFIG_NET
1df8f55a 7779 [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types,
49a2a4d4 7780#endif
f79e7ea5 7781 [ARG_PTR_TO_SOCKET] = &fullsock_types,
f79e7ea5
LB
7782 [ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
7783 [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
7784 [ARG_PTR_TO_MEM] = &mem_types,
894f2a8b 7785 [ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types,
f79e7ea5
LB
7786 [ARG_PTR_TO_INT] = &int_ptr_types,
7787 [ARG_PTR_TO_LONG] = &int_ptr_types,
eaa6bcb7 7788 [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types,
69c087ba 7789 [ARG_PTR_TO_FUNC] = &func_ptr_types,
48946bd6 7790 [ARG_PTR_TO_STACK] = &stack_ptr_types,
fff13c4b 7791 [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types,
b00628b1 7792 [ARG_PTR_TO_TIMER] = &timer_types,
c0a5a21c 7793 [ARG_PTR_TO_KPTR] = &kptr_types,
20571567 7794 [ARG_PTR_TO_DYNPTR] = &dynptr_types,
f79e7ea5
LB
7795};
7796
7797static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
a968d5e2 7798 enum bpf_arg_type arg_type,
c0a5a21c
KKD
7799 const u32 *arg_btf_id,
7800 struct bpf_call_arg_meta *meta)
f79e7ea5
LB
7801{
7802 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7803 enum bpf_reg_type expected, type = reg->type;
a968d5e2 7804 const struct bpf_reg_types *compatible;
f79e7ea5
LB
7805 int i, j;
7806
48946bd6 7807 compatible = compatible_reg_types[base_type(arg_type)];
a968d5e2
MKL
7808 if (!compatible) {
7809 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
7810 return -EFAULT;
7811 }
7812
216e3cd2
HL
7813 /* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
7814 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
7815 *
7816 * Same for MAYBE_NULL:
7817 *
7818 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
7819 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
7820 *
2012c867
DR
7821 * ARG_PTR_TO_MEM is compatible with PTR_TO_MEM that is tagged with a dynptr type.
7822 *
216e3cd2
HL
7823 * Therefore we fold these flags depending on the arg_type before comparison.
7824 */
7825 if (arg_type & MEM_RDONLY)
7826 type &= ~MEM_RDONLY;
7827 if (arg_type & PTR_MAYBE_NULL)
7828 type &= ~PTR_MAYBE_NULL;
2012c867
DR
7829 if (base_type(arg_type) == ARG_PTR_TO_MEM)
7830 type &= ~DYNPTR_TYPE_FLAG_MASK;
216e3cd2 7831
503e4def 7832 if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type))
738c96d5
DM
7833 type &= ~MEM_ALLOC;
7834
f79e7ea5
LB
7835 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
7836 expected = compatible->types[i];
7837 if (expected == NOT_INIT)
7838 break;
7839
7840 if (type == expected)
a968d5e2 7841 goto found;
f79e7ea5
LB
7842 }
7843
216e3cd2 7844 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
f79e7ea5 7845 for (j = 0; j + 1 < i; j++)
c25b2ae1
HL
7846 verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
7847 verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
f79e7ea5 7848 return -EACCES;
a968d5e2
MKL
7849
7850found:
da03e43a
KKD
7851 if (base_type(reg->type) != PTR_TO_BTF_ID)
7852 return 0;
7853
3e30be42
AS
7854 if (compatible == &mem_types) {
7855 if (!(arg_type & MEM_RDONLY)) {
7856 verbose(env,
7857 "%s() may write into memory pointed by R%d type=%s\n",
7858 func_id_name(meta->func_id),
7859 regno, reg_type_str(env, reg->type));
7860 return -EACCES;
7861 }
7862 return 0;
7863 }
7864
da03e43a
KKD
7865 switch ((int)reg->type) {
7866 case PTR_TO_BTF_ID:
7867 case PTR_TO_BTF_ID | PTR_TRUSTED:
7868 case PTR_TO_BTF_ID | MEM_RCU:
add68b84
AS
7869 case PTR_TO_BTF_ID | PTR_MAYBE_NULL:
7870 case PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU:
da03e43a 7871 {
2ab3b380
KKD
7872 /* For bpf_sk_release, it needs to match against first member
7873 * 'struct sock_common', hence make an exception for it. This
7874 * allows bpf_sk_release to work for multiple socket types.
7875 */
7876 bool strict_type_match = arg_type_is_release(arg_type) &&
7877 meta->func_id != BPF_FUNC_sk_release;
7878
add68b84
AS
7879 if (type_may_be_null(reg->type) &&
7880 (!type_may_be_null(arg_type) || arg_type_is_release(arg_type))) {
7881 verbose(env, "Possibly NULL pointer passed to helper arg%d\n", regno);
7882 return -EACCES;
7883 }
7884
1df8f55a
MKL
7885 if (!arg_btf_id) {
7886 if (!compatible->btf_id) {
7887 verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
7888 return -EFAULT;
7889 }
7890 arg_btf_id = compatible->btf_id;
7891 }
7892
c0a5a21c 7893 if (meta->func_id == BPF_FUNC_kptr_xchg) {
aa3496ac 7894 if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
c0a5a21c 7895 return -EACCES;
47e34cb7
DM
7896 } else {
7897 if (arg_btf_id == BPF_PTR_POISON) {
7898 verbose(env, "verifier internal error:");
7899 verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n",
7900 regno);
7901 return -EACCES;
7902 }
7903
7904 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
7905 btf_vmlinux, *arg_btf_id,
7906 strict_type_match)) {
7907 verbose(env, "R%d is of type %s but %s is expected\n",
b32a5dae
DM
7908 regno, btf_type_name(reg->btf, reg->btf_id),
7909 btf_type_name(btf_vmlinux, *arg_btf_id));
47e34cb7
DM
7910 return -EACCES;
7911 }
a968d5e2 7912 }
da03e43a
KKD
7913 break;
7914 }
7915 case PTR_TO_BTF_ID | MEM_ALLOC:
738c96d5
DM
7916 if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock &&
7917 meta->func_id != BPF_FUNC_kptr_xchg) {
4e814da0
KKD
7918 verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
7919 return -EFAULT;
7920 }
ab6c637a
YS
7921 if (meta->func_id == BPF_FUNC_kptr_xchg) {
7922 if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
7923 return -EACCES;
7924 }
da03e43a
KKD
7925 break;
7926 case PTR_TO_BTF_ID | MEM_PERCPU:
7927 case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED:
7928 /* Handled by helper specific checks */
7929 break;
7930 default:
7931 verbose(env, "verifier internal error: invalid PTR_TO_BTF_ID register for type match\n");
7932 return -EFAULT;
a968d5e2 7933 }
a968d5e2 7934 return 0;
f79e7ea5
LB
7935}
7936
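/* Illustration (editor's sketch, not part of the kernel source): how the
 * flag folding in check_reg_type() plays out for a read-only memory arg.
 * For a helper declared with ARG_PTR_TO_MEM | MEM_RDONLY, both register
 * types below pass, because MEM_RDONLY is stripped from the register type
 * before the compatible->types[] comparison:
 *
 *   reg->type = PTR_TO_MEM              -> folds to PTR_TO_MEM -> accepted
 *   reg->type = PTR_TO_MEM | MEM_RDONLY -> folds to PTR_TO_MEM -> accepted
 *
 * For plain ARG_PTR_TO_MEM (the helper may write), no folding is done and
 * PTR_TO_MEM | MEM_RDONLY fails the comparison and is rejected.
 */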
6a3cd331
DM
7937static struct btf_field *
7938reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
7939{
7940 struct btf_field *field;
7941 struct btf_record *rec;
7942
7943 rec = reg_btf_record(reg);
7944 if (!rec)
7945 return NULL;
7946
7947 field = btf_record_find(rec, off, fields);
7948 if (!field)
7949 return NULL;
7950
7951 return field;
7952}
7953
25b35dd2
KKD
7954int check_func_arg_reg_off(struct bpf_verifier_env *env,
7955 const struct bpf_reg_state *reg, int regno,
8f14852e 7956 enum bpf_arg_type arg_type)
25b35dd2 7957{
184c9bdb 7958 u32 type = reg->type;
25b35dd2 7959
184c9bdb
KKD
7960 /* When referenced register is passed to release function, its fixed
7961 * offset must be 0.
7962 *
7963 * We will check that a register passed with arg_type_is_release has a
7964 * ref_obj_id when storing meta->release_regno.
7965 */
7966 if (arg_type_is_release(arg_type)) {
7967 /* ARG_PTR_TO_DYNPTR with OBJ_RELEASE is a bit special, as it
7968 * may not directly point to the object being released, but to
7969 * dynptr pointing to such object, which might be at some offset
7970 * on the stack. In that case, we simply fall back to the
7971 * default handling.
7972 */
7973 if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
7974 return 0;
6a3cd331 7975
184c9bdb
KKD
7976 /* Doing check_ptr_off_reg check for the offset will catch this
7977 * because fixed_off_ok is false, but checking here allows us
7978 * to give the user a better error message.
7979 */
7980 if (reg->off) {
7981 verbose(env, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n",
7982 regno);
7983 return -EINVAL;
7984 }
7985 return __check_ptr_off_reg(env, reg, regno, false);
7986 }
7987
7988 switch (type) {
7989 /* Pointer types where both fixed and variable offset is explicitly allowed: */
97e03f52 7990 case PTR_TO_STACK:
25b35dd2
KKD
7991 case PTR_TO_PACKET:
7992 case PTR_TO_PACKET_META:
7993 case PTR_TO_MAP_KEY:
7994 case PTR_TO_MAP_VALUE:
7995 case PTR_TO_MEM:
7996 case PTR_TO_MEM | MEM_RDONLY:
894f2a8b 7997 case PTR_TO_MEM | MEM_RINGBUF:
25b35dd2
KKD
7998 case PTR_TO_BUF:
7999 case PTR_TO_BUF | MEM_RDONLY:
97e03f52 8000 case SCALAR_VALUE:
184c9bdb 8001 return 0;
25b35dd2
KKD
8002 /* All the rest must be rejected, except PTR_TO_BTF_ID which allows
8003 * fixed offset.
8004 */
8005 case PTR_TO_BTF_ID:
282de143 8006 case PTR_TO_BTF_ID | MEM_ALLOC:
3f00c523 8007 case PTR_TO_BTF_ID | PTR_TRUSTED:
fca1aa75 8008 case PTR_TO_BTF_ID | MEM_RCU:
6a3cd331 8009 case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
24d5bb80 8010 /* When referenced PTR_TO_BTF_ID is passed to release function,
184c9bdb
KKD
8011 * its fixed offset must be 0. In the other cases, fixed offset
8012 * can be non-zero. This was already checked above. So pass
8013 * fixed_off_ok as true to allow fixed offset for all other
8014 * cases. var_off always must be 0 for PTR_TO_BTF_ID, hence we
8015 * still need to do checks instead of returning.
24d5bb80 8016 */
184c9bdb 8017 return __check_ptr_off_reg(env, reg, regno, true);
25b35dd2 8018 default:
184c9bdb 8019 return __check_ptr_off_reg(env, reg, regno, false);
25b35dd2 8020 }
25b35dd2
KKD
8021}
8022
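/* Illustration (editor's sketch in BPF C, assuming the bpf_sk_lookup_tcp()
 * and bpf_sk_release() helpers): a referenced pointer passed to a release
 * helper must carry a fixed offset of 0, so of the two calls below only
 * the second verifies:
 *
 *   struct bpf_sock *sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), -1, 0);
 *   if (!sk)
 *           return 0;
 *   bpf_sk_release((void *)sk + 4);  // rejected: R1 has nonzero offset
 *   bpf_sk_release(sk);              // accepted: offset 0
 */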
485ec51e
JK
8023static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env,
8024 const struct bpf_func_proto *fn,
8025 struct bpf_reg_state *regs)
8026{
8027 struct bpf_reg_state *state = NULL;
8028 int i;
8029
8030 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++)
8031 if (arg_type_is_dynptr(fn->arg_type[i])) {
8032 if (state) {
8033 verbose(env, "verifier internal error: multiple dynptr args\n");
8034 return NULL;
8035 }
8036 state = &regs[BPF_REG_1 + i];
8037 }
8038
8039 if (!state)
8040 verbose(env, "verifier internal error: no dynptr arg found\n");
8041
8042 return state;
8043}
8044
f8064ab9 8045static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
34d4ef57
JK
8046{
8047 struct bpf_func_state *state = func(env, reg);
27060531 8048 int spi;
34d4ef57 8049
27060531 8050 if (reg->type == CONST_PTR_TO_DYNPTR)
f8064ab9
KKD
8051 return reg->id;
8052 spi = dynptr_get_spi(env, reg);
8053 if (spi < 0)
8054 return spi;
8055 return state->stack[spi].spilled_ptr.id;
8056}
8057
79168a66 8058static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
34d4ef57
JK
8059{
8060 struct bpf_func_state *state = func(env, reg);
27060531 8061 int spi;
27060531 8062
27060531
KKD
8063 if (reg->type == CONST_PTR_TO_DYNPTR)
8064 return reg->ref_obj_id;
79168a66
KKD
8065 spi = dynptr_get_spi(env, reg);
8066 if (spi < 0)
8067 return spi;
27060531 8068 return state->stack[spi].spilled_ptr.ref_obj_id;
34d4ef57
JK
8069}
8070
b5964b96
JK
8071static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env,
8072 struct bpf_reg_state *reg)
8073{
8074 struct bpf_func_state *state = func(env, reg);
8075 int spi;
8076
8077 if (reg->type == CONST_PTR_TO_DYNPTR)
8078 return reg->dynptr.type;
8079
8080 spi = __get_spi(reg->off);
8081 if (spi < 0) {
8082 verbose(env, "verifier internal error: invalid spi when querying dynptr type\n");
8083 return BPF_DYNPTR_TYPE_INVALID;
8084 }
8085
8086 return state->stack[spi].spilled_ptr.dynptr.type;
8087}
8088
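/* Illustration (editor's sketch): the accessors above read dynptr state
 * from one of two places. A dynptr the program creates lives in its own
 * stack frame, so its type/id/ref_obj_id are tracked in the stack slot
 * (state->stack[spi].spilled_ptr) found via the register's stack offset:
 *
 *   struct bpf_dynptr ptr;                    // reg: PTR_TO_STACK + off
 *   bpf_dynptr_from_mem(data, size, 0, &ptr);
 *
 * A dynptr the kernel hands to a callback (e.g. the first argument of the
 * bpf_user_ringbuf_drain() callback) is CONST_PTR_TO_DYNPTR and carries
 * the same state directly in the register itself.
 */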
af7ec138
YS
8089static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
8090 struct bpf_call_arg_meta *meta,
1d18feb2
JK
8091 const struct bpf_func_proto *fn,
8092 int insn_idx)
17a52670 8093{
af7ec138 8094 u32 regno = BPF_REG_1 + arg;
638f5b90 8095 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
af7ec138 8096 enum bpf_arg_type arg_type = fn->arg_type[arg];
f79e7ea5 8097 enum bpf_reg_type type = reg->type;
508362ac 8098 u32 *arg_btf_id = NULL;
17a52670
AS
8099 int err = 0;
8100
80f1d68c 8101 if (arg_type == ARG_DONTCARE)
17a52670
AS
8102 return 0;
8103
dc503a8a
EC
8104 err = check_reg_arg(env, regno, SRC_OP);
8105 if (err)
8106 return err;
17a52670 8107
1be7f75d
AS
8108 if (arg_type == ARG_ANYTHING) {
8109 if (is_pointer_value(env, regno)) {
61bd5218
JK
8110 verbose(env, "R%d leaks addr into helper function\n",
8111 regno);
1be7f75d
AS
8112 return -EACCES;
8113 }
80f1d68c 8114 return 0;
1be7f75d 8115 }
80f1d68c 8116
de8f3a83 8117 if (type_is_pkt_pointer(type) &&
3a0af8fd 8118 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
61bd5218 8119 verbose(env, "helper access to the packet is not allowed\n");
6841de8b
AS
8120 return -EACCES;
8121 }
8122
16d1e00c 8123 if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
912f442c
LB
8124 err = resolve_map_arg_type(env, meta, &arg_type);
8125 if (err)
8126 return err;
8127 }
8128
48946bd6 8129 if (register_is_null(reg) && type_may_be_null(arg_type))
fd1b0d60
LB
8130 /* A NULL register has a SCALAR_VALUE type, so skip
8131 * type checking.
8132 */
8133 goto skip_type_check;
8134
508362ac 8135 /* arg_btf_id and arg_size are in a union. */
4e814da0
KKD
8136 if (base_type(arg_type) == ARG_PTR_TO_BTF_ID ||
8137 base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
508362ac
MM
8138 arg_btf_id = fn->arg_btf_id[arg];
8139
8140 err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
f79e7ea5
LB
8141 if (err)
8142 return err;
8143
8f14852e 8144 err = check_func_arg_reg_off(env, reg, regno, arg_type);
25b35dd2
KKD
8145 if (err)
8146 return err;
d7b9454a 8147
fd1b0d60 8148skip_type_check:
8f14852e 8149 if (arg_type_is_release(arg_type)) {
bc34dee6
JK
8150 if (arg_type_is_dynptr(arg_type)) {
8151 struct bpf_func_state *state = func(env, reg);
27060531 8152 int spi;
bc34dee6 8153
27060531
KKD
8154 /* Only a dynptr created on the stack can be released, so
8155 * the get_spi and spilled_ptr stack state checks below
8156 * apply only to PTR_TO_STACK, before handing off to
8157 * process_dynptr_func.
8158 */
8159 if (reg->type == PTR_TO_STACK) {
79168a66 8160 spi = dynptr_get_spi(env, reg);
f5b625e5 8161 if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) {
27060531
KKD
8162 verbose(env, "arg %d is an unacquired reference\n", regno);
8163 return -EINVAL;
8164 }
8165 } else {
8166 verbose(env, "cannot release unowned const bpf_dynptr\n");
bc34dee6
JK
8167 return -EINVAL;
8168 }
8169 } else if (!reg->ref_obj_id && !register_is_null(reg)) {
8f14852e
KKD
8170 verbose(env, "R%d must be referenced when passed to release function\n",
8171 regno);
8172 return -EINVAL;
8173 }
8174 if (meta->release_regno) {
8175 verbose(env, "verifier internal error: more than one release argument\n");
8176 return -EFAULT;
8177 }
8178 meta->release_regno = regno;
8179 }
8180
02f7c958 8181 if (reg->ref_obj_id) {
457f4436
AN
8182 if (meta->ref_obj_id) {
8183 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
8184 regno, reg->ref_obj_id,
8185 meta->ref_obj_id);
8186 return -EFAULT;
8187 }
8188 meta->ref_obj_id = reg->ref_obj_id;
17a52670
AS
8189 }
8190
8ab4cdcf
JK
8191 switch (base_type(arg_type)) {
8192 case ARG_CONST_MAP_PTR:
17a52670 8193 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
3e8ce298
AS
8194 if (meta->map_ptr) {
8195 /* Use map_uid (which is unique id of inner map) to reject:
8196 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
8197 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
8198 * if (inner_map1 && inner_map2) {
8199 * timer = bpf_map_lookup_elem(inner_map1);
8200 * if (timer)
8201 * // mismatch would have been allowed
8202 * bpf_timer_init(timer, inner_map2);
8203 * }
8204 *
8205 * Comparing map_ptr is enough to distinguish normal and outer maps.
8206 */
8207 if (meta->map_ptr != reg->map_ptr ||
8208 meta->map_uid != reg->map_uid) {
8209 verbose(env,
8210 "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
8211 meta->map_uid, reg->map_uid);
8212 return -EINVAL;
8213 }
b00628b1 8214 }
33ff9823 8215 meta->map_ptr = reg->map_ptr;
3e8ce298 8216 meta->map_uid = reg->map_uid;
8ab4cdcf
JK
8217 break;
8218 case ARG_PTR_TO_MAP_KEY:
17a52670
AS
8219 /* bpf_map_xxx(..., map_ptr, ..., key) call:
8220 * check that [key, key + map->key_size) are within
8221 * stack limits and initialized
8222 */
33ff9823 8223 if (!meta->map_ptr) {
17a52670
AS
8224 /* in function declaration map_ptr must come before
8225 * map_key, so that it's verified and known before
8226 * we have to check map_key here. Otherwise it means
8227 * that kernel subsystem misconfigured verifier
8228 */
61bd5218 8229 verbose(env, "invalid map_ptr to access map->key\n");
17a52670
AS
8230 return -EACCES;
8231 }
d71962f3
PC
8232 err = check_helper_mem_access(env, regno,
8233 meta->map_ptr->key_size, false,
8234 NULL);
8ab4cdcf
JK
8235 break;
8236 case ARG_PTR_TO_MAP_VALUE:
48946bd6
HL
8237 if (type_may_be_null(arg_type) && register_is_null(reg))
8238 return 0;
8239
17a52670
AS
8240 /* bpf_map_xxx(..., map_ptr, ..., value) call:
8241 * check [value, value + map->value_size) validity
8242 */
33ff9823 8243 if (!meta->map_ptr) {
17a52670 8244 /* kernel subsystem misconfigured verifier */
61bd5218 8245 verbose(env, "invalid map_ptr to access map->value\n");
17a52670
AS
8246 return -EACCES;
8247 }
16d1e00c 8248 meta->raw_mode = arg_type & MEM_UNINIT;
d71962f3
PC
8249 err = check_helper_mem_access(env, regno,
8250 meta->map_ptr->value_size, false,
2ea864c5 8251 meta);
8ab4cdcf
JK
8252 break;
8253 case ARG_PTR_TO_PERCPU_BTF_ID:
eaa6bcb7
HL
8254 if (!reg->btf_id) {
8255 verbose(env, "Helper has invalid btf_id in R%d\n", regno);
8256 return -EACCES;
8257 }
22dc4a0f 8258 meta->ret_btf = reg->btf;
eaa6bcb7 8259 meta->ret_btf_id = reg->btf_id;
8ab4cdcf
JK
8260 break;
8261 case ARG_PTR_TO_SPIN_LOCK:
5d92ddc3
DM
8262 if (in_rbtree_lock_required_cb(env)) {
8263 verbose(env, "can't spin_{lock,unlock} in rbtree cb\n");
8264 return -EACCES;
8265 }
c18f0b6a 8266 if (meta->func_id == BPF_FUNC_spin_lock) {
ac50fe51
KKD
8267 err = process_spin_lock(env, regno, true);
8268 if (err)
8269 return err;
c18f0b6a 8270 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
ac50fe51
KKD
8271 err = process_spin_lock(env, regno, false);
8272 if (err)
8273 return err;
c18f0b6a
LB
8274 } else {
8275 verbose(env, "verifier internal error\n");
8276 return -EFAULT;
8277 }
8ab4cdcf
JK
8278 break;
8279 case ARG_PTR_TO_TIMER:
ac50fe51
KKD
8280 err = process_timer_func(env, regno, meta);
8281 if (err)
8282 return err;
8ab4cdcf
JK
8283 break;
8284 case ARG_PTR_TO_FUNC:
69c087ba 8285 meta->subprogno = reg->subprogno;
8ab4cdcf
JK
8286 break;
8287 case ARG_PTR_TO_MEM:
a2bbe7cc
LB
8288 /* The access to this pointer is only checked when we hit the
8289 * next is_mem_size argument below.
8290 */
16d1e00c 8291 meta->raw_mode = arg_type & MEM_UNINIT;
508362ac
MM
8292 if (arg_type & MEM_FIXED_SIZE) {
8293 err = check_helper_mem_access(env, regno,
8294 fn->arg_size[arg], false,
8295 meta);
8296 }
8ab4cdcf
JK
8297 break;
8298 case ARG_CONST_SIZE:
8299 err = check_mem_size_reg(env, reg, regno, false, meta);
8300 break;
8301 case ARG_CONST_SIZE_OR_ZERO:
8302 err = check_mem_size_reg(env, reg, regno, true, meta);
8303 break;
8304 case ARG_PTR_TO_DYNPTR:
361f129f 8305 err = process_dynptr_func(env, regno, insn_idx, arg_type, 0);
ac50fe51
KKD
8306 if (err)
8307 return err;
8ab4cdcf
JK
8308 break;
8309 case ARG_CONST_ALLOC_SIZE_OR_ZERO:
457f4436 8310 if (!tnum_is_const(reg->var_off)) {
28a8add6 8311 verbose(env, "R%d is not a known constant\n",
457f4436
AN
8312 regno);
8313 return -EACCES;
8314 }
8315 meta->mem_size = reg->var_off.value;
2fc31465
KKD
8316 err = mark_chain_precision(env, regno);
8317 if (err)
8318 return err;
8ab4cdcf
JK
8319 break;
8320 case ARG_PTR_TO_INT:
8321 case ARG_PTR_TO_LONG:
8322 {
57c3bb72
AI
8323 int size = int_ptr_type_to_size(arg_type);
8324
8325 err = check_helper_mem_access(env, regno, size, false, meta);
8326 if (err)
8327 return err;
8328 err = check_ptr_alignment(env, reg, 0, size, true);
8ab4cdcf
JK
8329 break;
8330 }
8331 case ARG_PTR_TO_CONST_STR:
8332 {
fff13c4b
FR
8333 struct bpf_map *map = reg->map_ptr;
8334 int map_off;
8335 u64 map_addr;
8336 char *str_ptr;
8337
a8fad73e 8338 if (!bpf_map_is_rdonly(map)) {
fff13c4b
FR
8339 verbose(env, "R%d does not point to a readonly map\n", regno);
8340 return -EACCES;
8341 }
8342
8343 if (!tnum_is_const(reg->var_off)) {
8344 verbose(env, "R%d is not a constant address\n", regno);
8345 return -EACCES;
8346 }
8347
8348 if (!map->ops->map_direct_value_addr) {
8349 verbose(env, "no direct value access support for this map type\n");
8350 return -EACCES;
8351 }
8352
8353 err = check_map_access(env, regno, reg->off,
61df10c7
KKD
8354 map->value_size - reg->off, false,
8355 ACCESS_HELPER);
fff13c4b
FR
8356 if (err)
8357 return err;
8358
8359 map_off = reg->off + reg->var_off.value;
8360 err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
8361 if (err) {
8362 verbose(env, "direct value access on string failed\n");
8363 return err;
8364 }
8365
8366 str_ptr = (char *)(long)(map_addr);
8367 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
8368 verbose(env, "string is not zero-terminated\n");
8369 return -EINVAL;
8370 }
8ab4cdcf
JK
8371 break;
8372 }
8373 case ARG_PTR_TO_KPTR:
ac50fe51
KKD
8374 err = process_kptr_func(env, regno, meta);
8375 if (err)
8376 return err;
8ab4cdcf 8377 break;
17a52670
AS
8378 }
8379
8380 return err;
8381}
8382
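/* Illustration (editor's sketch in BPF C): the ARG_PTR_TO_MEM /
 * ARG_CONST_SIZE handling above is what validates a buf+len helper call
 * such as bpf_probe_read_kernel(); the access through 'buf' is only
 * checked once the paired size argument is processed:
 *
 *   char buf[16];
 *   // arg1: ARG_PTR_TO_UNINIT_MEM, arg2: ARG_CONST_SIZE_OR_ZERO
 *   bpf_probe_read_kernel(buf, sizeof(buf), src);  // accepted
 *   bpf_probe_read_kernel(buf, 32, src);           // rejected: exceeds the
 *                                                  //  16-byte stack buffer
 */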
0126240f
LB
8383static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
8384{
8385 enum bpf_attach_type eatype = env->prog->expected_attach_type;
7e40781c 8386 enum bpf_prog_type type = resolve_prog_type(env->prog);
0126240f
LB
8387
8388 if (func_id != BPF_FUNC_map_update_elem)
8389 return false;
8390
8391 /* It's not possible to get access to a locked struct sock in these
8392 * contexts, so updating is safe.
8393 */
8394 switch (type) {
8395 case BPF_PROG_TYPE_TRACING:
8396 if (eatype == BPF_TRACE_ITER)
8397 return true;
8398 break;
8399 case BPF_PROG_TYPE_SOCKET_FILTER:
8400 case BPF_PROG_TYPE_SCHED_CLS:
8401 case BPF_PROG_TYPE_SCHED_ACT:
8402 case BPF_PROG_TYPE_XDP:
8403 case BPF_PROG_TYPE_SK_REUSEPORT:
8404 case BPF_PROG_TYPE_FLOW_DISSECTOR:
8405 case BPF_PROG_TYPE_SK_LOOKUP:
8406 return true;
8407 default:
8408 break;
8409 }
8410
8411 verbose(env, "cannot update sockmap in this context\n");
8412 return false;
8413}
8414
e411901c
MF
8415static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
8416{
95acd881
TA
8417 return env->prog->jit_requested &&
8418 bpf_jit_supports_subprog_tailcalls();
e411901c
MF
8419}
8420
61bd5218
JK
8421static int check_map_func_compatibility(struct bpf_verifier_env *env,
8422 struct bpf_map *map, int func_id)
35578d79 8423{
35578d79
KX
8424 if (!map)
8425 return 0;
8426
6aff67c8
AS
8427 /* We need a two way check, first is from map perspective ... */
8428 switch (map->map_type) {
8429 case BPF_MAP_TYPE_PROG_ARRAY:
8430 if (func_id != BPF_FUNC_tail_call)
8431 goto error;
8432 break;
8433 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
8434 if (func_id != BPF_FUNC_perf_event_read &&
908432ca 8435 func_id != BPF_FUNC_perf_event_output &&
a7658e1a 8436 func_id != BPF_FUNC_skb_output &&
d831ee84
EC
8437 func_id != BPF_FUNC_perf_event_read_value &&
8438 func_id != BPF_FUNC_xdp_output)
6aff67c8
AS
8439 goto error;
8440 break;
457f4436
AN
8441 case BPF_MAP_TYPE_RINGBUF:
8442 if (func_id != BPF_FUNC_ringbuf_output &&
8443 func_id != BPF_FUNC_ringbuf_reserve &&
bc34dee6
JK
8444 func_id != BPF_FUNC_ringbuf_query &&
8445 func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
8446 func_id != BPF_FUNC_ringbuf_submit_dynptr &&
8447 func_id != BPF_FUNC_ringbuf_discard_dynptr)
457f4436
AN
8448 goto error;
8449 break;
583c1f42 8450 case BPF_MAP_TYPE_USER_RINGBUF:
20571567
DV
8451 if (func_id != BPF_FUNC_user_ringbuf_drain)
8452 goto error;
8453 break;
6aff67c8
AS
8454 case BPF_MAP_TYPE_STACK_TRACE:
8455 if (func_id != BPF_FUNC_get_stackid)
8456 goto error;
8457 break;
4ed8ec52 8458 case BPF_MAP_TYPE_CGROUP_ARRAY:
60747ef4 8459 if (func_id != BPF_FUNC_skb_under_cgroup &&
60d20f91 8460 func_id != BPF_FUNC_current_task_under_cgroup)
4a482f34
MKL
8461 goto error;
8462 break;
cd339431 8463 case BPF_MAP_TYPE_CGROUP_STORAGE:
b741f163 8464 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
cd339431
RG
8465 if (func_id != BPF_FUNC_get_local_storage)
8466 goto error;
8467 break;
546ac1ff 8468 case BPF_MAP_TYPE_DEVMAP:
6f9d451a 8469 case BPF_MAP_TYPE_DEVMAP_HASH:
0cdbb4b0
THJ
8470 if (func_id != BPF_FUNC_redirect_map &&
8471 func_id != BPF_FUNC_map_lookup_elem)
546ac1ff
JF
8472 goto error;
8473 break;
fbfc504a
BT
8474 /* Restrict bpf side of cpumap and xskmap, open when use-cases
8475 * appear.
8476 */
6710e112
JDB
8477 case BPF_MAP_TYPE_CPUMAP:
8478 if (func_id != BPF_FUNC_redirect_map)
8479 goto error;
8480 break;
fada7fdc
JL
8481 case BPF_MAP_TYPE_XSKMAP:
8482 if (func_id != BPF_FUNC_redirect_map &&
8483 func_id != BPF_FUNC_map_lookup_elem)
8484 goto error;
8485 break;
56f668df 8486 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
bcc6b1b7 8487 case BPF_MAP_TYPE_HASH_OF_MAPS:
56f668df
MKL
8488 if (func_id != BPF_FUNC_map_lookup_elem)
8489 goto error;
16a43625 8490 break;
174a79ff
JF
8491 case BPF_MAP_TYPE_SOCKMAP:
8492 if (func_id != BPF_FUNC_sk_redirect_map &&
8493 func_id != BPF_FUNC_sock_map_update &&
4f738adb 8494 func_id != BPF_FUNC_map_delete_elem &&
9fed9000 8495 func_id != BPF_FUNC_msg_redirect_map &&
64d85290 8496 func_id != BPF_FUNC_sk_select_reuseport &&
0126240f
LB
8497 func_id != BPF_FUNC_map_lookup_elem &&
8498 !may_update_sockmap(env, func_id))
174a79ff
JF
8499 goto error;
8500 break;
81110384
JF
8501 case BPF_MAP_TYPE_SOCKHASH:
8502 if (func_id != BPF_FUNC_sk_redirect_hash &&
8503 func_id != BPF_FUNC_sock_hash_update &&
8504 func_id != BPF_FUNC_map_delete_elem &&
9fed9000 8505 func_id != BPF_FUNC_msg_redirect_hash &&
64d85290 8506 func_id != BPF_FUNC_sk_select_reuseport &&
0126240f
LB
8507 func_id != BPF_FUNC_map_lookup_elem &&
8508 !may_update_sockmap(env, func_id))
81110384
JF
8509 goto error;
8510 break;
2dbb9b9e
MKL
8511 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
8512 if (func_id != BPF_FUNC_sk_select_reuseport)
8513 goto error;
8514 break;
f1a2e44a
MV
8515 case BPF_MAP_TYPE_QUEUE:
8516 case BPF_MAP_TYPE_STACK:
8517 if (func_id != BPF_FUNC_map_peek_elem &&
8518 func_id != BPF_FUNC_map_pop_elem &&
8519 func_id != BPF_FUNC_map_push_elem)
8520 goto error;
8521 break;
6ac99e8f
MKL
8522 case BPF_MAP_TYPE_SK_STORAGE:
8523 if (func_id != BPF_FUNC_sk_storage_get &&
9db44fdd
KKD
8524 func_id != BPF_FUNC_sk_storage_delete &&
8525 func_id != BPF_FUNC_kptr_xchg)
6ac99e8f
MKL
8526 goto error;
8527 break;
8ea63684
KS
8528 case BPF_MAP_TYPE_INODE_STORAGE:
8529 if (func_id != BPF_FUNC_inode_storage_get &&
9db44fdd
KKD
8530 func_id != BPF_FUNC_inode_storage_delete &&
8531 func_id != BPF_FUNC_kptr_xchg)
8ea63684
KS
8532 goto error;
8533 break;
4cf1bc1f
KS
8534 case BPF_MAP_TYPE_TASK_STORAGE:
8535 if (func_id != BPF_FUNC_task_storage_get &&
9db44fdd
KKD
8536 func_id != BPF_FUNC_task_storage_delete &&
8537 func_id != BPF_FUNC_kptr_xchg)
4cf1bc1f
KS
8538 goto error;
8539 break;
c4bcfb38
YS
8540 case BPF_MAP_TYPE_CGRP_STORAGE:
8541 if (func_id != BPF_FUNC_cgrp_storage_get &&
9db44fdd
KKD
8542 func_id != BPF_FUNC_cgrp_storage_delete &&
8543 func_id != BPF_FUNC_kptr_xchg)
c4bcfb38
YS
8544 goto error;
8545 break;
9330986c
JK
8546 case BPF_MAP_TYPE_BLOOM_FILTER:
8547 if (func_id != BPF_FUNC_map_peek_elem &&
8548 func_id != BPF_FUNC_map_push_elem)
8549 goto error;
8550 break;
6aff67c8
AS
8551 default:
8552 break;
8553 }
8554
8555 /* ... and second from the function itself. */
8556 switch (func_id) {
8557 case BPF_FUNC_tail_call:
8558 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
8559 goto error;
e411901c
MF
8560 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
8561 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
f4d7e40a
AS
8562 return -EINVAL;
8563 }
6aff67c8
AS
8564 break;
8565 case BPF_FUNC_perf_event_read:
8566 case BPF_FUNC_perf_event_output:
908432ca 8567 case BPF_FUNC_perf_event_read_value:
a7658e1a 8568 case BPF_FUNC_skb_output:
d831ee84 8569 case BPF_FUNC_xdp_output:
6aff67c8
AS
8570 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
8571 goto error;
8572 break;
5b029a32
DB
8573 case BPF_FUNC_ringbuf_output:
8574 case BPF_FUNC_ringbuf_reserve:
8575 case BPF_FUNC_ringbuf_query:
bc34dee6
JK
8576 case BPF_FUNC_ringbuf_reserve_dynptr:
8577 case BPF_FUNC_ringbuf_submit_dynptr:
8578 case BPF_FUNC_ringbuf_discard_dynptr:
5b029a32
DB
8579 if (map->map_type != BPF_MAP_TYPE_RINGBUF)
8580 goto error;
8581 break;
20571567
DV
8582 case BPF_FUNC_user_ringbuf_drain:
8583 if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF)
8584 goto error;
8585 break;
6aff67c8
AS
8586 case BPF_FUNC_get_stackid:
8587 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
8588 goto error;
8589 break;
60d20f91 8590 case BPF_FUNC_current_task_under_cgroup:
747ea55e 8591 case BPF_FUNC_skb_under_cgroup:
4a482f34
MKL
8592 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
8593 goto error;
8594 break;
97f91a7c 8595 case BPF_FUNC_redirect_map:
9c270af3 8596 if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
6f9d451a 8597 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
fbfc504a
BT
8598 map->map_type != BPF_MAP_TYPE_CPUMAP &&
8599 map->map_type != BPF_MAP_TYPE_XSKMAP)
97f91a7c
JF
8600 goto error;
8601 break;
174a79ff 8602 case BPF_FUNC_sk_redirect_map:
4f738adb 8603 case BPF_FUNC_msg_redirect_map:
81110384 8604 case BPF_FUNC_sock_map_update:
174a79ff
JF
8605 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
8606 goto error;
8607 break;
81110384
JF
8608 case BPF_FUNC_sk_redirect_hash:
8609 case BPF_FUNC_msg_redirect_hash:
8610 case BPF_FUNC_sock_hash_update:
8611 if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
174a79ff
JF
8612 goto error;
8613 break;
cd339431 8614 case BPF_FUNC_get_local_storage:
b741f163
RG
8615 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
8616 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
cd339431
RG
8617 goto error;
8618 break;
2dbb9b9e 8619 case BPF_FUNC_sk_select_reuseport:
9fed9000
JS
8620 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
8621 map->map_type != BPF_MAP_TYPE_SOCKMAP &&
8622 map->map_type != BPF_MAP_TYPE_SOCKHASH)
2dbb9b9e
MKL
8623 goto error;
8624 break;
f1a2e44a 8625 case BPF_FUNC_map_pop_elem:
f1a2e44a
MV
8626 if (map->map_type != BPF_MAP_TYPE_QUEUE &&
8627 map->map_type != BPF_MAP_TYPE_STACK)
8628 goto error;
8629 break;
9330986c
JK
8630 case BPF_FUNC_map_peek_elem:
8631 case BPF_FUNC_map_push_elem:
8632 if (map->map_type != BPF_MAP_TYPE_QUEUE &&
8633 map->map_type != BPF_MAP_TYPE_STACK &&
8634 map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
8635 goto error;
8636 break;
07343110
FZ
8637 case BPF_FUNC_map_lookup_percpu_elem:
8638 if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
8639 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
8640 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
8641 goto error;
8642 break;
6ac99e8f
MKL
8643 case BPF_FUNC_sk_storage_get:
8644 case BPF_FUNC_sk_storage_delete:
8645 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
8646 goto error;
8647 break;
8ea63684
KS
8648 case BPF_FUNC_inode_storage_get:
8649 case BPF_FUNC_inode_storage_delete:
8650 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
8651 goto error;
8652 break;
4cf1bc1f
KS
8653 case BPF_FUNC_task_storage_get:
8654 case BPF_FUNC_task_storage_delete:
8655 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
8656 goto error;
8657 break;
c4bcfb38
YS
8658 case BPF_FUNC_cgrp_storage_get:
8659 case BPF_FUNC_cgrp_storage_delete:
8660 if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE)
8661 goto error;
8662 break;
6aff67c8
AS
8663 default:
8664 break;
35578d79
KX
8665 }
8666
8667 return 0;
6aff67c8 8668error:
61bd5218 8669 verbose(env, "cannot pass map_type %d into func %s#%d\n",
ebb676da 8670 map->map_type, func_id_name(func_id), func_id);
6aff67c8 8671 return -EINVAL;
35578d79
KX
8672}
8673
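/* Illustration (editor's sketch in BPF C): both directions of the check
 * above. bpf_tail_call() requires a BPF_MAP_TYPE_PROG_ARRAY, and a prog
 * array accepts no other helper, so with a hash map:
 *
 *   struct { __uint(type, BPF_MAP_TYPE_HASH); ... } htab SEC(".maps");
 *
 *   bpf_tail_call(ctx, &htab, 0);  // rejected: "cannot pass map_type %d
 *                                  //  into func bpf_tail_call#%d"
 */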
90133415 8674static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
435faee1
DB
8675{
8676 int count = 0;
8677
39f19ebb 8678 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 8679 count++;
39f19ebb 8680 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 8681 count++;
39f19ebb 8682 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 8683 count++;
39f19ebb 8684 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 8685 count++;
39f19ebb 8686 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
435faee1
DB
8687 count++;
8688
90133415
DB
8689 /* We only support one arg being in raw mode at the moment,
8690 * which is sufficient for the helper functions we have
8691 * right now.
8692 */
8693 return count <= 1;
8694}
8695
508362ac 8696static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
90133415 8697{
508362ac
MM
8698 bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
8699 bool has_size = fn->arg_size[arg] != 0;
8700 bool is_next_size = false;
8701
8702 if (arg + 1 < ARRAY_SIZE(fn->arg_type))
8703 is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);
8704
8705 if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
8706 return is_next_size;
8707
8708 return has_size == is_next_size || is_next_size == is_fixed;
90133415
DB
8709}
8710
8711static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
8712{
8713 /* bpf_xxx(..., buf, len) call will access 'len'
8714 * bytes from memory 'buf'. Both arg types need
8715 * to be paired, so make sure there's no buggy
8716 * helper function specification.
8717 */
8718 if (arg_type_is_mem_size(fn->arg1_type) ||
508362ac
MM
8719 check_args_pair_invalid(fn, 0) ||
8720 check_args_pair_invalid(fn, 1) ||
8721 check_args_pair_invalid(fn, 2) ||
8722 check_args_pair_invalid(fn, 3) ||
8723 check_args_pair_invalid(fn, 4))
90133415
DB
8724 return false;
8725
8726 return true;
8727}
8728
9436ef6e
LB
8729static bool check_btf_id_ok(const struct bpf_func_proto *fn)
8730{
8731 int i;
8732
1df8f55a 8733 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
4e814da0
KKD
8734 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID)
8735 return !!fn->arg_btf_id[i];
8736 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK)
8737 return fn->arg_btf_id[i] == BPF_PTR_POISON;
508362ac
MM
8738 if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
8739 /* arg_btf_id and arg_size are in a union. */
8740 (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
8741 !(fn->arg_type[i] & MEM_FIXED_SIZE)))
1df8f55a
MKL
8742 return false;
8743 }
8744
9436ef6e
LB
8745 return true;
8746}
8747
0c9a7a7e 8748static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
90133415
DB
8749{
8750 return check_raw_mode_ok(fn) &&
fd978bf7 8751 check_arg_pair_ok(fn) &&
b2d8ef19 8752 check_btf_id_ok(fn) ? 0 : -EINVAL;
435faee1
DB
8753}
8754
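/* Illustration (editor's sketch, close to the proto in
 * kernel/trace/bpf_trace.c): the pairing rule enforced by
 * check_arg_pair_ok(). A size argument must directly follow the memory
 * argument it describes:
 *
 *   const struct bpf_func_proto bpf_probe_read_kernel_proto = {
 *           .func      = bpf_probe_read_kernel,
 *           .gpl_only  = true,
 *           .ret_type  = RET_INTEGER,
 *           .arg1_type = ARG_PTR_TO_UNINIT_MEM,
 *           .arg2_type = ARG_CONST_SIZE_OR_ZERO,
 *           .arg3_type = ARG_ANYTHING,
 *   };
 */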
de8f3a83
DB
8755/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
8756 * are now invalid, so turn them into unknown SCALAR_VALUE.
66e3a13e
JK
8757 *
8758 * This also applies to dynptr slices belonging to skb and xdp dynptrs,
8759 * since these slices point to packet data.
f1174f77 8760 */
b239da34 8761static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
969bf05e 8762{
b239da34
KKD
8763 struct bpf_func_state *state;
8764 struct bpf_reg_state *reg;
969bf05e 8765
b239da34 8766 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
66e3a13e 8767 if (reg_is_pkt_pointer_any(reg) || reg_is_dynptr_slice_pkt(reg))
dbd8d228 8768 mark_reg_invalid(env, reg);
b239da34 8769 }));
f4d7e40a
AS
8770}
8771
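/* Illustration (editor's sketch in BPF C): why the invalidation above is
 * needed. A helper that may reallocate packet memory (changes_pkt_data)
 * leaves previously derived pointers dangling, so they must be reloaded:
 *
 *   void *data = (void *)(long)skb->data;
 *   ...
 *   bpf_skb_pull_data(skb, 64);
 *   // 'data' is now an unknown SCALAR_VALUE; dereferencing it is
 *   // rejected until it is re-derived from skb->data.
 */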
6d94e741
AS
8772enum {
8773 AT_PKT_END = -1,
8774 BEYOND_PKT_END = -2,
8775};
8776
8777static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
8778{
8779 struct bpf_func_state *state = vstate->frame[vstate->curframe];
8780 struct bpf_reg_state *reg = &state->regs[regn];
8781
8782 if (reg->type != PTR_TO_PACKET)
8783 /* PTR_TO_PACKET_META is not supported yet */
8784 return;
8785
8786 /* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
8787 * How far beyond pkt_end it goes is unknown.
8788 * if (!range_open) it's the case of pkt >= pkt_end
8789 * if (range_open) it's the case of pkt > pkt_end
8790 * hence this pointer is at least 1 byte bigger than pkt_end
8791 */
8792 if (range_open)
8793 reg->range = BEYOND_PKT_END;
8794 else
8795 reg->range = AT_PKT_END;
8796}
8797
fd978bf7
JS
8798/* The pointer with the specified id has released its reference to kernel
8799 * resources. Identify all copies of the same pointer and clear the reference.
8800 */
8801static int release_reference(struct bpf_verifier_env *env,
1b986589 8802 int ref_obj_id)
fd978bf7 8803{
b239da34
KKD
8804 struct bpf_func_state *state;
8805 struct bpf_reg_state *reg;
1b986589 8806 int err;
fd978bf7 8807
1b986589
MKL
8808 err = release_reference_state(cur_func(env), ref_obj_id);
8809 if (err)
8810 return err;
8811
b239da34 8812 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
dbd8d228
KKD
8813 if (reg->ref_obj_id == ref_obj_id)
8814 mark_reg_invalid(env, reg);
b239da34 8815 }));
fd978bf7 8816
1b986589 8817 return 0;
fd978bf7
JS
8818}
8819
6a3cd331
DM
8820static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
8821{
8822 struct bpf_func_state *unused;
8823 struct bpf_reg_state *reg;
8824
8825 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
8826 if (type_is_non_owning_ref(reg->type))
dbd8d228 8827 mark_reg_invalid(env, reg);
6a3cd331
DM
8828 }));
8829}
8830
51c39bb1
AS
8831static void clear_caller_saved_regs(struct bpf_verifier_env *env,
8832 struct bpf_reg_state *regs)
8833{
8834 int i;
8835
8836 /* after the call registers r0 - r5 were scratched */
8837 for (i = 0; i < CALLER_SAVED_REGS; i++) {
8838 mark_reg_not_init(env, regs, caller_saved[i]);
8839 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
8840 }
8841}
8842
14351375
YS
8843typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
8844 struct bpf_func_state *caller,
8845 struct bpf_func_state *callee,
8846 int insn_idx);
8847
be2ef816
AN
8848static int set_callee_state(struct bpf_verifier_env *env,
8849 struct bpf_func_state *caller,
8850 struct bpf_func_state *callee, int insn_idx);
8851
14351375
YS
8852static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
8853 int *insn_idx, int subprog,
8854 set_callee_state_fn set_callee_state_cb)
f4d7e40a
AS
8855{
8856 struct bpf_verifier_state *state = env->cur_state;
8857 struct bpf_func_state *caller, *callee;
14351375 8858 int err;
f4d7e40a 8859
aada9ce6 8860 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
f4d7e40a 8861 verbose(env, "the call stack of %d frames is too deep\n",
aada9ce6 8862 state->curframe + 2);
f4d7e40a
AS
8863 return -E2BIG;
8864 }
8865
f4d7e40a
AS
8866 caller = state->frame[state->curframe];
8867 if (state->frame[state->curframe + 1]) {
8868 verbose(env, "verifier bug. Frame %d already allocated\n",
8869 state->curframe + 1);
8870 return -EFAULT;
8871 }
8872
95f2f26f 8873 err = btf_check_subprog_call(env, subprog, caller->regs);
51c39bb1
AS
8874 if (err == -EFAULT)
8875 return err;
fde2a388 8876 if (subprog_is_global(env, subprog)) {
51c39bb1
AS
8877 if (err) {
8878 verbose(env, "Caller passes invalid args into func#%d\n",
8879 subprog);
8880 return err;
8881 } else {
8882 if (env->log.level & BPF_LOG_LEVEL)
8883 verbose(env,
8884 "Func#%d is global and valid. Skipping.\n",
8885 subprog);
8886 clear_caller_saved_regs(env, caller->regs);
8887
45159b27 8888 /* All global functions return a 64-bit SCALAR_VALUE */
51c39bb1 8889 mark_reg_unknown(env, caller->regs, BPF_REG_0);
45159b27 8890 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
51c39bb1
AS
8891
8892 /* continue with next insn after call */
8893 return 0;
8894 }
8895 }
8896
be2ef816
AN
8897 /* set_callee_state is used for direct subprog calls, but we are
8898 * interested in validating only BPF helpers that can call subprogs as
8899 * callbacks
8900 */
5d92ddc3
DM
8901 if (set_callee_state_cb != set_callee_state) {
8902 if (bpf_pseudo_kfunc_call(insn) &&
8903 !is_callback_calling_kfunc(insn->imm)) {
8904 verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
8905 func_id_name(insn->imm), insn->imm);
8906 return -EFAULT;
8907 } else if (!bpf_pseudo_kfunc_call(insn) &&
8908 !is_callback_calling_function(insn->imm)) { /* helper */
8909 verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
8910 func_id_name(insn->imm), insn->imm);
8911 return -EFAULT;
8912 }
be2ef816
AN
8913 }
8914
bfc6bb74 8915 if (insn->code == (BPF_JMP | BPF_CALL) &&
a5bebc4f 8916 insn->src_reg == 0 &&
bfc6bb74
AS
8917 insn->imm == BPF_FUNC_timer_set_callback) {
8918 struct bpf_verifier_state *async_cb;
8919
8920 /* there is no real recursion here. timer callbacks are async */
7ddc80a4 8921 env->subprog_info[subprog].is_async_cb = true;
bfc6bb74
AS
8922 async_cb = push_async_cb(env, env->subprog_info[subprog].start,
8923 *insn_idx, subprog);
8924 if (!async_cb)
8925 return -EFAULT;
8926 callee = async_cb->frame[0];
8927 callee->async_entry_cnt = caller->async_entry_cnt + 1;
8928
8929 /* Convert bpf_timer_set_callback() args into timer callback args */
8930 err = set_callee_state_cb(env, caller, callee, *insn_idx);
8931 if (err)
8932 return err;
8933
8934 clear_caller_saved_regs(env, caller->regs);
8935 mark_reg_unknown(env, caller->regs, BPF_REG_0);
8936 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
8937 /* continue with next insn after call */
8938 return 0;
8939 }
8940
f4d7e40a
AS
8941 callee = kzalloc(sizeof(*callee), GFP_KERNEL);
8942 if (!callee)
8943 return -ENOMEM;
8944 state->frame[state->curframe + 1] = callee;
8945
8946 /* callee cannot access r0, r6 - r9 for reading and has to write
8947 * into its own stack before reading from it.
8948 * callee can read/write into caller's stack
8949 */
8950 init_func_state(env, callee,
8951 /* remember the callsite, it will be used by bpf_exit */
8952 *insn_idx /* callsite */,
8953 state->curframe + 1 /* frameno within this callchain */,
f910cefa 8954 subprog /* subprog number within this prog */);
f4d7e40a 8955
fd978bf7 8956 /* Transfer references to the callee */
c69431aa 8957 err = copy_reference_state(callee, caller);
fd978bf7 8958 if (err)
eb86559a 8959 goto err_out;
fd978bf7 8960
14351375
YS
8961 err = set_callee_state_cb(env, caller, callee, *insn_idx);
8962 if (err)
eb86559a 8963 goto err_out;
f4d7e40a 8964
51c39bb1 8965 clear_caller_saved_regs(env, caller->regs);
f4d7e40a
AS
8966
8967 /* only increment it after check_reg_arg() finished */
8968 state->curframe++;
8969
8970 /* and go analyze first insn of the callee */
14351375 8971 *insn_idx = env->subprog_info[subprog].start - 1;
f4d7e40a 8972
06ee7115 8973 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a 8974 verbose(env, "caller:\n");
0f55f9ed 8975 print_verifier_state(env, caller, true);
f4d7e40a 8976 verbose(env, "callee:\n");
0f55f9ed 8977 print_verifier_state(env, callee, true);
f4d7e40a
AS
8978 }
8979 return 0;
eb86559a
WY
8980
8981err_out:
8982 free_func_state(callee);
8983 state->frame[state->curframe + 1] = NULL;
8984 return err;
f4d7e40a
AS
8985}
8986
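/* Illustration (editor's sketch): the frame bookkeeping above caps
 * bpf-to-bpf call depth at MAX_CALL_FRAMES. A chain like
 *
 *   main -> f1 -> f2 -> ...
 *
 * is rejected with "the call stack of %d frames is too deep" once the
 * frame limit is reached, while a global function that was verified
 * independently is validated against its prototype and skipped rather
 * than descended into.
 */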
314ee05e
YS
8987int map_set_for_each_callback_args(struct bpf_verifier_env *env,
8988 struct bpf_func_state *caller,
8989 struct bpf_func_state *callee)
8990{
8991 /* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
8992 * void *callback_ctx, u64 flags);
8993 * callback_fn(struct bpf_map *map, void *key, void *value,
8994 * void *callback_ctx);
8995 */
8996 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
8997
8998 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
8999 __mark_reg_known_zero(&callee->regs[BPF_REG_2]);
9000 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
9001
9002 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
9003 __mark_reg_known_zero(&callee->regs[BPF_REG_3]);
9004 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
9005
9006 /* pointer to stack or null */
9007 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
9008
9009 /* unused */
9010 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9011 return 0;
9012}
9013
14351375
YS
9014static int set_callee_state(struct bpf_verifier_env *env,
9015 struct bpf_func_state *caller,
9016 struct bpf_func_state *callee, int insn_idx)
9017{
9018 int i;
9019
9020 /* copy r1 - r5 args that callee can access. The copy includes parent
9021 * pointers, which connects us up to the liveness chain
9022 */
9023 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
9024 callee->regs[i] = caller->regs[i];
9025 return 0;
9026}
9027
9028static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
9029 int *insn_idx)
9030{
9031 int subprog, target_insn;
9032
9033 target_insn = *insn_idx + insn->imm + 1;
9034 subprog = find_subprog(env, target_insn);
9035 if (subprog < 0) {
9036 verbose(env, "verifier bug. No program starts at insn %d\n",
9037 target_insn);
9038 return -EFAULT;
9039 }
9040
9041 return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
9042}
9043
69c087ba
YS
9044static int set_map_elem_callback_state(struct bpf_verifier_env *env,
9045 struct bpf_func_state *caller,
9046 struct bpf_func_state *callee,
9047 int insn_idx)
9048{
9049 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
9050 struct bpf_map *map;
9051 int err;
9052
9053 if (bpf_map_ptr_poisoned(insn_aux)) {
9054 verbose(env, "tail_call abusing map_ptr\n");
9055 return -EINVAL;
9056 }
9057
9058 map = BPF_MAP_PTR(insn_aux->map_ptr_state);
9059 if (!map->ops->map_set_for_each_callback_args ||
9060 !map->ops->map_for_each_callback) {
9061 verbose(env, "callback function not allowed for map\n");
9062 return -ENOTSUPP;
9063 }
9064
9065 err = map->ops->map_set_for_each_callback_args(env, caller, callee);
9066 if (err)
9067 return err;
9068
9069 callee->in_callback_fn = true;
1bfe26fb 9070 callee->callback_ret_range = tnum_range(0, 1);
69c087ba
YS
9071 return 0;
9072}
9073
e6f2dd0f
JK
9074static int set_loop_callback_state(struct bpf_verifier_env *env,
9075 struct bpf_func_state *caller,
9076 struct bpf_func_state *callee,
9077 int insn_idx)
9078{
9079 /* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
9080 * u64 flags);
9081 * callback_fn(u32 index, void *callback_ctx);
9082 */
9083 callee->regs[BPF_REG_1].type = SCALAR_VALUE;
9084 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
9085
9086 /* unused */
9087 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
9088 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9089 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9090
9091 callee->in_callback_fn = true;
1bfe26fb 9092 callee->callback_ret_range = tnum_range(0, 1);
e6f2dd0f
JK
9093 return 0;
9094}
9095
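/* Illustration (editor's sketch in BPF C): the register setup above
 * matches the callback contract of bpf_loop():
 *
 *   static long cb(u32 index, void *ctx)  // r1: SCALAR, r2: copy of r3
 *   {
 *           return 0;                     // must stay within [0, 1]
 *   }
 *   ...
 *   bpf_loop(128, cb, &my_ctx, 0);
 */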
b00628b1
AS
9096static int set_timer_callback_state(struct bpf_verifier_env *env,
9097 struct bpf_func_state *caller,
9098 struct bpf_func_state *callee,
9099 int insn_idx)
9100{
9101 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
9102
9103 /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
9104 * callback_fn(struct bpf_map *map, void *key, void *value);
9105 */
9106 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
9107 __mark_reg_known_zero(&callee->regs[BPF_REG_1]);
9108 callee->regs[BPF_REG_1].map_ptr = map_ptr;
9109
9110 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
9111 __mark_reg_known_zero(&callee->regs[BPF_REG_2]);
9112 callee->regs[BPF_REG_2].map_ptr = map_ptr;
9113
9114 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
9115 __mark_reg_known_zero(&callee->regs[BPF_REG_3]);
9116 callee->regs[BPF_REG_3].map_ptr = map_ptr;
9117
9118 /* unused */
9119 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9120 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
bfc6bb74 9121 callee->in_async_callback_fn = true;
1bfe26fb 9122 callee->callback_ret_range = tnum_range(0, 1);
b00628b1
AS
9123 return 0;
9124}
9125
7c7e3d31
SL
9126static int set_find_vma_callback_state(struct bpf_verifier_env *env,
9127 struct bpf_func_state *caller,
9128 struct bpf_func_state *callee,
9129 int insn_idx)
9130{
9131 /* bpf_find_vma(struct task_struct *task, u64 addr,
9132 * void *callback_fn, void *callback_ctx, u64 flags)
9133 * (callback_fn)(struct task_struct *task,
9134 * struct vm_area_struct *vma, void *callback_ctx);
9135 */
9136 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
9137
9138 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
9139 __mark_reg_known_zero(&callee->regs[BPF_REG_2]);
9140 callee->regs[BPF_REG_2].btf = btf_vmlinux;
d19ddb47 9141 callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA],
7c7e3d31
SL
9142
9143 /* pointer to stack or null */
9144 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
9145
9146 /* unused */
9147 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9148 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9149 callee->in_callback_fn = true;
1bfe26fb 9150 callee->callback_ret_range = tnum_range(0, 1);
7c7e3d31
SL
9151 return 0;
9152}
9153
20571567
DV
9154static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
9155 struct bpf_func_state *caller,
9156 struct bpf_func_state *callee,
9157 int insn_idx)
9158{
9159 /* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void
9160 * callback_ctx, u64 flags);
27060531 9161 * callback_fn(const struct bpf_dynptr *dynptr, void *callback_ctx);
20571567
DV
9162 */
9163 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
f8064ab9 9164 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
20571567
DV
9165 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
9166
9167 /* unused */
9168 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
9169 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9170 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9171
9172 callee->in_callback_fn = true;
c92a7a52 9173 callee->callback_ret_range = tnum_range(0, 1);
20571567
DV
9174 return 0;
9175}
9176
5d92ddc3
DM
9177static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
9178 struct bpf_func_state *caller,
9179 struct bpf_func_state *callee,
9180 int insn_idx)
9181{
d2dcc67d 9182 /* void bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
5d92ddc3
DM
9183 * bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b));
9184 *
d2dcc67d 9185 * 'struct bpf_rb_node *node' arg to bpf_rbtree_add_impl is the same PTR_TO_BTF_ID w/ offset
5d92ddc3
DM
9186 * that 'less' callback args will be receiving. However, 'node' arg was release_reference'd
9187 * by this point, so look at 'root'
9188 */
9189 struct btf_field *field;
9190
9191 field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off,
9192 BPF_RB_ROOT);
9193 if (!field || !field->graph_root.value_btf_id)
9194 return -EFAULT;
9195
9196 mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root);
9197 ref_set_non_owning(env, &callee->regs[BPF_REG_1]);
9198 mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root);
9199 ref_set_non_owning(env, &callee->regs[BPF_REG_2]);
9200
9201 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
9202 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
9203 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
9204 callee->in_callback_fn = true;
9205 callee->callback_ret_range = tnum_range(0, 1);
9206 return 0;
9207}
9208
9209static bool is_rbtree_lock_required_kfunc(u32 btf_id);
9210
9211/* Are we currently verifying the callback for a rbtree helper that must
9212 * be called with lock held? If so, no need to complain about unreleased
9213 * lock
9214 */
9215static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
9216{
9217 struct bpf_verifier_state *state = env->cur_state;
9218 struct bpf_insn *insn = env->prog->insnsi;
9219 struct bpf_func_state *callee;
9220 int kfunc_btf_id;
9221
9222 if (!state->curframe)
9223 return false;
9224
9225 callee = state->frame[state->curframe];
9226
9227 if (!callee->in_callback_fn)
9228 return false;
9229
9230 kfunc_btf_id = insn[callee->callsite].imm;
9231 return is_rbtree_lock_required_kfunc(kfunc_btf_id);
9232}
9233
f4d7e40a
AS
9234static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
9235{
9236 struct bpf_verifier_state *state = env->cur_state;
9237 struct bpf_func_state *caller, *callee;
9238 struct bpf_reg_state *r0;
fd978bf7 9239 int err;
f4d7e40a
AS
9240
9241 callee = state->frame[state->curframe];
9242 r0 = &callee->regs[BPF_REG_0];
9243 if (r0->type == PTR_TO_STACK) {
9244 /* technically it's ok to return caller's stack pointer
9245 * (or caller's caller's pointer) back to the caller,
9246 * since these pointers are valid. Only current stack
9247 * pointer will be invalid as soon as function exits,
9248 * but let's be conservative
9249 */
9250 verbose(env, "cannot return stack pointer to the caller\n");
9251 return -EINVAL;
9252 }
9253
eb86559a 9254 caller = state->frame[state->curframe - 1];
69c087ba
YS
9255 if (callee->in_callback_fn) {
9256 /* enforce R0 return value range [0, 1]. */
1bfe26fb 9257 struct tnum range = callee->callback_ret_range;
69c087ba
YS
9258
9259 if (r0->type != SCALAR_VALUE) {
9260 verbose(env, "R0 not a scalar value\n");
9261 return -EACCES;
9262 }
9263 if (!tnum_in(range, r0->var_off)) {
9264 verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
9265 return -EINVAL;
9266 }
9267 } else {
9268 /* return to the caller whatever r0 had in the callee */
9269 caller->regs[BPF_REG_0] = *r0;
9270 }
f4d7e40a 9271
9d9d00ac
KKD
9272 /* callback_fn frame should have released its own additions to parent's
9273 * reference state at this point, or check_reference_leak would
9274 * complain, hence it must be the same as the caller. There is no need
9275 * to copy it back.
9276 */
9277 if (!callee->in_callback_fn) {
9278 /* Transfer references to the caller */
9279 err = copy_reference_state(caller, callee);
9280 if (err)
9281 return err;
9282 }
fd978bf7 9283
f4d7e40a 9284 *insn_idx = callee->callsite + 1;
06ee7115 9285 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a 9286 verbose(env, "returning from callee:\n");
0f55f9ed 9287 print_verifier_state(env, callee, true);
f4d7e40a 9288 verbose(env, "to caller at %d:\n", *insn_idx);
0f55f9ed 9289 print_verifier_state(env, caller, true);
f4d7e40a
AS
9290 }
9291 /* clear everything in the callee */
9292 free_func_state(callee);
eb86559a 9293 state->frame[state->curframe--] = NULL;
f4d7e40a
AS
9294 return 0;
9295}
9296
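/* Illustration (editor's sketch in BPF C): the R0 range check above is
 * what rejects a callback whose return value may leave
 * callback_ret_range (usually [0, 1]), e.g. for bpf_for_each_map_elem():
 *
 *   static long cb(struct bpf_map *map, void *key, void *val, void *ctx)
 *   {
 *           return 2;  // rejected: verbose_invalid_scalar() reports the
 *                      //  out-of-range "callback return" value in R0
 *   }
 */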
849fa506
YS
9297static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
9298 int func_id,
9299 struct bpf_call_arg_meta *meta)
9300{
9301 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
9302
f42bcd16 9303 if (ret_type != RET_INTEGER)
849fa506
YS
9304 return;
9305
f42bcd16
AN
9306 switch (func_id) {
9307 case BPF_FUNC_get_stack:
9308 case BPF_FUNC_get_task_stack:
9309 case BPF_FUNC_probe_read_str:
9310 case BPF_FUNC_probe_read_kernel_str:
9311 case BPF_FUNC_probe_read_user_str:
9312 ret_reg->smax_value = meta->msize_max_value;
9313 ret_reg->s32_max_value = meta->msize_max_value;
9314 ret_reg->smin_value = -MAX_ERRNO;
9315 ret_reg->s32_min_value = -MAX_ERRNO;
9316 reg_bounds_sync(ret_reg);
9317 break;
9318 case BPF_FUNC_get_smp_processor_id:
9319 ret_reg->umax_value = nr_cpu_ids - 1;
9320 ret_reg->u32_max_value = nr_cpu_ids - 1;
9321 ret_reg->smax_value = nr_cpu_ids - 1;
9322 ret_reg->s32_max_value = nr_cpu_ids - 1;
9323 ret_reg->umin_value = 0;
9324 ret_reg->u32_min_value = 0;
9325 ret_reg->smin_value = 0;
9326 ret_reg->s32_min_value = 0;
9327 reg_bounds_sync(ret_reg);
9328 break;
9329 }
849fa506
YS
9330}
9331
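/* Illustration (editor's sketch in BPF C): the bounds set above let the
 * verifier accept the common pattern of reusing a helper's return value
 * as an offset. bpf_get_stack() can return at most the buffer size it
 * was given, so after:
 *
 *   char buf[256];
 *   long n = bpf_get_stack(ctx, buf, sizeof(buf), 0);
 *   if (n <= 0)
 *           return 0;
 *   // n is now known to lie in (0, sizeof(buf)], so an access like
 *   // buf[n - 1] can be proven in-bounds.
 */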
c93552c4
DB
9332static int
9333record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
9334 int func_id, int insn_idx)
9335{
9336 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
591fe988 9337 struct bpf_map *map = meta->map_ptr;
c93552c4
DB
9338
9339 if (func_id != BPF_FUNC_tail_call &&
09772d92
DB
9340 func_id != BPF_FUNC_map_lookup_elem &&
9341 func_id != BPF_FUNC_map_update_elem &&
f1a2e44a
MV
9342 func_id != BPF_FUNC_map_delete_elem &&
9343 func_id != BPF_FUNC_map_push_elem &&
9344 func_id != BPF_FUNC_map_pop_elem &&
69c087ba 9345 func_id != BPF_FUNC_map_peek_elem &&
e6a4750f 9346 func_id != BPF_FUNC_for_each_map_elem &&
07343110
FZ
9347 func_id != BPF_FUNC_redirect_map &&
9348 func_id != BPF_FUNC_map_lookup_percpu_elem)
c93552c4 9349 return 0;
09772d92 9350
591fe988 9351 if (map == NULL) {
c93552c4
DB
9352 verbose(env, "kernel subsystem misconfigured verifier\n");
9353 return -EINVAL;
9354 }
9355
591fe988
DB
9356 /* In case of read-only, some additional restrictions
9357 * need to be applied in order to prevent altering the
9358 * state of the map from program side.
9359 */
9360 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
9361 (func_id == BPF_FUNC_map_delete_elem ||
9362 func_id == BPF_FUNC_map_update_elem ||
9363 func_id == BPF_FUNC_map_push_elem ||
9364 func_id == BPF_FUNC_map_pop_elem)) {
9365 verbose(env, "write into map forbidden\n");
9366 return -EACCES;
9367 }
9368
d2e4c1e6 9369 if (!BPF_MAP_PTR(aux->map_ptr_state))
c93552c4 9370 bpf_map_ptr_store(aux, meta->map_ptr,
2c78ee89 9371 !meta->map_ptr->bypass_spec_v1);
d2e4c1e6 9372 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
c93552c4 9373 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
2c78ee89 9374 !meta->map_ptr->bypass_spec_v1);
c93552c4
DB
9375 return 0;
9376}
9377
d2e4c1e6
DB
9378static int
9379record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
9380 int func_id, int insn_idx)
9381{
9382 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
9383 struct bpf_reg_state *regs = cur_regs(env), *reg;
9384 struct bpf_map *map = meta->map_ptr;
a657182a 9385 u64 val, max;
cc52d914 9386 int err;
d2e4c1e6
DB
9387
9388 if (func_id != BPF_FUNC_tail_call)
9389 return 0;
9390 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
9391 verbose(env, "kernel subsystem misconfigured verifier\n");
9392 return -EINVAL;
9393 }
9394
d2e4c1e6 9395 reg = &regs[BPF_REG_3];
a657182a
DB
9396 val = reg->var_off.value;
9397 max = map->max_entries;
d2e4c1e6 9398
a657182a 9399 if (!(register_is_const(reg) && val < max)) {
d2e4c1e6
DB
9400 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
9401 return 0;
9402 }
9403
cc52d914
DB
9404 err = mark_chain_precision(env, BPF_REG_3);
9405 if (err)
9406 return err;
d2e4c1e6
DB
9407 if (bpf_map_key_unseen(aux))
9408 bpf_map_key_store(aux, val);
9409 else if (!bpf_map_key_poisoned(aux) &&
9410 bpf_map_key_immediate(aux) != val)
9411 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
9412 return 0;
9413}
9414
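/* Illustration (editor's sketch in BPF C): the key tracking above feeds
 * the tail_call direct-jump optimization. If the index is the same known
 * constant on every verification path, the indirect jump can later be
 * patched into a direct one:
 *
 *   bpf_tail_call(ctx, &prog_array, 3);    // key seen: constant 3
 *   bpf_tail_call(ctx, &prog_array, idx);  // unknown idx: key poisoned,
 *                                          //  stays an indirect jump
 */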
fd978bf7
JS
9415static int check_reference_leak(struct bpf_verifier_env *env)
9416{
9417 struct bpf_func_state *state = cur_func(env);
9d9d00ac 9418 bool refs_lingering = false;
fd978bf7
JS
9419 int i;
9420
9d9d00ac
KKD
9421 if (state->frameno && !state->in_callback_fn)
9422 return 0;
9423
fd978bf7 9424 for (i = 0; i < state->acquired_refs; i++) {
9d9d00ac
KKD
9425 if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
9426 continue;
fd978bf7
JS
9427 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
9428 state->refs[i].id, state->refs[i].insn_idx);
9d9d00ac 9429 refs_lingering = true;
fd978bf7 9430 }
9d9d00ac 9431 return refs_lingering ? -EINVAL : 0;
fd978bf7
JS
9432}
9433
7b15523a
FR
9434static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
9435 struct bpf_reg_state *regs)
9436{
9437 struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
9438 struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
9439 struct bpf_map *fmt_map = fmt_reg->map_ptr;
78aa1cc9 9440 struct bpf_bprintf_data data = {};
7b15523a
FR
9441 int err, fmt_map_off, num_args;
9442 u64 fmt_addr;
9443 char *fmt;
9444
9445 /* data must be an array of u64 */
9446 if (data_len_reg->var_off.value % 8)
9447 return -EINVAL;
9448 num_args = data_len_reg->var_off.value / 8;
9449
9450 /* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
9451 * and map_direct_value_addr is set.
9452 */
9453 fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
9454 err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
9455 fmt_map_off);
8e8ee109
FR
9456 if (err) {
9457 verbose(env, "verifier bug\n");
9458 return -EFAULT;
9459 }
7b15523a
FR
9460 fmt = (char *)(long)fmt_addr + fmt_map_off;
9461
9462 /* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we
9463 * can focus on validating the format specifiers.
9464 */
78aa1cc9 9465 err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data);
7b15523a
FR
9466 if (err < 0)
9467 verbose(env, "Invalid format string\n");
9468
9469 return err;
9470}
9471
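/* Illustration (editor's sketch in BPF C): the constraints checked above
 * mirror how bpf_snprintf() is called. The format string must be a
 * constant in a read-only map (a string literal in .rodata), and the
 * data array always holds u64-sized entries, hence the % 8 check:
 *
 *   static const char fmt[] = "pid=%d comm=%s";
 *   u64 args[] = { pid, (u64)(long)comm };
 *   bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 */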
9b99edca
JO
9472static int check_get_func_ip(struct bpf_verifier_env *env)
9473{
9b99edca
JO
9474 enum bpf_prog_type type = resolve_prog_type(env->prog);
9475 int func_id = BPF_FUNC_get_func_ip;
9476
9477 if (type == BPF_PROG_TYPE_TRACING) {
f92c1e18 9478 if (!bpf_prog_has_trampoline(env->prog)) {
9b99edca
JO
9479 verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
9480 func_id_name(func_id), func_id);
9481 return -ENOTSUPP;
9482 }
9483 return 0;
9ffd9f3f
JO
9484 } else if (type == BPF_PROG_TYPE_KPROBE) {
9485 return 0;
9b99edca
JO
9486 }
9487
9488 verbose(env, "func %s#%d not supported for program type %d\n",
9489 func_id_name(func_id), func_id, type);
9490 return -ENOTSUPP;
9491}
9492
1ade2371
EZ
9493static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
9494{
9495 return &env->insn_aux_data[env->insn_idx];
9496}
9497
9498static bool loop_flag_is_zero(struct bpf_verifier_env *env)
9499{
9500 struct bpf_reg_state *regs = cur_regs(env);
9501 struct bpf_reg_state *reg = &regs[BPF_REG_4];
9502 bool reg_is_null = register_is_null(reg);
9503
9504 if (reg_is_null)
9505 mark_chain_precision(env, BPF_REG_4);
9506
9507 return reg_is_null;
9508}
9509
9510static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
9511{
9512 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;
9513
9514 if (!state->initialized) {
9515 state->initialized = 1;
9516 state->fit_for_inline = loop_flag_is_zero(env);
9517 state->callback_subprogno = subprogno;
9518 return;
9519 }
9520
9521 if (!state->fit_for_inline)
9522 return;
9523
9524 state->fit_for_inline = (loop_flag_is_zero(env) &&
9525 state->callback_subprogno == subprogno);
9526}
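/* Sketch of what "fit_for_inline" means from the program side: every
 * bpf_loop() call site passes flags == 0 (so R4 is provably NULL, see
 * loop_flag_is_zero()) and the same callback subprog. 'struct acc' is
 * illustrative:
 *
 *	static long body(u64 i, void *ctx)
 *	{
 *		((struct acc *)ctx)->sum += i;
 *		return 0;	// 0 means keep looping
 *	}
 *
 *	bpf_loop(100, body, &acc, 0);	// flags must be a known 0
 */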
9527
69c087ba
YS
9528static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
9529 int *insn_idx_p)
17a52670 9530{
aef9d4a3 9531 enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
17a52670 9532 const struct bpf_func_proto *fn = NULL;
3c480732 9533 enum bpf_return_type ret_type;
c25b2ae1 9534 enum bpf_type_flag ret_flag;
638f5b90 9535 struct bpf_reg_state *regs;
33ff9823 9536 struct bpf_call_arg_meta meta;
69c087ba 9537 int insn_idx = *insn_idx_p;
969bf05e 9538 bool changes_data;
69c087ba 9539 int i, err, func_id;
17a52670
AS
9540
9541 /* find function prototype */
69c087ba 9542 func_id = insn->imm;
17a52670 9543 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
61bd5218
JK
9544 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
9545 func_id);
17a52670
AS
9546 return -EINVAL;
9547 }
9548
00176a34 9549 if (env->ops->get_func_proto)
5e43f899 9550 fn = env->ops->get_func_proto(func_id, env->prog);
17a52670 9551 if (!fn) {
61bd5218
JK
9552 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
9553 func_id);
17a52670
AS
9554 return -EINVAL;
9555 }
9556
9557 /* eBPF programs must be GPL compatible to use GPL-ed functions */
24701ece 9558 if (!env->prog->gpl_compatible && fn->gpl_only) {
3fe2867c 9559 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
17a52670
AS
9560 return -EINVAL;
9561 }
9562
eae2e83e
JO
9563 if (fn->allowed && !fn->allowed(env->prog)) {
9564 verbose(env, "helper call is not allowed in probe\n");
9565 return -EINVAL;
9566 }
9567
01685c5b
YS
9568 if (!env->prog->aux->sleepable && fn->might_sleep) {
9569 verbose(env, "helper call might sleep in a non-sleepable prog\n");
9570 return -EINVAL;
9571 }
9572
04514d13 9573 /* With LD_ABS/IND some JITs save/restore skb from r1. */
17bedab2 9574 changes_data = bpf_helper_changes_pkt_data(fn->func);
04514d13
DB
9575 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
9576 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
9577 func_id_name(func_id), func_id);
9578 return -EINVAL;
9579 }
969bf05e 9580
33ff9823 9581 memset(&meta, 0, sizeof(meta));
36bbef52 9582 meta.pkt_access = fn->pkt_access;
33ff9823 9583
0c9a7a7e 9584 err = check_func_proto(fn, func_id);
435faee1 9585 if (err) {
61bd5218 9586 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
ebb676da 9587 func_id_name(func_id), func_id);
435faee1
DB
9588 return err;
9589 }
9590
9bb00b28
YS
9591 if (env->cur_state->active_rcu_lock) {
9592 if (fn->might_sleep) {
9593 verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n",
9594 func_id_name(func_id), func_id);
9595 return -EINVAL;
9596 }
9597
9598 if (env->prog->aux->sleepable && is_storage_get_function(func_id))
9599 env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
9600 }
9601
d83525ca 9602 meta.func_id = func_id;
17a52670 9603 /* check args */
523a4cf4 9604 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
1d18feb2 9605 err = check_func_arg(env, i, &meta, fn, insn_idx);
a7658e1a
AS
9606 if (err)
9607 return err;
9608 }
17a52670 9609
c93552c4
DB
9610 err = record_func_map(env, &meta, func_id, insn_idx);
9611 if (err)
9612 return err;
9613
d2e4c1e6
DB
9614 err = record_func_key(env, &meta, func_id, insn_idx);
9615 if (err)
9616 return err;
9617
435faee1
DB
9618 /* Mark slots with STACK_MISC in case of raw mode; the stack offset
9619 * is inferred from register state.
9620 */
9621 for (i = 0; i < meta.access_size; i++) {
ca369602 9622 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
1f9a1ea8 9623 BPF_WRITE, -1, false, false);
435faee1
DB
9624 if (err)
9625 return err;
9626 }
9627
8f14852e
KKD
9628 regs = cur_regs(env);
9629
9630 if (meta.release_regno) {
9631 err = -EINVAL;
27060531
KKD
9632 /* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot
9633 * be released by any dynptr helper. Hence, unmark_stack_slots_dynptr
9634 * is safe to do directly.
9635 */
9636 if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) {
9637 if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) {
9638 verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be released\n");
9639 return -EFAULT;
9640 }
97e03f52 9641 err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
27060531 9642 } else if (meta.ref_obj_id) {
8f14852e 9643 err = release_reference(env, meta.ref_obj_id);
27060531
KKD
9644 } else if (register_is_null(&regs[meta.release_regno])) {
9645 /* meta.ref_obj_id can only be 0 if the register that is meant to be
9646 * released is NULL, which must be > R0.
9647 */
8f14852e 9648 err = 0;
27060531 9649 }
46f8bc92
MKL
9650 if (err) {
9651 verbose(env, "func %s#%d reference has not been acquired before\n",
9652 func_id_name(func_id), func_id);
fd978bf7 9653 return err;
46f8bc92 9654 }
fd978bf7
JS
9655 }
9656
e6f2dd0f
JK
9657 switch (func_id) {
9658 case BPF_FUNC_tail_call:
9659 err = check_reference_leak(env);
9660 if (err) {
9661 verbose(env, "tail_call would lead to reference leak\n");
9662 return err;
9663 }
9664 break;
9665 case BPF_FUNC_get_local_storage:
9666 /* Check that the flags argument in get_local_storage(map, flags) is 0;
9667 * this is required because get_local_storage() can't return an error.
9668 */
9669 if (!register_is_null(&regs[BPF_REG_2])) {
9670 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
9671 return -EINVAL;
9672 }
9673 break;
9674 case BPF_FUNC_for_each_map_elem:
69c087ba
YS
9675 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
9676 set_map_elem_callback_state);
e6f2dd0f
JK
9677 break;
9678 case BPF_FUNC_timer_set_callback:
b00628b1
AS
9679 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
9680 set_timer_callback_state);
e6f2dd0f
JK
9681 break;
9682 case BPF_FUNC_find_vma:
7c7e3d31
SL
9683 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
9684 set_find_vma_callback_state);
e6f2dd0f
JK
9685 break;
9686 case BPF_FUNC_snprintf:
7b15523a 9687 err = check_bpf_snprintf_call(env, regs);
e6f2dd0f
JK
9688 break;
9689 case BPF_FUNC_loop:
1ade2371 9690 update_loop_inline_state(env, meta.subprogno);
e6f2dd0f
JK
9691 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
9692 set_loop_callback_state);
9693 break;
263ae152
JK
9694 case BPF_FUNC_dynptr_from_mem:
9695 if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
9696 verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
9697 reg_type_str(env, regs[BPF_REG_1].type));
9698 return -EACCES;
9699 }
69fd337a
SF
9700 break;
9701 case BPF_FUNC_set_retval:
aef9d4a3
SF
9702 if (prog_type == BPF_PROG_TYPE_LSM &&
9703 env->prog->expected_attach_type == BPF_LSM_CGROUP) {
69fd337a
SF
9704 if (!env->prog->aux->attach_func_proto->type) {
9705 /* Make sure programs that attach to void
9706 * hooks don't try to modify return value.
9707 */
9708 verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
9709 return -EINVAL;
9710 }
9711 }
9712 break;
88374342 9713 case BPF_FUNC_dynptr_data:
485ec51e
JK
9714 {
9715 struct bpf_reg_state *reg;
9716 int id, ref_obj_id;
20571567 9717
485ec51e
JK
9718 reg = get_dynptr_arg_reg(env, fn, regs);
9719 if (!reg)
9720 return -EFAULT;
f8064ab9 9721
f8064ab9 9722
485ec51e
JK
9723 if (meta.dynptr_id) {
9724 verbose(env, "verifier internal error: meta.dynptr_id already set\n");
9725 return -EFAULT;
88374342 9726 }
485ec51e
JK
9727 if (meta.ref_obj_id) {
9728 verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
88374342
JK
9729 return -EFAULT;
9730 }
485ec51e
JK
9731
9732 id = dynptr_id(env, reg);
9733 if (id < 0) {
9734 verbose(env, "verifier internal error: failed to obtain dynptr id\n");
9735 return id;
9736 }
9737
9738 ref_obj_id = dynptr_ref_obj_id(env, reg);
9739 if (ref_obj_id < 0) {
9740 verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n");
9741 return ref_obj_id;
9742 }
9743
9744 meta.dynptr_id = id;
9745 meta.ref_obj_id = ref_obj_id;
9746
88374342 9747 break;
485ec51e 9748 }
b5964b96
JK
9749 case BPF_FUNC_dynptr_write:
9750 {
9751 enum bpf_dynptr_type dynptr_type;
9752 struct bpf_reg_state *reg;
9753
9754 reg = get_dynptr_arg_reg(env, fn, regs);
9755 if (!reg)
9756 return -EFAULT;
9757
9758 dynptr_type = dynptr_get_type(env, reg);
9759 if (dynptr_type == BPF_DYNPTR_TYPE_INVALID)
9760 return -EFAULT;
9761
9762 if (dynptr_type == BPF_DYNPTR_TYPE_SKB)
9763 /* this will trigger clear_all_pkt_pointers(), which will
9764 * invalidate all dynptr slices associated with the skb
9765 */
9766 changes_data = true;
9767
9768 break;
9769 }
20571567
DV
9770 case BPF_FUNC_user_ringbuf_drain:
9771 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
9772 set_user_ringbuf_callback_state);
9773 break;
7b15523a
FR
9774 }
9775
e6f2dd0f
JK
9776 if (err)
9777 return err;
9778
17a52670 9779 /* reset caller saved regs */
dc503a8a 9780 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 9781 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
9782 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
9783 }
17a52670 9784
5327ed3d
JW
9785 /* helper call returns 64-bit value. */
9786 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
9787
dc503a8a 9788 /* update return register (already marked as written above) */
3c480732 9789 ret_type = fn->ret_type;
0c9a7a7e
JK
9790 ret_flag = type_flag(ret_type);
9791
9792 switch (base_type(ret_type)) {
9793 case RET_INTEGER:
f1174f77 9794 /* sets type to SCALAR_VALUE */
61bd5218 9795 mark_reg_unknown(env, regs, BPF_REG_0);
0c9a7a7e
JK
9796 break;
9797 case RET_VOID:
17a52670 9798 regs[BPF_REG_0].type = NOT_INIT;
0c9a7a7e
JK
9799 break;
9800 case RET_PTR_TO_MAP_VALUE:
f1174f77 9801 /* There is no offset yet applied, variable or fixed */
61bd5218 9802 mark_reg_known_zero(env, regs, BPF_REG_0);
17a52670
AS
9803 /* remember map_ptr, so that check_map_access()
9804 * can check 'value_size' boundary of memory access
9805 * to map element returned from bpf_map_lookup_elem()
9806 */
33ff9823 9807 if (meta.map_ptr == NULL) {
61bd5218
JK
9808 verbose(env,
9809 "kernel subsystem misconfigured verifier\n");
17a52670
AS
9810 return -EINVAL;
9811 }
33ff9823 9812 regs[BPF_REG_0].map_ptr = meta.map_ptr;
3e8ce298 9813 regs[BPF_REG_0].map_uid = meta.map_uid;
c25b2ae1
HL
9814 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
9815 if (!type_may_be_null(ret_type) &&
db559117 9816 btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) {
c25b2ae1 9817 regs[BPF_REG_0].id = ++env->id_gen;
4d31f301 9818 }
0c9a7a7e
JK
9819 break;
9820 case RET_PTR_TO_SOCKET:
c64b7983 9821 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae1 9822 regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
0c9a7a7e
JK
9823 break;
9824 case RET_PTR_TO_SOCK_COMMON:
85a51f8c 9825 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae1 9826 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
0c9a7a7e
JK
9827 break;
9828 case RET_PTR_TO_TCP_SOCK:
655a51e5 9829 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae1 9830 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
0c9a7a7e 9831 break;
2de2669b 9832 case RET_PTR_TO_MEM:
457f4436 9833 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae1 9834 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
457f4436 9835 regs[BPF_REG_0].mem_size = meta.mem_size;
0c9a7a7e
JK
9836 break;
9837 case RET_PTR_TO_MEM_OR_BTF_ID:
9838 {
eaa6bcb7
HL
9839 const struct btf_type *t;
9840
9841 mark_reg_known_zero(env, regs, BPF_REG_0);
22dc4a0f 9842 t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
eaa6bcb7
HL
9843 if (!btf_type_is_struct(t)) {
9844 u32 tsize;
9845 const struct btf_type *ret;
9846 const char *tname;
9847
9848 /* resolve the type size of ksym. */
22dc4a0f 9849 ret = btf_resolve_size(meta.ret_btf, t, &tsize);
eaa6bcb7 9850 if (IS_ERR(ret)) {
22dc4a0f 9851 tname = btf_name_by_offset(meta.ret_btf, t->name_off);
eaa6bcb7
HL
9852 verbose(env, "unable to resolve the size of type '%s': %ld\n",
9853 tname, PTR_ERR(ret));
9854 return -EINVAL;
9855 }
c25b2ae1 9856 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
eaa6bcb7
HL
9857 regs[BPF_REG_0].mem_size = tsize;
9858 } else {
34d3a78c
HL
9859 /* MEM_RDONLY may be carried from ret_flag, but it
9860 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
9861 * it will confuse the check of PTR_TO_BTF_ID in
9862 * check_mem_access().
9863 */
9864 ret_flag &= ~MEM_RDONLY;
9865
c25b2ae1 9866 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
22dc4a0f 9867 regs[BPF_REG_0].btf = meta.ret_btf;
eaa6bcb7
HL
9868 regs[BPF_REG_0].btf_id = meta.ret_btf_id;
9869 }
0c9a7a7e
JK
9870 break;
9871 }
9872 case RET_PTR_TO_BTF_ID:
9873 {
c0a5a21c 9874 struct btf *ret_btf;
af7ec138
YS
9875 int ret_btf_id;
9876
9877 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae1 9878 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
c0a5a21c 9879 if (func_id == BPF_FUNC_kptr_xchg) {
aa3496ac
KKD
9880 ret_btf = meta.kptr_field->kptr.btf;
9881 ret_btf_id = meta.kptr_field->kptr.btf_id;
738c96d5
DM
9882 if (!btf_is_kernel(ret_btf))
9883 regs[BPF_REG_0].type |= MEM_ALLOC;
c0a5a21c 9884 } else {
47e34cb7
DM
9885 if (fn->ret_btf_id == BPF_PTR_POISON) {
9886 verbose(env, "verifier internal error:");
9887 verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n",
9888 func_id_name(func_id));
9889 return -EINVAL;
9890 }
c0a5a21c
KKD
9891 ret_btf = btf_vmlinux;
9892 ret_btf_id = *fn->ret_btf_id;
9893 }
af7ec138 9894 if (ret_btf_id == 0) {
3c480732
HL
9895 verbose(env, "invalid return type %u of func %s#%d\n",
9896 base_type(ret_type), func_id_name(func_id),
9897 func_id);
af7ec138
YS
9898 return -EINVAL;
9899 }
c0a5a21c 9900 regs[BPF_REG_0].btf = ret_btf;
af7ec138 9901 regs[BPF_REG_0].btf_id = ret_btf_id;
0c9a7a7e
JK
9902 break;
9903 }
9904 default:
3c480732
HL
9905 verbose(env, "unknown return type %u of func %s#%d\n",
9906 base_type(ret_type), func_id_name(func_id), func_id);
17a52670
AS
9907 return -EINVAL;
9908 }
04fd61ab 9909
c25b2ae1 9910 if (type_may_be_null(regs[BPF_REG_0].type))
93c230e3
MKL
9911 regs[BPF_REG_0].id = ++env->id_gen;
9912
b2d8ef19
DM
9913 if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) {
9914 verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n",
9915 func_id_name(func_id), func_id);
9916 return -EFAULT;
9917 }
9918
f8064ab9
KKD
9919 if (is_dynptr_ref_function(func_id))
9920 regs[BPF_REG_0].dynptr_id = meta.dynptr_id;
9921
88374342 9922 if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
1b986589
MKL
9923 /* For release_reference() */
9924 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
64d85290 9925 } else if (is_acquire_function(func_id, meta.map_ptr)) {
0f3adc28
LB
9926 int id = acquire_reference_state(env, insn_idx);
9927
9928 if (id < 0)
9929 return id;
9930 /* For mark_ptr_or_null_reg() */
9931 regs[BPF_REG_0].id = id;
9932 /* For release_reference() */
9933 regs[BPF_REG_0].ref_obj_id = id;
9934 }
1b986589 9935
849fa506
YS
9936 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
9937
61bd5218 9938 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
35578d79
KX
9939 if (err)
9940 return err;
04fd61ab 9941
fa28dcb8
SL
9942 if ((func_id == BPF_FUNC_get_stack ||
9943 func_id == BPF_FUNC_get_task_stack) &&
9944 !env->prog->has_callchain_buf) {
c195651e
YS
9945 const char *err_str;
9946
9947#ifdef CONFIG_PERF_EVENTS
9948 err = get_callchain_buffers(sysctl_perf_event_max_stack);
9949 err_str = "cannot get callchain buffer for func %s#%d\n";
9950#else
9951 err = -ENOTSUPP;
9952 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
9953#endif
9954 if (err) {
9955 verbose(env, err_str, func_id_name(func_id), func_id);
9956 return err;
9957 }
9958
9959 env->prog->has_callchain_buf = true;
9960 }
9961
5d99cb2c
SL
9962 if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
9963 env->prog->call_get_stack = true;
9964
9b99edca
JO
9965 if (func_id == BPF_FUNC_get_func_ip) {
9966 if (check_get_func_ip(env))
9967 return -ENOTSUPP;
9968 env->prog->call_get_func_ip = true;
9969 }
9970
969bf05e
AS
9971 if (changes_data)
9972 clear_all_pkt_pointers(env);
9973 return 0;
9974}
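/* Sketch of how the R0 typing above plays out for the most common helper,
 * bpf_map_lookup_elem() (my_map and struct val are illustrative):
 *
 *	struct val *v = bpf_map_lookup_elem(&my_map, &key);
 *	// R0 is PTR_TO_MAP_VALUE | PTR_MAYBE_NULL with a fresh reg->id
 *	if (!v)
 *		return 0;	// the NULL check marks all copies of this id
 *	v->cnt++;		// now a plain PTR_TO_MAP_VALUE access
 */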
9975
e6ac2450
MKL
9976/* mark_btf_func_reg_size() is used when the reg size is determined by
9977 * the BTF func_proto's return value and argument sizes.
9978 */
9979static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
9980 size_t reg_size)
9981{
9982 struct bpf_reg_state *reg = &cur_regs(env)[regno];
9983
9984 if (regno == BPF_REG_0) {
9985 /* Function return value */
9986 reg->live |= REG_LIVE_WRITTEN;
9987 reg->subreg_def = reg_size == sizeof(u64) ?
9988 DEF_NOT_SUBREG : env->insn_idx + 1;
9989 } else {
9990 /* Function argument */
9991 if (reg_size == sizeof(u64)) {
9992 mark_insn_zext(env, reg);
9993 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
9994 } else {
9995 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
9996 }
9997 }
9998}
9999
00b85860
KKD
10000static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
10001{
10002 return meta->kfunc_flags & KF_ACQUIRE;
10003}
a5d82727 10004
00b85860
KKD
10005static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
10006{
10007 return meta->kfunc_flags & KF_RELEASE;
10008}
e6ac2450 10009
00b85860
KKD
10010static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta)
10011{
6c831c46 10012 return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta);
00b85860 10013}
4dd48c6f 10014
00b85860
KKD
10015static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
10016{
10017 return meta->kfunc_flags & KF_SLEEPABLE;
10018}
5c073f26 10019
00b85860
KKD
10020static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta)
10021{
10022 return meta->kfunc_flags & KF_DESTRUCTIVE;
10023}
eb1f7f71 10024
fca1aa75
YS
10025static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta)
10026{
10027 return meta->kfunc_flags & KF_RCU;
10028}
10029
a50388db
KKD
10030static bool __kfunc_param_match_suffix(const struct btf *btf,
10031 const struct btf_param *arg,
10032 const char *suffix)
00b85860 10033{
a50388db 10034 int suffix_len = strlen(suffix), len;
00b85860 10035 const char *param_name;
e6ac2450 10036
00b85860
KKD
10037 /* In the future, this can be ported to use BTF tagging */
10038 param_name = btf_name_by_offset(btf, arg->name_off);
10039 if (str_is_empty(param_name))
10040 return false;
10041 len = strlen(param_name);
a50388db 10042 if (len < suffix_len)
00b85860 10043 return false;
a50388db
KKD
10044 param_name += len - suffix_len;
10045 return !strncmp(param_name, suffix, suffix_len);
10046}
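/* The suffix convention in practice (sketch; bpf_memzero is a hypothetical
 * kfunc used only for illustration):
 *
 *	__bpf_kfunc void bpf_memzero(void *p, u32 p__sz);
 *
 * Naming the size argument "p__sz" makes is_kfunc_arg_mem_size() below treat
 * it as the byte size of the pointer argument that precedes it.
 */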
5c073f26 10047
a50388db
KKD
10048static bool is_kfunc_arg_mem_size(const struct btf *btf,
10049 const struct btf_param *arg,
10050 const struct bpf_reg_state *reg)
10051{
10052 const struct btf_type *t;
5c073f26 10053
a50388db
KKD
10054 t = btf_type_skip_modifiers(btf, arg->type, NULL);
10055 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
00b85860 10056 return false;
eb1f7f71 10057
a50388db
KKD
10058 return __kfunc_param_match_suffix(btf, arg, "__sz");
10059}
eb1f7f71 10060
66e3a13e
JK
10061static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
10062 const struct btf_param *arg,
10063 const struct bpf_reg_state *reg)
10064{
10065 const struct btf_type *t;
10066
10067 t = btf_type_skip_modifiers(btf, arg->type, NULL);
10068 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
10069 return false;
10070
10071 return __kfunc_param_match_suffix(btf, arg, "__szk");
10072}
10073
3bda08b6
DR
10074static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg)
10075{
10076 return __kfunc_param_match_suffix(btf, arg, "__opt");
10077}
10078
a50388db
KKD
10079static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
10080{
10081 return __kfunc_param_match_suffix(btf, arg, "__k");
00b85860 10082}
eb1f7f71 10083
958cf2e2
KKD
10084static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
10085{
10086 return __kfunc_param_match_suffix(btf, arg, "__ign");
10087}
5c073f26 10088
ac9f0605
KKD
10089static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
10090{
10091 return __kfunc_param_match_suffix(btf, arg, "__alloc");
10092}
e6ac2450 10093
d96d937d
JK
10094static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg)
10095{
10096 return __kfunc_param_match_suffix(btf, arg, "__uninit");
10097}
10098
7c50b1cb
DM
10099static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg)
10100{
10101 return __kfunc_param_match_suffix(btf, arg, "__refcounted_kptr");
10102}
10103
00b85860
KKD
10104static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
10105 const struct btf_param *arg,
10106 const char *name)
10107{
10108 int len, target_len = strlen(name);
10109 const char *param_name;
e6ac2450 10110
00b85860
KKD
10111 param_name = btf_name_by_offset(btf, arg->name_off);
10112 if (str_is_empty(param_name))
10113 return false;
10114 len = strlen(param_name);
10115 if (len != target_len)
10116 return false;
10117 if (strcmp(param_name, name))
10118 return false;
e6ac2450 10119
00b85860 10120 return true;
e6ac2450
MKL
10121}
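/* Exact-name matching in practice (sketch; treat the exact prototype as an
 * assumption drawn from the HID-BPF kfuncs):
 *
 *	__u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset,
 *			       const size_t rdwr_buf_size);
 *
 * check_kfunc_args() matches the argument named "rdwr_buf_size" with this
 * helper and uses its constant value to size the buffer returned in R0.
 */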
10122
00b85860
KKD
10123enum {
10124 KF_ARG_DYNPTR_ID,
8cab76ec
KKD
10125 KF_ARG_LIST_HEAD_ID,
10126 KF_ARG_LIST_NODE_ID,
cd6791b4
DM
10127 KF_ARG_RB_ROOT_ID,
10128 KF_ARG_RB_NODE_ID,
00b85860 10129};
b03c9f9f 10130
00b85860
KKD
10131BTF_ID_LIST(kf_arg_btf_ids)
10132BTF_ID(struct, bpf_dynptr_kern)
8cab76ec
KKD
10133BTF_ID(struct, bpf_list_head)
10134BTF_ID(struct, bpf_list_node)
bd1279ae
DM
10135BTF_ID(struct, bpf_rb_root)
10136BTF_ID(struct, bpf_rb_node)
b03c9f9f 10137
8cab76ec
KKD
10138static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
10139 const struct btf_param *arg, int type)
3f50f132 10140{
00b85860
KKD
10141 const struct btf_type *t;
10142 u32 res_id;
3f50f132 10143
00b85860
KKD
10144 t = btf_type_skip_modifiers(btf, arg->type, NULL);
10145 if (!t)
10146 return false;
10147 if (!btf_type_is_ptr(t))
10148 return false;
10149 t = btf_type_skip_modifiers(btf, t->type, &res_id);
10150 if (!t)
10151 return false;
8cab76ec 10152 return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]);
3f50f132
JF
10153}
10154
8cab76ec 10155static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg)
b03c9f9f 10156{
8cab76ec 10157 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID);
969bf05e
AS
10158}
10159
8cab76ec 10160static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg)
3f50f132 10161{
8cab76ec 10162 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID);
3f50f132
JF
10163}
10164
8cab76ec 10165static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg)
bb7f0f98 10166{
8cab76ec 10167 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID);
00b85860
KKD
10168}
10169
cd6791b4
DM
10170static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg)
10171{
10172 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID);
10173}
10174
10175static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg)
10176{
10177 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID);
10178}
10179
5d92ddc3
DM
10180static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
10181 const struct btf_param *arg)
10182{
10183 const struct btf_type *t;
10184
10185 t = btf_type_resolve_func_ptr(btf, arg->type, NULL);
10186 if (!t)
10187 return false;
10188
10189 return true;
10190}
10191
00b85860
KKD
10192/* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
10193static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
10194 const struct btf *btf,
10195 const struct btf_type *t, int rec)
10196{
10197 const struct btf_type *member_type;
10198 const struct btf_member *member;
10199 u32 i;
10200
10201 if (!btf_type_is_struct(t))
10202 return false;
10203
10204 for_each_member(i, t, member) {
10205 const struct btf_array *array;
10206
10207 member_type = btf_type_skip_modifiers(btf, member->type, NULL);
10208 if (btf_type_is_struct(member_type)) {
10209 if (rec >= 3) {
10210 verbose(env, "max struct nesting depth exceeded\n");
10211 return false;
10212 }
10213 if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1))
10214 return false;
10215 continue;
10216 }
10217 if (btf_type_is_array(member_type)) {
10218 array = btf_array(member_type);
10219 if (!array->nelems)
10220 return false;
10221 member_type = btf_type_skip_modifiers(btf, array->type, NULL);
10222 if (!btf_type_is_scalar(member_type))
10223 return false;
10224 continue;
10225 }
10226 if (!btf_type_is_scalar(member_type))
10227 return false;
10228 }
10229 return true;
10230}
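/* Sketch of what this accepts or rejects when a kfunc takes a pointer to
 * memory (both struct types are illustrative):
 *
 *	struct ok {
 *		int a;
 *		struct { long b[4]; } inner;	// scalars + scalar arrays: ok
 *	};
 *
 *	struct bad {
 *		int *p;				// pointer member: rejected
 *	};
 */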
10231
00b85860
KKD
10232enum kfunc_ptr_arg_type {
10233 KF_ARG_PTR_TO_CTX,
7c50b1cb
DM
10234 KF_ARG_PTR_TO_ALLOC_BTF_ID, /* Allocated object */
10235 KF_ARG_PTR_TO_REFCOUNTED_KPTR, /* Refcounted local kptr */
00b85860 10236 KF_ARG_PTR_TO_DYNPTR,
06accc87 10237 KF_ARG_PTR_TO_ITER,
8cab76ec
KKD
10238 KF_ARG_PTR_TO_LIST_HEAD,
10239 KF_ARG_PTR_TO_LIST_NODE,
7c50b1cb 10240 KF_ARG_PTR_TO_BTF_ID, /* Also covers reg2btf_ids conversions */
00b85860 10241 KF_ARG_PTR_TO_MEM,
7c50b1cb 10242 KF_ARG_PTR_TO_MEM_SIZE, /* Size derived from next argument, skip it */
5d92ddc3 10243 KF_ARG_PTR_TO_CALLBACK,
cd6791b4
DM
10244 KF_ARG_PTR_TO_RB_ROOT,
10245 KF_ARG_PTR_TO_RB_NODE,
00b85860
KKD
10246};
10247
ac9f0605
KKD
10248enum special_kfunc_type {
10249 KF_bpf_obj_new_impl,
10250 KF_bpf_obj_drop_impl,
7c50b1cb 10251 KF_bpf_refcount_acquire_impl,
d2dcc67d
DM
10252 KF_bpf_list_push_front_impl,
10253 KF_bpf_list_push_back_impl,
8cab76ec
KKD
10254 KF_bpf_list_pop_front,
10255 KF_bpf_list_pop_back,
fd264ca0 10256 KF_bpf_cast_to_kern_ctx,
a35b9af4 10257 KF_bpf_rdonly_cast,
9bb00b28
YS
10258 KF_bpf_rcu_read_lock,
10259 KF_bpf_rcu_read_unlock,
bd1279ae 10260 KF_bpf_rbtree_remove,
d2dcc67d 10261 KF_bpf_rbtree_add_impl,
bd1279ae 10262 KF_bpf_rbtree_first,
b5964b96 10263 KF_bpf_dynptr_from_skb,
05421aec 10264 KF_bpf_dynptr_from_xdp,
66e3a13e
JK
10265 KF_bpf_dynptr_slice,
10266 KF_bpf_dynptr_slice_rdwr,
361f129f 10267 KF_bpf_dynptr_clone,
ac9f0605
KKD
10268};
10269
10270BTF_SET_START(special_kfunc_set)
10271BTF_ID(func, bpf_obj_new_impl)
10272BTF_ID(func, bpf_obj_drop_impl)
7c50b1cb 10273BTF_ID(func, bpf_refcount_acquire_impl)
d2dcc67d
DM
10274BTF_ID(func, bpf_list_push_front_impl)
10275BTF_ID(func, bpf_list_push_back_impl)
8cab76ec
KKD
10276BTF_ID(func, bpf_list_pop_front)
10277BTF_ID(func, bpf_list_pop_back)
fd264ca0 10278BTF_ID(func, bpf_cast_to_kern_ctx)
a35b9af4 10279BTF_ID(func, bpf_rdonly_cast)
bd1279ae 10280BTF_ID(func, bpf_rbtree_remove)
d2dcc67d 10281BTF_ID(func, bpf_rbtree_add_impl)
bd1279ae 10282BTF_ID(func, bpf_rbtree_first)
b5964b96 10283BTF_ID(func, bpf_dynptr_from_skb)
05421aec 10284BTF_ID(func, bpf_dynptr_from_xdp)
66e3a13e
JK
10285BTF_ID(func, bpf_dynptr_slice)
10286BTF_ID(func, bpf_dynptr_slice_rdwr)
361f129f 10287BTF_ID(func, bpf_dynptr_clone)
ac9f0605
KKD
10288BTF_SET_END(special_kfunc_set)
10289
10290BTF_ID_LIST(special_kfunc_list)
10291BTF_ID(func, bpf_obj_new_impl)
10292BTF_ID(func, bpf_obj_drop_impl)
7c50b1cb 10293BTF_ID(func, bpf_refcount_acquire_impl)
d2dcc67d
DM
10294BTF_ID(func, bpf_list_push_front_impl)
10295BTF_ID(func, bpf_list_push_back_impl)
8cab76ec
KKD
10296BTF_ID(func, bpf_list_pop_front)
10297BTF_ID(func, bpf_list_pop_back)
fd264ca0 10298BTF_ID(func, bpf_cast_to_kern_ctx)
a35b9af4 10299BTF_ID(func, bpf_rdonly_cast)
9bb00b28
YS
10300BTF_ID(func, bpf_rcu_read_lock)
10301BTF_ID(func, bpf_rcu_read_unlock)
bd1279ae 10302BTF_ID(func, bpf_rbtree_remove)
d2dcc67d 10303BTF_ID(func, bpf_rbtree_add_impl)
bd1279ae 10304BTF_ID(func, bpf_rbtree_first)
b5964b96 10305BTF_ID(func, bpf_dynptr_from_skb)
05421aec 10306BTF_ID(func, bpf_dynptr_from_xdp)
66e3a13e
JK
10307BTF_ID(func, bpf_dynptr_slice)
10308BTF_ID(func, bpf_dynptr_slice_rdwr)
361f129f 10309BTF_ID(func, bpf_dynptr_clone)
9bb00b28 10310
7793fc3b
DM
10311static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
10312{
10313 if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] &&
10314 meta->arg_owning_ref) {
10315 return false;
10316 }
10317
10318 return meta->kfunc_flags & KF_RET_NULL;
10319}
10320
9bb00b28
YS
10321static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
10322{
10323 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock];
10324}
10325
10326static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta)
10327{
10328 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock];
10329}
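/* Sketch of how these two kfuncs pair with a KF_RCU kfunc such as
 * bpf_task_acquire() (flagged KF_ACQUIRE | KF_RCU | KF_RET_NULL):
 *
 *	bpf_rcu_read_lock();
 *	parent = task->real_parent;		// MEM_RCU-tagged pointer
 *	acquired = bpf_task_acquire(parent);	// KF_RCU accepts rcu regs
 *	bpf_rcu_read_unlock();
 *	if (acquired)
 *		bpf_task_release(acquired);
 */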
ac9f0605 10330
00b85860
KKD
10331static enum kfunc_ptr_arg_type
10332get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
10333 struct bpf_kfunc_call_arg_meta *meta,
10334 const struct btf_type *t, const struct btf_type *ref_t,
10335 const char *ref_tname, const struct btf_param *args,
10336 int argno, int nargs)
10337{
10338 u32 regno = argno + 1;
10339 struct bpf_reg_state *regs = cur_regs(env);
10340 struct bpf_reg_state *reg = &regs[regno];
10341 bool arg_mem_size = false;
10342
fd264ca0
YS
10343 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
10344 return KF_ARG_PTR_TO_CTX;
10345
00b85860
KKD
10346 /* In this function, we verify the kfunc's BTF as per the argument type,
10347 * leaving the rest of the verification with respect to the register
10348 * type to our caller. When a set of conditions hold in the BTF type of
10349 * arguments, we resolve it to a known kfunc_ptr_arg_type.
10350 */
10351 if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
10352 return KF_ARG_PTR_TO_CTX;
10353
ac9f0605
KKD
10354 if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
10355 return KF_ARG_PTR_TO_ALLOC_BTF_ID;
10356
7c50b1cb
DM
10357 if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno]))
10358 return KF_ARG_PTR_TO_REFCOUNTED_KPTR;
00b85860
KKD
10359
10360 if (is_kfunc_arg_dynptr(meta->btf, &args[argno]))
10361 return KF_ARG_PTR_TO_DYNPTR;
10362
06accc87
AN
10363 if (is_kfunc_arg_iter(meta, argno))
10364 return KF_ARG_PTR_TO_ITER;
10365
8cab76ec
KKD
10366 if (is_kfunc_arg_list_head(meta->btf, &args[argno]))
10367 return KF_ARG_PTR_TO_LIST_HEAD;
10368
10369 if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
10370 return KF_ARG_PTR_TO_LIST_NODE;
10371
cd6791b4
DM
10372 if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno]))
10373 return KF_ARG_PTR_TO_RB_ROOT;
10374
10375 if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno]))
10376 return KF_ARG_PTR_TO_RB_NODE;
10377
00b85860
KKD
10378 if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
10379 if (!btf_type_is_struct(ref_t)) {
10380 verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
10381 meta->func_name, argno, btf_type_str(ref_t), ref_tname);
10382 return -EINVAL;
10383 }
10384 return KF_ARG_PTR_TO_BTF_ID;
10385 }
10386
5d92ddc3
DM
10387 if (is_kfunc_arg_callback(env, meta->btf, &args[argno]))
10388 return KF_ARG_PTR_TO_CALLBACK;
10389
66e3a13e
JK
10390
10391 if (argno + 1 < nargs &&
10392 (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) ||
10393 is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1])))
00b85860
KKD
10394 arg_mem_size = true;
10395
10396 /* This is the catch-all argument type for register types supported by
10397 * check_helper_mem_access. However, we only allow it when the argument
10398 * type is a pointer to a scalar, or a struct composed (recursively) of
10399 * scalars. When arg_mem_size is true, the pointer can also be void *.
10400 */
10401 if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) &&
10402 (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
10403 verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
10404 argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
10405 return -EINVAL;
10406 }
10407 return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM;
10408}
10409
10410static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
10411 struct bpf_reg_state *reg,
10412 const struct btf_type *ref_t,
10413 const char *ref_tname, u32 ref_id,
10414 struct bpf_kfunc_call_arg_meta *meta,
10415 int argno)
10416{
10417 const struct btf_type *reg_ref_t;
10418 bool strict_type_match = false;
10419 const struct btf *reg_btf;
10420 const char *reg_ref_tname;
10421 u32 reg_ref_id;
10422
3f00c523 10423 if (base_type(reg->type) == PTR_TO_BTF_ID) {
00b85860
KKD
10424 reg_btf = reg->btf;
10425 reg_ref_id = reg->btf_id;
10426 } else {
10427 reg_btf = btf_vmlinux;
10428 reg_ref_id = *reg2btf_ids[base_type(reg->type)];
10429 }
10430
b613d335
DV
10431 /* Enforce strict type matching for calls to kfuncs that are acquiring
10432 * or releasing a reference, or are no-cast aliases. We do _not_
10433 * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default,
10434 * as we want to enable BPF programs to pass types that are bitwise
10435 * equivalent without forcing them to explicitly cast with something
10436 * like bpf_cast_to_kern_ctx().
10437 *
10438 * For example, say we had a type like the following:
10439 *
10440 * struct bpf_cpumask {
10441 * cpumask_t cpumask;
10442 * refcount_t usage;
10443 * };
10444 *
10445 * Note that as specified in <linux/cpumask.h>, cpumask_t is typedef'ed
10446 * to a struct cpumask, so it would be safe to pass a struct
10447 * bpf_cpumask * to a kfunc expecting a struct cpumask *.
10448 *
10449 * The philosophy here is similar to how we allow scalars of different
10450 * types to be passed to kfuncs as long as the size is the same. The
10451 * only difference here is that we're simply allowing
10452 * btf_struct_ids_match() to walk the struct at the 0th offset, and
10453 * resolve types.
10454 */
10455 if (is_kfunc_acquire(meta) ||
10456 (is_kfunc_release(meta) && reg->ref_obj_id) ||
10457 btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
00b85860
KKD
10458 strict_type_match = true;
10459
b613d335
DV
10460 WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off);
10461
00b85860
KKD
10462 reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
10463 reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
10464 if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) {
10465 verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
10466 meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1,
10467 btf_type_str(reg_ref_t), reg_ref_tname);
10468 return -EINVAL;
10469 }
10470 return 0;
10471}
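/* Following the comment above, a sketch of what non-strict matching permits
 * (both kfuncs live in kernel/bpf/cpumask.c):
 *
 *	struct bpf_cpumask *mask = bpf_cpumask_create();
 *
 *	if (mask) {
 *		bpf_cpumask_test_cpu(0, (const struct cpumask *)mask);
 *		bpf_cpumask_release(mask);
 *	}
 *
 * struct bpf_cpumask embeds a cpumask_t at offset 0, so
 * btf_struct_ids_match() walks to it and accepts the pass-through.
 */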
10472
6a3cd331 10473static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
534e86bc 10474{
6a3cd331
DM
10475 struct bpf_verifier_state *state = env->cur_state;
10476
10477 if (!state->active_lock.ptr) {
10478 verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
10479 return -EFAULT;
10480 }
10481
10482 if (type_flag(reg->type) & NON_OWN_REF) {
10483 verbose(env, "verifier internal error: NON_OWN_REF already set\n");
10484 return -EFAULT;
10485 }
10486
10487 reg->type |= NON_OWN_REF;
10488 return 0;
10489}
10490
10491static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
10492{
10493 struct bpf_func_state *state, *unused;
534e86bc
KKD
10494 struct bpf_reg_state *reg;
10495 int i;
10496
6a3cd331
DM
10497 state = cur_func(env);
10498
534e86bc 10499 if (!ref_obj_id) {
6a3cd331
DM
10500 verbose(env, "verifier internal error: ref_obj_id is zero for "
10501 "owning -> non-owning conversion\n");
534e86bc
KKD
10502 return -EFAULT;
10503 }
6a3cd331 10504
534e86bc 10505 for (i = 0; i < state->acquired_refs; i++) {
6a3cd331
DM
10506 if (state->refs[i].id != ref_obj_id)
10507 continue;
10508
10509 /* Clear ref_obj_id here so release_reference doesn't clobber
10510 * the whole reg
10511 */
10512 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
10513 if (reg->ref_obj_id == ref_obj_id) {
10514 reg->ref_obj_id = 0;
10515 ref_set_non_owning(env, reg);
534e86bc 10516 }
6a3cd331
DM
10517 }));
10518 return 0;
534e86bc 10519 }
6a3cd331 10520
534e86bc
KKD
10521 verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
10522 return -EFAULT;
10523}
10524
8cab76ec
KKD
10525/* Implementation details:
10526 *
10527 * Each register points to some region of memory, which we define as an
10528 * allocation. Each allocation may embed a bpf_spin_lock which protects any
10529 * special BPF objects (bpf_list_head, bpf_rb_root, etc.) that are part
10530 * of the same allocation. The lock and the data it protects are colocated
10531 * in the same memory region.
10532 *
10533 * Hence, every time a register holds a pointer value pointing to such
10534 * allocation, the verifier preserves a unique reg->id for it.
10535 *
10536 * The verifier remembers the lock 'ptr' and the lock 'id' whenever
10537 * bpf_spin_lock is called.
10538 *
10539 * To enable this, lock state in the verifier captures two values:
10540 * active_lock.ptr = Register's type specific pointer
10541 * active_lock.id = A unique ID for each register pointer value
10542 *
10543 * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two
10544 * supported register types.
10545 *
10546 * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of
10547 * allocated objects is the reg->btf pointer.
10548 *
10549 * The active_lock.id is non-unique for maps supporting direct_value_addr, as we
10550 * can establish the provenance of the map value statically for each distinct
10551 * lookup into such maps. They always contain a single map value, so assigning
10552 * unique IDs to each pseudo load would pessimize the algorithm and reject valid programs.
10553 *
10554 * So, global variables use array maps with max_entries = 1, and hence their
10555 * active_lock.ptr becomes the map_ptr and id = 0 (since they all point
10556 * into the same map value, as max_entries is 1, as described above).
10557 *
10558 * In case of inner map lookups, the inner map pointer has same map_ptr as the
10559 * outer map pointer (in verifier context), but each lookup into an inner map
10560 * assigns a fresh reg->id to the lookup, so while lookups into distinct inner
10561 * maps from the same outer map share the same map_ptr as active_lock.ptr, they
10562 * will get different reg->id assigned to each lookup, hence different
10563 * active_lock.id.
10564 *
10565 * In case of allocated objects, active_lock.ptr is the reg->btf, and the
10566 * reg->id is a unique ID preserved after the NULL pointer check on the pointer
10567 * returned from bpf_obj_new. Each allocation receives a new reg->id.
10568 */
10569static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
10570{
10571 void *ptr;
10572 u32 id;
10573
10574 switch ((int)reg->type) {
10575 case PTR_TO_MAP_VALUE:
10576 ptr = reg->map_ptr;
10577 break;
10578 case PTR_TO_BTF_ID | MEM_ALLOC:
10579 ptr = reg->btf;
10580 break;
10581 default:
10582 verbose(env, "verifier internal error: unknown reg type for lock check\n");
10583 return -EFAULT;
10584 }
10585 id = reg->id;
10586
10587 if (!env->cur_state->active_lock.ptr)
10588 return -EINVAL;
10589 if (env->cur_state->active_lock.ptr != ptr ||
10590 env->cur_state->active_lock.id != id) {
10591 verbose(env, "held lock and object are not in the same allocation\n");
10592 return -EINVAL;
10593 }
10594 return 0;
10595}
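/* Sketch of the layout this enforces, written with the selftests'
 * __contains() decl tag (tools/testing/selftests/bpf/bpf_experimental.h):
 *
 *	struct elem {
 *		struct bpf_list_node node;
 *		int v;
 *	};
 *
 *	struct host {
 *		struct bpf_spin_lock lock;
 *		struct bpf_list_head head __contains(elem, node);
 *	};
 *
 * List kfuncs on &h->head are accepted only between bpf_spin_lock(&h->lock)
 * and bpf_spin_unlock(&h->lock), i.e. under the lock colocated in the same
 * allocation.
 */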
10596
10597static bool is_bpf_list_api_kfunc(u32 btf_id)
10598{
d2dcc67d
DM
10599 return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
10600 btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
8cab76ec
KKD
10601 btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
10602 btf_id == special_kfunc_list[KF_bpf_list_pop_back];
10603}
10604
cd6791b4
DM
10605static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
10606{
d2dcc67d 10607 return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] ||
cd6791b4
DM
10608 btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
10609 btf_id == special_kfunc_list[KF_bpf_rbtree_first];
10610}
10611
10612static bool is_bpf_graph_api_kfunc(u32 btf_id)
10613{
7c50b1cb
DM
10614 return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) ||
10615 btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl];
cd6791b4
DM
10616}
10617
5d92ddc3
DM
10618static bool is_callback_calling_kfunc(u32 btf_id)
10619{
d2dcc67d 10620 return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl];
5d92ddc3
DM
10621}
10622
10623static bool is_rbtree_lock_required_kfunc(u32 btf_id)
10624{
10625 return is_bpf_rbtree_api_kfunc(btf_id);
10626}
10627
cd6791b4
DM
10628static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
10629 enum btf_field_type head_field_type,
10630 u32 kfunc_btf_id)
10631{
10632 bool ret;
10633
10634 switch (head_field_type) {
10635 case BPF_LIST_HEAD:
10636 ret = is_bpf_list_api_kfunc(kfunc_btf_id);
10637 break;
10638 case BPF_RB_ROOT:
10639 ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id);
10640 break;
10641 default:
10642 verbose(env, "verifier internal error: unexpected graph root argument type %s\n",
10643 btf_field_type_name(head_field_type));
10644 return false;
10645 }
10646
10647 if (!ret)
10648 verbose(env, "verifier internal error: %s head arg for unknown kfunc\n",
10649 btf_field_type_name(head_field_type));
10650 return ret;
10651}
10652
10653static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
10654 enum btf_field_type node_field_type,
10655 u32 kfunc_btf_id)
8cab76ec 10656{
cd6791b4
DM
10657 bool ret;
10658
10659 switch (node_field_type) {
10660 case BPF_LIST_NODE:
d2dcc67d
DM
10661 ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
10662 kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl]);
cd6791b4
DM
10663 break;
10664 case BPF_RB_NODE:
10665 ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
d2dcc67d 10666 kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]);
cd6791b4
DM
10667 break;
10668 default:
10669 verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
10670 btf_field_type_name(node_field_type));
10671 return false;
10672 }
10673
10674 if (!ret)
10675 verbose(env, "verifier internal error: %s node arg for unknown kfunc\n",
10676 btf_field_type_name(node_field_type));
10677 return ret;
10678}
10679
10680static int
10681__process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
10682 struct bpf_reg_state *reg, u32 regno,
10683 struct bpf_kfunc_call_arg_meta *meta,
10684 enum btf_field_type head_field_type,
10685 struct btf_field **head_field)
10686{
10687 const char *head_type_name;
8cab76ec
KKD
10688 struct btf_field *field;
10689 struct btf_record *rec;
cd6791b4 10690 u32 head_off;
8cab76ec 10691
cd6791b4
DM
10692 if (meta->btf != btf_vmlinux) {
10693 verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
8cab76ec
KKD
10694 return -EFAULT;
10695 }
10696
cd6791b4
DM
10697 if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id))
10698 return -EFAULT;
10699
10700 head_type_name = btf_field_type_name(head_field_type);
8cab76ec
KKD
10701 if (!tnum_is_const(reg->var_off)) {
10702 verbose(env,
cd6791b4
DM
10703 "R%d doesn't have constant offset. %s has to be at the constant offset\n",
10704 regno, head_type_name);
8cab76ec
KKD
10705 return -EINVAL;
10706 }
10707
10708 rec = reg_btf_record(reg);
cd6791b4
DM
10709 head_off = reg->off + reg->var_off.value;
10710 field = btf_record_find(rec, head_off, head_field_type);
8cab76ec 10711 if (!field) {
cd6791b4 10712 verbose(env, "%s not found at offset=%u\n", head_type_name, head_off);
8cab76ec
KKD
10713 return -EINVAL;
10714 }
10715
10716 /* All functions require bpf_list_head to be protected using a bpf_spin_lock */
10717 if (check_reg_allocation_locked(env, reg)) {
cd6791b4
DM
10718 verbose(env, "bpf_spin_lock at off=%d must be held for %s\n",
10719 rec->spin_lock_off, head_type_name);
8cab76ec
KKD
10720 return -EINVAL;
10721 }
10722
cd6791b4
DM
10723 if (*head_field) {
10724 verbose(env, "verifier internal error: repeating %s arg\n", head_type_name);
8cab76ec
KKD
10725 return -EFAULT;
10726 }
cd6791b4 10727 *head_field = field;
8cab76ec
KKD
10728 return 0;
10729}
10730
cd6791b4 10731static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
8cab76ec
KKD
10732 struct bpf_reg_state *reg, u32 regno,
10733 struct bpf_kfunc_call_arg_meta *meta)
10734{
cd6791b4
DM
10735 return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD,
10736 &meta->arg_list_head.field);
10737}
10738
10739static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env,
10740 struct bpf_reg_state *reg, u32 regno,
10741 struct bpf_kfunc_call_arg_meta *meta)
10742{
10743 return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT,
10744 &meta->arg_rbtree_root.field);
10745}
10746
10747static int
10748__process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
10749 struct bpf_reg_state *reg, u32 regno,
10750 struct bpf_kfunc_call_arg_meta *meta,
10751 enum btf_field_type head_field_type,
10752 enum btf_field_type node_field_type,
10753 struct btf_field **node_field)
10754{
10755 const char *node_type_name;
8cab76ec
KKD
10756 const struct btf_type *et, *t;
10757 struct btf_field *field;
cd6791b4 10758 u32 node_off;
8cab76ec 10759
cd6791b4
DM
10760 if (meta->btf != btf_vmlinux) {
10761 verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
8cab76ec
KKD
10762 return -EFAULT;
10763 }
10764
cd6791b4
DM
10765 if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id))
10766 return -EFAULT;
10767
10768 node_type_name = btf_field_type_name(node_field_type);
8cab76ec
KKD
10769 if (!tnum_is_const(reg->var_off)) {
10770 verbose(env,
cd6791b4
DM
10771 "R%d doesn't have constant offset. %s has to be at the constant offset\n",
10772 regno, node_type_name);
8cab76ec
KKD
10773 return -EINVAL;
10774 }
10775
cd6791b4
DM
10776 node_off = reg->off + reg->var_off.value;
10777 field = reg_find_field_offset(reg, node_off, node_field_type);
10778 if (!field || field->offset != node_off) {
10779 verbose(env, "%s not found at offset=%u\n", node_type_name, node_off);
8cab76ec
KKD
10780 return -EINVAL;
10781 }
10782
cd6791b4 10783 field = *node_field;
8cab76ec 10784
30465003 10785 et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id);
8cab76ec 10786 t = btf_type_by_id(reg->btf, reg->btf_id);
30465003
DM
10787 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf,
10788 field->graph_root.value_btf_id, true)) {
cd6791b4 10789 verbose(env, "operation on %s expects arg#1 %s at offset=%d "
8cab76ec 10790 "in struct %s, but arg is at offset=%d in struct %s\n",
cd6791b4
DM
10791 btf_field_type_name(head_field_type),
10792 btf_field_type_name(node_field_type),
30465003
DM
10793 field->graph_root.node_offset,
10794 btf_name_by_offset(field->graph_root.btf, et->name_off),
cd6791b4 10795 node_off, btf_name_by_offset(reg->btf, t->name_off));
8cab76ec
KKD
10796 return -EINVAL;
10797 }
2140a6e3
DM
10798 meta->arg_btf = reg->btf;
10799 meta->arg_btf_id = reg->btf_id;
8cab76ec 10800
cd6791b4
DM
10801 if (node_off != field->graph_root.node_offset) {
10802 verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n",
10803 node_off, btf_field_type_name(node_field_type),
10804 field->graph_root.node_offset,
30465003 10805 btf_name_by_offset(field->graph_root.btf, et->name_off));
8cab76ec
KKD
10806 return -EINVAL;
10807 }
6a3cd331
DM
10808
10809 return 0;
8cab76ec
KKD
10810}
10811
cd6791b4
DM
10812static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
10813 struct bpf_reg_state *reg, u32 regno,
10814 struct bpf_kfunc_call_arg_meta *meta)
10815{
10816 return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
10817 BPF_LIST_HEAD, BPF_LIST_NODE,
10818 &meta->arg_list_head.field);
10819}
10820
10821static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
10822 struct bpf_reg_state *reg, u32 regno,
10823 struct bpf_kfunc_call_arg_meta *meta)
10824{
10825 return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
10826 BPF_RB_ROOT, BPF_RB_NODE,
10827 &meta->arg_rbtree_root.field);
10828}
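/* Sketch of the calling pattern these node checks shape, using the
 * bpf_experimental.h wrappers around the *_impl kfuncs (groot, glock and
 * struct node_data follow the selftests' naming):
 *
 *	bpf_spin_lock(&glock);
 *	bpf_rbtree_add(&groot, &n->node, less);		// consumes owning ref
 *	res = bpf_rbtree_first(&groot);			// yields non-owning ref
 *	if (res)
 *		res = bpf_rbtree_remove(&groot, res);	// non-owning -> owning
 *	bpf_spin_unlock(&glock);
 *
 *	if (res)
 *		bpf_obj_drop(container_of(res, struct node_data, node));
 */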
10829
1d18feb2
JK
10830static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
10831 int insn_idx)
00b85860
KKD
10832{
10833 const char *func_name = meta->func_name, *ref_tname;
10834 const struct btf *btf = meta->btf;
10835 const struct btf_param *args;
7c50b1cb 10836 struct btf_record *rec;
00b85860
KKD
10837 u32 i, nargs;
10838 int ret;
10839
10840 args = (const struct btf_param *)(meta->func_proto + 1);
10841 nargs = btf_type_vlen(meta->func_proto);
10842 if (nargs > MAX_BPF_FUNC_REG_ARGS) {
10843 verbose(env, "Function %s has %d > %d args\n", func_name, nargs,
10844 MAX_BPF_FUNC_REG_ARGS);
10845 return -EINVAL;
10846 }
10847
10848 /* Check that BTF function arguments match actual types that the
10849 * verifier sees.
10850 */
10851 for (i = 0; i < nargs; i++) {
10852 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1];
10853 const struct btf_type *t, *ref_t, *resolve_ret;
10854 enum bpf_arg_type arg_type = ARG_DONTCARE;
10855 u32 regno = i + 1, ref_id, type_size;
10856 bool is_ret_buf_sz = false;
10857 int kf_arg_type;
10858
10859 t = btf_type_skip_modifiers(btf, args[i].type, NULL);
958cf2e2
KKD
10860
10861 if (is_kfunc_arg_ignore(btf, &args[i]))
10862 continue;
10863
00b85860
KKD
10864 if (btf_type_is_scalar(t)) {
10865 if (reg->type != SCALAR_VALUE) {
10866 verbose(env, "R%d is not a scalar\n", regno);
10867 return -EINVAL;
10868 }
a50388db
KKD
10869
10870 if (is_kfunc_arg_constant(meta->btf, &args[i])) {
10871 if (meta->arg_constant.found) {
10872 verbose(env, "verifier internal error: only one constant argument permitted\n");
10873 return -EFAULT;
10874 }
10875 if (!tnum_is_const(reg->var_off)) {
10876 verbose(env, "R%d must be a known constant\n", regno);
10877 return -EINVAL;
10878 }
10879 ret = mark_chain_precision(env, regno);
10880 if (ret < 0)
10881 return ret;
10882 meta->arg_constant.found = true;
10883 meta->arg_constant.value = reg->var_off.value;
10884 } else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdonly_buf_size")) {
00b85860
KKD
10885 meta->r0_rdonly = true;
10886 is_ret_buf_sz = true;
10887 } else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdwr_buf_size")) {
10888 is_ret_buf_sz = true;
10889 }
10890
10891 if (is_ret_buf_sz) {
10892 if (meta->r0_size) {
10893 verbose(env, "2 or more rdonly/rdwr_buf_size parameters for kfunc");
10894 return -EINVAL;
10895 }
10896
10897 if (!tnum_is_const(reg->var_off)) {
10898 verbose(env, "R%d is not a const\n", regno);
10899 return -EINVAL;
10900 }
10901
10902 meta->r0_size = reg->var_off.value;
10903 ret = mark_chain_precision(env, regno);
10904 if (ret)
10905 return ret;
10906 }
10907 continue;
10908 }
10909
10910 if (!btf_type_is_ptr(t)) {
10911 verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t));
10912 return -EINVAL;
10913 }
10914
20c09d92 10915 if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) &&
caf713c3
DV
10916 (register_is_null(reg) || type_may_be_null(reg->type))) {
10917 verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
10918 return -EACCES;
10919 }
10920
00b85860
KKD
10921 if (reg->ref_obj_id) {
10922 if (is_kfunc_release(meta) && meta->ref_obj_id) {
10923 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
10924 regno, reg->ref_obj_id,
10925 meta->ref_obj_id);
10926 return -EFAULT;
10927 }
10928 meta->ref_obj_id = reg->ref_obj_id;
10929 if (is_kfunc_release(meta))
10930 meta->release_regno = regno;
10931 }
10932
10933 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
10934 ref_tname = btf_name_by_offset(btf, ref_t->name_off);
10935
10936 kf_arg_type = get_kfunc_ptr_arg_type(env, meta, t, ref_t, ref_tname, args, i, nargs);
10937 if (kf_arg_type < 0)
10938 return kf_arg_type;
10939
10940 switch (kf_arg_type) {
ac9f0605 10941 case KF_ARG_PTR_TO_ALLOC_BTF_ID:
00b85860 10942 case KF_ARG_PTR_TO_BTF_ID:
fca1aa75 10943 if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta))
00b85860 10944 break;
3f00c523
DV
10945
10946 if (!is_trusted_reg(reg)) {
fca1aa75
YS
10947 if (!is_kfunc_rcu(meta)) {
10948 verbose(env, "R%d must be referenced or trusted\n", regno);
10949 return -EINVAL;
10950 }
10951 if (!is_rcu_reg(reg)) {
10952 verbose(env, "R%d must be a rcu pointer\n", regno);
10953 return -EINVAL;
10954 }
00b85860 10955 }
fca1aa75 10956
00b85860
KKD
10957 fallthrough;
10958 case KF_ARG_PTR_TO_CTX:
10959 /* Trusted arguments have the same offset checks as release arguments */
10960 arg_type |= OBJ_RELEASE;
10961 break;
00b85860 10962 case KF_ARG_PTR_TO_DYNPTR:
06accc87 10963 case KF_ARG_PTR_TO_ITER:
8cab76ec
KKD
10964 case KF_ARG_PTR_TO_LIST_HEAD:
10965 case KF_ARG_PTR_TO_LIST_NODE:
cd6791b4
DM
10966 case KF_ARG_PTR_TO_RB_ROOT:
10967 case KF_ARG_PTR_TO_RB_NODE:
00b85860
KKD
10968 case KF_ARG_PTR_TO_MEM:
10969 case KF_ARG_PTR_TO_MEM_SIZE:
5d92ddc3 10970 case KF_ARG_PTR_TO_CALLBACK:
7c50b1cb 10971 case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
00b85860
KKD
10972 /* Trusted by default */
10973 break;
10974 default:
10975 WARN_ON_ONCE(1);
10976 return -EFAULT;
10977 }
10978
10979 if (is_kfunc_release(meta) && reg->ref_obj_id)
10980 arg_type |= OBJ_RELEASE;
10981 ret = check_func_arg_reg_off(env, reg, regno, arg_type);
10982 if (ret < 0)
10983 return ret;
10984
10985 switch (kf_arg_type) {
10986 case KF_ARG_PTR_TO_CTX:
10987 if (reg->type != PTR_TO_CTX) {
10988 verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t));
10989 return -EINVAL;
10990 }
fd264ca0
YS
10991
10992 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
10993 ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog));
10994 if (ret < 0)
10995 return -EINVAL;
10996 meta->ret_btf_id = ret;
10997 }
00b85860 10998 break;
ac9f0605
KKD
10999 case KF_ARG_PTR_TO_ALLOC_BTF_ID:
11000 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
11001 verbose(env, "arg#%d expected pointer to allocated object\n", i);
11002 return -EINVAL;
11003 }
11004 if (!reg->ref_obj_id) {
11005 verbose(env, "allocated object must be referenced\n");
11006 return -EINVAL;
11007 }
11008 if (meta->btf == btf_vmlinux &&
11009 meta->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
4d585f48
DM
11010 meta->arg_btf = reg->btf;
11011 meta->arg_btf_id = reg->btf_id;
ac9f0605
KKD
11012 }
11013 break;
00b85860 11014 case KF_ARG_PTR_TO_DYNPTR:
d96d937d
JK
11015 {
11016 enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR;
361f129f 11017 int clone_ref_obj_id = 0;
d96d937d 11018
6b75bd3d 11019 if (reg->type != PTR_TO_STACK &&
27060531 11020 reg->type != CONST_PTR_TO_DYNPTR) {
6b75bd3d 11021 verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i);
00b85860
KKD
11022 return -EINVAL;
11023 }
11024
d96d937d
JK
11025 if (reg->type == CONST_PTR_TO_DYNPTR)
11026 dynptr_arg_type |= MEM_RDONLY;
11027
11028 if (is_kfunc_arg_uninit(btf, &args[i]))
11029 dynptr_arg_type |= MEM_UNINIT;
11030
361f129f 11031 if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
b5964b96 11032 dynptr_arg_type |= DYNPTR_TYPE_SKB;
361f129f 11033 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) {
05421aec 11034 dynptr_arg_type |= DYNPTR_TYPE_XDP;
361f129f
JK
11035 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] &&
11036 (dynptr_arg_type & MEM_UNINIT)) {
11037 enum bpf_dynptr_type parent_type = meta->initialized_dynptr.type;
11038
11039 if (parent_type == BPF_DYNPTR_TYPE_INVALID) {
11040 verbose(env, "verifier internal error: no dynptr type for parent of clone\n");
11041 return -EFAULT;
11042 }
11043
11044 dynptr_arg_type |= (unsigned int)get_dynptr_type_flag(parent_type);
11045 clone_ref_obj_id = meta->initialized_dynptr.ref_obj_id;
11046 if (dynptr_type_refcounted(parent_type) && !clone_ref_obj_id) {
11047 verbose(env, "verifier internal error: missing ref obj id for parent of clone\n");
11048 return -EFAULT;
11049 }
11050 }
b5964b96 11051
361f129f 11052 ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type, clone_ref_obj_id);
6b75bd3d
KKD
11053 if (ret < 0)
11054 return ret;
66e3a13e
JK
11055
11056 if (!(dynptr_arg_type & MEM_UNINIT)) {
11057 int id = dynptr_id(env, reg);
11058
11059 if (id < 0) {
11060 verbose(env, "verifier internal error: failed to obtain dynptr id\n");
11061 return id;
11062 }
11063 meta->initialized_dynptr.id = id;
11064 meta->initialized_dynptr.type = dynptr_get_type(env, reg);
361f129f 11065 meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg);
66e3a13e
JK
11066 }
11067
00b85860 11068 break;
d96d937d 11069 }
06accc87
AN
11070 case KF_ARG_PTR_TO_ITER:
11071 ret = process_iter_arg(env, regno, insn_idx, meta);
11072 if (ret < 0)
11073 return ret;
11074 break;
8cab76ec
KKD
11075 case KF_ARG_PTR_TO_LIST_HEAD:
11076 if (reg->type != PTR_TO_MAP_VALUE &&
11077 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
11078 verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
11079 return -EINVAL;
11080 }
11081 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
11082 verbose(env, "allocated object must be referenced\n");
11083 return -EINVAL;
11084 }
11085 ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta);
11086 if (ret < 0)
11087 return ret;
11088 break;
cd6791b4
DM
11089 case KF_ARG_PTR_TO_RB_ROOT:
11090 if (reg->type != PTR_TO_MAP_VALUE &&
11091 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
11092 verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
11093 return -EINVAL;
11094 }
11095 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
11096 verbose(env, "allocated object must be referenced\n");
11097 return -EINVAL;
11098 }
11099 ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta);
11100 if (ret < 0)
11101 return ret;
11102 break;
8cab76ec
KKD
11103 case KF_ARG_PTR_TO_LIST_NODE:
11104 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
11105 verbose(env, "arg#%d expected pointer to allocated object\n", i);
11106 return -EINVAL;
11107 }
11108 if (!reg->ref_obj_id) {
11109 verbose(env, "allocated object must be referenced\n");
11110 return -EINVAL;
11111 }
11112 ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta);
11113 if (ret < 0)
11114 return ret;
11115 break;
cd6791b4 11116 case KF_ARG_PTR_TO_RB_NODE:
a40d3632
DM
11117 if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) {
11118 if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
11119 verbose(env, "rbtree_remove node input must be non-owning ref\n");
11120 return -EINVAL;
11121 }
11122 if (in_rbtree_lock_required_cb(env)) {
11123 verbose(env, "rbtree_remove not allowed in rbtree cb\n");
11124 return -EINVAL;
11125 }
11126 } else {
11127 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
11128 verbose(env, "arg#%d expected pointer to allocated object\n", i);
11129 return -EINVAL;
11130 }
11131 if (!reg->ref_obj_id) {
11132 verbose(env, "allocated object must be referenced\n");
11133 return -EINVAL;
11134 }
cd6791b4 11135 }
a40d3632 11136
cd6791b4
DM
11137 ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta);
11138 if (ret < 0)
11139 return ret;
11140 break;
00b85860
KKD
11141 case KF_ARG_PTR_TO_BTF_ID:
11142 /* Only base_type is checked, further checks are done here */
3f00c523 11143 if ((base_type(reg->type) != PTR_TO_BTF_ID ||
fca1aa75 11144 (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) &&
3f00c523
DV
11145 !reg2btf_ids[base_type(reg->type)]) {
11146 verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type));
11147 verbose(env, "expected %s or socket\n",
11148 reg_type_str(env, base_type(reg->type) |
11149 (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
00b85860
KKD
11150 return -EINVAL;
11151 }
11152 ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i);
11153 if (ret < 0)
11154 return ret;
11155 break;
11156 case KF_ARG_PTR_TO_MEM:
11157 resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
11158 if (IS_ERR(resolve_ret)) {
11159 verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
11160 i, btf_type_str(ref_t), ref_tname, PTR_ERR(resolve_ret));
11161 return -EINVAL;
11162 }
11163 ret = check_mem_reg(env, reg, regno, type_size);
11164 if (ret < 0)
11165 return ret;
11166 break;
11167 case KF_ARG_PTR_TO_MEM_SIZE:
66e3a13e 11168 {
3bda08b6
DR
11169 struct bpf_reg_state *buff_reg = &regs[regno];
11170 const struct btf_param *buff_arg = &args[i];
66e3a13e
JK
11171 struct bpf_reg_state *size_reg = &regs[regno + 1];
11172 const struct btf_param *size_arg = &args[i + 1];
11173
3bda08b6
DR
11174 if (!register_is_null(buff_reg) || !is_kfunc_arg_optional(meta->btf, buff_arg)) {
11175 ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1);
11176 if (ret < 0) {
11177 verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1);
11178 return ret;
11179 }
00b85860 11180 }
66e3a13e
JK
11181
11182 if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) {
11183 if (meta->arg_constant.found) {
11184 verbose(env, "verifier internal error: only one constant argument permitted\n");
11185 return -EFAULT;
11186 }
11187 if (!tnum_is_const(size_reg->var_off)) {
11188 verbose(env, "R%d must be a known constant\n", regno + 1);
11189 return -EINVAL;
11190 }
11191 meta->arg_constant.found = true;
11192 meta->arg_constant.value = size_reg->var_off.value;
11193 }
11194
11195 /* Skip next '__sz' or '__szk' argument */
00b85860
KKD
11196 i++;
11197 break;
66e3a13e 11198 }
5d92ddc3
DM
11199 case KF_ARG_PTR_TO_CALLBACK:
11200 meta->subprogno = reg->subprogno;
11201 break;
7c50b1cb 11202 case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
7793fc3b 11203 if (!type_is_ptr_alloc_obj(reg->type)) {
7c50b1cb
DM
11204 verbose(env, "arg#%d is neither owning or non-owning ref\n", i);
11205 return -EINVAL;
11206 }
7793fc3b
DM
11207 if (!type_is_non_owning_ref(reg->type))
11208 meta->arg_owning_ref = true;
7c50b1cb
DM
11209
11210 rec = reg_btf_record(reg);
11211 if (!rec) {
11212 verbose(env, "verifier internal error: Couldn't find btf_record\n");
11213 return -EFAULT;
11214 }
11215
11216 if (rec->refcount_off < 0) {
11217 verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i);
11218 return -EINVAL;
11219 }
7deca5ea
DM
11220 if (rec->refcount_off >= 0) {
11221 verbose(env, "bpf_refcount_acquire calls are disabled for now\n");
11222 return -EINVAL;
11223 }
4d585f48
DM
11224 meta->arg_btf = reg->btf;
11225 meta->arg_btf_id = reg->btf_id;
7c50b1cb 11226 break;
00b85860
KKD
11227 }
11228 }
11229
11230 if (is_kfunc_release(meta) && !meta->release_regno) {
11231 verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
11232 func_name);
11233 return -EINVAL;
11234 }
11235
11236 return 0;
11237}
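/* Editor's sketch (not verifier code): what the KF_ARG_PTR_TO_MEM_SIZE
 * handling above means for a kfunc author. The kfunc below is
 * hypothetical; the __sz / __szk parameter suffixes are the conventions
 * the code keys off, __szk additionally forcing the size to be a known
 * constant that is recorded in meta->arg_constant.
 *
 *   __bpf_kfunc int bpf_example_fill(void *buf, u32 buf__sz)
 *   {
 *           memset(buf, 0, buf__sz);
 *           return 0;
 *   }
 *
 * At a call site the verifier pairs the pointer register with the size
 * register that follows it, validates the region via
 * check_kfunc_mem_size_reg(), and then skips the size argument in the
 * argument loop (the i++ in the KF_ARG_PTR_TO_MEM_SIZE case above).
 */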
11238
07236eab
AN
11239static int fetch_kfunc_meta(struct bpf_verifier_env *env,
11240 struct bpf_insn *insn,
11241 struct bpf_kfunc_call_arg_meta *meta,
11242 const char **kfunc_name)
e6ac2450 11243{
07236eab
AN
11244 const struct btf_type *func, *func_proto;
11245 u32 func_id, *kfunc_flags;
11246 const char *func_name;
2357672c 11247 struct btf *desc_btf;
e6ac2450 11248
07236eab
AN
11249 if (kfunc_name)
11250 *kfunc_name = NULL;
11251
a5d82727 11252 if (!insn->imm)
07236eab 11253 return -EINVAL;
a5d82727 11254
43bf0878 11255 desc_btf = find_kfunc_desc_btf(env, insn->off);
2357672c
KKD
11256 if (IS_ERR(desc_btf))
11257 return PTR_ERR(desc_btf);
11258
e6ac2450 11259 func_id = insn->imm;
2357672c
KKD
11260 func = btf_type_by_id(desc_btf, func_id);
11261 func_name = btf_name_by_offset(desc_btf, func->name_off);
07236eab
AN
11262 if (kfunc_name)
11263 *kfunc_name = func_name;
2357672c 11264 func_proto = btf_type_by_id(desc_btf, func->type);
e6ac2450 11265
e924e80e 11266 kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog);
a4703e31 11267 if (!kfunc_flags) {
e6ac2450
MKL
11268 return -EACCES;
11269 }
00b85860 11270
07236eab
AN
11271 memset(meta, 0, sizeof(*meta));
11272 meta->btf = desc_btf;
11273 meta->func_id = func_id;
11274 meta->kfunc_flags = *kfunc_flags;
11275 meta->func_proto = func_proto;
11276 meta->func_name = func_name;
11277
11278 return 0;
11279}
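/* Editor's note (sketch): for a kfunc call instruction, insn->imm is the
 * BTF type id of the callee and insn->off names the BTF object it is
 * resolved against (0 meaning vmlinux BTF, otherwise a module BTF found
 * by find_kfunc_desc_btf()). btf_kfunc_id_set_contains() then returns
 * the KF_* flag word that the is_kfunc_*() predicates decode.
 */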
11280
11281static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
11282 int *insn_idx_p)
11283{
11284 const struct btf_type *t, *ptr_type;
11285 u32 i, nargs, ptr_type_id, release_ref_obj_id;
11286 struct bpf_reg_state *regs = cur_regs(env);
11287 const char *func_name, *ptr_type_name;
11288 bool sleepable, rcu_lock, rcu_unlock;
11289 struct bpf_kfunc_call_arg_meta meta;
11290 struct bpf_insn_aux_data *insn_aux;
11291 int err, insn_idx = *insn_idx_p;
11292 const struct btf_param *args;
11293 const struct btf_type *ret_t;
11294 struct btf *desc_btf;
11295
11296 /* skip for now, but return error when we find this in fixup_kfunc_call */
11297 if (!insn->imm)
11298 return 0;
11299
11300 err = fetch_kfunc_meta(env, insn, &meta, &func_name);
11301 if (err == -EACCES && func_name)
11302 verbose(env, "calling kernel function %s is not allowed\n", func_name);
11303 if (err)
11304 return err;
11305 desc_btf = meta.btf;
11306 insn_aux = &env->insn_aux_data[insn_idx];
00b85860 11307
06accc87
AN
11308 insn_aux->is_iter_next = is_iter_next_kfunc(&meta);
11309
00b85860
KKD
11310 if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) {
11311 verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n");
4dd48c6f
AS
11312 return -EACCES;
11313 }
11314
9bb00b28
YS
11315 sleepable = is_kfunc_sleepable(&meta);
11316 if (sleepable && !env->prog->aux->sleepable) {
00b85860
KKD
11317 verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name);
11318 return -EACCES;
11319 }
eb1f7f71 11320
9bb00b28
YS
11321 rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
11322 rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);
9bb00b28
YS
11323
11324 if (env->cur_state->active_rcu_lock) {
11325 struct bpf_func_state *state;
11326 struct bpf_reg_state *reg;
11327
11328 if (rcu_lock) {
11329 verbose(env, "nested rcu read lock (kernel function %s)\n", func_name);
11330 return -EINVAL;
11331 } else if (rcu_unlock) {
11332 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
11333 if (reg->type & MEM_RCU) {
fca1aa75 11334 reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL);
9bb00b28
YS
11335 reg->type |= PTR_UNTRUSTED;
11336 }
11337 }));
11338 env->cur_state->active_rcu_lock = false;
11339 } else if (sleepable) {
11340 verbose(env, "kernel func %s is sleepable within rcu_read_lock region\n", func_name);
11341 return -EACCES;
11342 }
11343 } else if (rcu_lock) {
11344 env->cur_state->active_rcu_lock = true;
11345 } else if (rcu_unlock) {
11346 verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name);
11347 return -EINVAL;
11348 }
11349
e6ac2450 11350 /* Check the arguments */
1d18feb2 11351 err = check_kfunc_args(env, &meta, insn_idx);
5c073f26 11352 if (err < 0)
e6ac2450 11353 return err;
5c073f26 11354 /* In case of release function, we get register number of refcounted
00b85860 11355 * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now.
5c073f26 11356 */
00b85860
KKD
11357 if (meta.release_regno) {
11358 err = release_reference(env, regs[meta.release_regno].ref_obj_id);
5c073f26
KKD
11359 if (err) {
11360 verbose(env, "kfunc %s#%d reference has not been acquired before\n",
07236eab 11361 func_name, meta.func_id);
5c073f26
KKD
11362 return err;
11363 }
11364 }
e6ac2450 11365
d2dcc67d
DM
11366 if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
11367 meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
11368 meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
6a3cd331 11369 release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
d2dcc67d 11370 insn_aux->insert_off = regs[BPF_REG_2].off;
2140a6e3 11371 insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id);
6a3cd331
DM
11372 err = ref_convert_owning_non_owning(env, release_ref_obj_id);
11373 if (err) {
11374 verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n",
07236eab 11375 func_name, meta.func_id);
6a3cd331
DM
11376 return err;
11377 }
11378
11379 err = release_reference(env, release_ref_obj_id);
11380 if (err) {
11381 verbose(env, "kfunc %s#%d reference has not been acquired before\n",
07236eab 11382 func_name, meta.func_id);
6a3cd331
DM
11383 return err;
11384 }
11385 }
11386
d2dcc67d 11387 if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
5d92ddc3
DM
11388 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
11389 set_rbtree_add_callback_state);
11390 if (err) {
11391 verbose(env, "kfunc %s#%d failed callback verification\n",
07236eab 11392 func_name, meta.func_id);
5d92ddc3
DM
11393 return err;
11394 }
11395 }
11396
e6ac2450
MKL
11397 for (i = 0; i < CALLER_SAVED_REGS; i++)
11398 mark_reg_not_init(env, regs, caller_saved[i]);
11399
11400 /* Check return type */
07236eab 11401 t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL);
5c073f26 11402
00b85860 11403 if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) {
958cf2e2 11404 /* Only exception is bpf_obj_new_impl */
7c50b1cb
DM
11405 if (meta.btf != btf_vmlinux ||
11406 (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] &&
11407 meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) {
958cf2e2
KKD
11408 verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
11409 return -EINVAL;
11410 }
5c073f26
KKD
11411 }
11412
e6ac2450
MKL
11413 if (btf_type_is_scalar(t)) {
11414 mark_reg_unknown(env, regs, BPF_REG_0);
11415 mark_btf_func_reg_size(env, BPF_REG_0, t->size);
11416 } else if (btf_type_is_ptr(t)) {
958cf2e2
KKD
11417 ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id);
11418
11419 if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
11420 if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
958cf2e2
KKD
11421 struct btf *ret_btf;
11422 u32 ret_btf_id;
11423
e181d3f1
KKD
11424 if (unlikely(!bpf_global_ma_set))
11425 return -ENOMEM;
11426
958cf2e2
KKD
11427 if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) {
11428 verbose(env, "local type ID argument must be in range [0, U32_MAX]\n");
11429 return -EINVAL;
11430 }
11431
11432 ret_btf = env->prog->aux->btf;
11433 ret_btf_id = meta.arg_constant.value;
11434
11435 /* This may be NULL due to user not supplying a BTF */
11436 if (!ret_btf) {
11437 verbose(env, "bpf_obj_new requires prog BTF\n");
11438 return -EINVAL;
11439 }
11440
11441 ret_t = btf_type_by_id(ret_btf, ret_btf_id);
11442 if (!ret_t || !__btf_type_is_struct(ret_t)) {
11443 verbose(env, "bpf_obj_new type ID argument must be of a struct\n");
11444 return -EINVAL;
11445 }
11446
11447 mark_reg_known_zero(env, regs, BPF_REG_0);
11448 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
11449 regs[BPF_REG_0].btf = ret_btf;
11450 regs[BPF_REG_0].btf_id = ret_btf_id;
11451
07236eab
AN
11452 insn_aux->obj_new_size = ret_t->size;
11453 insn_aux->kptr_struct_meta =
958cf2e2 11454 btf_find_struct_meta(ret_btf, ret_btf_id);
7c50b1cb
DM
11455 } else if (meta.func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
11456 mark_reg_known_zero(env, regs, BPF_REG_0);
11457 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
4d585f48
DM
11458 regs[BPF_REG_0].btf = meta.arg_btf;
11459 regs[BPF_REG_0].btf_id = meta.arg_btf_id;
7c50b1cb
DM
11460
11461 insn_aux->kptr_struct_meta =
4d585f48
DM
11462 btf_find_struct_meta(meta.arg_btf,
11463 meta.arg_btf_id);
8cab76ec
KKD
11464 } else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
11465 meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
11466 struct btf_field *field = meta.arg_list_head.field;
11467
a40d3632
DM
11468 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
11469 } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
11470 meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
11471 struct btf_field *field = meta.arg_rbtree_root.field;
11472
11473 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
fd264ca0
YS
11474 } else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
11475 mark_reg_known_zero(env, regs, BPF_REG_0);
11476 regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
11477 regs[BPF_REG_0].btf = desc_btf;
11478 regs[BPF_REG_0].btf_id = meta.ret_btf_id;
a35b9af4
YS
11479 } else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
11480 ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value);
11481 if (!ret_t || !btf_type_is_struct(ret_t)) {
11482 verbose(env,
11483 "kfunc bpf_rdonly_cast type ID argument must be of a struct\n");
11484 return -EINVAL;
11485 }
11486
11487 mark_reg_known_zero(env, regs, BPF_REG_0);
11488 regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
11489 regs[BPF_REG_0].btf = desc_btf;
11490 regs[BPF_REG_0].btf_id = meta.arg_constant.value;
66e3a13e
JK
11491 } else if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice] ||
11492 meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) {
11493 enum bpf_type_flag type_flag = get_dynptr_type_flag(meta.initialized_dynptr.type);
11494
11495 mark_reg_known_zero(env, regs, BPF_REG_0);
11496
11497 if (!meta.arg_constant.found) {
11498 verbose(env, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n");
11499 return -EFAULT;
11500 }
11501
11502 regs[BPF_REG_0].mem_size = meta.arg_constant.value;
11503
11504 /* PTR_MAYBE_NULL will be added when is_kfunc_ret_null is checked */
11505 regs[BPF_REG_0].type = PTR_TO_MEM | type_flag;
11506
11507 if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice]) {
11508 regs[BPF_REG_0].type |= MEM_RDONLY;
11509 } else {
11510 /* this will set env->seen_direct_write to true */
11511 if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE)) {
11512 verbose(env, "the prog does not allow writes to packet data\n");
11513 return -EINVAL;
11514 }
11515 }
11516
11517 if (!meta.initialized_dynptr.id) {
11518 verbose(env, "verifier internal error: no dynptr id\n");
11519 return -EFAULT;
11520 }
11521 regs[BPF_REG_0].dynptr_id = meta.initialized_dynptr.id;
11522
11523 /* we don't need to set BPF_REG_0's ref obj id
11524 * because packet slices are not refcounted (see
11525 * dynptr_type_refcounted)
11526 */
958cf2e2
KKD
11527 } else {
11528 verbose(env, "kernel function %s unhandled dynamic return type\n",
11529 meta.func_name);
11530 return -EFAULT;
11531 }
11532 } else if (!__btf_type_is_struct(ptr_type)) {
f4b4eee6
AN
11533 if (!meta.r0_size) {
11534 __u32 sz;
11535
11536 if (!IS_ERR(btf_resolve_size(desc_btf, ptr_type, &sz))) {
11537 meta.r0_size = sz;
11538 meta.r0_rdonly = true;
11539 }
11540 }
eb1f7f71
BT
11541 if (!meta.r0_size) {
11542 ptr_type_name = btf_name_by_offset(desc_btf,
11543 ptr_type->name_off);
11544 verbose(env,
11545 "kernel function %s returns pointer type %s %s is not supported\n",
11546 func_name,
11547 btf_type_str(ptr_type),
11548 ptr_type_name);
11549 return -EINVAL;
11550 }
11551
11552 mark_reg_known_zero(env, regs, BPF_REG_0);
11553 regs[BPF_REG_0].type = PTR_TO_MEM;
11554 regs[BPF_REG_0].mem_size = meta.r0_size;
11555
11556 if (meta.r0_rdonly)
11557 regs[BPF_REG_0].type |= MEM_RDONLY;
11558
11559 /* Ensures we don't access the memory after a release_reference() */
11560 if (meta.ref_obj_id)
11561 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
11562 } else {
11563 mark_reg_known_zero(env, regs, BPF_REG_0);
11564 regs[BPF_REG_0].btf = desc_btf;
11565 regs[BPF_REG_0].type = PTR_TO_BTF_ID;
11566 regs[BPF_REG_0].btf_id = ptr_type_id;
e6ac2450 11567 }
958cf2e2 11568
00b85860 11569 if (is_kfunc_ret_null(&meta)) {
5c073f26
KKD
11570 regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
11571 /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
11572 regs[BPF_REG_0].id = ++env->id_gen;
11573 }
e6ac2450 11574 mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
00b85860 11575 if (is_kfunc_acquire(&meta)) {
5c073f26
KKD
11576 int id = acquire_reference_state(env, insn_idx);
11577
11578 if (id < 0)
11579 return id;
00b85860
KKD
11580 if (is_kfunc_ret_null(&meta))
11581 regs[BPF_REG_0].id = id;
5c073f26 11582 regs[BPF_REG_0].ref_obj_id = id;
a40d3632
DM
11583 } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
11584 ref_set_non_owning(env, &regs[BPF_REG_0]);
5c073f26 11585 }
a40d3632 11586
00b85860
KKD
11587 if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
11588 regs[BPF_REG_0].id = ++env->id_gen;
f6a6a5a9
DM
11589 } else if (btf_type_is_void(t)) {
11590 if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
11591 if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
11592 insn_aux->kptr_struct_meta =
4d585f48
DM
11593 btf_find_struct_meta(meta.arg_btf,
11594 meta.arg_btf_id);
f6a6a5a9
DM
11595 }
11596 }
11597 }
e6ac2450 11598
07236eab
AN
11599 nargs = btf_type_vlen(meta.func_proto);
11600 args = (const struct btf_param *)(meta.func_proto + 1);
e6ac2450
MKL
11601 for (i = 0; i < nargs; i++) {
11602 u32 regno = i + 1;
11603
2357672c 11604 t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
e6ac2450
MKL
11605 if (btf_type_is_ptr(t))
11606 mark_btf_func_reg_size(env, regno, sizeof(void *));
11607 else
11608 /* scalar. ensured by btf_check_kfunc_arg_match() */
11609 mark_btf_func_reg_size(env, regno, t->size);
11610 }
11611
06accc87
AN
11612 if (is_iter_next_kfunc(&meta)) {
11613 err = process_iter_next_call(env, insn_idx, &meta);
11614 if (err)
11615 return err;
11616 }
11617
e6ac2450
MKL
11618 return 0;
11619}
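/* Editor's sketch (not verifier code) of the return-value marking above
 * as seen from a BPF program. 'struct node_data' and 'n' are
 * hypothetical; bpf_obj_new()/bpf_obj_drop() are the kfuncs served by
 * the KF_bpf_obj_new_impl / KF_bpf_obj_drop_impl special cases:
 *
 *   struct node_data { long key; };
 *
 *   struct node_data *n = bpf_obj_new(typeof(*n));
 *   // R0: PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL with a fresh
 *   // ref_obj_id (is_kfunc_acquire() + is_kfunc_ret_null() paths)
 *   if (!n)
 *           return 0;         // NULL check strips PTR_MAYBE_NULL
 *   bpf_obj_drop(n);          // release kfunc: reference released,
 *                             // kptr_struct_meta recorded for fixup
 */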
11620
b03c9f9f
EC
11621static bool signed_add_overflows(s64 a, s64 b)
11622{
11623 /* Do the add in u64, where overflow is well-defined */
11624 s64 res = (s64)((u64)a + (u64)b);
11625
11626 if (b < 0)
11627 return res > a;
11628 return res < a;
11629}
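/* Worked example (editor's sketch) for the helper above: with a = S64_MAX
 * and b = 1 the u64 sum wraps to 0x8000000000000000, which reads back as
 * S64_MIN; b > 0 selects the "res < a" test and S64_MIN < S64_MAX, so the
 * overflow is caught. Symmetrically, a = S64_MIN and b = -1 wraps to
 * S64_MAX and trips "res > a". The 32-bit variant below is identical
 * modulo width, as are the subtraction helpers.
 */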
11630
bc895e8b 11631static bool signed_add32_overflows(s32 a, s32 b)
3f50f132
JF
11632{
11633 /* Do the add in u32, where overflow is well-defined */
11634 s32 res = (s32)((u32)a + (u32)b);
11635
11636 if (b < 0)
11637 return res > a;
11638 return res < a;
11639}
11640
bc895e8b 11641static bool signed_sub_overflows(s64 a, s64 b)
b03c9f9f
EC
11642{
11643 /* Do the sub in u64, where overflow is well-defined */
11644 s64 res = (s64)((u64)a - (u64)b);
11645
11646 if (b < 0)
11647 return res < a;
11648 return res > a;
969bf05e
AS
11649}
11650
3f50f132
JF
11651static bool signed_sub32_overflows(s32 a, s32 b)
11652{
bc895e8b 11653 /* Do the sub in u32, where overflow is well-defined */
3f50f132
JF
11654 s32 res = (s32)((u32)a - (u32)b);
11655
11656 if (b < 0)
11657 return res < a;
11658 return res > a;
11659}
11660
bb7f0f98
AS
11661static bool check_reg_sane_offset(struct bpf_verifier_env *env,
11662 const struct bpf_reg_state *reg,
11663 enum bpf_reg_type type)
11664{
11665 bool known = tnum_is_const(reg->var_off);
11666 s64 val = reg->var_off.value;
11667 s64 smin = reg->smin_value;
11668
11669 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
11670 verbose(env, "math between %s pointer and %lld is not allowed\n",
c25b2ae1 11671 reg_type_str(env, type), val);
bb7f0f98
AS
11672 return false;
11673 }
11674
11675 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
11676 verbose(env, "%s pointer offset %d is not allowed\n",
c25b2ae1 11677 reg_type_str(env, type), reg->off);
bb7f0f98
AS
11678 return false;
11679 }
11680
11681 if (smin == S64_MIN) {
11682 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
c25b2ae1 11683 reg_type_str(env, type));
bb7f0f98
AS
11684 return false;
11685 }
11686
11687 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
11688 verbose(env, "value %lld makes %s pointer be out of bounds\n",
c25b2ae1 11689 smin, reg_type_str(env, type));
bb7f0f98
AS
11690 return false;
11691 }
11692
11693 return true;
11694}
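/* Editor's sketch of what the checks above reject (BPF_MAX_VAR_OFF is
 * 1 << 29): a known constant at or beyond that bound, e.g.
 *
 *   r1 = r10;
 *   r1 += 0x20000000;
 *
 * fails with "math between fp pointer and 536870912 is not allowed",
 * and a completely unbounded scalar (smin == S64_MIN) is refused before
 * any pointer bounds are derived from it.
 */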
11695
a6aaece0
DB
11696enum {
11697 REASON_BOUNDS = -1,
11698 REASON_TYPE = -2,
11699 REASON_PATHS = -3,
11700 REASON_LIMIT = -4,
11701 REASON_STACK = -5,
11702};
11703
979d63d5 11704static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
bb01a1bb 11705 u32 *alu_limit, bool mask_to_left)
979d63d5 11706{
7fedb63a 11707 u32 max = 0, ptr_limit = 0;
979d63d5
DB
11708
11709 switch (ptr_reg->type) {
11710 case PTR_TO_STACK:
1b1597e6 11711 /* Offset 0 is out-of-bounds, but acceptable start for the
7fedb63a
DB
11712 * left direction, see BPF_REG_FP. Also, unknown scalar
11713 * offset where we would need to deal with min/max bounds is
11714 * currently prohibited for unprivileged.
1b1597e6
PK
11715 */
11716 max = MAX_BPF_STACK + mask_to_left;
7fedb63a 11717 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
b658bbb8 11718 break;
979d63d5 11719 case PTR_TO_MAP_VALUE:
1b1597e6 11720 max = ptr_reg->map_ptr->value_size;
7fedb63a
DB
11721 ptr_limit = (mask_to_left ?
11722 ptr_reg->smin_value :
11723 ptr_reg->umax_value) + ptr_reg->off;
b658bbb8 11724 break;
979d63d5 11725 default:
a6aaece0 11726 return REASON_TYPE;
979d63d5 11727 }
b658bbb8
DB
11728
11729 if (ptr_limit >= max)
a6aaece0 11730 return REASON_LIMIT;
b658bbb8
DB
11731 *alu_limit = ptr_limit;
11732 return 0;
979d63d5
DB
11733}
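/* Worked example (editor's sketch) for the PTR_TO_STACK case above: a
 * pointer at fp-16 (off == -16, var_off == 0) gives
 * ptr_limit = -(0 + -16) = 16, so the sanitized ALU may move it at most
 * 16 bytes before crossing fp. max is MAX_BPF_STACK (512), plus one when
 * masking to the left, so REASON_LIMIT only fires for pointers already
 * at or outside the frame bounds.
 */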
11734
d3bd7413
DB
11735static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
11736 const struct bpf_insn *insn)
11737{
2c78ee89 11738 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
d3bd7413
DB
11739}
11740
11741static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
11742 u32 alu_state, u32 alu_limit)
11743{
11744 /* If we arrived here from different branches with different
11745 * state or limits to sanitize, then this won't work.
11746 */
11747 if (aux->alu_state &&
11748 (aux->alu_state != alu_state ||
11749 aux->alu_limit != alu_limit))
a6aaece0 11750 return REASON_PATHS;
d3bd7413 11751
e6ac5933 11752 /* Corresponding fixup done in do_misc_fixups(). */
d3bd7413
DB
11753 aux->alu_state = alu_state;
11754 aux->alu_limit = alu_limit;
11755 return 0;
11756}
11757
11758static int sanitize_val_alu(struct bpf_verifier_env *env,
11759 struct bpf_insn *insn)
11760{
11761 struct bpf_insn_aux_data *aux = cur_aux(env);
11762
11763 if (can_skip_alu_sanitation(env, insn))
11764 return 0;
11765
11766 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
11767}
11768
f5288193
DB
11769static bool sanitize_needed(u8 opcode)
11770{
11771 return opcode == BPF_ADD || opcode == BPF_SUB;
11772}
11773
3d0220f6
DB
11774struct bpf_sanitize_info {
11775 struct bpf_insn_aux_data aux;
bb01a1bb 11776 bool mask_to_left;
3d0220f6
DB
11777};
11778
9183671a
DB
11779static struct bpf_verifier_state *
11780sanitize_speculative_path(struct bpf_verifier_env *env,
11781 const struct bpf_insn *insn,
11782 u32 next_idx, u32 curr_idx)
11783{
11784 struct bpf_verifier_state *branch;
11785 struct bpf_reg_state *regs;
11786
11787 branch = push_stack(env, next_idx, curr_idx, true);
11788 if (branch && insn) {
11789 regs = branch->frame[branch->curframe]->regs;
11790 if (BPF_SRC(insn->code) == BPF_K) {
11791 mark_reg_unknown(env, regs, insn->dst_reg);
11792 } else if (BPF_SRC(insn->code) == BPF_X) {
11793 mark_reg_unknown(env, regs, insn->dst_reg);
11794 mark_reg_unknown(env, regs, insn->src_reg);
11795 }
11796 }
11797 return branch;
11798}
11799
979d63d5
DB
11800static int sanitize_ptr_alu(struct bpf_verifier_env *env,
11801 struct bpf_insn *insn,
11802 const struct bpf_reg_state *ptr_reg,
6f55b2f2 11803 const struct bpf_reg_state *off_reg,
979d63d5 11804 struct bpf_reg_state *dst_reg,
3d0220f6 11805 struct bpf_sanitize_info *info,
7fedb63a 11806 const bool commit_window)
979d63d5 11807{
3d0220f6 11808 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
979d63d5 11809 struct bpf_verifier_state *vstate = env->cur_state;
801c6058 11810 bool off_is_imm = tnum_is_const(off_reg->var_off);
6f55b2f2 11811 bool off_is_neg = off_reg->smin_value < 0;
979d63d5
DB
11812 bool ptr_is_dst_reg = ptr_reg == dst_reg;
11813 u8 opcode = BPF_OP(insn->code);
11814 u32 alu_state, alu_limit;
11815 struct bpf_reg_state tmp;
11816 bool ret;
f232326f 11817 int err;
979d63d5 11818
d3bd7413 11819 if (can_skip_alu_sanitation(env, insn))
979d63d5
DB
11820 return 0;
11821
11822 /* We already marked aux for masking from non-speculative
11823 * paths, thus we got here in the first place. We only care
11824 * to explore bad access from here.
11825 */
11826 if (vstate->speculative)
11827 goto do_sim;
11828
bb01a1bb
DB
11829 if (!commit_window) {
11830 if (!tnum_is_const(off_reg->var_off) &&
11831 (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
11832 return REASON_BOUNDS;
11833
11834 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
11835 (opcode == BPF_SUB && !off_is_neg);
11836 }
11837
11838 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
f232326f
PK
11839 if (err < 0)
11840 return err;
11841
7fedb63a
DB
11842 if (commit_window) {
11843 /* In commit phase we narrow the masking window based on
11844 * the observed pointer move after the simulated operation.
11845 */
3d0220f6
DB
11846 alu_state = info->aux.alu_state;
11847 alu_limit = abs(info->aux.alu_limit - alu_limit);
7fedb63a
DB
11848 } else {
11849 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
801c6058 11850 alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
7fedb63a
DB
11851 alu_state |= ptr_is_dst_reg ?
11852 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
e042aa53
DB
11853
11854 /* Limit pruning on unknown scalars to enable deep search for
11855 * potential masking differences from other program paths.
11856 */
11857 if (!off_is_imm)
11858 env->explore_alu_limits = true;
7fedb63a
DB
11859 }
11860
f232326f
PK
11861 err = update_alu_sanitation_state(aux, alu_state, alu_limit);
11862 if (err < 0)
11863 return err;
979d63d5 11864do_sim:
7fedb63a
DB
11865 /* If we're in commit phase, we're done here given we already
11866 * pushed the truncated dst_reg into the speculative verification
11867 * stack.
a7036191
DB
11868 *
11869 * Also, when register is a known constant, we rewrite register-based
11870 * operation to immediate-based, and thus do not need masking (and as
11871 * a consequence, do not need to simulate the zero-truncation either).
7fedb63a 11872 */
a7036191 11873 if (commit_window || off_is_imm)
7fedb63a
DB
11874 return 0;
11875
979d63d5
DB
11876 /* Simulate and find potential out-of-bounds access under
11877 * speculative execution from truncation as a result of
11878 * masking when off was not within expected range. If off
11879 * sits in dst, then we temporarily need to move ptr there
11880 * to simulate dst (== 0) +/-= ptr. Needed, for example,
11881 * for cases where we use K-based arithmetic in one direction
11882 * and truncated reg-based in the other in order to explore
11883 * bad access.
11884 */
11885 if (!ptr_is_dst_reg) {
11886 tmp = *dst_reg;
71f656a5 11887 copy_register_state(dst_reg, ptr_reg);
979d63d5 11888 }
9183671a
DB
11889 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
11890 env->insn_idx);
0803278b 11891 if (!ptr_is_dst_reg && ret)
979d63d5 11892 *dst_reg = tmp;
a6aaece0
DB
11893 return !ret ? REASON_STACK : 0;
11894}
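/* Editor's sketch of what the recorded alu_state/alu_limit become at
 * patching time (the emission itself lives in do_misc_fixups() and is
 * not shown here). Conceptually, for "ptr += off" with limit L:
 *
 *   ax = L;
 *   ax -= off;
 *   ax |= off;     // sign bit ends up set iff off < 0 or off > L
 *   ax = -ax;
 *   ax s>>= 63;    // effectively all-ones for in-range off, else 0
 *   off &= ax;     // out-of-range off is forced to 0
 *
 * so a speculatively executed path cannot move the pointer beyond the
 * window the verifier proved safe.
 */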
11895
fe9a5ca7
DB
11896static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
11897{
11898 struct bpf_verifier_state *vstate = env->cur_state;
11899
11900 /* If we simulate paths under speculation, we don't update the
11901 * insn as 'seen' such that when we verify unreachable paths in
11902 * the non-speculative domain, sanitize_dead_code() can still
11903 * rewrite/sanitize them.
11904 */
11905 if (!vstate->speculative)
11906 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
11907}
11908
a6aaece0
DB
11909static int sanitize_err(struct bpf_verifier_env *env,
11910 const struct bpf_insn *insn, int reason,
11911 const struct bpf_reg_state *off_reg,
11912 const struct bpf_reg_state *dst_reg)
11913{
11914 static const char *err = "pointer arithmetic with it prohibited for !root";
11915 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
11916 u32 dst = insn->dst_reg, src = insn->src_reg;
11917
11918 switch (reason) {
11919 case REASON_BOUNDS:
11920 verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
11921 off_reg == dst_reg ? dst : src, err);
11922 break;
11923 case REASON_TYPE:
11924 verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
11925 off_reg == dst_reg ? src : dst, err);
11926 break;
11927 case REASON_PATHS:
11928 verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
11929 dst, op, err);
11930 break;
11931 case REASON_LIMIT:
11932 verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
11933 dst, op, err);
11934 break;
11935 case REASON_STACK:
11936 verbose(env, "R%d could not be pushed for speculative verification, %s\n",
11937 dst, err);
11938 break;
11939 default:
11940 verbose(env, "verifier internal error: unknown reason (%d)\n",
11941 reason);
11942 break;
11943 }
11944
11945 return -EACCES;
979d63d5
DB
11946}
11947
01f810ac
AM
11948/* check that stack access falls within stack limits and that 'reg' doesn't
11949 * have a variable offset.
11950 *
11951 * Variable offset is prohibited for unprivileged mode for simplicity since it
11952 * requires corresponding support in Spectre masking for stack ALU. See also
11953 * retrieve_ptr_limit().
11954 *
11955 *
11956 * 'off' includes 'reg->off'.
11957 */
11958static int check_stack_access_for_ptr_arithmetic(
11959 struct bpf_verifier_env *env,
11960 int regno,
11961 const struct bpf_reg_state *reg,
11962 int off)
11963{
11964 if (!tnum_is_const(reg->var_off)) {
11965 char tn_buf[48];
11966
11967 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
11968 verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
11969 regno, tn_buf, off);
11970 return -EACCES;
11971 }
11972
11973 if (off >= 0 || off < -MAX_BPF_STACK) {
11974 verbose(env, "R%d stack pointer arithmetic goes out of range, "
11975 "prohibited for !root; off=%d\n", regno, off);
11976 return -EACCES;
11977 }
11978
11979 return 0;
11980}
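/* Editor's sketch of what the two tests above reject for unprivileged
 * programs (register numbers illustrative):
 *
 *   r2 = r10;
 *   r2 += r7;    // r7 unknown scalar: resulting var_off is not constant
 *                // -> "variable stack access prohibited for !root"
 *
 *   r3 = r10;
 *   r3 += 8;     // off >= 0: at or above the frame pointer
 *                // -> "stack pointer arithmetic goes out of range"
 */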
11981
073815b7
DB
11982static int sanitize_check_bounds(struct bpf_verifier_env *env,
11983 const struct bpf_insn *insn,
11984 const struct bpf_reg_state *dst_reg)
11985{
11986 u32 dst = insn->dst_reg;
11987
11988 /* For unprivileged we require that resulting offset must be in bounds
11989 * in order to be able to sanitize access later on.
11990 */
11991 if (env->bypass_spec_v1)
11992 return 0;
11993
11994 switch (dst_reg->type) {
11995 case PTR_TO_STACK:
11996 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
11997 dst_reg->off + dst_reg->var_off.value))
11998 return -EACCES;
11999 break;
12000 case PTR_TO_MAP_VALUE:
61df10c7 12001 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
073815b7
DB
12002 verbose(env, "R%d pointer arithmetic of map value goes out of range, "
12003 "prohibited for !root\n", dst);
12004 return -EACCES;
12005 }
12006 break;
12007 default:
12008 break;
12009 }
12010
12011 return 0;
12012}
01f810ac 12013
f1174f77 12014/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
f1174f77
EC
12015 * Caller should also handle BPF_MOV case separately.
12016 * If we return -EACCES, caller may want to try again treating pointer as a
12017 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
12018 */
12019static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
12020 struct bpf_insn *insn,
12021 const struct bpf_reg_state *ptr_reg,
12022 const struct bpf_reg_state *off_reg)
969bf05e 12023{
f4d7e40a
AS
12024 struct bpf_verifier_state *vstate = env->cur_state;
12025 struct bpf_func_state *state = vstate->frame[vstate->curframe];
12026 struct bpf_reg_state *regs = state->regs, *dst_reg;
f1174f77 12027 bool known = tnum_is_const(off_reg->var_off);
b03c9f9f
EC
12028 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
12029 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
12030 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
12031 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
3d0220f6 12032 struct bpf_sanitize_info info = {};
969bf05e 12033 u8 opcode = BPF_OP(insn->code);
24c109bb 12034 u32 dst = insn->dst_reg;
979d63d5 12035 int ret;
969bf05e 12036
f1174f77 12037 dst_reg = &regs[dst];
969bf05e 12038
6f16101e
DB
12039 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
12040 smin_val > smax_val || umin_val > umax_val) {
12041 /* Taint dst register if offset had invalid bounds derived from
12042 * e.g. dead branches.
12043 */
f54c7898 12044 __mark_reg_unknown(env, dst_reg);
6f16101e 12045 return 0;
f1174f77
EC
12046 }
12047
12048 if (BPF_CLASS(insn->code) != BPF_ALU64) {
12049 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
6c693541
YS
12050 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
12051 __mark_reg_unknown(env, dst_reg);
12052 return 0;
12053 }
12054
82abbf8d
AS
12055 verbose(env,
12056 "R%d 32-bit pointer arithmetic prohibited\n",
12057 dst);
f1174f77 12058 return -EACCES;
969bf05e
AS
12059 }
12060
c25b2ae1 12061 if (ptr_reg->type & PTR_MAYBE_NULL) {
aad2eeaf 12062 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
c25b2ae1 12063 dst, reg_type_str(env, ptr_reg->type));
f1174f77 12064 return -EACCES;
c25b2ae1
HL
12065 }
12066
12067 switch (base_type(ptr_reg->type)) {
aad2eeaf 12068 case CONST_PTR_TO_MAP:
7c696732
YS
12069 /* smin_val represents the known value */
12070 if (known && smin_val == 0 && opcode == BPF_ADD)
12071 break;
8731745e 12072 fallthrough;
aad2eeaf 12073 case PTR_TO_PACKET_END:
c64b7983 12074 case PTR_TO_SOCKET:
46f8bc92 12075 case PTR_TO_SOCK_COMMON:
655a51e5 12076 case PTR_TO_TCP_SOCK:
fada7fdc 12077 case PTR_TO_XDP_SOCK:
aad2eeaf 12078 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
c25b2ae1 12079 dst, reg_type_str(env, ptr_reg->type));
f1174f77 12080 return -EACCES;
aad2eeaf
JS
12081 default:
12082 break;
f1174f77
EC
12083 }
12084
12085 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
12086 * The id may be overwritten later if we create a new variable offset.
969bf05e 12087 */
f1174f77
EC
12088 dst_reg->type = ptr_reg->type;
12089 dst_reg->id = ptr_reg->id;
969bf05e 12090
bb7f0f98
AS
12091 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
12092 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
12093 return -EINVAL;
12094
3f50f132
JF
12095 /* pointer types do not carry 32-bit bounds at the moment. */
12096 __mark_reg32_unbounded(dst_reg);
12097
7fedb63a
DB
12098 if (sanitize_needed(opcode)) {
12099 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
3d0220f6 12100 &info, false);
a6aaece0
DB
12101 if (ret < 0)
12102 return sanitize_err(env, insn, ret, off_reg, dst_reg);
7fedb63a 12103 }
a6aaece0 12104
f1174f77
EC
12105 switch (opcode) {
12106 case BPF_ADD:
12107 /* We can take a fixed offset as long as it doesn't overflow
12108 * the s32 'off' field
969bf05e 12109 */
b03c9f9f
EC
12110 if (known && (ptr_reg->off + smin_val ==
12111 (s64)(s32)(ptr_reg->off + smin_val))) {
f1174f77 12112 /* pointer += K. Accumulate it into fixed offset */
b03c9f9f
EC
12113 dst_reg->smin_value = smin_ptr;
12114 dst_reg->smax_value = smax_ptr;
12115 dst_reg->umin_value = umin_ptr;
12116 dst_reg->umax_value = umax_ptr;
f1174f77 12117 dst_reg->var_off = ptr_reg->var_off;
b03c9f9f 12118 dst_reg->off = ptr_reg->off + smin_val;
0962590e 12119 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
12120 break;
12121 }
f1174f77
EC
12122 /* A new variable offset is created. Note that off_reg->off
12123 * == 0, since it's a scalar.
12124 * dst_reg gets the pointer type and since some positive
12125 * integer value was added to the pointer, give it a new 'id'
12126 * if it's a PTR_TO_PACKET.
12127 * this creates a new 'base' pointer, off_reg (variable) gets
12128 * added into the variable offset, and we copy the fixed offset
12129 * from ptr_reg.
969bf05e 12130 */
b03c9f9f
EC
12131 if (signed_add_overflows(smin_ptr, smin_val) ||
12132 signed_add_overflows(smax_ptr, smax_val)) {
12133 dst_reg->smin_value = S64_MIN;
12134 dst_reg->smax_value = S64_MAX;
12135 } else {
12136 dst_reg->smin_value = smin_ptr + smin_val;
12137 dst_reg->smax_value = smax_ptr + smax_val;
12138 }
12139 if (umin_ptr + umin_val < umin_ptr ||
12140 umax_ptr + umax_val < umax_ptr) {
12141 dst_reg->umin_value = 0;
12142 dst_reg->umax_value = U64_MAX;
12143 } else {
12144 dst_reg->umin_value = umin_ptr + umin_val;
12145 dst_reg->umax_value = umax_ptr + umax_val;
12146 }
f1174f77
EC
12147 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
12148 dst_reg->off = ptr_reg->off;
0962590e 12149 dst_reg->raw = ptr_reg->raw;
de8f3a83 12150 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
12151 dst_reg->id = ++env->id_gen;
12152 /* something was added to pkt_ptr, set range to zero */
22dc4a0f 12153 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
f1174f77
EC
12154 }
12155 break;
12156 case BPF_SUB:
12157 if (dst_reg == off_reg) {
12158 /* scalar -= pointer. Creates an unknown scalar */
82abbf8d
AS
12159 verbose(env, "R%d tried to subtract pointer from scalar\n",
12160 dst);
f1174f77
EC
12161 return -EACCES;
12162 }
12163 /* We don't allow subtraction from FP, because (according to
12164 * test_verifier.c test "invalid fp arithmetic", JITs might not
12165 * be able to deal with it.
969bf05e 12166 */
f1174f77 12167 if (ptr_reg->type == PTR_TO_STACK) {
82abbf8d
AS
12168 verbose(env, "R%d subtraction from stack pointer prohibited\n",
12169 dst);
f1174f77
EC
12170 return -EACCES;
12171 }
b03c9f9f
EC
12172 if (known && (ptr_reg->off - smin_val ==
12173 (s64)(s32)(ptr_reg->off - smin_val))) {
f1174f77 12174 /* pointer -= K. Subtract it from fixed offset */
b03c9f9f
EC
12175 dst_reg->smin_value = smin_ptr;
12176 dst_reg->smax_value = smax_ptr;
12177 dst_reg->umin_value = umin_ptr;
12178 dst_reg->umax_value = umax_ptr;
f1174f77
EC
12179 dst_reg->var_off = ptr_reg->var_off;
12180 dst_reg->id = ptr_reg->id;
b03c9f9f 12181 dst_reg->off = ptr_reg->off - smin_val;
0962590e 12182 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
12183 break;
12184 }
f1174f77
EC
12185 /* A new variable offset is created. If the subtrahend is known
12186 * nonnegative, then any reg->range we had before is still good.
969bf05e 12187 */
b03c9f9f
EC
12188 if (signed_sub_overflows(smin_ptr, smax_val) ||
12189 signed_sub_overflows(smax_ptr, smin_val)) {
12190 /* Overflow possible, we know nothing */
12191 dst_reg->smin_value = S64_MIN;
12192 dst_reg->smax_value = S64_MAX;
12193 } else {
12194 dst_reg->smin_value = smin_ptr - smax_val;
12195 dst_reg->smax_value = smax_ptr - smin_val;
12196 }
12197 if (umin_ptr < umax_val) {
12198 /* Overflow possible, we know nothing */
12199 dst_reg->umin_value = 0;
12200 dst_reg->umax_value = U64_MAX;
12201 } else {
12202 /* Cannot overflow (as long as bounds are consistent) */
12203 dst_reg->umin_value = umin_ptr - umax_val;
12204 dst_reg->umax_value = umax_ptr - umin_val;
12205 }
f1174f77
EC
12206 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
12207 dst_reg->off = ptr_reg->off;
0962590e 12208 dst_reg->raw = ptr_reg->raw;
de8f3a83 12209 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
12210 dst_reg->id = ++env->id_gen;
 12211 			/* a negative subtrahend may have advanced pkt_ptr, set range to zero */
b03c9f9f 12212 if (smin_val < 0)
22dc4a0f 12213 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
43188702 12214 }
f1174f77
EC
12215 break;
12216 case BPF_AND:
12217 case BPF_OR:
12218 case BPF_XOR:
82abbf8d
AS
12219 /* bitwise ops on pointers are troublesome, prohibit. */
12220 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
12221 dst, bpf_alu_string[opcode >> 4]);
f1174f77
EC
12222 return -EACCES;
12223 default:
12224 /* other operators (e.g. MUL,LSH) produce non-pointer results */
82abbf8d
AS
12225 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
12226 dst, bpf_alu_string[opcode >> 4]);
f1174f77 12227 return -EACCES;
43188702
JF
12228 }
12229
bb7f0f98
AS
12230 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
12231 return -EINVAL;
3844d153 12232 reg_bounds_sync(dst_reg);
073815b7
DB
12233 if (sanitize_check_bounds(env, insn, dst_reg) < 0)
12234 return -EACCES;
7fedb63a
DB
12235 if (sanitize_needed(opcode)) {
12236 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
3d0220f6 12237 &info, true);
7fedb63a
DB
12238 if (ret < 0)
12239 return sanitize_err(env, insn, ret, off_reg, dst_reg);
0d6303db
DB
12240 }
12241
43188702
JF
12242 return 0;
12243}
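/* Editor's sketch of the two BPF_ADD shapes handled above, with r0
 * holding a map value pointer (values illustrative):
 *
 *   r0 += 8;     // known constant that fits the s32 'off' field: folded
 *                // into dst_reg->off, pointer bounds copied unchanged
 *
 *   r1 = scalar known to lie in [0, 64];
 *   r0 += r1;    // variable part: off is kept, smin/smax and umin/umax
 *                // widen by [0, 64], var_off becomes tnum_add() of the
 *                // two, and packet pointers get a fresh id
 */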
12244
3f50f132
JF
12245static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
12246 struct bpf_reg_state *src_reg)
12247{
12248 s32 smin_val = src_reg->s32_min_value;
12249 s32 smax_val = src_reg->s32_max_value;
12250 u32 umin_val = src_reg->u32_min_value;
12251 u32 umax_val = src_reg->u32_max_value;
12252
12253 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
12254 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
12255 dst_reg->s32_min_value = S32_MIN;
12256 dst_reg->s32_max_value = S32_MAX;
12257 } else {
12258 dst_reg->s32_min_value += smin_val;
12259 dst_reg->s32_max_value += smax_val;
12260 }
12261 if (dst_reg->u32_min_value + umin_val < umin_val ||
12262 dst_reg->u32_max_value + umax_val < umax_val) {
12263 dst_reg->u32_min_value = 0;
12264 dst_reg->u32_max_value = U32_MAX;
12265 } else {
12266 dst_reg->u32_min_value += umin_val;
12267 dst_reg->u32_max_value += umax_val;
12268 }
12269}
12270
07cd2631
JF
12271static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
12272 struct bpf_reg_state *src_reg)
12273{
12274 s64 smin_val = src_reg->smin_value;
12275 s64 smax_val = src_reg->smax_value;
12276 u64 umin_val = src_reg->umin_value;
12277 u64 umax_val = src_reg->umax_value;
12278
12279 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
12280 signed_add_overflows(dst_reg->smax_value, smax_val)) {
12281 dst_reg->smin_value = S64_MIN;
12282 dst_reg->smax_value = S64_MAX;
12283 } else {
12284 dst_reg->smin_value += smin_val;
12285 dst_reg->smax_value += smax_val;
12286 }
12287 if (dst_reg->umin_value + umin_val < umin_val ||
12288 dst_reg->umax_value + umax_val < umax_val) {
12289 dst_reg->umin_value = 0;
12290 dst_reg->umax_value = U64_MAX;
12291 } else {
12292 dst_reg->umin_value += umin_val;
12293 dst_reg->umax_value += umax_val;
12294 }
3f50f132
JF
12295}
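/* Worked example (editor's sketch): dst in [10, 20] plus src in [1, 5]
 * yields [11, 25] on both the signed and unsigned tracks. If instead
 * dst->smax_value is already S64_MAX and src can be positive,
 * signed_add_overflows() fires and the signed track collapses to
 * [S64_MIN, S64_MAX] while the unsigned track may stay precise.
 */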
12296
12297static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
12298 struct bpf_reg_state *src_reg)
12299{
12300 s32 smin_val = src_reg->s32_min_value;
12301 s32 smax_val = src_reg->s32_max_value;
12302 u32 umin_val = src_reg->u32_min_value;
12303 u32 umax_val = src_reg->u32_max_value;
12304
12305 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
12306 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
12307 /* Overflow possible, we know nothing */
12308 dst_reg->s32_min_value = S32_MIN;
12309 dst_reg->s32_max_value = S32_MAX;
12310 } else {
12311 dst_reg->s32_min_value -= smax_val;
12312 dst_reg->s32_max_value -= smin_val;
12313 }
12314 if (dst_reg->u32_min_value < umax_val) {
12315 /* Overflow possible, we know nothing */
12316 dst_reg->u32_min_value = 0;
12317 dst_reg->u32_max_value = U32_MAX;
12318 } else {
12319 /* Cannot overflow (as long as bounds are consistent) */
12320 dst_reg->u32_min_value -= umax_val;
12321 dst_reg->u32_max_value -= umin_val;
12322 }
07cd2631
JF
12323}
12324
12325static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
12326 struct bpf_reg_state *src_reg)
12327{
12328 s64 smin_val = src_reg->smin_value;
12329 s64 smax_val = src_reg->smax_value;
12330 u64 umin_val = src_reg->umin_value;
12331 u64 umax_val = src_reg->umax_value;
12332
12333 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
12334 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
12335 /* Overflow possible, we know nothing */
12336 dst_reg->smin_value = S64_MIN;
12337 dst_reg->smax_value = S64_MAX;
12338 } else {
12339 dst_reg->smin_value -= smax_val;
12340 dst_reg->smax_value -= smin_val;
12341 }
12342 if (dst_reg->umin_value < umax_val) {
12343 /* Overflow possible, we know nothing */
12344 dst_reg->umin_value = 0;
12345 dst_reg->umax_value = U64_MAX;
12346 } else {
12347 /* Cannot overflow (as long as bounds are consistent) */
12348 dst_reg->umin_value -= umax_val;
12349 dst_reg->umax_value -= umin_val;
12350 }
3f50f132
JF
12351}
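/* Worked example (editor's sketch): dst in [10, 20] minus src in [1, 5]
 * yields [10 - 5, 20 - 1] = [5, 19]; the new minimum subtracts src's
 * maximum and vice versa. On the unsigned side dst->umin_value (10) >=
 * umax_val (5), so no wraparound is possible and [5, 19] holds there
 * as well.
 */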
12352
12353static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
12354 struct bpf_reg_state *src_reg)
12355{
12356 s32 smin_val = src_reg->s32_min_value;
12357 u32 umin_val = src_reg->u32_min_value;
12358 u32 umax_val = src_reg->u32_max_value;
12359
12360 if (smin_val < 0 || dst_reg->s32_min_value < 0) {
12361 /* Ain't nobody got time to multiply that sign */
12362 __mark_reg32_unbounded(dst_reg);
12363 return;
12364 }
12365 /* Both values are positive, so we can work with unsigned and
12366 * copy the result to signed (unless it exceeds S32_MAX).
12367 */
12368 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
12369 /* Potential overflow, we know nothing */
12370 __mark_reg32_unbounded(dst_reg);
12371 return;
12372 }
12373 dst_reg->u32_min_value *= umin_val;
12374 dst_reg->u32_max_value *= umax_val;
12375 if (dst_reg->u32_max_value > S32_MAX) {
12376 /* Overflow possible, we know nothing */
12377 dst_reg->s32_min_value = S32_MIN;
12378 dst_reg->s32_max_value = S32_MAX;
12379 } else {
12380 dst_reg->s32_min_value = dst_reg->u32_min_value;
12381 dst_reg->s32_max_value = dst_reg->u32_max_value;
12382 }
07cd2631
JF
12383}
12384
12385static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
12386 struct bpf_reg_state *src_reg)
12387{
12388 s64 smin_val = src_reg->smin_value;
12389 u64 umin_val = src_reg->umin_value;
12390 u64 umax_val = src_reg->umax_value;
12391
07cd2631
JF
12392 if (smin_val < 0 || dst_reg->smin_value < 0) {
12393 /* Ain't nobody got time to multiply that sign */
3f50f132 12394 __mark_reg64_unbounded(dst_reg);
07cd2631
JF
12395 return;
12396 }
12397 /* Both values are positive, so we can work with unsigned and
12398 * copy the result to signed (unless it exceeds S64_MAX).
12399 */
12400 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
12401 /* Potential overflow, we know nothing */
3f50f132 12402 __mark_reg64_unbounded(dst_reg);
07cd2631
JF
12403 return;
12404 }
12405 dst_reg->umin_value *= umin_val;
12406 dst_reg->umax_value *= umax_val;
12407 if (dst_reg->umax_value > S64_MAX) {
12408 /* Overflow possible, we know nothing */
12409 dst_reg->smin_value = S64_MIN;
12410 dst_reg->smax_value = S64_MAX;
12411 } else {
12412 dst_reg->smin_value = dst_reg->umin_value;
12413 dst_reg->smax_value = dst_reg->umax_value;
12414 }
12415}
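/* Editor's note on the guard above: once both multiplicands are capped
 * at U32_MAX, the product is at most (2^32 - 1)^2 < 2^64, so the u64
 * multiplications cannot wrap; the only remaining question is whether
 * umax_value exceeds S64_MAX, which decides if the result may be copied
 * to the signed track. The 32-bit variant caps at U16_MAX for the same
 * reason.
 */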
12416
3f50f132
JF
12417static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
12418 struct bpf_reg_state *src_reg)
12419{
12420 bool src_known = tnum_subreg_is_const(src_reg->var_off);
12421 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
12422 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
12423 s32 smin_val = src_reg->s32_min_value;
12424 u32 umax_val = src_reg->u32_max_value;
12425
049c4e13
DB
12426 if (src_known && dst_known) {
12427 __mark_reg32_known(dst_reg, var32_off.value);
3f50f132 12428 return;
049c4e13 12429 }
3f50f132
JF
12430
12431 /* We get our minimum from the var_off, since that's inherently
12432 * bitwise. Our maximum is the minimum of the operands' maxima.
12433 */
12434 dst_reg->u32_min_value = var32_off.value;
12435 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
12436 if (dst_reg->s32_min_value < 0 || smin_val < 0) {
12437 /* Lose signed bounds when ANDing negative numbers,
12438 * ain't nobody got time for that.
12439 */
12440 dst_reg->s32_min_value = S32_MIN;
12441 dst_reg->s32_max_value = S32_MAX;
12442 } else {
12443 /* ANDing two positives gives a positive, so safe to
12444 * cast result into s64.
12445 */
12446 dst_reg->s32_min_value = dst_reg->u32_min_value;
12447 dst_reg->s32_max_value = dst_reg->u32_max_value;
12448 }
3f50f132
JF
12449}
12450
07cd2631
JF
12451static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
12452 struct bpf_reg_state *src_reg)
12453{
3f50f132
JF
12454 bool src_known = tnum_is_const(src_reg->var_off);
12455 bool dst_known = tnum_is_const(dst_reg->var_off);
07cd2631
JF
12456 s64 smin_val = src_reg->smin_value;
12457 u64 umax_val = src_reg->umax_value;
12458
3f50f132 12459 if (src_known && dst_known) {
4fbb38a3 12460 __mark_reg_known(dst_reg, dst_reg->var_off.value);
3f50f132
JF
12461 return;
12462 }
12463
07cd2631
JF
12464 /* We get our minimum from the var_off, since that's inherently
12465 * bitwise. Our maximum is the minimum of the operands' maxima.
12466 */
07cd2631
JF
12467 dst_reg->umin_value = dst_reg->var_off.value;
12468 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
12469 if (dst_reg->smin_value < 0 || smin_val < 0) {
12470 /* Lose signed bounds when ANDing negative numbers,
12471 * ain't nobody got time for that.
12472 */
12473 dst_reg->smin_value = S64_MIN;
12474 dst_reg->smax_value = S64_MAX;
12475 } else {
12476 /* ANDing two positives gives a positive, so safe to
12477 * cast result into s64.
12478 */
12479 dst_reg->smin_value = dst_reg->umin_value;
12480 dst_reg->smax_value = dst_reg->umax_value;
12481 }
12482 /* We may learn something more from the var_off */
12483 __update_reg_bounds(dst_reg);
12484}
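/* Worked example (editor's sketch): for "dst &= src" with src known to
 * be 0xff and dst fully unknown, the caller's tnum_and() leaves
 * var_off = (value 0, mask 0xff), so umin becomes 0 and umax becomes
 * min(U64_MAX, 0xff) = 0xff. dst's smin is still negative, so the
 * signed track is dropped here, but __update_reg_bounds() recovers
 * [0, 255] from var_off because the sign bit is known to be clear.
 */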
12485
3f50f132
JF
12486static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
12487 struct bpf_reg_state *src_reg)
12488{
12489 bool src_known = tnum_subreg_is_const(src_reg->var_off);
12490 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
12491 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
5b9fbeb7
DB
12492 s32 smin_val = src_reg->s32_min_value;
12493 u32 umin_val = src_reg->u32_min_value;
3f50f132 12494
049c4e13
DB
12495 if (src_known && dst_known) {
12496 __mark_reg32_known(dst_reg, var32_off.value);
3f50f132 12497 return;
049c4e13 12498 }
3f50f132
JF
12499
12500 /* We get our maximum from the var_off, and our minimum is the
12501 * maximum of the operands' minima
12502 */
12503 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
12504 dst_reg->u32_max_value = var32_off.value | var32_off.mask;
12505 if (dst_reg->s32_min_value < 0 || smin_val < 0) {
12506 /* Lose signed bounds when ORing negative numbers,
12507 * ain't nobody got time for that.
12508 */
12509 dst_reg->s32_min_value = S32_MIN;
12510 dst_reg->s32_max_value = S32_MAX;
12511 } else {
12512 /* ORing two positives gives a positive, so safe to
12513 * cast result into s64.
12514 */
5b9fbeb7
DB
12515 dst_reg->s32_min_value = dst_reg->u32_min_value;
12516 dst_reg->s32_max_value = dst_reg->u32_max_value;
3f50f132
JF
12517 }
12518}
12519
07cd2631
JF
12520static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
12521 struct bpf_reg_state *src_reg)
12522{
3f50f132
JF
12523 bool src_known = tnum_is_const(src_reg->var_off);
12524 bool dst_known = tnum_is_const(dst_reg->var_off);
07cd2631
JF
12525 s64 smin_val = src_reg->smin_value;
12526 u64 umin_val = src_reg->umin_value;
12527
3f50f132 12528 if (src_known && dst_known) {
4fbb38a3 12529 __mark_reg_known(dst_reg, dst_reg->var_off.value);
3f50f132
JF
12530 return;
12531 }
12532
07cd2631
JF
12533 /* We get our maximum from the var_off, and our minimum is the
12534 * maximum of the operands' minima
12535 */
07cd2631
JF
12536 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
12537 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
12538 if (dst_reg->smin_value < 0 || smin_val < 0) {
12539 /* Lose signed bounds when ORing negative numbers,
12540 * ain't nobody got time for that.
12541 */
12542 dst_reg->smin_value = S64_MIN;
12543 dst_reg->smax_value = S64_MAX;
12544 } else {
12545 /* ORing two positives gives a positive, so safe to
12546 * cast result into s64.
12547 */
12548 dst_reg->smin_value = dst_reg->umin_value;
12549 dst_reg->smax_value = dst_reg->umax_value;
12550 }
12551 /* We may learn something more from the var_off */
12552 __update_reg_bounds(dst_reg);
12553}
12554
2921c90d
YS
12555static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
12556 struct bpf_reg_state *src_reg)
12557{
12558 bool src_known = tnum_subreg_is_const(src_reg->var_off);
12559 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
12560 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
12561 s32 smin_val = src_reg->s32_min_value;
12562
049c4e13
DB
12563 if (src_known && dst_known) {
12564 __mark_reg32_known(dst_reg, var32_off.value);
2921c90d 12565 return;
049c4e13 12566 }
2921c90d
YS
12567
12568 /* We get both minimum and maximum from the var32_off. */
12569 dst_reg->u32_min_value = var32_off.value;
12570 dst_reg->u32_max_value = var32_off.value | var32_off.mask;
12571
12572 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
12573 /* XORing two positive sign numbers gives a positive,
12574 * so safe to cast u32 result into s32.
12575 */
12576 dst_reg->s32_min_value = dst_reg->u32_min_value;
12577 dst_reg->s32_max_value = dst_reg->u32_max_value;
12578 } else {
12579 dst_reg->s32_min_value = S32_MIN;
12580 dst_reg->s32_max_value = S32_MAX;
12581 }
12582}
12583
12584static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
12585 struct bpf_reg_state *src_reg)
12586{
12587 bool src_known = tnum_is_const(src_reg->var_off);
12588 bool dst_known = tnum_is_const(dst_reg->var_off);
12589 s64 smin_val = src_reg->smin_value;
12590
12591 if (src_known && dst_known) {
12592 /* dst_reg->var_off.value has been updated earlier */
12593 __mark_reg_known(dst_reg, dst_reg->var_off.value);
12594 return;
12595 }
12596
12597 /* We get both minimum and maximum from the var_off. */
12598 dst_reg->umin_value = dst_reg->var_off.value;
12599 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
12600
12601 if (dst_reg->smin_value >= 0 && smin_val >= 0) {
12602 /* XORing two non-negative numbers gives a non-negative
12603 * result, so it is safe to cast the u64 result into s64.
12604 */
12605 dst_reg->smin_value = dst_reg->umin_value;
12606 dst_reg->smax_value = dst_reg->umax_value;
12607 } else {
12608 dst_reg->smin_value = S64_MIN;
12609 dst_reg->smax_value = S64_MAX;
12610 }
12611
12612 __update_reg_bounds(dst_reg);
12613}
12614
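/* A worked XOR example with illustrative values: assume dst_reg has
 * var_off = {value = 0x4, mask = 0x3} (one of 0x4..0x7) and src_reg is
 * the constant 0x1. tnum_xor() (already applied by the caller) gives
 * {value = 0x4, mask = 0x3}, hence umin_value = 0x4 and
 * umax_value = 0x4 | 0x3 = 0x7, which indeed covers 0x4^1..0x7^1.
 * Both sides are non-negative, so the signed bounds follow the
 * unsigned ones.
 */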
3f50f132
JF
12615static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
12616 u64 umin_val, u64 umax_val)
07cd2631 12617{
07cd2631
JF
12618 /* We lose all sign bit information (except what we can pick
12619 * up from var_off)
12620 */
3f50f132
JF
12621 dst_reg->s32_min_value = S32_MIN;
12622 dst_reg->s32_max_value = S32_MAX;
12623 /* If we might shift our top bit out, then we know nothing */
12624 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
12625 dst_reg->u32_min_value = 0;
12626 dst_reg->u32_max_value = U32_MAX;
12627 } else {
12628 dst_reg->u32_min_value <<= umin_val;
12629 dst_reg->u32_max_value <<= umax_val;
12630 }
12631}
12632
12633static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
12634 struct bpf_reg_state *src_reg)
12635{
12636 u32 umax_val = src_reg->u32_max_value;
12637 u32 umin_val = src_reg->u32_min_value;
12638 /* u32 alu operation will zext upper bits */
12639 struct tnum subreg = tnum_subreg(dst_reg->var_off);
12640
12641 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
12642 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
12643 /* Not required, but to be safe mark reg64 bounds as unknown so
12644 * that we are forced to pick them up from the tnum and zext later;
12645 * if some path skips this step we are still safe.
12646 */
12647 __mark_reg64_unbounded(dst_reg);
12648 __update_reg32_bounds(dst_reg);
12649}
12650
12651static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
12652 u64 umin_val, u64 umax_val)
12653{
12654 /* Special case <<32 because it is a common compiler pattern to sign
12655 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
12656 * positive we know this shift will also be positive so we can track
12657 * bounds correctly. Otherwise we lose all sign bit information except
12658 * what we can pick up from var_off. Perhaps we can generalize this
12659 * later to shifts of any length.
12660 */
12661 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
12662 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
12663 else
12664 dst_reg->smax_value = S64_MAX;
12665
12666 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
12667 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
12668 else
12669 dst_reg->smin_value = S64_MIN;
12670
07cd2631
JF
12671 /* If we might shift our top bit out, then we know nothing */
12672 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
12673 dst_reg->umin_value = 0;
12674 dst_reg->umax_value = U64_MAX;
12675 } else {
12676 dst_reg->umin_value <<= umin_val;
12677 dst_reg->umax_value <<= umax_val;
12678 }
3f50f132
JF
12679}
12680
12681static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
12682 struct bpf_reg_state *src_reg)
12683{
12684 u64 umax_val = src_reg->umax_value;
12685 u64 umin_val = src_reg->umin_value;
12686
12687 /* scalar64 calc uses 32bit unshifted bounds so must be called first */
12688 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
12689 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
12690
07cd2631
JF
12691 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
12692 /* We may learn something more from the var_off */
12693 __update_reg_bounds(dst_reg);
12694}
12695
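/* The special case in __scalar64_min_max_lsh() above matches a common
 * sign-extension idiom; an illustrative instruction sequence:
 *
 *   r1 <<= 32
 *   r1 s>>= 32
 *
 * If before the shifts w1 (the subreg) is known to be in [0, 100],
 * then after "r1 <<= 32" we keep smax_value = 100 << 32 and
 * smin_value = 0 << 32 = 0, so the following arithmetic right shift
 * lands back on the precise range [0, 100] instead of
 * [S64_MIN, S64_MAX].
 */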
3f50f132
JF
12696static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
12697 struct bpf_reg_state *src_reg)
12698{
12699 struct tnum subreg = tnum_subreg(dst_reg->var_off);
12700 u32 umax_val = src_reg->u32_max_value;
12701 u32 umin_val = src_reg->u32_min_value;
12702
12703 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
12704 * be negative, then either:
12705 * 1) src_reg might be zero, so the sign bit of the result is
12706 * unknown, so we lose our signed bounds
12707 * 2) it's known negative, thus the unsigned bounds capture the
12708 * signed bounds
12709 * 3) the signed bounds cross zero, so they tell us nothing
12710 * about the result
12711 * If the value in dst_reg is known nonnegative, then again the
18b24d78 12712 * unsigned bounds capture the signed bounds.
12713 * Thus, in all cases it suffices to blow away our signed bounds
12714 * and rely on inferring new ones from the unsigned bounds and
12715 * var_off of the result.
12716 */
12717 dst_reg->s32_min_value = S32_MIN;
12718 dst_reg->s32_max_value = S32_MAX;
12719
12720 dst_reg->var_off = tnum_rshift(subreg, umin_val);
12721 dst_reg->u32_min_value >>= umax_val;
12722 dst_reg->u32_max_value >>= umin_val;
12723
12724 __mark_reg64_unbounded(dst_reg);
12725 __update_reg32_bounds(dst_reg);
12726}
12727
07cd2631
JF
12728static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
12729 struct bpf_reg_state *src_reg)
12730{
12731 u64 umax_val = src_reg->umax_value;
12732 u64 umin_val = src_reg->umin_value;
12733
12734 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
12735 * be negative, then either:
12736 * 1) src_reg might be zero, so the sign bit of the result is
12737 * unknown, so we lose our signed bounds
12738 * 2) it's known negative, thus the unsigned bounds capture the
12739 * signed bounds
12740 * 3) the signed bounds cross zero, so they tell us nothing
12741 * about the result
12742 * If the value in dst_reg is known nonnegative, then again the
18b24d78 12743 * unsigned bounds capture the signed bounds.
12744 * Thus, in all cases it suffices to blow away our signed bounds
12745 * and rely on inferring new ones from the unsigned bounds and
12746 * var_off of the result.
12747 */
12748 dst_reg->smin_value = S64_MIN;
12749 dst_reg->smax_value = S64_MAX;
12750 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
12751 dst_reg->umin_value >>= umax_val;
12752 dst_reg->umax_value >>= umin_val;
3f50f132
JF
12753
12754 /* It's not easy to operate on alu32 bounds here because it depends
12755 * on bits being shifted in. Take the easy way out and mark unbounded
12756 * so we can recalculate later from tnum.
12757 */
12758 __mark_reg32_unbounded(dst_reg);
07cd2631
JF
12759 __update_reg_bounds(dst_reg);
12760}
12761
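/* A small worked example for the unsigned bounds above, with
 * illustrative values: if dst_reg is in [0x100, 0x1ff] and the shift
 * amount is in [4, 8], then after BPF_RSH
 *   umin_value = 0x100 >> 8 = 0x1   (smallest value, largest shift)
 *   umax_value = 0x1ff >> 4 = 0x1f  (largest value, smallest shift)
 */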
3f50f132
JF
12762static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
12763 struct bpf_reg_state *src_reg)
07cd2631 12764{
3f50f132 12765 u64 umin_val = src_reg->u32_min_value;
07cd2631
JF
12766
12767 /* Upon reaching here, src_known is true and
12768 * umax_val is equal to umin_val.
12769 */
3f50f132
JF
12770 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
12771 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
07cd2631 12772
3f50f132
JF
12773 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
12774
12775 /* blow away the dst_reg umin_value/umax_value and rely on
12776 * dst_reg var_off to refine the result.
12777 */
12778 dst_reg->u32_min_value = 0;
12779 dst_reg->u32_max_value = U32_MAX;
12780
12781 __mark_reg64_unbounded(dst_reg);
12782 __update_reg32_bounds(dst_reg);
12783}
12784
12785static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
12786 struct bpf_reg_state *src_reg)
12787{
12788 u64 umin_val = src_reg->umin_value;
12789
12790 /* Upon reaching here, src_known is true and umax_val is equal
12791 * to umin_val.
12792 */
12793 dst_reg->smin_value >>= umin_val;
12794 dst_reg->smax_value >>= umin_val;
12795
12796 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
07cd2631
JF
12797
12798 /* blow away the dst_reg umin_value/umax_value and rely on
12799 * dst_reg var_off to refine the result.
12800 */
12801 dst_reg->umin_value = 0;
12802 dst_reg->umax_value = U64_MAX;
3f50f132
JF
12803
12804 /* It's not easy to operate on alu32 bounds here because it depends
12805 * on bits being shifted in from the upper 32 bits. Take the easy
12806 * way out and mark unbounded so we can recalculate later from tnum.
12807 */
12808 __mark_reg32_unbounded(dst_reg);
07cd2631
JF
12809 __update_reg_bounds(dst_reg);
12810}
12811
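/* An illustrative BPF_ARSH example: src_known is guaranteed by the
 * caller, so the shift amount is a constant, say 1. If dst_reg's
 * signed bounds are [-8, -2], then after the arithmetic shift
 * smin_value = -8 >> 1 = -4 and smax_value = -2 >> 1 = -1; the sign
 * bit is replicated, so the signed bounds survive, while the unsigned
 * bounds are blown away and recomputed from the tnum.
 */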
468f6eaf
JH
12812/* WARNING: This function does calculations on 64-bit values, but the actual
12813 * execution may occur on 32-bit values. Therefore, things like bitshifts
12814 * need extra checks in the 32-bit case.
12815 */
f1174f77
EC
12816static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
12817 struct bpf_insn *insn,
12818 struct bpf_reg_state *dst_reg,
12819 struct bpf_reg_state src_reg)
969bf05e 12820{
638f5b90 12821 struct bpf_reg_state *regs = cur_regs(env);
48461135 12822 u8 opcode = BPF_OP(insn->code);
b0b3fb67 12823 bool src_known;
b03c9f9f
EC
12824 s64 smin_val, smax_val;
12825 u64 umin_val, umax_val;
3f50f132
JF
12826 s32 s32_min_val, s32_max_val;
12827 u32 u32_min_val, u32_max_val;
468f6eaf 12828 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
3f50f132 12829 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
a6aaece0 12830 int ret;
b799207e 12831
b03c9f9f
EC
12832 smin_val = src_reg.smin_value;
12833 smax_val = src_reg.smax_value;
12834 umin_val = src_reg.umin_value;
12835 umax_val = src_reg.umax_value;
f23cc643 12836
3f50f132
JF
12837 s32_min_val = src_reg.s32_min_value;
12838 s32_max_val = src_reg.s32_max_value;
12839 u32_min_val = src_reg.u32_min_value;
12840 u32_max_val = src_reg.u32_max_value;
12841
12842 if (alu32) {
12843 src_known = tnum_subreg_is_const(src_reg.var_off);
3f50f132
JF
12844 if ((src_known &&
12845 (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
12846 s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
12847 /* Taint dst register if offset had invalid bounds
12848 * derived from e.g. dead branches.
12849 */
12850 __mark_reg_unknown(env, dst_reg);
12851 return 0;
12852 }
12853 } else {
12854 src_known = tnum_is_const(src_reg.var_off);
3f50f132
JF
12855 if ((src_known &&
12856 (smin_val != smax_val || umin_val != umax_val)) ||
12857 smin_val > smax_val || umin_val > umax_val) {
12858 /* Taint dst register if offset had invalid bounds
12859 * derived from e.g. dead branches.
12860 */
12861 __mark_reg_unknown(env, dst_reg);
12862 return 0;
12863 }
6f16101e
DB
12864 }
12865
bb7f0f98
AS
12866 if (!src_known &&
12867 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
f54c7898 12868 __mark_reg_unknown(env, dst_reg);
bb7f0f98
AS
12869 return 0;
12870 }
12871
f5288193
DB
12872 if (sanitize_needed(opcode)) {
12873 ret = sanitize_val_alu(env, insn);
12874 if (ret < 0)
12875 return sanitize_err(env, insn, ret, NULL, NULL);
12876 }
12877
3f50f132
JF
12878 /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
12879 * There are two classes of instructions: for the first class we track
12880 * both alu32 and alu64 sign/unsigned bounds independently; this gives
12881 * the greatest amount of precision when alu operations are mixed with
12882 * jmp32 operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL,
12883 * BPF_AND, BPF_OR and BPF_XOR. This is possible because these ops have
12884 * fairly easy to understand and calculate behavior in both 32-bit and
12885 * 64-bit alu ops. See the alu32 verifier tests for examples. The second
12886 * class of operations, BPF_LSH, BPF_RSH, and BPF_ARSH, is not so easy
12887 * with regard to tracking sign/unsigned bounds because the bits may
12888 * cross subreg boundaries in the alu64 case. When this happens we mark
12889 * the reg unbounded in the subreg bound space and use the resulting
12890 * tnum to calculate an approximation of the sign/unsigned bounds.
12891 */
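/* For instance (illustrative): with alu64 BPF_LSH "r1 <<= 4", bits from
 * the low subreg move into the upper half, so no sane u32_min/u32_max
 * can be maintained directly; we instead mark the 32-bit bounds
 * unbounded and rederive them from the shifted tnum.
 */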
48461135
JB
12892 switch (opcode) {
12893 case BPF_ADD:
3f50f132 12894 scalar32_min_max_add(dst_reg, &src_reg);
07cd2631 12895 scalar_min_max_add(dst_reg, &src_reg);
3f50f132 12896 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
48461135
JB
12897 break;
12898 case BPF_SUB:
3f50f132 12899 scalar32_min_max_sub(dst_reg, &src_reg);
07cd2631 12900 scalar_min_max_sub(dst_reg, &src_reg);
3f50f132 12901 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
48461135
JB
12902 break;
12903 case BPF_MUL:
3f50f132
JF
12904 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
12905 scalar32_min_max_mul(dst_reg, &src_reg);
07cd2631 12906 scalar_min_max_mul(dst_reg, &src_reg);
48461135
JB
12907 break;
12908 case BPF_AND:
3f50f132
JF
12909 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
12910 scalar32_min_max_and(dst_reg, &src_reg);
07cd2631 12911 scalar_min_max_and(dst_reg, &src_reg);
f1174f77
EC
12912 break;
12913 case BPF_OR:
3f50f132
JF
12914 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
12915 scalar32_min_max_or(dst_reg, &src_reg);
07cd2631 12916 scalar_min_max_or(dst_reg, &src_reg);
48461135 12917 break;
2921c90d
YS
12918 case BPF_XOR:
12919 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
12920 scalar32_min_max_xor(dst_reg, &src_reg);
12921 scalar_min_max_xor(dst_reg, &src_reg);
12922 break;
48461135 12923 case BPF_LSH:
468f6eaf
JH
12924 if (umax_val >= insn_bitness) {
12925 /* Shifts greater than 31 or 63 are undefined.
12926 * This includes shifts by a negative number.
b03c9f9f 12927 */
61bd5218 12928 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
12929 break;
12930 }
3f50f132
JF
12931 if (alu32)
12932 scalar32_min_max_lsh(dst_reg, &src_reg);
12933 else
12934 scalar_min_max_lsh(dst_reg, &src_reg);
48461135
JB
12935 break;
12936 case BPF_RSH:
468f6eaf
JH
12937 if (umax_val >= insn_bitness) {
12938 /* Shifts greater than 31 or 63 are undefined.
12939 * This includes shifts by a negative number.
b03c9f9f 12940 */
61bd5218 12941 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
12942 break;
12943 }
3f50f132
JF
12944 if (alu32)
12945 scalar32_min_max_rsh(dst_reg, &src_reg);
12946 else
12947 scalar_min_max_rsh(dst_reg, &src_reg);
48461135 12948 break;
9cbe1f5a
YS
12949 case BPF_ARSH:
12950 if (umax_val >= insn_bitness) {
12951 /* Shifts greater than 31 or 63 are undefined.
12952 * This includes shifts by a negative number.
12953 */
12954 mark_reg_unknown(env, regs, insn->dst_reg);
12955 break;
12956 }
3f50f132
JF
12957 if (alu32)
12958 scalar32_min_max_arsh(dst_reg, &src_reg);
12959 else
12960 scalar_min_max_arsh(dst_reg, &src_reg);
9cbe1f5a 12961 break;
48461135 12962 default:
61bd5218 12963 mark_reg_unknown(env, regs, insn->dst_reg);
48461135
JB
12964 break;
12965 }
12966
3f50f132
JF
12967 /* ALU32 ops are zero extended into 64bit register */
12968 if (alu32)
12969 zext_32_to_64(dst_reg);
3844d153 12970 reg_bounds_sync(dst_reg);
f1174f77
EC
12971 return 0;
12972}
12973
12974/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
12975 * and var_off.
12976 */
12977static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
12978 struct bpf_insn *insn)
12979{
f4d7e40a
AS
12980 struct bpf_verifier_state *vstate = env->cur_state;
12981 struct bpf_func_state *state = vstate->frame[vstate->curframe];
12982 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
f1174f77
EC
12983 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
12984 u8 opcode = BPF_OP(insn->code);
b5dc0163 12985 int err;
f1174f77
EC
12986
12987 dst_reg = &regs[insn->dst_reg];
f1174f77
EC
12988 src_reg = NULL;
12989 if (dst_reg->type != SCALAR_VALUE)
12990 ptr_reg = dst_reg;
75748837
AS
12991 else
12992 /* Make sure ID is cleared otherwise dst_reg min/max could be
12993 * incorrectly propagated into other registers by find_equal_scalars()
12994 */
12995 dst_reg->id = 0;
f1174f77
EC
12996 if (BPF_SRC(insn->code) == BPF_X) {
12997 src_reg = &regs[insn->src_reg];
f1174f77
EC
12998 if (src_reg->type != SCALAR_VALUE) {
12999 if (dst_reg->type != SCALAR_VALUE) {
13000 /* Combining two pointers by any ALU op yields
13001 * an arbitrary scalar. Disallow all math except
13002 * pointer subtraction
f1174f77 13003 */
dd066823 13004 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
82abbf8d
AS
13005 mark_reg_unknown(env, regs, insn->dst_reg);
13006 return 0;
f1174f77 13007 }
82abbf8d
AS
13008 verbose(env, "R%d pointer %s pointer prohibited\n",
13009 insn->dst_reg,
13010 bpf_alu_string[opcode >> 4]);
13011 return -EACCES;
f1174f77
EC
13012 } else {
13013 /* scalar += pointer
13014 * This is legal, but we have to reverse our
13015 * src/dest handling in computing the range
13016 */
b5dc0163
AS
13017 err = mark_chain_precision(env, insn->dst_reg);
13018 if (err)
13019 return err;
82abbf8d
AS
13020 return adjust_ptr_min_max_vals(env, insn,
13021 src_reg, dst_reg);
f1174f77
EC
13022 }
13023 } else if (ptr_reg) {
13024 /* pointer += scalar */
b5dc0163
AS
13025 err = mark_chain_precision(env, insn->src_reg);
13026 if (err)
13027 return err;
82abbf8d
AS
13028 return adjust_ptr_min_max_vals(env, insn,
13029 dst_reg, src_reg);
a3b666bf
AN
13030 } else if (dst_reg->precise) {
13031 /* if dst_reg is precise, src_reg should be precise as well */
13032 err = mark_chain_precision(env, insn->src_reg);
13033 if (err)
13034 return err;
f1174f77
EC
13035 }
13036 } else {
13037 /* Pretend the src is a reg with a known value, since we only
13038 * need to be able to read from this state.
13039 */
13040 off_reg.type = SCALAR_VALUE;
b03c9f9f 13041 __mark_reg_known(&off_reg, insn->imm);
f1174f77 13042 src_reg = &off_reg;
82abbf8d
AS
13043 if (ptr_reg) /* pointer += K */
13044 return adjust_ptr_min_max_vals(env, insn,
13045 ptr_reg, src_reg);
f1174f77
EC
13046 }
13047
13048 /* Got here implies adding two SCALAR_VALUEs */
13049 if (WARN_ON_ONCE(ptr_reg)) {
0f55f9ed 13050 print_verifier_state(env, state, true);
61bd5218 13051 verbose(env, "verifier internal error: unexpected ptr_reg\n");
f1174f77
EC
13052 return -EINVAL;
13053 }
13054 if (WARN_ON(!src_reg)) {
0f55f9ed 13055 print_verifier_state(env, state, true);
61bd5218 13056 verbose(env, "verifier internal error: no src_reg\n");
f1174f77
EC
13057 return -EINVAL;
13058 }
13059 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
48461135
JB
13060}
13061
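/* Illustrative dispatch for the function above: for "r1 += r2" with r1
 * a PTR_TO_MAP_VALUE and r2 a SCALAR_VALUE, dst_reg is the pointer, so
 * we take the "pointer += scalar" path into adjust_ptr_min_max_vals();
 * for "r2 += r1" with the same types the operands arrive swapped and
 * the "scalar += pointer" path reverses src/dst before doing the same.
 */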
17a52670 13062/* check validity of 32-bit and 64-bit arithmetic operations */
58e2af8b 13063static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 13064{
638f5b90 13065 struct bpf_reg_state *regs = cur_regs(env);
17a52670
AS
13066 u8 opcode = BPF_OP(insn->code);
13067 int err;
13068
13069 if (opcode == BPF_END || opcode == BPF_NEG) {
13070 if (opcode == BPF_NEG) {
395e942d 13071 if (BPF_SRC(insn->code) != BPF_K ||
17a52670
AS
13072 insn->src_reg != BPF_REG_0 ||
13073 insn->off != 0 || insn->imm != 0) {
61bd5218 13074 verbose(env, "BPF_NEG uses reserved fields\n");
17a52670
AS
13075 return -EINVAL;
13076 }
13077 } else {
13078 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
e67b8a68 13079 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
13080 (BPF_CLASS(insn->code) == BPF_ALU64 &&
13081 BPF_SRC(insn->code) != BPF_TO_LE)) {
61bd5218 13082 verbose(env, "BPF_END uses reserved fields\n");
17a52670
AS
13083 return -EINVAL;
13084 }
13085 }
13086
13087 /* check src operand */
dc503a8a 13088 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
13089 if (err)
13090 return err;
13091
1be7f75d 13092 if (is_pointer_value(env, insn->dst_reg)) {
61bd5218 13093 verbose(env, "R%d pointer arithmetic prohibited\n",
1be7f75d
AS
13094 insn->dst_reg);
13095 return -EACCES;
13096 }
13097
17a52670 13098 /* check dest operand */
dc503a8a 13099 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
13100 if (err)
13101 return err;
13102
13103 } else if (opcode == BPF_MOV) {
13104
13105 if (BPF_SRC(insn->code) == BPF_X) {
8100928c 13106 if (insn->imm != 0) {
61bd5218 13107 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
13108 return -EINVAL;
13109 }
13110
8100928c
YS
13111 if (BPF_CLASS(insn->code) == BPF_ALU) {
13112 if (insn->off != 0 && insn->off != 8 && insn->off != 16) {
13113 verbose(env, "BPF_MOV uses reserved fields\n");
13114 return -EINVAL;
13115 }
13116 } else {
13117 if (insn->off != 0 && insn->off != 8 && insn->off != 16 &&
13118 insn->off != 32) {
13119 verbose(env, "BPF_MOV uses reserved fields\n");
13120 return -EINVAL;
13121 }
13122 }
13123
17a52670 13124 /* check src operand */
dc503a8a 13125 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
13126 if (err)
13127 return err;
13128 } else {
13129 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 13130 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
13131 return -EINVAL;
13132 }
13133 }
13134
fbeb1603
AF
13135 /* check dest operand, mark as required later */
13136 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
13137 if (err)
13138 return err;
13139
13140 if (BPF_SRC(insn->code) == BPF_X) {
e434b8cd
JW
13141 struct bpf_reg_state *src_reg = regs + insn->src_reg;
13142 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
1ffc85d9
EZ
13143 bool need_id = src_reg->type == SCALAR_VALUE && !src_reg->id &&
13144 !tnum_is_const(src_reg->var_off);
e434b8cd 13145
17a52670 13146 if (BPF_CLASS(insn->code) == BPF_ALU64) {
8100928c
YS
13147 if (insn->off == 0) {
13148 /* case: R1 = R2
13149 * copy register state to dest reg
75748837 13150 */
8100928c
YS
13151 if (need_id)
13152 /* Assign src and dst registers the same ID
13153 * that will be used by find_equal_scalars()
13154 * to propagate min/max range.
13155 */
13156 src_reg->id = ++env->id_gen;
13157 copy_register_state(dst_reg, src_reg);
13158 dst_reg->live |= REG_LIVE_WRITTEN;
13159 dst_reg->subreg_def = DEF_NOT_SUBREG;
13160 } else {
13161 /* case: R1 = (s8, s16, s32)R2 */
db2baf82
YS
13162 if (is_pointer_value(env, insn->src_reg)) {
13163 verbose(env,
13164 "R%d sign-extension part of pointer\n",
13165 insn->src_reg);
13166 return -EACCES;
13167 } else if (src_reg->type == SCALAR_VALUE) {
13168 bool no_sext;
13169
13170 no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
13171 if (no_sext && need_id)
13172 src_reg->id = ++env->id_gen;
13173 copy_register_state(dst_reg, src_reg);
13174 if (!no_sext)
13175 dst_reg->id = 0;
13176 coerce_reg_to_size_sx(dst_reg, insn->off >> 3);
13177 dst_reg->live |= REG_LIVE_WRITTEN;
13178 dst_reg->subreg_def = DEF_NOT_SUBREG;
13179 } else {
13180 mark_reg_unknown(env, regs, insn->dst_reg);
13181 }
8100928c 13182 }
17a52670 13183 } else {
f1174f77 13184 /* R1 = (u32) R2 */
1be7f75d 13185 if (is_pointer_value(env, insn->src_reg)) {
61bd5218
JK
13186 verbose(env,
13187 "R%d partial copy of pointer\n",
13188 insn->src_reg);
13189 return -EACCES;
e434b8cd 13190 } else if (src_reg->type == SCALAR_VALUE) {
8100928c
YS
13191 if (insn->off == 0) {
13192 bool is_src_reg_u32 = src_reg->umax_value <= U32_MAX;
13193
13194 if (is_src_reg_u32 && need_id)
13195 src_reg->id = ++env->id_gen;
13196 copy_register_state(dst_reg, src_reg);
13197 /* Make sure ID is cleared if src_reg is not in u32
13198 * range otherwise dst_reg min/max could be incorrectly
13199 * propagated into src_reg by find_equal_scalars()
13200 */
13201 if (!is_src_reg_u32)
13202 dst_reg->id = 0;
13203 dst_reg->live |= REG_LIVE_WRITTEN;
13204 dst_reg->subreg_def = env->insn_idx + 1;
13205 } else {
13206 /* case: W1 = (s8, s16)W2 */
13207 bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
13208
13209 if (no_sext && need_id)
13210 src_reg->id = ++env->id_gen;
13211 copy_register_state(dst_reg, src_reg);
13212 if (!no_sext)
13213 dst_reg->id = 0;
13214 dst_reg->live |= REG_LIVE_WRITTEN;
13215 dst_reg->subreg_def = env->insn_idx + 1;
13216 coerce_subreg_to_size_sx(dst_reg, insn->off >> 3);
13217 }
e434b8cd
JW
13218 } else {
13219 mark_reg_unknown(env, regs,
13220 insn->dst_reg);
1be7f75d 13221 }
3f50f132 13222 zext_32_to_64(dst_reg);
3844d153 13223 reg_bounds_sync(dst_reg);
17a52670
AS
13224 }
13225 } else {
13226 /* case: R = imm
13227 * remember the value we stored into this reg
13228 */
fbeb1603
AF
13229 /* clear any state __mark_reg_known doesn't set */
13230 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77 13231 regs[insn->dst_reg].type = SCALAR_VALUE;
95a762e2
JH
13232 if (BPF_CLASS(insn->code) == BPF_ALU64) {
13233 __mark_reg_known(regs + insn->dst_reg,
13234 insn->imm);
13235 } else {
13236 __mark_reg_known(regs + insn->dst_reg,
13237 (u32)insn->imm);
13238 }
17a52670
AS
13239 }
13240
13241 } else if (opcode > BPF_END) {
61bd5218 13242 verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
17a52670
AS
13243 return -EINVAL;
13244
13245 } else { /* all other ALU ops: and, sub, xor, add, ... */
13246
17a52670 13247 if (BPF_SRC(insn->code) == BPF_X) {
ec0e2da9
YS
13248 if (insn->imm != 0 || insn->off > 1 ||
13249 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) {
61bd5218 13250 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
13251 return -EINVAL;
13252 }
13253 /* check src1 operand */
dc503a8a 13254 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
13255 if (err)
13256 return err;
13257 } else {
ec0e2da9
YS
13258 if (insn->src_reg != BPF_REG_0 || insn->off > 1 ||
13259 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) {
61bd5218 13260 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
13261 return -EINVAL;
13262 }
13263 }
13264
13265 /* check src2 operand */
dc503a8a 13266 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
13267 if (err)
13268 return err;
13269
13270 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
13271 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
61bd5218 13272 verbose(env, "div by zero\n");
17a52670
AS
13273 return -EINVAL;
13274 }
13275
229394e8
RV
13276 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
13277 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
13278 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
13279
13280 if (insn->imm < 0 || insn->imm >= size) {
61bd5218 13281 verbose(env, "invalid shift %d\n", insn->imm);
229394e8
RV
13282 return -EINVAL;
13283 }
13284 }
13285
1a0dc1ac 13286 /* check dest operand */
dc503a8a 13287 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
1a0dc1ac
AS
13288 if (err)
13289 return err;
13290
f1174f77 13291 return adjust_reg_min_max_vals(env, insn);
17a52670
AS
13292 }
13293
13294 return 0;
13295}
13296
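/* An illustrative encoding accepted by the BPF_MOV handling above
 * (cpu v4 sign-extending move): BPF_ALU64 | BPF_MOV | BPF_X with
 * insn->off == 8 implements "r1 = (s8)r2". If r2 is a scalar with
 * umax_value < 0x80, the value fits in a non-negative s8, no sign
 * extension actually happens (no_sext above), and r2's bounds are
 * carried over to r1 unchanged.
 */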
f4d7e40a 13297static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
de8f3a83 13298 struct bpf_reg_state *dst_reg,
f8ddadc4 13299 enum bpf_reg_type type,
fb2a311a 13300 bool range_right_open)
969bf05e 13301{
b239da34
KKD
13302 struct bpf_func_state *state;
13303 struct bpf_reg_state *reg;
13304 int new_range;
2d2be8ca 13305
fb2a311a
DB
13306 if (dst_reg->off < 0 ||
13307 (dst_reg->off == 0 && range_right_open))
f1174f77
EC
13308 /* This doesn't give us any range */
13309 return;
13310
b03c9f9f
EC
13311 if (dst_reg->umax_value > MAX_PACKET_OFF ||
13312 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
f1174f77
EC
13313 /* Risk of overflow. For instance, ptr + (1<<63) may be less
13314 * than pkt_end, but that's because it's also less than pkt.
13315 */
13316 return;
13317
fb2a311a
DB
13318 new_range = dst_reg->off;
13319 if (range_right_open)
2fa7d94a 13320 new_range++;
fb2a311a
DB
13321
13322 /* Examples for register markings:
2d2be8ca 13323 *
fb2a311a 13324 * pkt_data in dst register:
2d2be8ca
DB
13325 *
13326 * r2 = r3;
13327 * r2 += 8;
13328 * if (r2 > pkt_end) goto <handle exception>
13329 * <access okay>
13330 *
b4e432f1
DB
13331 * r2 = r3;
13332 * r2 += 8;
13333 * if (r2 < pkt_end) goto <access okay>
13334 * <handle exception>
13335 *
2d2be8ca
DB
13336 * Where:
13337 * r2 == dst_reg, pkt_end == src_reg
13338 * r2=pkt(id=n,off=8,r=0)
13339 * r3=pkt(id=n,off=0,r=0)
13340 *
fb2a311a 13341 * pkt_data in src register:
2d2be8ca
DB
13342 *
13343 * r2 = r3;
13344 * r2 += 8;
13345 * if (pkt_end >= r2) goto <access okay>
13346 * <handle exception>
13347 *
b4e432f1
DB
13348 * r2 = r3;
13349 * r2 += 8;
13350 * if (pkt_end <= r2) goto <handle exception>
13351 * <access okay>
13352 *
2d2be8ca
DB
13353 * Where:
13354 * pkt_end == dst_reg, r2 == src_reg
13355 * r2=pkt(id=n,off=8,r=0)
13356 * r3=pkt(id=n,off=0,r=0)
13357 *
13358 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
fb2a311a
DB
13359 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
13360 * and [r3, r3 + 8-1) respectively is safe to access depending on
13361 * the check.
969bf05e 13362 */
2d2be8ca 13363
f1174f77
EC
13364 /* If our ids match, then we must have the same max_value. And we
13365 * don't care about the other reg's fixed offset, since if it's too big
13366 * the range won't allow anything.
13367 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
13368 */
b239da34
KKD
13369 bpf_for_each_reg_in_vstate(vstate, state, reg, ({
13370 if (reg->type == type && reg->id == dst_reg->id)
13371 /* keep the maximum range already checked */
13372 reg->range = max(reg->range, new_range);
13373 }));
969bf05e
AS
13374}
13375
3f50f132 13376static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
4f7b3e82 13377{
3f50f132
JF
13378 struct tnum subreg = tnum_subreg(reg->var_off);
13379 s32 sval = (s32)val;
a72dafaf 13380
3f50f132
JF
13381 switch (opcode) {
13382 case BPF_JEQ:
13383 if (tnum_is_const(subreg))
13384 return !!tnum_equals_const(subreg, val);
13fbcee5
YS
13385 else if (val < reg->u32_min_value || val > reg->u32_max_value)
13386 return 0;
3f50f132
JF
13387 break;
13388 case BPF_JNE:
13389 if (tnum_is_const(subreg))
13390 return !tnum_equals_const(subreg, val);
13fbcee5
YS
13391 else if (val < reg->u32_min_value || val > reg->u32_max_value)
13392 return 1;
3f50f132
JF
13393 break;
13394 case BPF_JSET:
13395 if ((~subreg.mask & subreg.value) & val)
13396 return 1;
13397 if (!((subreg.mask | subreg.value) & val))
13398 return 0;
13399 break;
13400 case BPF_JGT:
13401 if (reg->u32_min_value > val)
13402 return 1;
13403 else if (reg->u32_max_value <= val)
13404 return 0;
13405 break;
13406 case BPF_JSGT:
13407 if (reg->s32_min_value > sval)
13408 return 1;
ee114dd6 13409 else if (reg->s32_max_value <= sval)
3f50f132
JF
13410 return 0;
13411 break;
13412 case BPF_JLT:
13413 if (reg->u32_max_value < val)
13414 return 1;
13415 else if (reg->u32_min_value >= val)
13416 return 0;
13417 break;
13418 case BPF_JSLT:
13419 if (reg->s32_max_value < sval)
13420 return 1;
13421 else if (reg->s32_min_value >= sval)
13422 return 0;
13423 break;
13424 case BPF_JGE:
13425 if (reg->u32_min_value >= val)
13426 return 1;
13427 else if (reg->u32_max_value < val)
13428 return 0;
13429 break;
13430 case BPF_JSGE:
13431 if (reg->s32_min_value >= sval)
13432 return 1;
13433 else if (reg->s32_max_value < sval)
13434 return 0;
13435 break;
13436 case BPF_JLE:
13437 if (reg->u32_max_value <= val)
13438 return 1;
13439 else if (reg->u32_min_value > val)
13440 return 0;
13441 break;
13442 case BPF_JSLE:
13443 if (reg->s32_max_value <= sval)
13444 return 1;
13445 else if (reg->s32_min_value > sval)
13446 return 0;
13447 break;
13448 }
4f7b3e82 13449
3f50f132
JF
13450 return -1;
13451}
092ed096 13452
3f50f132
JF
13453
13454static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
13455{
13456 s64 sval = (s64)val;
a72dafaf 13457
4f7b3e82
AS
13458 switch (opcode) {
13459 case BPF_JEQ:
13460 if (tnum_is_const(reg->var_off))
13461 return !!tnum_equals_const(reg->var_off, val);
13fbcee5
YS
13462 else if (val < reg->umin_value || val > reg->umax_value)
13463 return 0;
4f7b3e82
AS
13464 break;
13465 case BPF_JNE:
13466 if (tnum_is_const(reg->var_off))
13467 return !tnum_equals_const(reg->var_off, val);
13fbcee5
YS
13468 else if (val < reg->umin_value || val > reg->umax_value)
13469 return 1;
4f7b3e82 13470 break;
960ea056
JK
13471 case BPF_JSET:
13472 if ((~reg->var_off.mask & reg->var_off.value) & val)
13473 return 1;
13474 if (!((reg->var_off.mask | reg->var_off.value) & val))
13475 return 0;
13476 break;
4f7b3e82
AS
13477 case BPF_JGT:
13478 if (reg->umin_value > val)
13479 return 1;
13480 else if (reg->umax_value <= val)
13481 return 0;
13482 break;
13483 case BPF_JSGT:
a72dafaf 13484 if (reg->smin_value > sval)
4f7b3e82 13485 return 1;
ee114dd6 13486 else if (reg->smax_value <= sval)
13487 return 0;
13488 break;
13489 case BPF_JLT:
13490 if (reg->umax_value < val)
13491 return 1;
13492 else if (reg->umin_value >= val)
13493 return 0;
13494 break;
13495 case BPF_JSLT:
a72dafaf 13496 if (reg->smax_value < sval)
4f7b3e82 13497 return 1;
a72dafaf 13498 else if (reg->smin_value >= sval)
13499 return 0;
13500 break;
13501 case BPF_JGE:
13502 if (reg->umin_value >= val)
13503 return 1;
13504 else if (reg->umax_value < val)
13505 return 0;
13506 break;
13507 case BPF_JSGE:
a72dafaf 13508 if (reg->smin_value >= sval)
4f7b3e82 13509 return 1;
a72dafaf 13510 else if (reg->smax_value < sval)
13511 return 0;
13512 break;
13513 case BPF_JLE:
13514 if (reg->umax_value <= val)
13515 return 1;
13516 else if (reg->umin_value > val)
13517 return 0;
13518 break;
13519 case BPF_JSLE:
a72dafaf 13520 if (reg->smax_value <= sval)
4f7b3e82 13521 return 1;
a72dafaf 13522 else if (reg->smin_value > sval)
13523 return 0;
13524 break;
13525 }
13526
13527 return -1;
13528}
13529
3f50f132
JF
13530/* compute branch direction of the expression "if (reg opcode val) goto target;"
13531 * and return:
13532 * 1 - branch will be taken and "goto target" will be executed
13533 * 0 - branch will not be taken and fall-through to next insn
13534 * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's
13535 * value range is [0,10]
604dca5e 13536 */
3f50f132
JF
13537static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
13538 bool is_jmp32)
604dca5e 13539{
cac616db 13540 if (__is_pointer_value(false, reg)) {
51302c95 13541 if (!reg_not_null(reg))
cac616db
JF
13542 return -1;
13543
13544 /* If pointer is valid tests against zero will fail so we can
13545 * use this to direct branch taken.
13546 */
13547 if (val != 0)
13548 return -1;
13549
13550 switch (opcode) {
13551 case BPF_JEQ:
13552 return 0;
13553 case BPF_JNE:
13554 return 1;
13555 default:
13556 return -1;
13557 }
13558 }
604dca5e 13559
3f50f132
JF
13560 if (is_jmp32)
13561 return is_branch32_taken(reg, val, opcode);
13562 return is_branch64_taken(reg, val, opcode);
604dca5e
JH
13563}
13564
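/* Illustrative outcomes of is_branch_taken(), assuming a 64-bit
 * (!is_jmp32) scalar reg with umin_value = 3 and umax_value = 7:
 *   "if reg < 10" -> 1  (always taken: umax 7 < 10)
 *   "if reg < 3"  -> 0  (never taken:  umin 3 >= 3)
 *   "if reg < 5"  -> -1 (unknown: range [3, 7] straddles 5)
 */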
6d94e741
AS
13565static int flip_opcode(u32 opcode)
13566{
13567 /* How can we transform "a <op> b" into "b <op> a"? */
13568 static const u8 opcode_flip[16] = {
13569 /* these stay the same */
13570 [BPF_JEQ >> 4] = BPF_JEQ,
13571 [BPF_JNE >> 4] = BPF_JNE,
13572 [BPF_JSET >> 4] = BPF_JSET,
13573 /* these swap "lesser" and "greater" (L and G in the opcodes) */
13574 [BPF_JGE >> 4] = BPF_JLE,
13575 [BPF_JGT >> 4] = BPF_JLT,
13576 [BPF_JLE >> 4] = BPF_JGE,
13577 [BPF_JLT >> 4] = BPF_JGT,
13578 [BPF_JSGE >> 4] = BPF_JSLE,
13579 [BPF_JSGT >> 4] = BPF_JSLT,
13580 [BPF_JSLE >> 4] = BPF_JSGE,
13581 [BPF_JSLT >> 4] = BPF_JSGT
13582 };
13583 return opcode_flip[opcode >> 4];
13584}
13585
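/* flip_opcode() example: "r1 s> r2" (BPF_JSGT) and "r2 s< r1"
 * (BPF_JSLT) test the same condition, so when the constant sits in
 * dst_reg rather than src_reg the comparison can be rewritten with the
 * flipped opcode and swapped operands.
 */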
13586static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
13587 struct bpf_reg_state *src_reg,
13588 u8 opcode)
13589{
13590 struct bpf_reg_state *pkt;
13591
13592 if (src_reg->type == PTR_TO_PACKET_END) {
13593 pkt = dst_reg;
13594 } else if (dst_reg->type == PTR_TO_PACKET_END) {
13595 pkt = src_reg;
13596 opcode = flip_opcode(opcode);
13597 } else {
13598 return -1;
13599 }
13600
13601 if (pkt->range >= 0)
13602 return -1;
13603
13604 switch (opcode) {
13605 case BPF_JLE:
13606 /* pkt <= pkt_end */
13607 fallthrough;
13608 case BPF_JGT:
13609 /* pkt > pkt_end */
13610 if (pkt->range == BEYOND_PKT_END)
13611 /* pkt has at least one extra byte beyond pkt_end */
13612 return opcode == BPF_JGT;
13613 break;
13614 case BPF_JLT:
13615 /* pkt < pkt_end */
13616 fallthrough;
13617 case BPF_JGE:
13618 /* pkt >= pkt_end */
13619 if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
13620 return opcode == BPF_JGE;
13621 break;
13622 }
13623 return -1;
13624}
13625
48461135
JB
13626/* Adjusts the register min/max values in the case that the dst_reg is the
13627 * variable register that we are working on, and src_reg is a constant or we're
13628 * simply doing a BPF_K check.
f1174f77 13629 * In JEQ/JNE cases we also adjust the var_off values.
48461135
JB
13630 */
13631static void reg_set_min_max(struct bpf_reg_state *true_reg,
13632 struct bpf_reg_state *false_reg,
13633 u64 val, u32 val32,
092ed096 13634 u8 opcode, bool is_jmp32)
48461135 13635{
3f50f132
JF
13636 struct tnum false_32off = tnum_subreg(false_reg->var_off);
13637 struct tnum false_64off = false_reg->var_off;
13638 struct tnum true_32off = tnum_subreg(true_reg->var_off);
13639 struct tnum true_64off = true_reg->var_off;
13640 s64 sval = (s64)val;
13641 s32 sval32 = (s32)val32;
a72dafaf 13642
f1174f77
EC
13643 /* If the dst_reg is a pointer, we can't learn anything about its
13644 * variable offset from the compare (unless src_reg were a pointer into
13645 * the same object, but we don't bother with that).
13646 * Since false_reg and true_reg have the same type by construction, we
13647 * only need to check one of them for pointerness.
13648 */
13649 if (__is_pointer_value(false, false_reg))
13650 return;
4cabc5b1 13651
48461135 13652 switch (opcode) {
a12ca627
DB
13653 /* JEQ/JNE comparison doesn't change the register equivalence.
13654 *
13655 * r1 = r2;
13656 * if (r1 == 42) goto label;
13657 * ...
13658 * label: // here both r1 and r2 are known to be 42.
13659 *
13660 * Hence when marking a register as known, preserve its ID.
13661 */
48461135 13662 case BPF_JEQ:
a12ca627
DB
13663 if (is_jmp32) {
13664 __mark_reg32_known(true_reg, val32);
13665 true_32off = tnum_subreg(true_reg->var_off);
13666 } else {
13667 ___mark_reg_known(true_reg, val);
13668 true_64off = true_reg->var_off;
13669 }
13670 break;
48461135 13671 case BPF_JNE:
a12ca627
DB
13672 if (is_jmp32) {
13673 __mark_reg32_known(false_reg, val32);
13674 false_32off = tnum_subreg(false_reg->var_off);
13675 } else {
13676 ___mark_reg_known(false_reg, val);
13677 false_64off = false_reg->var_off;
13678 }
48461135 13679 break;
960ea056 13680 case BPF_JSET:
3f50f132
JF
13681 if (is_jmp32) {
13682 false_32off = tnum_and(false_32off, tnum_const(~val32));
13683 if (is_power_of_2(val32))
13684 true_32off = tnum_or(true_32off,
13685 tnum_const(val32));
13686 } else {
13687 false_64off = tnum_and(false_64off, tnum_const(~val));
13688 if (is_power_of_2(val))
13689 true_64off = tnum_or(true_64off,
13690 tnum_const(val));
13691 }
960ea056 13692 break;
48461135 13693 case BPF_JGE:
a72dafaf
JW
13694 case BPF_JGT:
13695 {
3f50f132
JF
13696 if (is_jmp32) {
13697 u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1;
13698 u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;
13699
13700 false_reg->u32_max_value = min(false_reg->u32_max_value,
13701 false_umax);
13702 true_reg->u32_min_value = max(true_reg->u32_min_value,
13703 true_umin);
13704 } else {
13705 u64 false_umax = opcode == BPF_JGT ? val : val - 1;
13706 u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
13707
13708 false_reg->umax_value = min(false_reg->umax_value, false_umax);
13709 true_reg->umin_value = max(true_reg->umin_value, true_umin);
13710 }
b03c9f9f 13711 break;
a72dafaf 13712 }
48461135 13713 case BPF_JSGE:
a72dafaf
JW
13714 case BPF_JSGT:
13715 {
3f50f132
JF
13716 if (is_jmp32) {
13717 s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1;
13718 s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;
a72dafaf 13719
3f50f132
JF
13720 false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
13721 true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
13722 } else {
13723 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
13724 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
13725
13726 false_reg->smax_value = min(false_reg->smax_value, false_smax);
13727 true_reg->smin_value = max(true_reg->smin_value, true_smin);
13728 }
48461135 13729 break;
a72dafaf 13730 }
b4e432f1 13731 case BPF_JLE:
a72dafaf
JW
13732 case BPF_JLT:
13733 {
3f50f132
JF
13734 if (is_jmp32) {
13735 u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1;
13736 u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;
13737
13738 false_reg->u32_min_value = max(false_reg->u32_min_value,
13739 false_umin);
13740 true_reg->u32_max_value = min(true_reg->u32_max_value,
13741 true_umax);
13742 } else {
13743 u64 false_umin = opcode == BPF_JLT ? val : val + 1;
13744 u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
13745
13746 false_reg->umin_value = max(false_reg->umin_value, false_umin);
13747 true_reg->umax_value = min(true_reg->umax_value, true_umax);
13748 }
b4e432f1 13749 break;
a72dafaf 13750 }
b4e432f1 13751 case BPF_JSLE:
a72dafaf
JW
13752 case BPF_JSLT:
13753 {
3f50f132
JF
13754 if (is_jmp32) {
13755 s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1;
13756 s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;
a72dafaf 13757
3f50f132
JF
13758 false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
13759 true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
13760 } else {
13761 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
13762 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
13763
13764 false_reg->smin_value = max(false_reg->smin_value, false_smin);
13765 true_reg->smax_value = min(true_reg->smax_value, true_smax);
13766 }
b4e432f1 13767 break;
a72dafaf 13768 }
48461135 13769 default:
0fc31b10 13770 return;
48461135
JB
13771 }
13772
3f50f132
JF
13773 if (is_jmp32) {
13774 false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
13775 tnum_subreg(false_32off));
13776 true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
13777 tnum_subreg(true_32off));
13778 __reg_combine_32_into_64(false_reg);
13779 __reg_combine_32_into_64(true_reg);
13780 } else {
13781 false_reg->var_off = false_64off;
13782 true_reg->var_off = true_64off;
13783 __reg_combine_64_into_32(false_reg);
13784 __reg_combine_64_into_32(true_reg);
13785 }
48461135
JB
13786}
13787
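/* A worked example for reg_set_min_max(), with illustrative values:
 * for "if r1 > 5 goto ..." (BPF_JGT, BPF_K) on a 64-bit scalar
 * r1 in [0, 10]:
 *   true branch:  umin_value = max(0, 5 + 1) = 6, range [6, 10]
 *   false branch: umax_value = min(10, 5)    = 5, range [0, 5]
 */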
f1174f77
EC
13788/* Same as above, but for the case that dst_reg holds a constant and src_reg is
13789 * the variable reg.
48461135
JB
13790 */
13791static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
13792 struct bpf_reg_state *false_reg,
13793 u64 val, u32 val32,
092ed096 13794 u8 opcode, bool is_jmp32)
48461135 13795{
6d94e741 13796 opcode = flip_opcode(opcode);
0fc31b10
JH
13797 /* This uses zero as "not present in table"; luckily the zero opcode,
13798 * BPF_JA, can't get here.
b03c9f9f 13799 */
0fc31b10 13800 if (opcode)
3f50f132 13801 reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
f1174f77
EC
13802}
13803
13804/* Regs are known to be equal, so intersect their min/max/var_off */
13805static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
13806 struct bpf_reg_state *dst_reg)
13807{
b03c9f9f
EC
13808 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
13809 dst_reg->umin_value);
13810 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
13811 dst_reg->umax_value);
13812 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
13813 dst_reg->smin_value);
13814 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
13815 dst_reg->smax_value);
f1174f77
EC
13816 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
13817 dst_reg->var_off);
3844d153
DB
13818 reg_bounds_sync(src_reg);
13819 reg_bounds_sync(dst_reg);
f1174f77
EC
13820}
13821
13822static void reg_combine_min_max(struct bpf_reg_state *true_src,
13823 struct bpf_reg_state *true_dst,
13824 struct bpf_reg_state *false_src,
13825 struct bpf_reg_state *false_dst,
13826 u8 opcode)
13827{
13828 switch (opcode) {
13829 case BPF_JEQ:
13830 __reg_combine_min_max(true_src, true_dst);
13831 break;
13832 case BPF_JNE:
13833 __reg_combine_min_max(false_src, false_dst);
b03c9f9f 13834 break;
4cabc5b1 13835 }
48461135
JB
13836}
13837
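/* Illustrative JEQ combination: if r1 is in [0, 10] and r2 is in
 * [5, 15], then in the branch where "r1 == r2" both registers are
 * narrowed to the intersection [5, 10], and their var_offs are
 * intersected via tnum_intersect() as well.
 */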
fd978bf7
JS
13838static void mark_ptr_or_null_reg(struct bpf_func_state *state,
13839 struct bpf_reg_state *reg, u32 id,
840b9615 13840 bool is_null)
57a09bf0 13841{
c25b2ae1 13842 if (type_may_be_null(reg->type) && reg->id == id &&
fca1aa75 13843 (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) {
df57f38a
KKD
13844 /* Old offset (both fixed and variable parts) should have been
13845 * known-zero, because we don't allow pointer arithmetic on
13846 * pointers that might be NULL. If we see this happening, don't
13847 * convert the register.
13848 *
13849 * But in some cases, some helpers that return local kptrs
13850 * advance offset for the returned pointer. In those cases, it
13851 * is fine to expect to see reg->off.
13852 */
13853 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0)))
13854 return;
6a3cd331
DM
13855 if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) &&
13856 WARN_ON_ONCE(reg->off))
e60b0d12 13857 return;
6a3cd331 13858
f1174f77
EC
13859 if (is_null) {
13860 reg->type = SCALAR_VALUE;
1b986589
MKL
13861 /* We don't need id and ref_obj_id from this point
13862 * onwards anymore, thus we should better reset it,
13863 * so that state pruning has chances to take effect.
13864 */
13865 reg->id = 0;
13866 reg->ref_obj_id = 0;
4ddb7416
DB
13867
13868 return;
13869 }
13870
13871 mark_ptr_not_null_reg(reg);
13872
13873 if (!reg_may_point_to_spin_lock(reg)) {
1b986589 13874 /* For not-NULL ptr, reg->ref_obj_id will be reset
b239da34 13875 * in release_reference().
1b986589
MKL
13876 *
13877 * reg->id is still used by spin_lock ptr. Other
13878 * than spin_lock ptr type, reg->id can be reset.
fd978bf7
JS
13879 */
13880 reg->id = 0;
56f668df 13881 }
57a09bf0
TG
13882 }
13883}
13884
13885/* The logic is similar to find_good_pkt_pointers(), both could eventually
13886 * be folded together at some point.
13887 */
840b9615
JS
13888static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
13889 bool is_null)
57a09bf0 13890{
f4d7e40a 13891 struct bpf_func_state *state = vstate->frame[vstate->curframe];
b239da34 13892 struct bpf_reg_state *regs = state->regs, *reg;
1b986589 13893 u32 ref_obj_id = regs[regno].ref_obj_id;
a08dd0da 13894 u32 id = regs[regno].id;
57a09bf0 13895
1b986589
MKL
13896 if (ref_obj_id && ref_obj_id == id && is_null)
13897 /* regs[regno] is in the " == NULL" branch.
13898 * No one could have freed the reference state before
13899 * doing the NULL check.
13900 */
13901 WARN_ON_ONCE(release_reference_state(state, id));
fd978bf7 13902
b239da34
KKD
13903 bpf_for_each_reg_in_vstate(vstate, state, reg, ({
13904 mark_ptr_or_null_reg(state, reg, id, is_null);
13905 }));
57a09bf0
TG
13906}
13907
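/* The canonical pattern handled above (illustrative):
 *
 *   r0 = bpf_map_lookup_elem(...);   // r0: PTR_TO_MAP_VALUE_OR_NULL
 *   if (r0 == 0) goto out;           // "== NULL" branch: r0 becomes
 *                                    // a known-zero SCALAR_VALUE
 *   *(u32 *)(r0 + 0) = 1;            // here r0 is PTR_TO_MAP_VALUE
 *
 * All registers sharing r0's id are converted the same way in each
 * branch.
 */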
5beca081
DB
13908static bool try_match_pkt_pointers(const struct bpf_insn *insn,
13909 struct bpf_reg_state *dst_reg,
13910 struct bpf_reg_state *src_reg,
13911 struct bpf_verifier_state *this_branch,
13912 struct bpf_verifier_state *other_branch)
13913{
13914 if (BPF_SRC(insn->code) != BPF_X)
13915 return false;
13916
092ed096
JW
13917 /* Pointers are always 64-bit. */
13918 if (BPF_CLASS(insn->code) == BPF_JMP32)
13919 return false;
13920
5beca081
DB
13921 switch (BPF_OP(insn->code)) {
13922 case BPF_JGT:
13923 if ((dst_reg->type == PTR_TO_PACKET &&
13924 src_reg->type == PTR_TO_PACKET_END) ||
13925 (dst_reg->type == PTR_TO_PACKET_META &&
13926 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
13927 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
13928 find_good_pkt_pointers(this_branch, dst_reg,
13929 dst_reg->type, false);
6d94e741 13930 mark_pkt_end(other_branch, insn->dst_reg, true);
5beca081
DB
13931 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
13932 src_reg->type == PTR_TO_PACKET) ||
13933 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
13934 src_reg->type == PTR_TO_PACKET_META)) {
13935 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
13936 find_good_pkt_pointers(other_branch, src_reg,
13937 src_reg->type, true);
6d94e741 13938 mark_pkt_end(this_branch, insn->src_reg, false);
5beca081
DB
13939 } else {
13940 return false;
13941 }
13942 break;
13943 case BPF_JLT:
13944 if ((dst_reg->type == PTR_TO_PACKET &&
13945 src_reg->type == PTR_TO_PACKET_END) ||
13946 (dst_reg->type == PTR_TO_PACKET_META &&
13947 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
13948 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
13949 find_good_pkt_pointers(other_branch, dst_reg,
13950 dst_reg->type, true);
6d94e741 13951 mark_pkt_end(this_branch, insn->dst_reg, false);
5beca081
DB
13952 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
13953 src_reg->type == PTR_TO_PACKET) ||
13954 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
13955 src_reg->type == PTR_TO_PACKET_META)) {
13956 /* pkt_end < pkt_data', pkt_data > pkt_meta' */
13957 find_good_pkt_pointers(this_branch, src_reg,
13958 src_reg->type, false);
6d94e741 13959 mark_pkt_end(other_branch, insn->src_reg, true);
5beca081
DB
13960 } else {
13961 return false;
13962 }
13963 break;
13964 case BPF_JGE:
13965 if ((dst_reg->type == PTR_TO_PACKET &&
13966 src_reg->type == PTR_TO_PACKET_END) ||
13967 (dst_reg->type == PTR_TO_PACKET_META &&
13968 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
13969 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
13970 find_good_pkt_pointers(this_branch, dst_reg,
13971 dst_reg->type, true);
6d94e741 13972 mark_pkt_end(other_branch, insn->dst_reg, false);
5beca081
DB
13973 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
13974 src_reg->type == PTR_TO_PACKET) ||
13975 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
13976 src_reg->type == PTR_TO_PACKET_META)) {
13977 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
13978 find_good_pkt_pointers(other_branch, src_reg,
13979 src_reg->type, false);
6d94e741 13980 mark_pkt_end(this_branch, insn->src_reg, true);
5beca081
DB
13981 } else {
13982 return false;
13983 }
13984 break;
13985 case BPF_JLE:
13986 if ((dst_reg->type == PTR_TO_PACKET &&
13987 src_reg->type == PTR_TO_PACKET_END) ||
13988 (dst_reg->type == PTR_TO_PACKET_META &&
13989 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
13990 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
13991 find_good_pkt_pointers(other_branch, dst_reg,
13992 dst_reg->type, false);
6d94e741 13993 mark_pkt_end(this_branch, insn->dst_reg, true);
5beca081
DB
13994 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
13995 src_reg->type == PTR_TO_PACKET) ||
13996 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
13997 src_reg->type == PTR_TO_PACKET_META)) {
13998 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
13999 find_good_pkt_pointers(this_branch, src_reg,
14000 src_reg->type, true);
6d94e741 14001 mark_pkt_end(other_branch, insn->src_reg, false);
5beca081
DB
14002 } else {
14003 return false;
14004 }
14005 break;
14006 default:
14007 return false;
14008 }
14009
14010 return true;
14011}
14012
75748837
AS
14013static void find_equal_scalars(struct bpf_verifier_state *vstate,
14014 struct bpf_reg_state *known_reg)
14015{
14016 struct bpf_func_state *state;
14017 struct bpf_reg_state *reg;
75748837 14018
b239da34
KKD
14019 bpf_for_each_reg_in_vstate(vstate, state, reg, ({
14020 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
71f656a5 14021 copy_register_state(reg, known_reg);
b239da34 14022 }));
75748837
AS
14023}
14024
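/* Illustrative use of the shared-id propagation above:
 *
 *   r2 = r1;              // both scalars get the same id
 *   if (r2 > 10) goto ...
 *
 * After the branch, the refined bounds of r2 are copied to r1 (and to
 * any other scalar carrying the same id) in both branch states.
 */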
58e2af8b 14025static int check_cond_jmp_op(struct bpf_verifier_env *env,
14026 struct bpf_insn *insn, int *insn_idx)
14027{
f4d7e40a
AS
14028 struct bpf_verifier_state *this_branch = env->cur_state;
14029 struct bpf_verifier_state *other_branch;
14030 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
fb8d251e 14031 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
befae758 14032 struct bpf_reg_state *eq_branch_regs;
17a52670 14033 u8 opcode = BPF_OP(insn->code);
092ed096 14034 bool is_jmp32;
fb8d251e 14035 int pred = -1;
17a52670
AS
14036 int err;
14037
092ed096
JW
14038 /* Only conditional jumps are expected to reach here. */
14039 if (opcode == BPF_JA || opcode > BPF_JSLE) {
14040 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
17a52670
AS
14041 return -EINVAL;
14042 }
14043
d75e30dd
YS
14044 /* check src2 operand */
14045 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
14046 if (err)
14047 return err;
14048
14049 dst_reg = &regs[insn->dst_reg];
17a52670
AS
14050 if (BPF_SRC(insn->code) == BPF_X) {
14051 if (insn->imm != 0) {
092ed096 14052 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
14053 return -EINVAL;
14054 }
14055
14056 /* check src1 operand */
dc503a8a 14057 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
14058 if (err)
14059 return err;
1be7f75d 14060
d75e30dd
YS
14061 src_reg = &regs[insn->src_reg];
14062 if (!(reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg)) &&
14063 is_pointer_value(env, insn->src_reg)) {
61bd5218 14064 verbose(env, "R%d pointer comparison prohibited\n",
14065 insn->src_reg);
14066 return -EACCES;
14067 }
17a52670
AS
14068 } else {
14069 if (insn->src_reg != BPF_REG_0) {
092ed096 14070 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
14071 return -EINVAL;
14072 }
14073 }
14074
092ed096 14075 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1a0dc1ac 14076
3f50f132
JF
14077 if (BPF_SRC(insn->code) == BPF_K) {
14078 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
14079 } else if (src_reg->type == SCALAR_VALUE &&
14080 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
14081 pred = is_branch_taken(dst_reg,
14082 tnum_subreg(src_reg->var_off).value,
14083 opcode,
14084 is_jmp32);
14085 } else if (src_reg->type == SCALAR_VALUE &&
14086 !is_jmp32 && tnum_is_const(src_reg->var_off)) {
14087 pred = is_branch_taken(dst_reg,
14088 src_reg->var_off.value,
14089 opcode,
14090 is_jmp32);
953d9f5b
YS
14091 } else if (dst_reg->type == SCALAR_VALUE &&
14092 is_jmp32 && tnum_is_const(tnum_subreg(dst_reg->var_off))) {
14093 pred = is_branch_taken(src_reg,
14094 tnum_subreg(dst_reg->var_off).value,
14095 flip_opcode(opcode),
14096 is_jmp32);
14097 } else if (dst_reg->type == SCALAR_VALUE &&
14098 !is_jmp32 && tnum_is_const(dst_reg->var_off)) {
14099 pred = is_branch_taken(src_reg,
14100 dst_reg->var_off.value,
14101 flip_opcode(opcode),
14102 is_jmp32);
6d94e741
AS
14103 } else if (reg_is_pkt_pointer_any(dst_reg) &&
14104 reg_is_pkt_pointer_any(src_reg) &&
14105 !is_jmp32) {
14106 pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
3f50f132
JF
14107 }
14108
b5dc0163 14109 if (pred >= 0) {
cac616db
JF
14110 /* If we get here with a dst_reg pointer type it is because
14111 * above is_branch_taken() special cased the 0 comparison.
14112 */
14113 if (!__is_pointer_value(false, dst_reg))
14114 err = mark_chain_precision(env, insn->dst_reg);
6d94e741
AS
14115 if (BPF_SRC(insn->code) == BPF_X && !err &&
14116 !__is_pointer_value(false, src_reg))
b5dc0163
AS
14117 err = mark_chain_precision(env, insn->src_reg);
14118 if (err)
14119 return err;
14120 }
9183671a 14121
fb8d251e 14122 if (pred == 1) {
9183671a
DB
14123 /* Only follow the goto, ignore fall-through. If needed, push
14124 * the fall-through branch for simulation under speculative
14125 * execution.
14126 */
14127 if (!env->bypass_spec_v1 &&
14128 !sanitize_speculative_path(env, insn, *insn_idx + 1,
14129 *insn_idx))
14130 return -EFAULT;
fb8d251e
AS
14131 *insn_idx += insn->off;
14132 return 0;
14133 } else if (pred == 0) {
9183671a
DB
14134 /* Only follow the fall-through branch, since that's where the
14135 * program will go. If needed, push the goto branch for
14136 * simulation under speculative execution.
fb8d251e 14137 */
9183671a
DB
14138 if (!env->bypass_spec_v1 &&
14139 !sanitize_speculative_path(env, insn,
14140 *insn_idx + insn->off + 1,
14141 *insn_idx))
14142 return -EFAULT;
fb8d251e 14143 return 0;
17a52670
AS
14144 }
14145
979d63d5
DB
14146 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
14147 false);
17a52670
AS
14148 if (!other_branch)
14149 return -EFAULT;
f4d7e40a 14150 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
17a52670 14151
48461135
JB
14152 /* detect if we are comparing against a constant value so we can adjust
14153 * our min/max values for our dst register.
f1174f77 14154 * this is only legit if both are scalars (or pointers to the same
14155 * object, I suppose, see the PTR_MAYBE_NULL related if block below),
14156 * because otherwise the different base pointers mean the offsets aren't
f1174f77 14157 * comparable.
48461135
JB
14158 */
14159 if (BPF_SRC(insn->code) == BPF_X) {
092ed096 14160 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
092ed096 14161
f1174f77 14162 if (dst_reg->type == SCALAR_VALUE &&
092ed096
JW
14163 src_reg->type == SCALAR_VALUE) {
14164 if (tnum_is_const(src_reg->var_off) ||
3f50f132
JF
14165 (is_jmp32 &&
14166 tnum_is_const(tnum_subreg(src_reg->var_off))))
f4d7e40a 14167 reg_set_min_max(&other_branch_regs[insn->dst_reg],
092ed096 14168 dst_reg,
3f50f132
JF
14169 src_reg->var_off.value,
14170 tnum_subreg(src_reg->var_off).value,
092ed096
JW
14171 opcode, is_jmp32);
14172 else if (tnum_is_const(dst_reg->var_off) ||
3f50f132
JF
14173 (is_jmp32 &&
14174 tnum_is_const(tnum_subreg(dst_reg->var_off))))
f4d7e40a 14175 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
092ed096 14176 src_reg,
3f50f132
JF
14177 dst_reg->var_off.value,
14178 tnum_subreg(dst_reg->var_off).value,
092ed096
JW
14179 opcode, is_jmp32);
14180 else if (!is_jmp32 &&
14181 (opcode == BPF_JEQ || opcode == BPF_JNE))
f1174f77 14182 /* Comparing for equality, we can combine knowledge */
f4d7e40a
AS
14183 reg_combine_min_max(&other_branch_regs[insn->src_reg],
14184 &other_branch_regs[insn->dst_reg],
092ed096 14185 src_reg, dst_reg, opcode);
e688c3db
AS
14186 if (src_reg->id &&
14187 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
75748837
AS
14188 find_equal_scalars(this_branch, src_reg);
14189 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
14190 }
14191
f1174f77
EC
14192 }
14193 } else if (dst_reg->type == SCALAR_VALUE) {
f4d7e40a 14194 reg_set_min_max(&other_branch_regs[insn->dst_reg],
3f50f132
JF
14195 dst_reg, insn->imm, (u32)insn->imm,
14196 opcode, is_jmp32);
48461135
JB
14197 }
14198
e688c3db
AS
14199 if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
14200 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
75748837
AS
14201 find_equal_scalars(this_branch, dst_reg);
14202 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
14203 }
14204
befae758
EZ
14205 /* if one pointer register is compared to another pointer
14206 * register check if PTR_MAYBE_NULL could be lifted.
14207 * E.g. register A - maybe null
14208 * register B - not null
14209 * for JNE A, B, ... - A is not null in the false branch;
14210 * for JEQ A, B, ... - A is not null in the true branch.
8374bfd5
HS
14211 *
14212 	 * PTR_TO_BTF_ID points to a kernel struct that does not
14213 	 * need to be null checked by the BPF program, i.e., it
14214 	 * could be null even without the PTR_MAYBE_NULL marking,
14215 	 * so only propagate nullness when neither reg is that type.
befae758
EZ
14216 */
14217 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X &&
14218 __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) &&
8374bfd5
HS
14219 type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) &&
14220 base_type(src_reg->type) != PTR_TO_BTF_ID &&
14221 base_type(dst_reg->type) != PTR_TO_BTF_ID) {
befae758
EZ
14222 eq_branch_regs = NULL;
14223 switch (opcode) {
14224 case BPF_JEQ:
14225 eq_branch_regs = other_branch_regs;
14226 break;
14227 case BPF_JNE:
14228 eq_branch_regs = regs;
14229 break;
14230 default:
14231 /* do nothing */
14232 break;
14233 }
14234 if (eq_branch_regs) {
14235 if (type_may_be_null(src_reg->type))
14236 mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]);
14237 else
14238 mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]);
14239 }
14240 }
14241
092ed096
JW
14242 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
14243 	 * NOTE: the optimizations below relate to pointer comparisons,
14244 * which will never be JMP32.
14245 */
14246 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
1a0dc1ac 14247 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
c25b2ae1 14248 type_may_be_null(dst_reg->type)) {
840b9615 14249 /* Mark all identical registers in each branch as either
57a09bf0
TG
14250 		 * safe or unknown depending on the R == 0 or R != 0 conditional.
14251 */
840b9615
JS
14252 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
14253 opcode == BPF_JNE);
14254 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
14255 opcode == BPF_JEQ);
5beca081
DB
14256 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
14257 this_branch, other_branch) &&
14258 is_pointer_value(env, insn->dst_reg)) {
61bd5218
JK
14259 verbose(env, "R%d pointer comparison prohibited\n",
14260 insn->dst_reg);
1be7f75d 14261 return -EACCES;
17a52670 14262 }
06ee7115 14263 if (env->log.level & BPF_LOG_LEVEL)
2e576648 14264 print_insn_state(env, this_branch->frame[this_branch->curframe]);
17a52670
AS
14265 return 0;
14266}
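
/* Editor's sketch, not part of the source: how the JNE nullness propagation
 * above plays out in a privileged program (pointer comparisons are rejected
 * for unprivileged ones, see the "pointer comparison prohibited" check).
 * Assume R0 holds a bpf_map_lookup_elem() result (PTR_TO_MAP_VALUE with
 * PTR_MAYBE_NULL) and R6 an already null-checked pointer; the register
 * numbers and offsets are made up for illustration.
 */
	BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_6, 1),
	/* fall-through is the false branch: r0 == r6 and r6 != NULL, so
	 * PTR_MAYBE_NULL is lifted from r0 and this load verifies
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),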
14267
17a52670 14268/* verify BPF_LD_IMM64 instruction */
58e2af8b 14269static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 14270{
d8eca5bb 14271 struct bpf_insn_aux_data *aux = cur_aux(env);
638f5b90 14272 struct bpf_reg_state *regs = cur_regs(env);
4976b718 14273 struct bpf_reg_state *dst_reg;
d8eca5bb 14274 struct bpf_map *map;
17a52670
AS
14275 int err;
14276
14277 if (BPF_SIZE(insn->code) != BPF_DW) {
61bd5218 14278 verbose(env, "invalid BPF_LD_IMM insn\n");
17a52670
AS
14279 return -EINVAL;
14280 }
14281 if (insn->off != 0) {
61bd5218 14282 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
17a52670
AS
14283 return -EINVAL;
14284 }
14285
dc503a8a 14286 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
14287 if (err)
14288 return err;
14289
4976b718 14290 dst_reg = &regs[insn->dst_reg];
6b173873 14291 if (insn->src_reg == 0) {
6b173873
JK
14292 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
14293
4976b718 14294 dst_reg->type = SCALAR_VALUE;
b03c9f9f 14295 __mark_reg_known(&regs[insn->dst_reg], imm);
17a52670 14296 return 0;
6b173873 14297 }
17a52670 14298
d400a6cf
DB
14299 /* All special src_reg cases are listed below. From this point onwards
14300 * we either succeed and assign a corresponding dst_reg->type after
14301 * zeroing the offset, or fail and reject the program.
14302 */
14303 mark_reg_known_zero(env, regs, insn->dst_reg);
4976b718 14304
d400a6cf 14305 if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
4976b718 14306 dst_reg->type = aux->btf_var.reg_type;
34d3a78c 14307 switch (base_type(dst_reg->type)) {
4976b718
HL
14308 case PTR_TO_MEM:
14309 dst_reg->mem_size = aux->btf_var.mem_size;
14310 break;
14311 case PTR_TO_BTF_ID:
22dc4a0f 14312 dst_reg->btf = aux->btf_var.btf;
4976b718
HL
14313 dst_reg->btf_id = aux->btf_var.btf_id;
14314 break;
14315 default:
14316 verbose(env, "bpf verifier is misconfigured\n");
14317 return -EFAULT;
14318 }
14319 return 0;
14320 }
14321
69c087ba
YS
14322 if (insn->src_reg == BPF_PSEUDO_FUNC) {
14323 struct bpf_prog_aux *aux = env->prog->aux;
3990ed4c
MKL
14324 u32 subprogno = find_subprog(env,
14325 env->insn_idx + insn->imm + 1);
69c087ba
YS
14326
14327 if (!aux->func_info) {
14328 verbose(env, "missing btf func_info\n");
14329 return -EINVAL;
14330 }
14331 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
14332 verbose(env, "callback function not static\n");
14333 return -EINVAL;
14334 }
14335
14336 dst_reg->type = PTR_TO_FUNC;
14337 dst_reg->subprogno = subprogno;
14338 return 0;
14339 }
14340
d8eca5bb 14341 map = env->used_maps[aux->map_index];
4976b718 14342 dst_reg->map_ptr = map;
d8eca5bb 14343
387544bf
AS
14344 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
14345 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
4976b718
HL
14346 dst_reg->type = PTR_TO_MAP_VALUE;
14347 dst_reg->off = aux->map_off;
d0d78c1d
KKD
14348 WARN_ON_ONCE(map->max_entries != 1);
14349 /* We want reg->id to be same (0) as map_value is not distinct */
387544bf
AS
14350 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
14351 insn->src_reg == BPF_PSEUDO_MAP_IDX) {
4976b718 14352 dst_reg->type = CONST_PTR_TO_MAP;
d8eca5bb
DB
14353 } else {
14354 verbose(env, "bpf verifier is misconfigured\n");
14355 return -EINVAL;
14356 }
17a52670 14357
17a52670
AS
14358 return 0;
14359}
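
/* Editor's sketch, not part of the source: the instruction verified above
 * occupies two insn slots, with the low 32 bits of the constant in
 * insn->imm and the high 32 bits in (insn + 1)->imm; a nonzero src_reg
 * selects one of the BPF_PSEUDO_* variants handled above. The
 * BPF_LD_IMM64 macro from <linux/filter.h> expands to both slots.
 */
	struct bpf_insn insns[] = {
		BPF_LD_IMM64(BPF_REG_1, 0x1234567890abcdefULL), /* src_reg == 0 */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};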
14360
96be4325
DB
14361static bool may_access_skb(enum bpf_prog_type type)
14362{
14363 switch (type) {
14364 case BPF_PROG_TYPE_SOCKET_FILTER:
14365 case BPF_PROG_TYPE_SCHED_CLS:
94caee8c 14366 case BPF_PROG_TYPE_SCHED_ACT:
96be4325
DB
14367 return true;
14368 default:
14369 return false;
14370 }
14371}
14372
ddd872bc
AS
14373/* verify safety of LD_ABS|LD_IND instructions:
14374 * - they can only appear in the programs where ctx == skb
14375 * - since they are wrappers of function calls, they scratch R1-R5 registers,
14376 * preserve R6-R9, and store return value into R0
14377 *
14378 * Implicit input:
14379 * ctx == skb == R6 == CTX
14380 *
14381 * Explicit input:
14382 * SRC == any register
14383 * IMM == 32-bit immediate
14384 *
14385 * Output:
14386 * R0 - 8/16/32-bit skb data converted to cpu endianness
14387 */
58e2af8b 14388static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
ddd872bc 14389{
638f5b90 14390 struct bpf_reg_state *regs = cur_regs(env);
6d4f151a 14391 static const int ctx_reg = BPF_REG_6;
ddd872bc 14392 u8 mode = BPF_MODE(insn->code);
ddd872bc
AS
14393 int i, err;
14394
7e40781c 14395 if (!may_access_skb(resolve_prog_type(env->prog))) {
61bd5218 14396 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
ddd872bc
AS
14397 return -EINVAL;
14398 }
14399
e0cea7ce
DB
14400 if (!env->ops->gen_ld_abs) {
14401 verbose(env, "bpf verifier is misconfigured\n");
14402 return -EINVAL;
14403 }
14404
ddd872bc 14405 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
d82bccc6 14406 BPF_SIZE(insn->code) == BPF_DW ||
ddd872bc 14407 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
61bd5218 14408 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
ddd872bc
AS
14409 return -EINVAL;
14410 }
14411
14412 /* check whether implicit source operand (register R6) is readable */
6d4f151a 14413 err = check_reg_arg(env, ctx_reg, SRC_OP);
ddd872bc
AS
14414 if (err)
14415 return err;
14416
fd978bf7
JS
14417 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
14418 * gen_ld_abs() may terminate the program at runtime, leading to
14419 * reference leak.
14420 */
14421 err = check_reference_leak(env);
14422 if (err) {
14423 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
14424 return err;
14425 }
14426
d0d78c1d 14427 if (env->cur_state->active_lock.ptr) {
d83525ca
AS
14428 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
14429 return -EINVAL;
14430 }
14431
9bb00b28
YS
14432 if (env->cur_state->active_rcu_lock) {
14433 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n");
14434 return -EINVAL;
14435 }
14436
6d4f151a 14437 if (regs[ctx_reg].type != PTR_TO_CTX) {
61bd5218
JK
14438 verbose(env,
14439 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
ddd872bc
AS
14440 return -EINVAL;
14441 }
14442
14443 if (mode == BPF_IND) {
14444 /* check explicit source operand */
dc503a8a 14445 err = check_reg_arg(env, insn->src_reg, SRC_OP);
ddd872bc
AS
14446 if (err)
14447 return err;
14448 }
14449
be80a1d3 14450 err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
6d4f151a
DB
14451 if (err < 0)
14452 return err;
14453
ddd872bc 14454 /* reset caller saved regs to unreadable */
dc503a8a 14455 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 14456 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
14457 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
14458 }
ddd872bc
AS
14459
14460 /* mark destination R0 register as readable, since it contains
dc503a8a
EC
14461 * the value fetched from the packet.
14462 * Already marked as written above.
ddd872bc 14463 */
61bd5218 14464 mark_reg_unknown(env, regs, BPF_REG_0);
5327ed3d
JW
14465 	/* ld_abs loads up to 32 bits of skb data. */
14466 regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
ddd872bc
AS
14467 return 0;
14468}
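
/* Editor's sketch, not part of the source: a minimal LD_ABS user, assuming
 * BPF_PROG_TYPE_SOCKET_FILTER where ctx == skb lives implicitly in R6;
 * offset 12 (the Ethernet protocol field) is an arbitrary example.
 */
	BPF_LD_ABS(BPF_H, 12),	/* R0 = ntohs(*(u16 *)(skb->data + 12)) */
	BPF_EXIT_INSN(),	/* return R0 */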
14469
390ee7e2
AS
14470static int check_return_code(struct bpf_verifier_env *env)
14471{
5cf1e914 14472 struct tnum enforce_attach_type_range = tnum_unknown;
27ae7997 14473 const struct bpf_prog *prog = env->prog;
390ee7e2
AS
14474 struct bpf_reg_state *reg;
14475 struct tnum range = tnum_range(0, 1);
7e40781c 14476 enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
27ae7997 14477 int err;
bfc6bb74
AS
14478 struct bpf_func_state *frame = env->cur_state->frame[0];
14479 const bool is_subprog = frame->subprogno;
27ae7997 14480
9e4e01df 14481 /* LSM and struct_ops func-ptr's return type could be "void" */
d1a6edec
SF
14482 if (!is_subprog) {
14483 switch (prog_type) {
14484 case BPF_PROG_TYPE_LSM:
14485 if (prog->expected_attach_type == BPF_LSM_CGROUP)
14486 /* See below, can be 0 or 0-1 depending on hook. */
14487 break;
14488 fallthrough;
14489 case BPF_PROG_TYPE_STRUCT_OPS:
14490 if (!prog->aux->attach_func_proto->type)
14491 return 0;
14492 break;
14493 default:
14494 break;
14495 }
14496 }
27ae7997 14497
8fb33b60 14498 /* eBPF calling convention is such that R0 is used
27ae7997
MKL
14499 * to return the value from eBPF program.
14500 * Make sure that it's readable at this time
14501 * of bpf_exit, which means that program wrote
14502 * something into it earlier
14503 */
14504 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
14505 if (err)
14506 return err;
14507
14508 if (is_pointer_value(env, BPF_REG_0)) {
14509 verbose(env, "R0 leaks addr as return value\n");
14510 return -EACCES;
14511 }
390ee7e2 14512
f782e2c3 14513 reg = cur_regs(env) + BPF_REG_0;
bfc6bb74
AS
14514
14515 if (frame->in_async_callback_fn) {
14516 /* enforce return zero from async callbacks like timer */
14517 if (reg->type != SCALAR_VALUE) {
14518 verbose(env, "In async callback the register R0 is not a known value (%s)\n",
c25b2ae1 14519 reg_type_str(env, reg->type));
bfc6bb74
AS
14520 return -EINVAL;
14521 }
14522
14523 if (!tnum_in(tnum_const(0), reg->var_off)) {
14524 verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
14525 return -EINVAL;
14526 }
14527 return 0;
14528 }
14529
f782e2c3
DB
14530 if (is_subprog) {
14531 if (reg->type != SCALAR_VALUE) {
14532 verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
c25b2ae1 14533 reg_type_str(env, reg->type));
f782e2c3
DB
14534 return -EINVAL;
14535 }
14536 return 0;
14537 }
14538
7e40781c 14539 switch (prog_type) {
983695fa
DB
14540 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
14541 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
1b66d253
DB
14542 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
14543 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
14544 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
14545 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
14546 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
983695fa 14547 range = tnum_range(1, 1);
77241217
SF
14548 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
14549 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
14550 range = tnum_range(0, 3);
ed4ed404 14551 break;
390ee7e2 14552 case BPF_PROG_TYPE_CGROUP_SKB:
5cf1e914 14553 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
14554 range = tnum_range(0, 3);
14555 enforce_attach_type_range = tnum_range(2, 3);
14556 }
ed4ed404 14557 break;
390ee7e2
AS
14558 case BPF_PROG_TYPE_CGROUP_SOCK:
14559 case BPF_PROG_TYPE_SOCK_OPS:
ebc614f6 14560 case BPF_PROG_TYPE_CGROUP_DEVICE:
7b146ceb 14561 case BPF_PROG_TYPE_CGROUP_SYSCTL:
0d01da6a 14562 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
390ee7e2 14563 break;
15ab09bd
AS
14564 case BPF_PROG_TYPE_RAW_TRACEPOINT:
14565 if (!env->prog->aux->attach_btf_id)
14566 return 0;
14567 range = tnum_const(0);
14568 break;
15d83c4d 14569 case BPF_PROG_TYPE_TRACING:
e92888c7
YS
14570 switch (env->prog->expected_attach_type) {
14571 case BPF_TRACE_FENTRY:
14572 case BPF_TRACE_FEXIT:
14573 range = tnum_const(0);
14574 break;
14575 case BPF_TRACE_RAW_TP:
14576 case BPF_MODIFY_RETURN:
15d83c4d 14577 return 0;
2ec0616e
DB
14578 case BPF_TRACE_ITER:
14579 break;
e92888c7
YS
14580 default:
14581 return -ENOTSUPP;
14582 }
15d83c4d 14583 break;
e9ddbb77
JS
14584 case BPF_PROG_TYPE_SK_LOOKUP:
14585 range = tnum_range(SK_DROP, SK_PASS);
14586 break;
69fd337a
SF
14587
14588 case BPF_PROG_TYPE_LSM:
14589 if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
14590 /* Regular BPF_PROG_TYPE_LSM programs can return
14591 * any value.
14592 */
14593 return 0;
14594 }
14595 if (!env->prog->aux->attach_func_proto->type) {
14596 /* Make sure programs that attach to void
14597 * hooks don't try to modify return value.
14598 */
14599 range = tnum_range(1, 1);
14600 }
14601 break;
14602
fd9c663b
FW
14603 case BPF_PROG_TYPE_NETFILTER:
14604 range = tnum_range(NF_DROP, NF_ACCEPT);
14605 break;
e92888c7
YS
14606 case BPF_PROG_TYPE_EXT:
14607 /* freplace program can return anything as its return value
14608 * depends on the to-be-replaced kernel func or bpf program.
14609 */
390ee7e2
AS
14610 default:
14611 return 0;
14612 }
14613
390ee7e2 14614 if (reg->type != SCALAR_VALUE) {
61bd5218 14615 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
c25b2ae1 14616 reg_type_str(env, reg->type));
390ee7e2
AS
14617 return -EINVAL;
14618 }
14619
14620 if (!tnum_in(range, reg->var_off)) {
bc2591d6 14621 verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
69fd337a 14622 if (prog->expected_attach_type == BPF_LSM_CGROUP &&
d1a6edec 14623 prog_type == BPF_PROG_TYPE_LSM &&
69fd337a
SF
14624 !prog->aux->attach_func_proto->type)
14625 verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
390ee7e2
AS
14626 return -EINVAL;
14627 }
5cf1e914 14628
14629 if (!tnum_is_unknown(enforce_attach_type_range) &&
14630 tnum_in(enforce_attach_type_range, reg->var_off))
14631 env->prog->enforce_expected_attach_type = 1;
390ee7e2
AS
14632 return 0;
14633}
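
/* Editor's sketch, not part of the source: the range enforcement above
 * reduces to a tnum containment test. The helper name below is made up;
 * it assumes a prog type whose allowed return range is [0, 1].
 */
static bool ret_val_ok(u64 v)
{
	struct tnum range = tnum_range(0, 1);

	/* false for v == 2 -> the "At program exit ..." rejection above */
	return tnum_in(range, tnum_const(v));
}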
14634
475fb78f
AS
14635/* non-recursive DFS pseudo code
14636 * 1 procedure DFS-iterative(G,v):
14637 * 2 label v as discovered
14638 * 3 let S be a stack
14639 * 4 S.push(v)
14640 * 5 while S is not empty
b6d20799 14641 * 6 t <- S.peek()
475fb78f
AS
14642 * 7 if t is what we're looking for:
14643 * 8 return t
14644 * 9 for all edges e in G.adjacentEdges(t) do
14645 * 10 if edge e is already labelled
14646 * 11 continue with the next edge
14647 * 12 w <- G.adjacentVertex(t,e)
14648 * 13 if vertex w is not discovered and not explored
14649 * 14 label e as tree-edge
14650 * 15 label w as discovered
14651 * 16 S.push(w)
14652 * 17 continue at 5
14653 * 18 else if vertex w is discovered
14654 * 19 label e as back-edge
14655 * 20 else
14656 * 21 // vertex w is explored
14657 * 22 label e as forward- or cross-edge
14658 * 23 label t as explored
14659 * 24 S.pop()
14660 *
14661 * convention:
14662 * 0x10 - discovered
14663 * 0x11 - discovered and fall-through edge labelled
14664 * 0x12 - discovered and fall-through and branch edges labelled
14665 * 0x20 - explored
14666 */
14667
14668enum {
14669 DISCOVERED = 0x10,
14670 EXPLORED = 0x20,
14671 FALLTHROUGH = 1,
14672 BRANCH = 2,
14673};
14674
dc2a4ebc
AS
14675static u32 state_htab_size(struct bpf_verifier_env *env)
14676{
14677 return env->prog->len;
14678}
14679
5d839021
AS
14680static struct bpf_verifier_state_list **explored_state(
14681 struct bpf_verifier_env *env,
14682 int idx)
14683{
dc2a4ebc
AS
14684 struct bpf_verifier_state *cur = env->cur_state;
14685 struct bpf_func_state *state = cur->frame[cur->curframe];
14686
14687 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
5d839021
AS
14688}
14689
bffdeaa8 14690static void mark_prune_point(struct bpf_verifier_env *env, int idx)
5d839021 14691{
a8f500af 14692 env->insn_aux_data[idx].prune_point = true;
5d839021 14693}
f1bca824 14694
bffdeaa8
AN
14695static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
14696{
14697 return env->insn_aux_data[insn_idx].prune_point;
14698}
14699
4b5ce570
AN
14700static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx)
14701{
14702 env->insn_aux_data[idx].force_checkpoint = true;
14703}
14704
14705static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
14706{
14707 return env->insn_aux_data[insn_idx].force_checkpoint;
14708}
14709
14710
59e2e27d
WAF
14711enum {
14712 DONE_EXPLORING = 0,
14713 KEEP_EXPLORING = 1,
14714};
14715
475fb78f
AS
14716/* t, w, e - match pseudo-code above:
14717 * t - index of current instruction
14718 * w - next instruction
14719 * e - edge
14720 */
2589726d
AS
14721static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
14722 bool loop_ok)
475fb78f 14723{
7df737e9
AS
14724 int *insn_stack = env->cfg.insn_stack;
14725 int *insn_state = env->cfg.insn_state;
14726
475fb78f 14727 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
59e2e27d 14728 return DONE_EXPLORING;
475fb78f
AS
14729
14730 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
59e2e27d 14731 return DONE_EXPLORING;
475fb78f
AS
14732
14733 if (w < 0 || w >= env->prog->len) {
d9762e84 14734 verbose_linfo(env, t, "%d: ", t);
61bd5218 14735 verbose(env, "jump out of range from insn %d to %d\n", t, w);
475fb78f
AS
14736 return -EINVAL;
14737 }
14738
bffdeaa8 14739 if (e == BRANCH) {
f1bca824 14740 /* mark branch target for state pruning */
bffdeaa8
AN
14741 mark_prune_point(env, w);
14742 mark_jmp_point(env, w);
14743 }
f1bca824 14744
475fb78f
AS
14745 if (insn_state[w] == 0) {
14746 /* tree-edge */
14747 insn_state[t] = DISCOVERED | e;
14748 insn_state[w] = DISCOVERED;
7df737e9 14749 if (env->cfg.cur_stack >= env->prog->len)
475fb78f 14750 return -E2BIG;
7df737e9 14751 insn_stack[env->cfg.cur_stack++] = w;
59e2e27d 14752 return KEEP_EXPLORING;
475fb78f 14753 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
2c78ee89 14754 if (loop_ok && env->bpf_capable)
59e2e27d 14755 return DONE_EXPLORING;
d9762e84
MKL
14756 verbose_linfo(env, t, "%d: ", t);
14757 verbose_linfo(env, w, "%d: ", w);
61bd5218 14758 verbose(env, "back-edge from insn %d to %d\n", t, w);
475fb78f
AS
14759 return -EINVAL;
14760 } else if (insn_state[w] == EXPLORED) {
14761 /* forward- or cross-edge */
14762 insn_state[t] = DISCOVERED | e;
14763 } else {
61bd5218 14764 verbose(env, "insn state internal bug\n");
475fb78f
AS
14765 return -EFAULT;
14766 }
59e2e27d
WAF
14767 return DONE_EXPLORING;
14768}
14769
dcb2288b 14770static int visit_func_call_insn(int t, struct bpf_insn *insns,
efdb22de
YS
14771 struct bpf_verifier_env *env,
14772 bool visit_callee)
14773{
14774 int ret;
14775
14776 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
14777 if (ret)
14778 return ret;
14779
618945fb
AN
14780 mark_prune_point(env, t + 1);
14781 /* when we exit from subprog, we need to record non-linear history */
14782 mark_jmp_point(env, t + 1);
14783
efdb22de 14784 if (visit_callee) {
bffdeaa8 14785 mark_prune_point(env, t);
86fc6ee6
AS
14786 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
14787 /* It's ok to allow recursion from CFG point of
14788 * view. __check_func_call() will do the actual
14789 * check.
14790 */
14791 bpf_pseudo_func(insns + t));
efdb22de
YS
14792 }
14793 return ret;
14794}
14795
59e2e27d
WAF
14796/* Visits the instruction at index t and returns one of the following:
14797 * < 0 - an error occurred
14798 * DONE_EXPLORING - the instruction was fully explored
14799 * KEEP_EXPLORING - there is still work to be done before it is fully explored
14800 */
dcb2288b 14801static int visit_insn(int t, struct bpf_verifier_env *env)
59e2e27d 14802{
653ae3a8 14803 struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
4cd58e9a 14804 int ret, off;
59e2e27d 14805
653ae3a8 14806 if (bpf_pseudo_func(insn))
dcb2288b 14807 return visit_func_call_insn(t, insns, env, true);
69c087ba 14808
59e2e27d 14809 /* All non-branch instructions have a single fall-through edge. */
653ae3a8
AN
14810 if (BPF_CLASS(insn->code) != BPF_JMP &&
14811 BPF_CLASS(insn->code) != BPF_JMP32)
59e2e27d
WAF
14812 return push_insn(t, t + 1, FALLTHROUGH, env, false);
14813
653ae3a8 14814 switch (BPF_OP(insn->code)) {
59e2e27d
WAF
14815 case BPF_EXIT:
14816 return DONE_EXPLORING;
14817
14818 case BPF_CALL:
c1ee85a9 14819 if (insn->src_reg == 0 && insn->imm == BPF_FUNC_timer_set_callback)
618945fb
AN
14820 /* Mark this call insn as a prune point to trigger
14821 			 * is_state_visited() check before the call itself is
14822 * processed by __check_func_call(). Otherwise new
14823 * async state will be pushed for further exploration.
bfc6bb74 14824 */
bffdeaa8 14825 mark_prune_point(env, t);
06accc87
AN
14826 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
14827 struct bpf_kfunc_call_arg_meta meta;
14828
14829 ret = fetch_kfunc_meta(env, insn, &meta, NULL);
4b5ce570 14830 if (ret == 0 && is_iter_next_kfunc(&meta)) {
06accc87 14831 mark_prune_point(env, t);
4b5ce570
AN
14832 /* Checking and saving state checkpoints at iter_next() call
14833 * is crucial for fast convergence of open-coded iterator loop
14834 * logic, so we need to force it. If we don't do that,
14835 * is_state_visited() might skip saving a checkpoint, causing
14836 * unnecessarily long sequence of not checkpointed
14837 * instructions and jumps, leading to exhaustion of jump
14838 * history buffer, and potentially other undesired outcomes.
14839 * It is expected that with correct open-coded iterators
14840 * convergence will happen quickly, so we don't run a risk of
14841 * exhausting memory.
14842 */
14843 mark_force_checkpoint(env, t);
14844 }
06accc87 14845 }
653ae3a8 14846 return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL);
59e2e27d
WAF
14847
14848 case BPF_JA:
653ae3a8 14849 if (BPF_SRC(insn->code) != BPF_K)
59e2e27d
WAF
14850 return -EINVAL;
14851
4cd58e9a
YS
14852 if (BPF_CLASS(insn->code) == BPF_JMP)
14853 off = insn->off;
14854 else
14855 off = insn->imm;
14856
59e2e27d 14857 /* unconditional jump with single edge */
4cd58e9a 14858 ret = push_insn(t, t + off + 1, FALLTHROUGH, env,
59e2e27d
WAF
14859 true);
14860 if (ret)
14861 return ret;
14862
4cd58e9a
YS
14863 mark_prune_point(env, t + off + 1);
14864 mark_jmp_point(env, t + off + 1);
59e2e27d
WAF
14865
14866 return ret;
14867
14868 default:
14869 /* conditional jump with two edges */
bffdeaa8 14870 mark_prune_point(env, t);
618945fb 14871
59e2e27d
WAF
14872 ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
14873 if (ret)
14874 return ret;
14875
653ae3a8 14876 return push_insn(t, t + insn->off + 1, BRANCH, env, true);
59e2e27d 14877 }
475fb78f
AS
14878}
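
/* Editor's sketch, not part of the source: the cpu v4 long-jump form
 * decoded in the BPF_JA case above. For BPF_JMP32 | BPF_JA the 32-bit jump
 * offset lives in insn->imm, since insn->off is only an s16; the offset
 * value here is arbitrary.
 */
	struct bpf_insn ja_long = {
		.code = BPF_JMP32 | BPF_JA,
		.imm  = 100000,	/* jump target = pc + imm + 1 */
	};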
14879
14880/* non-recursive depth-first-search to detect loops in BPF program
14881 * loop == back-edge in directed graph
14882 */
58e2af8b 14883static int check_cfg(struct bpf_verifier_env *env)
475fb78f 14884{
475fb78f 14885 int insn_cnt = env->prog->len;
7df737e9 14886 int *insn_stack, *insn_state;
475fb78f 14887 int ret = 0;
59e2e27d 14888 int i;
475fb78f 14889
7df737e9 14890 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f
AS
14891 if (!insn_state)
14892 return -ENOMEM;
14893
7df737e9 14894 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f 14895 if (!insn_stack) {
71dde681 14896 kvfree(insn_state);
475fb78f
AS
14897 return -ENOMEM;
14898 }
14899
14900 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
14901 insn_stack[0] = 0; /* 0 is the first instruction */
7df737e9 14902 env->cfg.cur_stack = 1;
475fb78f 14903
59e2e27d
WAF
14904 while (env->cfg.cur_stack > 0) {
14905 int t = insn_stack[env->cfg.cur_stack - 1];
475fb78f 14906
dcb2288b 14907 ret = visit_insn(t, env);
59e2e27d
WAF
14908 switch (ret) {
14909 case DONE_EXPLORING:
14910 insn_state[t] = EXPLORED;
14911 env->cfg.cur_stack--;
14912 break;
14913 case KEEP_EXPLORING:
14914 break;
14915 default:
14916 if (ret > 0) {
14917 verbose(env, "visit_insn internal bug\n");
14918 ret = -EFAULT;
475fb78f 14919 }
475fb78f 14920 goto err_free;
59e2e27d 14921 }
475fb78f
AS
14922 }
14923
59e2e27d 14924 if (env->cfg.cur_stack < 0) {
61bd5218 14925 verbose(env, "pop stack internal bug\n");
475fb78f
AS
14926 ret = -EFAULT;
14927 goto err_free;
14928 }
475fb78f 14929
475fb78f
AS
14930 for (i = 0; i < insn_cnt; i++) {
14931 if (insn_state[i] != EXPLORED) {
61bd5218 14932 verbose(env, "unreachable insn %d\n", i);
475fb78f
AS
14933 ret = -EINVAL;
14934 goto err_free;
14935 }
14936 }
14937 ret = 0; /* cfg looks good */
14938
14939err_free:
71dde681
AS
14940 kvfree(insn_state);
14941 kvfree(insn_stack);
7df737e9 14942 env->cfg.insn_state = env->cfg.insn_stack = NULL;
475fb78f
AS
14943 return ret;
14944}
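
/* Editor's sketch, not part of the source: the smallest back-edge this DFS
 * can detect. For !env->bpf_capable the program below fails with
 * "back-edge from insn 0 to 0"; with bpf_capable the edge is tolerated
 * here (see push_insn()) and the loop is bounds-checked later.
 */
	struct bpf_insn self_loop[] = {
		BPF_JMP_A(-1),		/* insn 0: goto 0 + (-1) + 1 == 0 */
		BPF_EXIT_INSN(),
	};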
14945
09b28d76
AS
14946static int check_abnormal_return(struct bpf_verifier_env *env)
14947{
14948 int i;
14949
14950 for (i = 1; i < env->subprog_cnt; i++) {
14951 if (env->subprog_info[i].has_ld_abs) {
14952 verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
14953 return -EINVAL;
14954 }
14955 if (env->subprog_info[i].has_tail_call) {
14956 verbose(env, "tail_call is not allowed in subprogs without BTF\n");
14957 return -EINVAL;
14958 }
14959 }
14960 return 0;
14961}
14962
838e9690
YS
14963/* The minimum supported BTF func info size */
14964#define MIN_BPF_FUNCINFO_SIZE 8
14965#define MAX_FUNCINFO_REC_SIZE 252
14966
c454a46b
MKL
14967static int check_btf_func(struct bpf_verifier_env *env,
14968 const union bpf_attr *attr,
af2ac3e1 14969 bpfptr_t uattr)
838e9690 14970{
09b28d76 14971 const struct btf_type *type, *func_proto, *ret_type;
d0b2818e 14972 u32 i, nfuncs, urec_size, min_size;
838e9690 14973 u32 krec_size = sizeof(struct bpf_func_info);
c454a46b 14974 struct bpf_func_info *krecord;
8c1b6e69 14975 struct bpf_func_info_aux *info_aux = NULL;
c454a46b
MKL
14976 struct bpf_prog *prog;
14977 const struct btf *btf;
af2ac3e1 14978 bpfptr_t urecord;
d0b2818e 14979 u32 prev_offset = 0;
09b28d76 14980 bool scalar_return;
e7ed83d6 14981 int ret = -ENOMEM;
838e9690
YS
14982
14983 nfuncs = attr->func_info_cnt;
09b28d76
AS
14984 if (!nfuncs) {
14985 if (check_abnormal_return(env))
14986 return -EINVAL;
838e9690 14987 return 0;
09b28d76 14988 }
838e9690
YS
14989
14990 if (nfuncs != env->subprog_cnt) {
14991 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
14992 return -EINVAL;
14993 }
14994
14995 urec_size = attr->func_info_rec_size;
14996 if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
14997 urec_size > MAX_FUNCINFO_REC_SIZE ||
14998 urec_size % sizeof(u32)) {
14999 verbose(env, "invalid func info rec size %u\n", urec_size);
15000 return -EINVAL;
15001 }
15002
c454a46b
MKL
15003 prog = env->prog;
15004 btf = prog->aux->btf;
838e9690 15005
af2ac3e1 15006 urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
838e9690
YS
15007 min_size = min_t(u32, krec_size, urec_size);
15008
ba64e7d8 15009 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
c454a46b
MKL
15010 if (!krecord)
15011 return -ENOMEM;
8c1b6e69
AS
15012 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
15013 if (!info_aux)
15014 goto err_free;
ba64e7d8 15015
838e9690
YS
15016 for (i = 0; i < nfuncs; i++) {
15017 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
15018 if (ret) {
15019 if (ret == -E2BIG) {
15020 				verbose(env, "nonzero trailing record in func info");
15021 				/* set the size the kernel expects so the loader can zero
15022 * out the rest of the record.
15023 */
af2ac3e1
AS
15024 if (copy_to_bpfptr_offset(uattr,
15025 offsetof(union bpf_attr, func_info_rec_size),
15026 &min_size, sizeof(min_size)))
838e9690
YS
15027 ret = -EFAULT;
15028 }
c454a46b 15029 goto err_free;
838e9690
YS
15030 }
15031
af2ac3e1 15032 if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
838e9690 15033 ret = -EFAULT;
c454a46b 15034 goto err_free;
838e9690
YS
15035 }
15036
d30d42e0 15037 /* check insn_off */
09b28d76 15038 ret = -EINVAL;
838e9690 15039 if (i == 0) {
d30d42e0 15040 if (krecord[i].insn_off) {
838e9690 15041 verbose(env,
d30d42e0
MKL
15042 "nonzero insn_off %u for the first func info record",
15043 krecord[i].insn_off);
c454a46b 15044 goto err_free;
838e9690 15045 }
d30d42e0 15046 } else if (krecord[i].insn_off <= prev_offset) {
838e9690
YS
15047 verbose(env,
15048 "same or smaller insn offset (%u) than previous func info record (%u)",
d30d42e0 15049 krecord[i].insn_off, prev_offset);
c454a46b 15050 goto err_free;
838e9690
YS
15051 }
15052
d30d42e0 15053 if (env->subprog_info[i].start != krecord[i].insn_off) {
838e9690 15054 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
c454a46b 15055 goto err_free;
838e9690
YS
15056 }
15057
15058 /* check type_id */
ba64e7d8 15059 type = btf_type_by_id(btf, krecord[i].type_id);
51c39bb1 15060 if (!type || !btf_type_is_func(type)) {
838e9690 15061 verbose(env, "invalid type id %d in func info",
ba64e7d8 15062 krecord[i].type_id);
c454a46b 15063 goto err_free;
838e9690 15064 }
51c39bb1 15065 info_aux[i].linkage = BTF_INFO_VLEN(type->info);
09b28d76
AS
15066
15067 func_proto = btf_type_by_id(btf, type->type);
15068 if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
15069 /* btf_func_check() already verified it during BTF load */
15070 goto err_free;
15071 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
15072 scalar_return =
6089fb32 15073 btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
09b28d76
AS
15074 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
15075 verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
15076 goto err_free;
15077 }
15078 if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
15079 verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
15080 goto err_free;
15081 }
15082
d30d42e0 15083 prev_offset = krecord[i].insn_off;
af2ac3e1 15084 bpfptr_add(&urecord, urec_size);
838e9690
YS
15085 }
15086
ba64e7d8
YS
15087 prog->aux->func_info = krecord;
15088 prog->aux->func_info_cnt = nfuncs;
8c1b6e69 15089 prog->aux->func_info_aux = info_aux;
838e9690
YS
15090 return 0;
15091
c454a46b 15092err_free:
ba64e7d8 15093 kvfree(krecord);
8c1b6e69 15094 kfree(info_aux);
838e9690
YS
15095 return ret;
15096}
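
/* Editor's sketch, not part of the source: records that would satisfy the
 * checks above; the type_id values are made-up BTF_KIND_FUNC ids.
 */
	struct bpf_func_info recs[] = {
		{ .insn_off = 0,  .type_id = 5 },	/* first record: insn_off must be 0 */
		{ .insn_off = 12, .type_id = 7 },	/* insn_off strictly increasing,
							 * matching subprog_info[1].start
							 */
	};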
15097
ba64e7d8
YS
15098static void adjust_btf_func(struct bpf_verifier_env *env)
15099{
8c1b6e69 15100 struct bpf_prog_aux *aux = env->prog->aux;
ba64e7d8
YS
15101 int i;
15102
8c1b6e69 15103 if (!aux->func_info)
ba64e7d8
YS
15104 return;
15105
15106 for (i = 0; i < env->subprog_cnt; i++)
8c1b6e69 15107 aux->func_info[i].insn_off = env->subprog_info[i].start;
ba64e7d8
YS
15108}
15109
1b773d00 15110#define MIN_BPF_LINEINFO_SIZE offsetofend(struct bpf_line_info, line_col)
c454a46b
MKL
15111#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
15112
15113static int check_btf_line(struct bpf_verifier_env *env,
15114 const union bpf_attr *attr,
af2ac3e1 15115 bpfptr_t uattr)
c454a46b
MKL
15116{
15117 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
15118 struct bpf_subprog_info *sub;
15119 struct bpf_line_info *linfo;
15120 struct bpf_prog *prog;
15121 const struct btf *btf;
af2ac3e1 15122 bpfptr_t ulinfo;
c454a46b
MKL
15123 int err;
15124
15125 nr_linfo = attr->line_info_cnt;
15126 if (!nr_linfo)
15127 return 0;
0e6491b5
BC
15128 if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
15129 return -EINVAL;
c454a46b
MKL
15130
15131 rec_size = attr->line_info_rec_size;
15132 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
15133 rec_size > MAX_LINEINFO_REC_SIZE ||
15134 rec_size & (sizeof(u32) - 1))
15135 return -EINVAL;
15136
15137 	/* Need to zero it in case userspace passes in a
15138 	 * smaller bpf_line_info object.
15139 */
15140 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
15141 GFP_KERNEL | __GFP_NOWARN);
15142 if (!linfo)
15143 return -ENOMEM;
15144
15145 prog = env->prog;
15146 btf = prog->aux->btf;
15147
15148 s = 0;
15149 sub = env->subprog_info;
af2ac3e1 15150 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
c454a46b
MKL
15151 expected_size = sizeof(struct bpf_line_info);
15152 ncopy = min_t(u32, expected_size, rec_size);
15153 for (i = 0; i < nr_linfo; i++) {
15154 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
15155 if (err) {
15156 if (err == -E2BIG) {
15157 				verbose(env, "nonzero trailing record in line_info");
af2ac3e1
AS
15158 if (copy_to_bpfptr_offset(uattr,
15159 offsetof(union bpf_attr, line_info_rec_size),
15160 &expected_size, sizeof(expected_size)))
c454a46b
MKL
15161 err = -EFAULT;
15162 }
15163 goto err_free;
15164 }
15165
af2ac3e1 15166 if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
c454a46b
MKL
15167 err = -EFAULT;
15168 goto err_free;
15169 }
15170
15171 /*
15172 * Check insn_off to ensure
15173 * 1) strictly increasing AND
15174 * 2) bounded by prog->len
15175 *
15176 * The linfo[0].insn_off == 0 check logically falls into
15177 * the later "missing bpf_line_info for func..." case
15178 		 * because the first linfo[0].insn_off must belong to
15179 		 * the first sub as well, and the first sub must have
15180 * subprog_info[0].start == 0.
15181 */
15182 if ((i && linfo[i].insn_off <= prev_offset) ||
15183 linfo[i].insn_off >= prog->len) {
15184 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
15185 i, linfo[i].insn_off, prev_offset,
15186 prog->len);
15187 err = -EINVAL;
15188 goto err_free;
15189 }
15190
fdbaa0be
MKL
15191 if (!prog->insnsi[linfo[i].insn_off].code) {
15192 verbose(env,
15193 "Invalid insn code at line_info[%u].insn_off\n",
15194 i);
15195 err = -EINVAL;
15196 goto err_free;
15197 }
15198
23127b33
MKL
15199 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
15200 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
c454a46b
MKL
15201 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
15202 err = -EINVAL;
15203 goto err_free;
15204 }
15205
15206 if (s != env->subprog_cnt) {
15207 if (linfo[i].insn_off == sub[s].start) {
15208 sub[s].linfo_idx = i;
15209 s++;
15210 } else if (sub[s].start < linfo[i].insn_off) {
15211 verbose(env, "missing bpf_line_info for func#%u\n", s);
15212 err = -EINVAL;
15213 goto err_free;
15214 }
15215 }
15216
15217 prev_offset = linfo[i].insn_off;
af2ac3e1 15218 bpfptr_add(&ulinfo, rec_size);
c454a46b
MKL
15219 }
15220
15221 if (s != env->subprog_cnt) {
15222 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
15223 env->subprog_cnt - s, s);
15224 err = -EINVAL;
15225 goto err_free;
15226 }
15227
15228 prog->aux->linfo = linfo;
15229 prog->aux->nr_linfo = nr_linfo;
15230
15231 return 0;
15232
15233err_free:
15234 kvfree(linfo);
15235 return err;
15236}
15237
fbd94c7a
AS
15238#define MIN_CORE_RELO_SIZE sizeof(struct bpf_core_relo)
15239#define MAX_CORE_RELO_SIZE MAX_FUNCINFO_REC_SIZE
15240
15241static int check_core_relo(struct bpf_verifier_env *env,
15242 const union bpf_attr *attr,
15243 bpfptr_t uattr)
15244{
15245 u32 i, nr_core_relo, ncopy, expected_size, rec_size;
15246 struct bpf_core_relo core_relo = {};
15247 struct bpf_prog *prog = env->prog;
15248 const struct btf *btf = prog->aux->btf;
15249 struct bpf_core_ctx ctx = {
15250 .log = &env->log,
15251 .btf = btf,
15252 };
15253 bpfptr_t u_core_relo;
15254 int err;
15255
15256 nr_core_relo = attr->core_relo_cnt;
15257 if (!nr_core_relo)
15258 return 0;
15259 if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
15260 return -EINVAL;
15261
15262 rec_size = attr->core_relo_rec_size;
15263 if (rec_size < MIN_CORE_RELO_SIZE ||
15264 rec_size > MAX_CORE_RELO_SIZE ||
15265 rec_size % sizeof(u32))
15266 return -EINVAL;
15267
15268 u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
15269 expected_size = sizeof(struct bpf_core_relo);
15270 ncopy = min_t(u32, expected_size, rec_size);
15271
15272 /* Unlike func_info and line_info, copy and apply each CO-RE
15273 * relocation record one at a time.
15274 */
15275 for (i = 0; i < nr_core_relo; i++) {
15276 /* future proofing when sizeof(bpf_core_relo) changes */
15277 err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
15278 if (err) {
15279 if (err == -E2BIG) {
15280 				verbose(env, "nonzero trailing record in core_relo");
15281 if (copy_to_bpfptr_offset(uattr,
15282 offsetof(union bpf_attr, core_relo_rec_size),
15283 &expected_size, sizeof(expected_size)))
15284 err = -EFAULT;
15285 }
15286 break;
15287 }
15288
15289 if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
15290 err = -EFAULT;
15291 break;
15292 }
15293
15294 if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
15295 verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
15296 i, core_relo.insn_off, prog->len);
15297 err = -EINVAL;
15298 break;
15299 }
15300
15301 err = bpf_core_apply(&ctx, &core_relo, i,
15302 &prog->insnsi[core_relo.insn_off / 8]);
15303 if (err)
15304 break;
15305 bpfptr_add(&u_core_relo, rec_size);
15306 }
15307 return err;
15308}
15309
c454a46b
MKL
15310static int check_btf_info(struct bpf_verifier_env *env,
15311 const union bpf_attr *attr,
af2ac3e1 15312 bpfptr_t uattr)
c454a46b
MKL
15313{
15314 struct btf *btf;
15315 int err;
15316
09b28d76
AS
15317 if (!attr->func_info_cnt && !attr->line_info_cnt) {
15318 if (check_abnormal_return(env))
15319 return -EINVAL;
c454a46b 15320 return 0;
09b28d76 15321 }
c454a46b
MKL
15322
15323 btf = btf_get_by_fd(attr->prog_btf_fd);
15324 if (IS_ERR(btf))
15325 return PTR_ERR(btf);
350a5c4d
AS
15326 if (btf_is_kernel(btf)) {
15327 btf_put(btf);
15328 return -EACCES;
15329 }
c454a46b
MKL
15330 env->prog->aux->btf = btf;
15331
15332 err = check_btf_func(env, attr, uattr);
15333 if (err)
15334 return err;
15335
15336 err = check_btf_line(env, attr, uattr);
15337 if (err)
15338 return err;
15339
fbd94c7a
AS
15340 err = check_core_relo(env, attr, uattr);
15341 if (err)
15342 return err;
15343
c454a46b 15344 return 0;
ba64e7d8
YS
15345}
15346
f1174f77
EC
15347/* check %cur's range satisfies %old's */
15348static bool range_within(struct bpf_reg_state *old,
15349 struct bpf_reg_state *cur)
15350{
b03c9f9f
EC
15351 return old->umin_value <= cur->umin_value &&
15352 old->umax_value >= cur->umax_value &&
15353 old->smin_value <= cur->smin_value &&
fd675184
DB
15354 old->smax_value >= cur->smax_value &&
15355 old->u32_min_value <= cur->u32_min_value &&
15356 old->u32_max_value >= cur->u32_max_value &&
15357 old->s32_min_value <= cur->s32_min_value &&
15358 old->s32_max_value >= cur->s32_max_value;
f1174f77
EC
15359}
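
/* Editor's sketch, not part of the source, with made-up bounds: cur's range
 * must be contained in old's on all four domains for old to subsume cur.
 */
static bool range_within_demo(void)
{
	struct bpf_reg_state old = {
		.umin_value = 0,  .umax_value = 10,
		.smin_value = 0,  .smax_value = 10,
		.u32_min_value = 0, .u32_max_value = 10,
		.s32_min_value = 0, .s32_max_value = 10,
	};
	struct bpf_reg_state cur = old;

	cur.umin_value = 2;
	cur.umax_value = 8;	/* narrower, everything else equal */
	/* true; widening cur.umax_value to 12 would make it false */
	return range_within(&old, &cur);
}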
15360
f1174f77
EC
15361/* If in the old state two registers had the same id, then they need to have
15362 * the same id in the new state as well. But that id could be different from
15363 * the old state, so we need to track the mapping from old to new ids.
15364 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
15365 * regs with old id 5 must also have new id 9 for the new state to be safe. But
15366  * regs with a different old id could still have new id 9; we don't care about
15367 * that.
15368 * So we look through our idmap to see if this old id has been seen before. If
15369 * so, we require the new id to match; otherwise, we add the id pair to the map.
969bf05e 15370 */
1ffc85d9 15371static bool check_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
969bf05e 15372{
1ffc85d9 15373 struct bpf_id_pair *map = idmap->map;
f1174f77 15374 unsigned int i;
969bf05e 15375
4633a006
AN
15376 /* either both IDs should be set or both should be zero */
15377 if (!!old_id != !!cur_id)
15378 return false;
15379
15380 if (old_id == 0) /* cur_id == 0 as well */
15381 return true;
15382
c9e73e3d 15383 for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
1ffc85d9 15384 if (!map[i].old) {
f1174f77 15385 /* Reached an empty slot; haven't seen this id before */
1ffc85d9
EZ
15386 map[i].old = old_id;
15387 map[i].cur = cur_id;
f1174f77
EC
15388 return true;
15389 }
1ffc85d9
EZ
15390 if (map[i].old == old_id)
15391 return map[i].cur == cur_id;
15392 if (map[i].cur == cur_id)
15393 return false;
f1174f77
EC
15394 }
15395 /* We ran out of idmap slots, which should be impossible */
15396 WARN_ON_ONCE(1);
15397 return false;
15398}
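
/* Editor's sketch, not part of the source: the mapping discipline
 * check_ids() enforces, assuming a zeroed idmap.
 */
static void check_ids_demo(void)
{
	struct bpf_idmap idmap = {};

	check_ids(5, 9, &idmap);	/* true: records the pair 5 -> 9 */
	check_ids(5, 9, &idmap);	/* true: consistent with the map */
	check_ids(5, 7, &idmap);	/* false: 5 already maps to 9 */
	check_ids(6, 9, &idmap);	/* false: 9 already claimed by 5 */
}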
15399
1ffc85d9
EZ
15400/* Similar to check_ids(), but allocate a unique temporary ID
15401 * for 'old_id' or 'cur_id' of zero.
15402 * This makes pairs like '0 vs unique ID', 'unique ID vs 0' valid.
15403 */
15404static bool check_scalar_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap)
15405{
15406 old_id = old_id ? old_id : ++idmap->tmp_id_gen;
15407 cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen;
15408
15409 return check_ids(old_id, cur_id, idmap);
15410}
15411
9242b5f5
AS
15412static void clean_func_state(struct bpf_verifier_env *env,
15413 struct bpf_func_state *st)
15414{
15415 enum bpf_reg_liveness live;
15416 int i, j;
15417
15418 for (i = 0; i < BPF_REG_FP; i++) {
15419 live = st->regs[i].live;
15420 /* liveness must not touch this register anymore */
15421 st->regs[i].live |= REG_LIVE_DONE;
15422 if (!(live & REG_LIVE_READ))
15423 /* since the register is unused, clear its state
15424 * to make further comparison simpler
15425 */
f54c7898 15426 __mark_reg_not_init(env, &st->regs[i]);
9242b5f5
AS
15427 }
15428
15429 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
15430 live = st->stack[i].spilled_ptr.live;
15431 /* liveness must not touch this stack slot anymore */
15432 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
15433 if (!(live & REG_LIVE_READ)) {
f54c7898 15434 __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
9242b5f5
AS
15435 for (j = 0; j < BPF_REG_SIZE; j++)
15436 st->stack[i].slot_type[j] = STACK_INVALID;
15437 }
15438 }
15439}
15440
15441static void clean_verifier_state(struct bpf_verifier_env *env,
15442 struct bpf_verifier_state *st)
15443{
15444 int i;
15445
15446 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
15447 /* all regs in this state in all frames were already marked */
15448 return;
15449
15450 for (i = 0; i <= st->curframe; i++)
15451 clean_func_state(env, st->frame[i]);
15452}
15453
15454/* the parentage chains form a tree.
15455  * the verifier states are added to state lists at a given insn
15456  * and pushed onto the state stack for future exploration.
15457  * when the verifier reaches a bpf_exit insn some of the verifier states
15458 * stored in the state lists have their final liveness state already,
15459 * but a lot of states will get revised from liveness point of view when
15460 * the verifier explores other branches.
15461 * Example:
15462 * 1: r0 = 1
15463 * 2: if r1 == 100 goto pc+1
15464 * 3: r0 = 2
15465 * 4: exit
15466 * when the verifier reaches exit insn the register r0 in the state list of
15467 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
15468 * of insn 2 and goes exploring further. At the insn 4 it will walk the
15469 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
15470 *
15471 * Since the verifier pushes the branch states as it sees them while exploring
15472 * the program the condition of walking the branch instruction for the second
15473 * time means that all states below this branch were already explored and
8fb33b60 15474 * their final liveness marks are already propagated.
9242b5f5
AS
15475 * Hence when the verifier completes the search of state list in is_state_visited()
15476 * we can call this clean_live_states() function to mark all liveness states
15477 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
15478 * will not be used.
15479 * This function also clears the registers and stack for states that !READ
15480 * to simplify state merging.
15481 *
15482  * An important note here is that walking the same branch instruction in the
15483  * callee doesn't mean that the states are DONE. The verifier has to compare
15484  * the callsites as well.
15485 */
15486static void clean_live_states(struct bpf_verifier_env *env, int insn,
15487 struct bpf_verifier_state *cur)
15488{
15489 struct bpf_verifier_state_list *sl;
15490 int i;
15491
5d839021 15492 sl = *explored_state(env, insn);
a8f500af 15493 while (sl) {
2589726d
AS
15494 if (sl->state.branches)
15495 goto next;
dc2a4ebc
AS
15496 if (sl->state.insn_idx != insn ||
15497 sl->state.curframe != cur->curframe)
9242b5f5
AS
15498 goto next;
15499 for (i = 0; i <= cur->curframe; i++)
15500 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
15501 goto next;
15502 clean_verifier_state(env, &sl->state);
15503next:
15504 sl = sl->next;
15505 }
15506}
15507
4a95c85c 15508static bool regs_exact(const struct bpf_reg_state *rold,
4633a006 15509 const struct bpf_reg_state *rcur,
1ffc85d9 15510 struct bpf_idmap *idmap)
4a95c85c 15511{
d2dcc67d 15512 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
4633a006
AN
15513 check_ids(rold->id, rcur->id, idmap) &&
15514 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
4a95c85c
AN
15515}
15516
f1174f77 15517/* Returns true if (rold safe implies rcur safe) */
e042aa53 15518static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
1ffc85d9 15519 struct bpf_reg_state *rcur, struct bpf_idmap *idmap)
f1174f77 15520{
dc503a8a
EC
15521 if (!(rold->live & REG_LIVE_READ))
15522 /* explored state didn't use this */
15523 return true;
f1174f77
EC
15524 if (rold->type == NOT_INIT)
15525 /* explored state can't have used this */
969bf05e 15526 return true;
f1174f77
EC
15527 if (rcur->type == NOT_INIT)
15528 return false;
7f4ce97c 15529
910f6999
AN
15530 /* Enforce that register types have to match exactly, including their
15531 * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general
15532 * rule.
15533 *
15534 * One can make a point that using a pointer register as unbounded
15535 * SCALAR would be technically acceptable, but this could lead to
15536 * pointer leaks because scalars are allowed to leak while pointers
15537 * are not. We could make this safe in special cases if root is
15538 * calling us, but it's probably not worth the hassle.
15539 *
15540 * Also, register types that are *not* MAYBE_NULL could technically be
15541 * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE
15542 * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point
15543 * to the same map).
7f4ce97c
AN
15544 * However, if the old MAYBE_NULL register then got NULL checked,
15545 * doing so could have affected others with the same id, and we can't
15546 * check for that because we lost the id when we converted to
15547 * a non-MAYBE_NULL variant.
15548 * So, as a general rule we don't allow mixing MAYBE_NULL and
910f6999 15549 * non-MAYBE_NULL registers as well.
7f4ce97c 15550 */
910f6999 15551 if (rold->type != rcur->type)
7f4ce97c
AN
15552 return false;
15553
c25b2ae1 15554 switch (base_type(rold->type)) {
f1174f77 15555 case SCALAR_VALUE:
1ffc85d9
EZ
15556 if (env->explore_alu_limits) {
15557 /* explore_alu_limits disables tnum_in() and range_within()
15558 * logic and requires everything to be strict
15559 */
15560 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
15561 check_scalar_ids(rold->id, rcur->id, idmap);
15562 }
910f6999
AN
15563 if (!rold->precise)
15564 return true;
1ffc85d9
EZ
15565 /* Why check_ids() for scalar registers?
15566 *
15567 * Consider the following BPF code:
15568 * 1: r6 = ... unbound scalar, ID=a ...
15569 * 2: r7 = ... unbound scalar, ID=b ...
15570 * 3: if (r6 > r7) goto +1
15571 * 4: r6 = r7
15572 * 5: if (r6 > X) goto ...
15573 * 6: ... memory operation using r7 ...
15574 *
15575 * First verification path is [1-6]:
15576 * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7;
15577 * - at (5) r6 would be marked <= X, find_equal_scalars() would also mark
15578 * r7 <= X, because r6 and r7 share same id.
15579 * Next verification path is [1-4, 6].
15580 *
15581 * Instruction (6) would be reached in two states:
15582 * I. r6{.id=b}, r7{.id=b} via path 1-6;
15583 * II. r6{.id=a}, r7{.id=b} via path 1-4, 6.
15584 *
15585 * Use check_ids() to distinguish these states.
15586 * ---
15587 * Also verify that new value satisfies old value range knowledge.
15588 */
910f6999 15589 return range_within(rold, rcur) &&
1ffc85d9
EZ
15590 tnum_in(rold->var_off, rcur->var_off) &&
15591 check_scalar_ids(rold->id, rcur->id, idmap);
69c087ba 15592 case PTR_TO_MAP_KEY:
f1174f77 15593 case PTR_TO_MAP_VALUE:
567da5d2
AN
15594 case PTR_TO_MEM:
15595 case PTR_TO_BUF:
15596 case PTR_TO_TP_BUFFER:
1b688a19
EC
15597 /* If the new min/max/var_off satisfy the old ones and
15598 * everything else matches, we are OK.
1b688a19 15599 */
a73bf9f2 15600 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
1b688a19 15601 range_within(rold, rcur) &&
4ea2bb15 15602 tnum_in(rold->var_off, rcur->var_off) &&
567da5d2
AN
15603 check_ids(rold->id, rcur->id, idmap) &&
15604 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
de8f3a83 15605 case PTR_TO_PACKET_META:
f1174f77 15606 case PTR_TO_PACKET:
f1174f77
EC
15607 /* We must have at least as much range as the old ptr
15608 * did, so that any accesses which were safe before are
15609 * still safe. This is true even if old range < old off,
15610 * since someone could have accessed through (ptr - k), or
15611 * even done ptr -= k in a register, to get a safe access.
15612 */
15613 if (rold->range > rcur->range)
15614 return false;
15615 /* If the offsets don't match, we can't trust our alignment;
15616 * nor can we be sure that we won't fall out of range.
15617 */
15618 if (rold->off != rcur->off)
15619 return false;
15620 /* id relations must be preserved */
4633a006 15621 if (!check_ids(rold->id, rcur->id, idmap))
f1174f77
EC
15622 return false;
15623 /* new val must satisfy old val knowledge */
15624 return range_within(rold, rcur) &&
15625 tnum_in(rold->var_off, rcur->var_off);
7c884339
EZ
15626 case PTR_TO_STACK:
15627 /* two stack pointers are equal only if they're pointing to
15628 * the same stack frame, since fp-8 in foo != fp-8 in bar
f1174f77 15629 */
4633a006 15630 return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno;
f1174f77 15631 default:
4633a006 15632 return regs_exact(rold, rcur, idmap);
f1174f77 15633 }
969bf05e
AS
15634}
15635
e042aa53 15636static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
1ffc85d9 15637 struct bpf_func_state *cur, struct bpf_idmap *idmap)
638f5b90
AS
15638{
15639 int i, spi;
15640
638f5b90
AS
15641 /* walk slots of the explored stack and ignore any additional
15642 * slots in the current stack, since explored(safe) state
15643 * didn't use them
15644 */
15645 for (i = 0; i < old->allocated_stack; i++) {
06accc87
AN
15646 struct bpf_reg_state *old_reg, *cur_reg;
15647
638f5b90
AS
15648 spi = i / BPF_REG_SIZE;
15649
b233920c
AS
15650 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
15651 i += BPF_REG_SIZE - 1;
cc2b14d5 15652 /* explored state didn't use this */
fd05e57b 15653 continue;
b233920c 15654 }
cc2b14d5 15655
638f5b90
AS
15656 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
15657 continue;
19e2dbb7 15658
6715df8d
EZ
15659 if (env->allow_uninit_stack &&
15660 old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
15661 continue;
15662
19e2dbb7
AS
15663 /* explored stack has more populated slots than current stack
15664 * and these slots were used
15665 */
15666 if (i >= cur->allocated_stack)
15667 return false;
15668
cc2b14d5
AS
15669 /* if old state was safe with misc data in the stack
15670 * it will be safe with zero-initialized stack.
15671 * The opposite is not true
15672 */
15673 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
15674 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
15675 continue;
638f5b90
AS
15676 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
15677 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
15678 /* Ex: old explored (safe) state has STACK_SPILL in
b8c1a309 15679 * this stack slot, but current has STACK_MISC ->
638f5b90
AS
15680 			 * these verifier states are not equivalent,
15681 * return false to continue verification of this path
15682 */
15683 return false;
27113c59 15684 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
638f5b90 15685 continue;
d6fefa11
KKD
15686 /* Both old and cur are having same slot_type */
15687 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) {
15688 case STACK_SPILL:
638f5b90
AS
15689 /* when explored and current stack slot are both storing
15690 * spilled registers, check that stored pointers types
15691 * are the same as well.
15692 * Ex: explored safe path could have stored
15693 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
15694 * but current path has stored:
15695 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
15696 * such verifier states are not equivalent.
15697 * return false to continue verification of this path
15698 */
d6fefa11
KKD
15699 if (!regsafe(env, &old->stack[spi].spilled_ptr,
15700 &cur->stack[spi].spilled_ptr, idmap))
15701 return false;
15702 break;
15703 case STACK_DYNPTR:
d6fefa11
KKD
15704 old_reg = &old->stack[spi].spilled_ptr;
15705 cur_reg = &cur->stack[spi].spilled_ptr;
15706 if (old_reg->dynptr.type != cur_reg->dynptr.type ||
15707 old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
15708 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
15709 return false;
15710 break;
06accc87
AN
15711 case STACK_ITER:
15712 old_reg = &old->stack[spi].spilled_ptr;
15713 cur_reg = &cur->stack[spi].spilled_ptr;
15714 /* iter.depth is not compared between states as it
15715 * doesn't matter for correctness and would otherwise
15716 * prevent convergence; we maintain it only to prevent
15717 * infinite loop check triggering, see
15718 * iter_active_depths_differ()
15719 */
15720 if (old_reg->iter.btf != cur_reg->iter.btf ||
15721 old_reg->iter.btf_id != cur_reg->iter.btf_id ||
15722 old_reg->iter.state != cur_reg->iter.state ||
15723 /* ignore {old_reg,cur_reg}->iter.depth, see above */
15724 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
15725 return false;
15726 break;
d6fefa11
KKD
15727 case STACK_MISC:
15728 case STACK_ZERO:
15729 case STACK_INVALID:
15730 continue;
15731 /* Ensure that new unhandled slot types return false by default */
15732 default:
638f5b90 15733 return false;
d6fefa11 15734 }
638f5b90
AS
15735 }
15736 return true;
15737}
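
/* An illustrative sketch of the MISC/ZERO asymmetry above (not part of the
 * algorithm): with env->allow_uninit_stack disabled, an old state whose read
 * slot holds STACK_MISC prunes a current state holding STACK_ZERO in the
 * same slot, but not vice versa:
 *
 *   old: fp-8 = STACK_MISC    cur: fp-8 = STACK_ZERO    -> may prune
 *   old: fp-8 = STACK_ZERO    cur: fp-8 = STACK_MISC    -> must not prune
 *
 * A path proven safe with arbitrary data in the slot is also safe when the
 * slot is known to be zero; the opposite direction does not hold.
 */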

static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
		    struct bpf_idmap *idmap)
{
	int i;

	if (old->acquired_refs != cur->acquired_refs)
		return false;

	for (i = 0; i < old->acquired_refs; i++) {
		if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap))
			return false;
	}

	return true;
}
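
/* Sketch of the id remapping done via idmap (illustrative values): reference
 * ids are opaque, so old->refs = {id=3} and cur->refs = {id=7} still compare
 * equal as long as the 3 <-> 7 pairing stays consistent for every id
 * comparison in this states_equal() invocation. check_ids() records the
 * first pairing in idmap and rejects any later contradiction, e.g. old id 3
 * subsequently showing up against cur id 8.
 */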

/* compare two verifier states
 *
 * all states stored in state_list are known to be valid, since
 * verifier reached 'bpf_exit' instruction through them
 *
 * this function is called when the verifier explores different branches of
 * execution popped from the state stack. If it sees an old state that has
 * more strict register state and more strict stack state, then this execution
 * branch doesn't need to be explored further, since the verifier already
 * concluded that the more strict state leads to a valid finish.
 *
 * Therefore two states are equivalent if register state is more conservative
 * and explored stack state is more conservative than the current one.
 * Example:
 *       explored                   current
 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
 *
 * In other words if current stack state (one being explored) has more
 * valid slots than old one that already passed validation, it means
 * the verifier can stop exploring and conclude that current state is valid too
 *
 * Similarly with registers. If explored state has register type as invalid
 * whereas register type in current state is meaningful, it means that
 * the current state will reach 'bpf_exit' instruction safely
 */
static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
			      struct bpf_func_state *cur)
{
	int i;

	for (i = 0; i < MAX_BPF_REG; i++)
		if (!regsafe(env, &old->regs[i], &cur->regs[i],
			     &env->idmap_scratch))
			return false;

	if (!stacksafe(env, old, cur, &env->idmap_scratch))
		return false;

	if (!refsafe(old, cur, &env->idmap_scratch))
		return false;

	return true;
}

static bool states_equal(struct bpf_verifier_env *env,
			 struct bpf_verifier_state *old,
			 struct bpf_verifier_state *cur)
{
	int i;

	if (old->curframe != cur->curframe)
		return false;

	env->idmap_scratch.tmp_id_gen = env->id_gen;
	memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map));

	/* Verification state from speculative execution simulation
	 * must never prune a non-speculative execution one.
	 */
	if (old->speculative && !cur->speculative)
		return false;

	if (old->active_lock.ptr != cur->active_lock.ptr)
		return false;

	/* Old and cur active_lock's have to be either both present
	 * or both absent.
	 */
	if (!!old->active_lock.id != !!cur->active_lock.id)
		return false;

	if (old->active_lock.id &&
	    !check_ids(old->active_lock.id, cur->active_lock.id, &env->idmap_scratch))
		return false;

	if (old->active_rcu_lock != cur->active_rcu_lock)
		return false;

	/* for states to be equal callsites have to be the same
	 * and all frame states need to be equivalent
	 */
	for (i = 0; i <= old->curframe; i++) {
		if (old->frame[i]->callsite != cur->frame[i]->callsite)
			return false;
		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
			return false;
	}
	return true;
}

/* Return 0 if no propagation happened. Return negative error code if error
 * happened. Otherwise, return the propagated bit.
 */
static int propagate_liveness_reg(struct bpf_verifier_env *env,
				  struct bpf_reg_state *reg,
				  struct bpf_reg_state *parent_reg)
{
	u8 parent_flag = parent_reg->live & REG_LIVE_READ;
	u8 flag = reg->live & REG_LIVE_READ;
	int err;

	/* When we get here, read flags of PARENT_REG or REG could be any of
	 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
	 * for propagation if PARENT_REG already has the strongest flag,
	 * REG_LIVE_READ64.
	 */
	if (parent_flag == REG_LIVE_READ64 ||
	    /* Or if there is no read flag from REG. */
	    !flag ||
	    /* Or if the read flag from REG is the same as PARENT_REG. */
	    parent_flag == flag)
		return 0;

	err = mark_reg_read(env, reg, parent_reg, flag);
	if (err)
		return err;

	return flag;
}
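
/* For illustration, propagation only ever strengthens the parent's flag
 * (hypothetical combinations):
 *
 *   parent NONE,   reg READ32  -> parent becomes READ32, returns READ32
 *   parent READ32, reg READ64  -> parent becomes READ64, returns READ64
 *   parent READ64, reg any     -> nothing to propagate, returns 0
 *
 * The REG_LIVE_READ64 return value is what lets propagate_liveness() mark
 * the defining insn as needing zero-extension via mark_insn_zext().
 */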

/* A write screens off any subsequent reads; but write marks come from the
 * straight-line code between a state and its parent. When we arrive at an
 * equivalent state (jump target or such) we didn't arrive by the straight-line
 * code, so read marks in the state must propagate to the parent regardless
 * of the state's write marks. That's what 'parent == state->parent' comparison
 * in mark_reg_read() is for.
 */
static int propagate_liveness(struct bpf_verifier_env *env,
			      const struct bpf_verifier_state *vstate,
			      struct bpf_verifier_state *vparent)
{
	struct bpf_reg_state *state_reg, *parent_reg;
	struct bpf_func_state *state, *parent;
	int i, frame, err = 0;

	if (vparent->curframe != vstate->curframe) {
		WARN(1, "propagate_live: parent frame %d current frame %d\n",
		     vparent->curframe, vstate->curframe);
		return -EFAULT;
	}
	/* Propagate read liveness of registers... */
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
	for (frame = 0; frame <= vstate->curframe; frame++) {
		parent = vparent->frame[frame];
		state = vstate->frame[frame];
		parent_reg = parent->regs;
		state_reg = state->regs;
		/* We don't need to worry about FP liveness, it's read-only */
		for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
			err = propagate_liveness_reg(env, &state_reg[i],
						     &parent_reg[i]);
			if (err < 0)
				return err;
			if (err == REG_LIVE_READ64)
				mark_insn_zext(env, &parent_reg[i]);
		}

		/* Propagate stack slots. */
		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
			    i < parent->allocated_stack / BPF_REG_SIZE; i++) {
			parent_reg = &parent->stack[i].spilled_ptr;
			state_reg = &state->stack[i].spilled_ptr;
			err = propagate_liveness_reg(env, state_reg,
						     parent_reg);
			if (err < 0)
				return err;
		}
	}
	return 0;
}
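
/* Sketch of why read marks must cross pruned states (illustrative program):
 *
 *   0: r6 = 0
 *   1: if r1 > 10 goto +1
 *   2: r6 = 1
 *   3: r0 = r6        ; <- prune point, r6 read by the continuation
 *   4: exit
 *
 * If the path through insn 2 is pruned at insn 3 against an already explored
 * state, the read of r6 happened only in the old state's continuation, so
 * its read mark has to be copied into the pruned path's parent here;
 * otherwise r6 could wrongly be considered dead on that path.
 */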

/* find precise scalars in the previous equivalent state and
 * propagate them into the current state
 */
static int propagate_precision(struct bpf_verifier_env *env,
			       const struct bpf_verifier_state *old)
{
	struct bpf_reg_state *state_reg;
	struct bpf_func_state *state;
	int i, err = 0, fr;
	bool first;

	for (fr = old->curframe; fr >= 0; fr--) {
		state = old->frame[fr];
		state_reg = state->regs;
		first = true;
		for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
			if (state_reg->type != SCALAR_VALUE ||
			    !state_reg->precise ||
			    !(state_reg->live & REG_LIVE_READ))
				continue;
			if (env->log.level & BPF_LOG_LEVEL2) {
				if (first)
					verbose(env, "frame %d: propagating r%d", fr, i);
				else
					verbose(env, ",r%d", i);
			}
			bt_set_frame_reg(&env->bt, fr, i);
			first = false;
		}

		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
			if (!is_spilled_reg(&state->stack[i]))
				continue;
			state_reg = &state->stack[i].spilled_ptr;
			if (state_reg->type != SCALAR_VALUE ||
			    !state_reg->precise ||
			    !(state_reg->live & REG_LIVE_READ))
				continue;
			if (env->log.level & BPF_LOG_LEVEL2) {
				if (first)
					verbose(env, "frame %d: propagating fp%d",
						fr, (-i - 1) * BPF_REG_SIZE);
				else
					verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE);
			}
			bt_set_frame_slot(&env->bt, fr, i);
			first = false;
		}
		if (!first)
			verbose(env, "\n");
	}

	err = mark_chain_precision_batch(env);
	if (err < 0)
		return err;

	return 0;
}
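
/* With log level 2 the marks collected above are reported before the batched
 * backtracking pass, e.g. (hypothetical output):
 *
 *   frame 0: propagating r1,r6,fp-8
 *   frame 1: propagating r2
 *
 * where fp-8 names the spilled scalar one register slot below that frame's
 * frame pointer.
 */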

static bool states_maybe_looping(struct bpf_verifier_state *old,
				 struct bpf_verifier_state *cur)
{
	struct bpf_func_state *fold, *fcur;
	int i, fr = cur->curframe;

	if (old->curframe != fr)
		return false;

	fold = old->frame[fr];
	fcur = cur->frame[fr];
	for (i = 0; i < MAX_BPF_REG; i++)
		if (memcmp(&fold->regs[i], &fcur->regs[i],
			   offsetof(struct bpf_reg_state, parent)))
			return false;
	return true;
}

static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
{
	return env->insn_aux_data[insn_idx].is_iter_next;
}

/* is_state_visited() handles iter_next() (see process_iter_next_call() for
 * terminology) calls specially: as opposed to bounded BPF loops, it *expects*
 * states to match, which otherwise would look like an infinite loop. So while
 * iter_next() calls are taken care of, we still need to be careful and
 * prevent erroneous and too eager declaration of "infinite loop", when
 * iterators are involved.
 *
 * Here's a situation in pseudo-BPF assembly form:
 *
 *   0: again:                        ; set up iter_next() call args
 *   1:   r1 = &it                    ; <CHECKPOINT HERE>
 *   2:   call bpf_iter_num_next      ; this is iter_next() call
 *   3:   if r0 == 0 goto done
 *   4:   ... something useful here ...
 *   5:   goto again                  ; another iteration
 *   6: done:
 *   7:   r1 = &it
 *   8:   call bpf_iter_num_destroy   ; clean up iter state
 *   9:   exit
 *
 * This is a typical loop. Let's assume that we have a prune point at 1:,
 * before we get to `call bpf_iter_num_next` (e.g., because of that `goto
 * again`, assuming other heuristics don't get in the way).
 *
 * When we first come to 1:, let's say we have some state X. We proceed
 * to 2:, fork states, enqueue ACTIVE, validate NULL case successfully, exit.
 * Now we come back to validate that forked ACTIVE state. We proceed through
 * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we
 * are converging. But the problem is that we don't know that yet, as this
 * convergence has to happen at iter_next() call site only. So if nothing is
 * done, at 1: verifier will use bounded loop logic and declare infinite
 * looping (and would be *technically* correct, if not for iterator's
 * "eventual sticky NULL" contract, see process_iter_next_call()). But we
 * don't want that. So what we do in process_iter_next_call(), when we go on
 * another ACTIVE iteration, is bump slot->iter.depth, to mark that it's
 * a different iteration. So when we suspect an infinite loop, we additionally
 * check if any of the *ACTIVE* iterator states' depths differ. If yes, we
 * pretend we are not looping and wait for the next iter_next() call.
 *
 * This only applies to ACTIVE state. In DRAINED state we don't expect to
 * loop, because that would actually mean an infinite loop, as DRAINED state
 * is "sticky", and so we'll keep returning into the same instruction with the
 * same state (in at least one of the possible code paths).
 *
 * This approach allows us to keep the infinite loop heuristic even in the
 * face of an active iterator. E.g., the C snippet below is, and will be
 * detected as, infinitely looping:
 *
 *   struct bpf_iter_num it;
 *   int *p, x;
 *
 *   bpf_iter_num_new(&it, 0, 10);
 *   while ((p = bpf_iter_num_next(&it))) {
 *       x = *p;
 *       while (x--) {} // <<-- infinite loop here
 *   }
 *
 */
static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
{
	struct bpf_reg_state *slot, *cur_slot;
	struct bpf_func_state *state;
	int i, fr;

	for (fr = old->curframe; fr >= 0; fr--) {
		state = old->frame[fr];
		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
			if (state->stack[i].slot_type[0] != STACK_ITER)
				continue;

			slot = &state->stack[i].spilled_ptr;
			if (slot->iter.state != BPF_ITER_STATE_ACTIVE)
				continue;

			cur_slot = &cur->frame[fr]->stack[i].spilled_ptr;
			if (cur_slot->iter.depth != slot->iter.depth)
				return true;
		}
	}
	return false;
}
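
/* Hypothetical walk-through of the depth check: when the loop body above is
 * simulated a second time with otherwise identical state, the checkpointed
 * state still has iter.depth == 1 while the current one has iter.depth == 2
 * (bumped by process_iter_next_call()). iter_active_depths_differ() then
 * returns true and the bounded-loop detection is held off until the next
 * iter_next() call site can prove convergence.
 */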

static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_verifier_state_list *new_sl;
	struct bpf_verifier_state_list *sl, **pprev;
	struct bpf_verifier_state *cur = env->cur_state, *new;
	int i, j, err, states_cnt = 0;
	bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx);
	bool add_new_state = force_new_state;

	/* bpf progs typically have a pruning point every 4 instructions
	 * http://vger.kernel.org/bpfconf2019.html#session-1
	 * Do not add new state for future pruning if the verifier hasn't seen
	 * at least 2 jumps and at least 8 instructions.
	 * This heuristic helps decrease 'total_states' and 'peak_states' metric.
	 * In tests that amounts to up to a 50% reduction in total verifier
	 * memory consumption and a 20% verifier time speedup.
	 */
	if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
	    env->insn_processed - env->prev_insn_processed >= 8)
		add_new_state = true;

	pprev = explored_state(env, insn_idx);
	sl = *pprev;

	clean_live_states(env, insn_idx, cur);

	while (sl) {
		states_cnt++;
		if (sl->state.insn_idx != insn_idx)
			goto next;

		if (sl->state.branches) {
			struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];

			if (frame->in_async_callback_fn &&
			    frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
				/* Different async_entry_cnt means that the verifier is
				 * processing another entry into async callback.
				 * Seeing the same state is not an indication of infinite
				 * loop or infinite recursion.
				 * But finding the same state doesn't mean that it's safe
				 * to stop processing the current state. The previous state
				 * hasn't yet reached bpf_exit, since state.branches > 0.
				 * Checking in_async_callback_fn alone is not enough either,
				 * since the verifier still needs to catch infinite loops
				 * inside async callbacks.
				 */
				goto skip_inf_loop_check;
			}
			/* BPF open-coded iterators loop detection is special.
			 * states_maybe_looping() logic is too simplistic in detecting
			 * states that *might* be equivalent, because it doesn't know
			 * about ID remapping, so don't even perform it.
			 * See process_iter_next_call() and iter_active_depths_differ()
			 * for an overview of the logic. When current and one of parent
			 * states are detected as equivalent, it's a good thing: we prove
			 * convergence and can stop simulating further iterations.
			 * It's safe to assume that the iterator loop will finish, taking
			 * into account iter_next() contract of eventually returning
			 * sticky NULL result.
			 */
			if (is_iter_next_insn(env, insn_idx)) {
				if (states_equal(env, &sl->state, cur)) {
					struct bpf_func_state *cur_frame;
					struct bpf_reg_state *iter_state, *iter_reg;
					int spi;

					cur_frame = cur->frame[cur->curframe];
					/* btf_check_iter_kfuncs() enforces that
					 * iter state pointer is always the first arg
					 */
					iter_reg = &cur_frame->regs[BPF_REG_1];
					/* current state is valid due to states_equal(),
					 * so we can assume valid iter and reg state,
					 * no need for extra (re-)validations
					 */
					spi = __get_spi(iter_reg->off + iter_reg->var_off.value);
					iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr;
					if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE)
						goto hit;
				}
				goto skip_inf_loop_check;
			}
			/* attempt to detect infinite loop to avoid unnecessary doomed work */
			if (states_maybe_looping(&sl->state, cur) &&
			    states_equal(env, &sl->state, cur) &&
			    !iter_active_depths_differ(&sl->state, cur)) {
				verbose_linfo(env, insn_idx, "; ");
				verbose(env, "infinite loop detected at insn %d\n", insn_idx);
				return -EINVAL;
			}
			/* if the verifier is processing a loop, avoid adding new state
			 * too often, since different loop iterations have distinct
			 * states and may not help future pruning.
			 * This threshold shouldn't be too low to make sure that
			 * a loop with large bound will be rejected quickly.
			 * The most abusive loop will be:
			 *   r1 += 1
			 *   if r1 < 1000000 goto pc-2
			 * 1M insn_processed limit / 100 == 10k peak states.
			 * This threshold shouldn't be too high either, since states
			 * at the end of the loop are likely to be useful in pruning.
			 */
skip_inf_loop_check:
			if (!force_new_state &&
			    env->jmps_processed - env->prev_jmps_processed < 20 &&
			    env->insn_processed - env->prev_insn_processed < 100)
				add_new_state = false;
			goto miss;
		}
		if (states_equal(env, &sl->state, cur)) {
hit:
			sl->hit_cnt++;
			/* reached equivalent register/stack state,
			 * prune the search.
			 * Registers read by the continuation are read by us.
			 * If we have any write marks in env->cur_state, they
			 * will prevent corresponding reads in the continuation
			 * from reaching our parent (an explored_state). Our
			 * own state will get the read marks recorded, but
			 * they'll be immediately forgotten as we're pruning
			 * this state and will pop a new one.
			 */
			err = propagate_liveness(env, &sl->state, cur);

			/* if previous state reached the exit with precision and
			 * current state is equivalent to it (except precision marks),
			 * the precision marks need to be propagated back into
			 * the current state.
			 */
			err = err ? : push_jmp_history(env, cur);
			err = err ? : propagate_precision(env, &sl->state);
			if (err)
				return err;
			return 1;
		}
miss:
		/* When a new state is not going to be added, do not increase the
		 * miss count. Otherwise several loop iterations will remove the
		 * state recorded earlier. The goal of these heuristics is to have
		 * states from some iterations of the loop (some in the beginning
		 * and some at the end) to help pruning.
		 */
		if (add_new_state)
			sl->miss_cnt++;
		/* heuristic to determine whether this state is beneficial
		 * to keep checking from state equivalence point of view.
		 * Higher numbers increase max_states_per_insn and verification time,
		 * but do not meaningfully decrease insn_processed.
		 */
		if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
			/* the state is unlikely to be useful. Remove it to
			 * speed up verification
			 */
			*pprev = sl->next;
			if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
				u32 br = sl->state.branches;

				WARN_ONCE(br,
					  "BUG live_done but branches_to_explore %d\n",
					  br);
				free_verifier_state(&sl->state, false);
				kfree(sl);
				env->peak_states--;
			} else {
				/* cannot free this state, since parentage chain may
				 * walk it later. Add it for free_list instead to
				 * be freed at the end of verification
				 */
				sl->next = env->free_list;
				env->free_list = sl;
			}
			sl = *pprev;
			continue;
		}
next:
		pprev = &sl->next;
		sl = *pprev;
	}

	if (env->max_states_per_insn < states_cnt)
		env->max_states_per_insn = states_cnt;

	if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
		return 0;

	if (!add_new_state)
		return 0;

	/* There were no equivalent states, remember the current one.
	 * Technically the current state is not proven to be safe yet,
	 * but it will either reach outer most bpf_exit (which means it's safe)
	 * or it will be rejected. When there are no loops the verifier won't be
	 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
	 * again on the way to bpf_exit.
	 * When looping the sl->state.branches will be > 0 and this state
	 * will not be considered for equivalence until branches == 0.
	 */
	new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
	if (!new_sl)
		return -ENOMEM;
	env->total_states++;
	env->peak_states++;
	env->prev_jmps_processed = env->jmps_processed;
	env->prev_insn_processed = env->insn_processed;

	/* forget precise markings we inherited, see __mark_chain_precision */
	if (env->bpf_capable)
		mark_all_scalars_imprecise(env, cur);

	/* add new state to the head of linked list */
	new = &new_sl->state;
	err = copy_verifier_state(new, cur);
	if (err) {
		free_verifier_state(new, false);
		kfree(new_sl);
		return err;
	}
	new->insn_idx = insn_idx;
	WARN_ONCE(new->branches != 1,
		  "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);

	cur->parent = new;
	cur->first_insn_idx = insn_idx;
	clear_jmp_history(cur);
	new_sl->next = *explored_state(env, insn_idx);
	*explored_state(env, insn_idx) = new_sl;
	/* connect new state to parentage chain. Current frame needs all
	 * registers connected. Only r6 - r9 of the callers are alive (pushed
	 * to the stack implicitly by JITs) so in callers' frames connect just
	 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
	 * the state of the call instruction (with WRITTEN set), and r0 comes
	 * from callee with its full parentage chain, anyway.
	 */
	/* clear write marks in current state: the writes we did are not writes
	 * our child did, so they don't screen off its reads from us.
	 * (There are no read marks in current state, because reads always mark
	 * their parent and current state never has children yet. Only
	 * explored_states can get read marks.)
	 */
	for (j = 0; j <= cur->curframe; j++) {
		for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
			cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
		for (i = 0; i < BPF_REG_FP; i++)
			cur->frame[j]->regs[i].live = REG_LIVE_NONE;
	}

	/* all stack frames are accessible from callee, clear them all */
	for (j = 0; j <= cur->curframe; j++) {
		struct bpf_func_state *frame = cur->frame[j];
		struct bpf_func_state *newframe = new->frame[j];

		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
			frame->stack[i].spilled_ptr.parent =
						&newframe->stack[i].spilled_ptr;
		}
	}
	return 0;
}
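
/* A rough feel for the eviction heuristic above, with illustrative numbers:
 * a state that was never hit is dropped once it misses 4 times
 * (4 > 0 * 3 + 3), while a state with 10 hits survives up to 33 misses
 * before being unlinked, so checkpoints that keep pruning paths stay cached
 * much longer than ones that never match.
 */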

/* Return true if it's OK to have the same insn return a different type. */
static bool reg_type_mismatch_ok(enum bpf_reg_type type)
{
	switch (base_type(type)) {
	case PTR_TO_CTX:
	case PTR_TO_SOCKET:
	case PTR_TO_SOCK_COMMON:
	case PTR_TO_TCP_SOCK:
	case PTR_TO_XDP_SOCK:
	case PTR_TO_BTF_ID:
		return false;
	default:
		return true;
	}
}

/* If an instruction was previously used with particular pointer types, then we
 * need to be careful to avoid cases such as the below, where it may be ok
 * for one branch to access the pointer, but not ok for the other branch:
 *
 *      R1 = sock_ptr
 *      goto X;
 *      ...
 *      R1 = some_other_valid_ptr;
 *      goto X;
 *      ...
 *      R2 = *(u32 *)(R1 + 0);
 */
static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
{
	return src != prev && (!reg_type_mismatch_ok(src) ||
			       !reg_type_mismatch_ok(prev));
}

static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
			     bool allow_trust_missmatch)
{
	enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type;

	if (*prev_type == NOT_INIT) {
		/* Saw a valid insn
		 * dst_reg = *(u32 *)(src_reg + off)
		 * save type to validate intersecting paths
		 */
		*prev_type = type;
	} else if (reg_type_mismatch(type, *prev_type)) {
		/* Abuser program is trying to use the same insn
		 * dst_reg = *(u32 *)(src_reg + off)
		 * with different pointer types:
		 * src_reg == ctx in one branch and
		 * src_reg == stack|map in some other branch.
		 * Reject it.
		 */
		if (allow_trust_missmatch &&
		    base_type(type) == PTR_TO_BTF_ID &&
		    base_type(*prev_type) == PTR_TO_BTF_ID) {
			/*
			 * Have to support a use case when one path through
			 * the program yields TRUSTED pointer while another
			 * is UNTRUSTED. Fallback to UNTRUSTED to generate
			 * BPF_PROBE_MEM/BPF_PROBE_MEMSX.
			 */
			*prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
		} else {
			verbose(env, "same insn cannot be used with different pointers\n");
			return -EINVAL;
		}
	}

	return 0;
}
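
/* Hypothetical example of the trust fallback above: two paths reach the same
 * load with PTR_TO_BTF_ID of different trust:
 *
 *      R1 = trusted task ptr          ; PTR_TO_BTF_ID | PTR_TRUSTED
 *      goto X;
 *      ...
 *      R1 = untrusted task ptr        ; PTR_TO_BTF_ID | PTR_UNTRUSTED
 *      goto X;
 *      ...
 *  X:  R2 = *(u64 *)(R1 + 0);
 *
 * Instead of rejecting the program, the saved type degrades to UNTRUSTED so
 * the load is emitted as an exception-handled BPF_PROBE_MEM access that is
 * correct on both paths.
 */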

static int do_check(struct bpf_verifier_env *env)
{
	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_insn *insns = env->prog->insnsi;
	struct bpf_reg_state *regs;
	int insn_cnt = env->prog->len;
	bool do_print_state = false;
	int prev_insn_idx = -1;

	for (;;) {
		struct bpf_insn *insn;
		u8 class;
		int err;

		env->prev_insn_idx = prev_insn_idx;
		if (env->insn_idx >= insn_cnt) {
			verbose(env, "invalid insn idx %d insn_cnt %d\n",
				env->insn_idx, insn_cnt);
			return -EFAULT;
		}

		insn = &insns[env->insn_idx];
		class = BPF_CLASS(insn->code);

		if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
			verbose(env,
				"BPF program is too large. Processed %d insn\n",
				env->insn_processed);
			return -E2BIG;
		}

		state->last_insn_idx = env->prev_insn_idx;

		if (is_prune_point(env, env->insn_idx)) {
			err = is_state_visited(env, env->insn_idx);
			if (err < 0)
				return err;
			if (err == 1) {
				/* found equivalent state, can prune the search */
				if (env->log.level & BPF_LOG_LEVEL) {
					if (do_print_state)
						verbose(env, "\nfrom %d to %d%s: safe\n",
							env->prev_insn_idx, env->insn_idx,
							env->cur_state->speculative ?
							" (speculative execution)" : "");
					else
						verbose(env, "%d: safe\n", env->insn_idx);
				}
				goto process_bpf_exit;
			}
		}

		if (is_jmp_point(env, env->insn_idx)) {
			err = push_jmp_history(env, state);
			if (err)
				return err;
		}

		if (signal_pending(current))
			return -EAGAIN;

		if (need_resched())
			cond_resched();

		if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
			verbose(env, "\nfrom %d to %d%s:",
				env->prev_insn_idx, env->insn_idx,
				env->cur_state->speculative ?
				" (speculative execution)" : "");
			print_verifier_state(env, state->frame[state->curframe], true);
			do_print_state = false;
		}

		if (env->log.level & BPF_LOG_LEVEL) {
			const struct bpf_insn_cbs cbs = {
				.cb_call	= disasm_kfunc_name,
				.cb_print	= verbose,
				.private_data	= env,
			};

			if (verifier_state_scratched(env))
				print_insn_state(env, state->frame[state->curframe]);

			verbose_linfo(env, env->insn_idx, "; ");
			env->prev_log_pos = env->log.end_pos;
			verbose(env, "%d: ", env->insn_idx);
			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
			env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos;
			env->prev_log_pos = env->log.end_pos;
		}

		if (bpf_prog_is_offloaded(env->prog->aux)) {
			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
							   env->prev_insn_idx);
			if (err)
				return err;
		}

		regs = cur_regs(env);
		sanitize_mark_insn_seen(env);
		prev_insn_idx = env->insn_idx;

		if (class == BPF_ALU || class == BPF_ALU64) {
			err = check_alu_op(env, insn);
			if (err)
				return err;

		} else if (class == BPF_LDX) {
			enum bpf_reg_type src_reg_type;

			/* check for reserved fields is already done */

			/* check src operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;

			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
			if (err)
				return err;

			src_reg_type = regs[insn->src_reg].type;

			/* check that memory (src_reg + off) is readable,
			 * the state of dst_reg will be updated by this func
			 */
			err = check_mem_access(env, env->insn_idx, insn->src_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_READ, insn->dst_reg, false,
					       BPF_MODE(insn->code) == BPF_MEMSX);
			if (err)
				return err;

			err = save_aux_ptr_type(env, src_reg_type, true);
			if (err)
				return err;
		} else if (class == BPF_STX) {
			enum bpf_reg_type dst_reg_type;

			if (BPF_MODE(insn->code) == BPF_ATOMIC) {
				err = check_atomic(env, env->insn_idx, insn);
				if (err)
					return err;
				env->insn_idx++;
				continue;
			}

			if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
				verbose(env, "BPF_STX uses reserved fields\n");
				return -EINVAL;
			}

			/* check src1 operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;
			/* check src2 operand */
			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			dst_reg_type = regs[insn->dst_reg].type;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_WRITE, insn->src_reg, false, false);
			if (err)
				return err;

			err = save_aux_ptr_type(env, dst_reg_type, false);
			if (err)
				return err;
		} else if (class == BPF_ST) {
			enum bpf_reg_type dst_reg_type;

			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->src_reg != BPF_REG_0) {
				verbose(env, "BPF_ST uses reserved fields\n");
				return -EINVAL;
			}
			/* check src operand */
			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			dst_reg_type = regs[insn->dst_reg].type;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, env->insn_idx, insn->dst_reg,
					       insn->off, BPF_SIZE(insn->code),
					       BPF_WRITE, -1, false, false);
			if (err)
				return err;

			err = save_aux_ptr_type(env, dst_reg_type, false);
			if (err)
				return err;
		} else if (class == BPF_JMP || class == BPF_JMP32) {
			u8 opcode = BPF_OP(insn->code);

			env->jmps_processed++;
			if (opcode == BPF_CALL) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
				     && insn->off != 0) ||
				    (insn->src_reg != BPF_REG_0 &&
				     insn->src_reg != BPF_PSEUDO_CALL &&
				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
				    insn->dst_reg != BPF_REG_0 ||
				    class == BPF_JMP32) {
					verbose(env, "BPF_CALL uses reserved fields\n");
					return -EINVAL;
				}

				if (env->cur_state->active_lock.ptr) {
					if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
					    (insn->src_reg == BPF_PSEUDO_CALL) ||
					    (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
					     (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
						verbose(env, "function calls are not allowed while holding a lock\n");
						return -EINVAL;
					}
				}
				if (insn->src_reg == BPF_PSEUDO_CALL)
					err = check_func_call(env, insn, &env->insn_idx);
				else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
					err = check_kfunc_call(env, insn, &env->insn_idx);
				else
					err = check_helper_call(env, insn, &env->insn_idx);
				if (err)
					return err;

				mark_reg_scratched(env, BPF_REG_0);
			} else if (opcode == BPF_JA) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0 ||
				    (class == BPF_JMP && insn->imm != 0) ||
				    (class == BPF_JMP32 && insn->off != 0)) {
					verbose(env, "BPF_JA uses reserved fields\n");
					return -EINVAL;
				}

				if (class == BPF_JMP)
					env->insn_idx += insn->off + 1;
				else
					env->insn_idx += insn->imm + 1;
				continue;

			} else if (opcode == BPF_EXIT) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0 ||
				    class == BPF_JMP32) {
					verbose(env, "BPF_EXIT uses reserved fields\n");
					return -EINVAL;
				}

				if (env->cur_state->active_lock.ptr &&
				    !in_rbtree_lock_required_cb(env)) {
					verbose(env, "bpf_spin_unlock is missing\n");
					return -EINVAL;
				}

				if (env->cur_state->active_rcu_lock) {
					verbose(env, "bpf_rcu_read_unlock is missing\n");
					return -EINVAL;
				}

				/* We must do check_reference_leak here before
				 * prepare_func_exit to handle the case when
				 * state->curframe > 0, it may be a callback
				 * function, for which reference_state must
				 * match caller reference state when it exits.
				 */
				err = check_reference_leak(env);
				if (err)
					return err;

				if (state->curframe) {
					/* exit from nested function */
					err = prepare_func_exit(env, &env->insn_idx);
					if (err)
						return err;
					do_print_state = true;
					continue;
				}

				err = check_return_code(env);
				if (err)
					return err;
process_bpf_exit:
				mark_verifier_state_scratched(env);
				update_branch_counts(env, env->cur_state);
				err = pop_stack(env, &prev_insn_idx,
						&env->insn_idx, pop_log);
				if (err < 0) {
					if (err != -ENOENT)
						return err;
					break;
				} else {
					do_print_state = true;
					continue;
				}
			} else {
				err = check_cond_jmp_op(env, insn, &env->insn_idx);
				if (err)
					return err;
			}
		} else if (class == BPF_LD) {
			u8 mode = BPF_MODE(insn->code);

			if (mode == BPF_ABS || mode == BPF_IND) {
				err = check_ld_abs(env, insn);
				if (err)
					return err;

			} else if (mode == BPF_IMM) {
				err = check_ld_imm(env, insn);
				if (err)
					return err;

				env->insn_idx++;
				sanitize_mark_insn_seen(env);
			} else {
				verbose(env, "invalid BPF_LD mode\n");
				return -EINVAL;
			}
		} else {
			verbose(env, "unknown insn class %d\n", class);
			return -EINVAL;
		}

		env->insn_idx++;
	}

	return 0;
}
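
/* Side note on the BPF_JA handling above: the classic BPF_JMP | BPF_JA form
 * keeps its target in the 16-bit insn->off, limiting jumps to roughly +-32k
 * instructions, while the cpu v4 BPF_JMP32 | BPF_JA form carries a 32-bit
 * target in insn->imm. A hypothetical encoding such as
 *
 *   .code = BPF_JMP32 | BPF_JA, .off = 0, .imm = 70000
 *
 * advances env->insn_idx by 70000 + 1, which the 16-bit variant could not
 * express.
 */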

static int find_btf_percpu_datasec(struct btf *btf)
{
	const struct btf_type *t;
	const char *tname;
	int i, n;

	/*
	 * Both vmlinux and module each have their own ".data..percpu"
	 * DATASECs in BTF. So for module's case, we need to skip vmlinux BTF
	 * types to look at only module's own BTF types.
	 */
	n = btf_nr_types(btf);
	if (btf_is_module(btf))
		i = btf_nr_types(btf_vmlinux);
	else
		i = 1;

	for (; i < n; i++) {
		t = btf_type_by_id(btf, i);
		if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
			continue;

		tname = btf_name_by_offset(btf, t->name_off);
		if (!strcmp(tname, ".data..percpu"))
			return i;
	}

	return -ENOENT;
}
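
/* The starting index above matters because module BTF appends its types
 * after the whole vmlinux range. With, say, btf_nr_types(btf_vmlinux) equal
 * to 80000 (an illustrative figure), a module's own ".data..percpu" DATASEC
 * can only have an id >= 80000, so beginning the scan there skips the shared
 * vmlinux ids entirely instead of rescanning them for every module.
 */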

/* replace pseudo btf_id with kernel symbol address */
static int check_pseudo_btf_id(struct bpf_verifier_env *env,
			       struct bpf_insn *insn,
			       struct bpf_insn_aux_data *aux)
{
	const struct btf_var_secinfo *vsi;
	const struct btf_type *datasec;
	struct btf_mod_pair *btf_mod;
	const struct btf_type *t;
	const char *sym_name;
	bool percpu = false;
	u32 type, id = insn->imm;
	struct btf *btf;
	s32 datasec_id;
	u64 addr;
	int i, btf_fd, err;

	btf_fd = insn[1].imm;
	if (btf_fd) {
		btf = btf_get_by_fd(btf_fd);
		if (IS_ERR(btf)) {
			verbose(env, "invalid module BTF object FD specified.\n");
			return -EINVAL;
		}
	} else {
		if (!btf_vmlinux) {
			verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
			return -EINVAL;
		}
		btf = btf_vmlinux;
		btf_get(btf);
	}

	t = btf_type_by_id(btf, id);
	if (!t) {
		verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
		err = -ENOENT;
		goto err_put;
	}

	if (!btf_type_is_var(t) && !btf_type_is_func(t)) {
		verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id);
		err = -EINVAL;
		goto err_put;
	}

	sym_name = btf_name_by_offset(btf, t->name_off);
	addr = kallsyms_lookup_name(sym_name);
	if (!addr) {
		verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
			sym_name);
		err = -ENOENT;
		goto err_put;
	}
	insn[0].imm = (u32)addr;
	insn[1].imm = addr >> 32;

	if (btf_type_is_func(t)) {
		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
		aux->btf_var.mem_size = 0;
		goto check_btf;
	}

	datasec_id = find_btf_percpu_datasec(btf);
	if (datasec_id > 0) {
		datasec = btf_type_by_id(btf, datasec_id);
		for_each_vsi(i, datasec, vsi) {
			if (vsi->type == id) {
				percpu = true;
				break;
			}
		}
	}

	type = t->type;
	t = btf_type_skip_modifiers(btf, type, NULL);
	if (percpu) {
		aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU;
		aux->btf_var.btf = btf;
		aux->btf_var.btf_id = type;
	} else if (!btf_type_is_struct(t)) {
		const struct btf_type *ret;
		const char *tname;
		u32 tsize;

		/* resolve the type size of ksym. */
		ret = btf_resolve_size(btf, t, &tsize);
		if (IS_ERR(ret)) {
			tname = btf_name_by_offset(btf, t->name_off);
			verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
				tname, PTR_ERR(ret));
			err = -EINVAL;
			goto err_put;
		}
		aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
		aux->btf_var.mem_size = tsize;
	} else {
		aux->btf_var.reg_type = PTR_TO_BTF_ID;
		aux->btf_var.btf = btf;
		aux->btf_var.btf_id = type;
	}
check_btf:
	/* check whether we recorded this BTF (and maybe module) already */
	for (i = 0; i < env->used_btf_cnt; i++) {
		if (env->used_btfs[i].btf == btf) {
			btf_put(btf);
			return 0;
		}
	}

	if (env->used_btf_cnt >= MAX_USED_BTFS) {
		err = -E2BIG;
		goto err_put;
	}

	btf_mod = &env->used_btfs[env->used_btf_cnt];
	btf_mod->btf = btf;
	btf_mod->module = NULL;

	/* if we reference variables from kernel module, bump its refcount */
	if (btf_is_module(btf)) {
		btf_mod->module = btf_try_get_module(btf);
		if (!btf_mod->module) {
			err = -ENXIO;
			goto err_put;
		}
	}

	env->used_btf_cnt++;

	return 0;
err_put:
	btf_put(btf);
	return err;
}

static bool is_tracing_prog_type(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_KPROBE:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
		return true;
	default:
		return false;
	}
}

static int check_map_prog_compatibility(struct bpf_verifier_env *env,
					struct bpf_map *map,
					struct bpf_prog *prog)
{
	enum bpf_prog_type prog_type = resolve_prog_type(prog);

	if (btf_record_has_field(map->record, BPF_LIST_HEAD) ||
	    btf_record_has_field(map->record, BPF_RB_ROOT)) {
		if (is_tracing_prog_type(prog_type)) {
			verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n");
			return -EINVAL;
		}
	}

	if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
		if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
			verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
			return -EINVAL;
		}

		if (is_tracing_prog_type(prog_type)) {
			verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
			return -EINVAL;
		}

		if (prog->aux->sleepable) {
			verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
			return -EINVAL;
		}
	}

	if (btf_record_has_field(map->record, BPF_TIMER)) {
		if (is_tracing_prog_type(prog_type)) {
			verbose(env, "tracing progs cannot use bpf_timer yet\n");
			return -EINVAL;
		}
	}

	if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
	    !bpf_offload_prog_map_match(prog, map)) {
		verbose(env, "offload device mismatch between prog and map\n");
		return -EINVAL;
	}

	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
		verbose(env, "bpf_struct_ops map cannot be used in prog\n");
		return -EINVAL;
	}

	if (prog->aux->sleepable)
		switch (map->map_type) {
		case BPF_MAP_TYPE_HASH:
		case BPF_MAP_TYPE_LRU_HASH:
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERCPU_HASH:
		case BPF_MAP_TYPE_PERCPU_ARRAY:
		case BPF_MAP_TYPE_LRU_PERCPU_HASH:
		case BPF_MAP_TYPE_ARRAY_OF_MAPS:
		case BPF_MAP_TYPE_HASH_OF_MAPS:
		case BPF_MAP_TYPE_RINGBUF:
		case BPF_MAP_TYPE_USER_RINGBUF:
		case BPF_MAP_TYPE_INODE_STORAGE:
		case BPF_MAP_TYPE_SK_STORAGE:
		case BPF_MAP_TYPE_TASK_STORAGE:
		case BPF_MAP_TYPE_CGRP_STORAGE:
			break;
		default:
			verbose(env,
				"Sleepable programs can only use array, hash, ringbuf and local storage maps\n");
			return -EINVAL;
		}

	return 0;
}
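
/* Illustration (hypothetical rejection): a sleepable fentry program that
 * references a BPF_MAP_TYPE_QUEUE map falls into the default case above and
 * fails to load with "Sleepable programs can only use array, hash, ringbuf
 * and local storage maps", roughly because only the listed map types have
 * been audited for the protection that sleepable programs run under.
 */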

static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
{
	return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
		map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
}

/* find and rewrite pseudo imm in ld_imm64 instructions:
 *
 * 1. if it accesses map FD, replace it with actual map pointer.
 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
 *
 * NOTE: btf_vmlinux is required for converting pseudo btf_id.
 */
static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i, j, err;

	err = bpf_prog_calc_tag(env->prog);
	if (err)
		return err;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (BPF_CLASS(insn->code) == BPF_LDX &&
		    ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) ||
		     insn->imm != 0)) {
			verbose(env, "BPF_LDX uses reserved fields\n");
			return -EINVAL;
		}

		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
			struct bpf_insn_aux_data *aux;
			struct bpf_map *map;
			struct fd f;
			u64 addr;
			u32 fd;

			if (i == insn_cnt - 1 || insn[1].code != 0 ||
			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
			    insn[1].off != 0) {
				verbose(env, "invalid bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			if (insn[0].src_reg == 0)
				/* valid generic load 64-bit imm */
				goto next_insn;

			if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
				aux = &env->insn_aux_data[i];
				err = check_pseudo_btf_id(env, insn, aux);
				if (err)
					return err;
				goto next_insn;
			}

			if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
				aux = &env->insn_aux_data[i];
				aux->ptr_type = PTR_TO_FUNC;
				goto next_insn;
			}

			/* In final convert_pseudo_ld_imm64() step, this is
			 * converted into regular 64-bit imm load insn.
			 */
			switch (insn[0].src_reg) {
			case BPF_PSEUDO_MAP_VALUE:
			case BPF_PSEUDO_MAP_IDX_VALUE:
				break;
			case BPF_PSEUDO_MAP_FD:
			case BPF_PSEUDO_MAP_IDX:
				if (insn[1].imm == 0)
					break;
				fallthrough;
			default:
				verbose(env, "unrecognized bpf_ld_imm64 insn\n");
				return -EINVAL;
			}

			switch (insn[0].src_reg) {
			case BPF_PSEUDO_MAP_IDX_VALUE:
			case BPF_PSEUDO_MAP_IDX:
				if (bpfptr_is_null(env->fd_array)) {
					verbose(env, "fd_idx without fd_array is invalid\n");
					return -EPROTO;
				}
				if (copy_from_bpfptr_offset(&fd, env->fd_array,
							    insn[0].imm * sizeof(fd),
							    sizeof(fd)))
					return -EFAULT;
				break;
			default:
				fd = insn[0].imm;
				break;
			}

			f = fdget(fd);
			map = __bpf_map_get(f);
			if (IS_ERR(map)) {
				verbose(env, "fd %d is not pointing to valid bpf_map\n",
					insn[0].imm);
				return PTR_ERR(map);
			}

			err = check_map_prog_compatibility(env, map, env->prog);
			if (err) {
				fdput(f);
				return err;
			}

			aux = &env->insn_aux_data[i];
			if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
			    insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
				addr = (unsigned long)map;
			} else {
				u32 off = insn[1].imm;

				if (off >= BPF_MAX_VAR_OFF) {
					verbose(env, "direct value offset of %u is not allowed\n", off);
					fdput(f);
					return -EINVAL;
				}

				if (!map->ops->map_direct_value_addr) {
					verbose(env, "no direct value access support for this map type\n");
					fdput(f);
					return -EINVAL;
				}

				err = map->ops->map_direct_value_addr(map, &addr, off);
				if (err) {
					verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
						map->value_size, off);
					fdput(f);
					return err;
				}

				aux->map_off = off;
				addr += off;
			}

			insn[0].imm = (u32)addr;
			insn[1].imm = addr >> 32;

			/* check whether we recorded this map already */
			for (j = 0; j < env->used_map_cnt; j++) {
				if (env->used_maps[j] == map) {
					aux->map_index = j;
					fdput(f);
					goto next_insn;
				}
			}

			if (env->used_map_cnt >= MAX_USED_MAPS) {
				fdput(f);
				return -E2BIG;
			}

			/* hold the map. If the program is rejected by verifier,
			 * the map will be released by release_maps() or it
			 * will be used by the valid program until it's unloaded
			 * and all maps are released in free_used_maps()
			 */
			bpf_map_inc(map);

			aux->map_index = env->used_map_cnt;
			env->used_maps[env->used_map_cnt++] = map;

			if (bpf_map_is_cgroup_storage(map) &&
			    bpf_cgroup_storage_assign(env->prog->aux, map)) {
				verbose(env, "only one cgroup storage of each type is allowed\n");
				fdput(f);
				return -EBUSY;
			}

			fdput(f);
next_insn:
			insn++;
			i++;
			continue;
		}

		/* Basic sanity check before we invest more work here. */
		if (!bpf_opcode_in_insntable(insn->code)) {
			verbose(env, "unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}
	}

	/* now all pseudo BPF_LD_IMM64 instructions load valid
	 * 'struct bpf_map *' into a register instead of user map_fd.
	 * These pointers will be used later by verifier to validate map access.
	 */
	return 0;
}
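
/* For reference, the two-insn ld_imm64 form rewritten above is what loaders
 * emit for map accesses. A sketch of the pseudo encoding (not a verbatim
 * macro expansion):
 *
 *   insn[0]: .code = BPF_LD | BPF_IMM | BPF_DW,
 *            .src_reg = BPF_PSEUDO_MAP_FD, .imm = map_fd
 *   insn[1]: .code = 0, .off = 0, .imm = 0
 *
 * After this pass insn[0].imm and insn[1].imm hold the low and high 32 bits
 * of the in-kernel 'struct bpf_map *' instead of the user-visible fd.
 */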

/* drop refcnt of maps used by the rejected program */
static void release_maps(struct bpf_verifier_env *env)
{
	__bpf_free_used_maps(env->prog->aux, env->used_maps,
			     env->used_map_cnt);
}

/* drop refcnt of BTFs used by the rejected program */
static void release_btfs(struct bpf_verifier_env *env)
{
	__bpf_free_used_btfs(env->prog->aux, env->used_btfs,
			     env->used_btf_cnt);
}

/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
			continue;
		if (insn->src_reg == BPF_PSEUDO_FUNC)
			continue;
		insn->src_reg = 0;
	}
}

/* single env->prog->insnsi[off] instruction was replaced with the range
 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
 * [0, off) and [off, end) to new locations, so the patched range stays zero
 */
static void adjust_insn_aux_data(struct bpf_verifier_env *env,
				 struct bpf_insn_aux_data *new_data,
				 struct bpf_prog *new_prog, u32 off, u32 cnt)
{
	struct bpf_insn_aux_data *old_data = env->insn_aux_data;
	struct bpf_insn *insn = new_prog->insnsi;
	u32 old_seen = old_data[off].seen;
	u32 prog_len;
	int i;

	/* aux info at OFF always needs adjustment, no matter fast path
	 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the
	 * original insn at old prog.
	 */
	old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);

	if (cnt == 1)
		return;
	prog_len = new_prog->len;

	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
	memcpy(new_data + off + cnt - 1, old_data + off,
	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
	for (i = off; i < off + cnt - 1; i++) {
		/* Expand insnsi[off]'s seen count to the patched range. */
		new_data[i].seen = old_seen;
		new_data[i].zext_dst = insn_has_def32(env, insn + i);
	}
	env->insn_aux_data = new_data;
	vfree(old_data);
}
17283
cc8b0b92
AS
17284static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
17285{
17286 int i;
17287
17288 if (len == 1)
17289 return;
4cb3d99c
JW
17290 /* NOTE: fake 'exit' subprog should be updated as well. */
17291 for (i = 0; i <= env->subprog_cnt; i++) {
afd59424 17292 if (env->subprog_info[i].start <= off)
cc8b0b92 17293 continue;
9c8105bd 17294 env->subprog_info[i].start += len - 1;
cc8b0b92
AS
17295 }
17296}
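
/* Illustrative only: with subprog starts {0, 10, 20} (plus the fake
 * 'exit' entry), patching the insn at off == 5 into len == 3 insns
 * shifts every start strictly after off by len - 1, giving {0, 12, 22}.
 */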

static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	int i, sz = prog->aux->size_poke_tab;
	struct bpf_jit_poke_descriptor *desc;

	for (i = 0; i < sz; i++) {
		desc = &tab[i];
		if (desc->insn_idx <= off)
			continue;
		desc->insn_idx += len - 1;
	}
}

static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
					    const struct bpf_insn *patch, u32 len)
{
	struct bpf_prog *new_prog;
	struct bpf_insn_aux_data *new_data = NULL;

	if (len > 1) {
		new_data = vzalloc(array_size(env->prog->len + len - 1,
					      sizeof(struct bpf_insn_aux_data)));
		if (!new_data)
			return NULL;
	}

	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
	if (IS_ERR(new_prog)) {
		if (PTR_ERR(new_prog) == -ERANGE)
			verbose(env,
				"insn %d cannot be patched due to 16-bit range\n",
				env->insn_aux_data[off].orig_idx);
		vfree(new_data);
		return NULL;
	}
	adjust_insn_aux_data(env, new_data, new_prog, off, len);
	adjust_subprog_starts(env, off, len);
	adjust_poke_descs(new_prog, off, len);
	return new_prog;
}
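
/* Typical caller pattern (a sketch of the idiom used throughout the
 * rewrite passes below): replace insn i with a multi-insn buffer and
 * keep walking, tracking how much the program has grown so far:
 *
 *	new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 *	if (!new_prog)
 *		return -ENOMEM;
 *	delta    += cnt - 1;
 *	env->prog = prog = new_prog;
 *	insn      = new_prog->insnsi + i + delta;
 *
 * 'delta' is needed because every expansion shifts the indices of the
 * original program by cnt - 1.
 */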

static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
					      u32 off, u32 cnt)
{
	int i, j;

	/* find first prog starting at or after off (first to remove) */
	for (i = 0; i < env->subprog_cnt; i++)
		if (env->subprog_info[i].start >= off)
			break;
	/* find first prog starting at or after off + cnt (first to stay) */
	for (j = i; j < env->subprog_cnt; j++)
		if (env->subprog_info[j].start >= off + cnt)
			break;
	/* if j doesn't start exactly at off + cnt, we are just removing
	 * the front of the previous prog
	 */
	if (env->subprog_info[j].start != off + cnt)
		j--;

	if (j > i) {
		struct bpf_prog_aux *aux = env->prog->aux;
		int move;

		/* move fake 'exit' subprog as well */
		move = env->subprog_cnt + 1 - j;

		memmove(env->subprog_info + i,
			env->subprog_info + j,
			sizeof(*env->subprog_info) * move);
		env->subprog_cnt -= j - i;

		/* remove func_info */
		if (aux->func_info) {
			move = aux->func_info_cnt - j;

			memmove(aux->func_info + i,
				aux->func_info + j,
				sizeof(*aux->func_info) * move);
			aux->func_info_cnt -= j - i;
			/* func_info->insn_off is set after all code rewrites,
			 * in adjust_btf_func() - no need to adjust
			 */
		}
	} else {
		/* convert i from "first prog to remove" to "first to adjust" */
		if (env->subprog_info[i].start == off)
			i++;
	}

	/* update fake 'exit' subprog as well */
	for (; i <= env->subprog_cnt; i++)
		env->subprog_info[i].start -= cnt;

	return 0;
}
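
/* Illustrative only: with starts {0, 5, 10} and the fake 'exit' entry
 * at 15, removing insns [5, 10) deletes the middle subprog entirely:
 * i == 1, j == 2, the table is compacted to {0, 10, 15} and then every
 * remaining start >= off is pulled in by cnt, yielding {0, 5, 10}.
 */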

static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
				      u32 cnt)
{
	struct bpf_prog *prog = env->prog;
	u32 i, l_off, l_cnt, nr_linfo;
	struct bpf_line_info *linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo)
		return 0;

	linfo = prog->aux->linfo;

	/* find first line info to remove, count lines to be removed */
	for (i = 0; i < nr_linfo; i++)
		if (linfo[i].insn_off >= off)
			break;

	l_off = i;
	l_cnt = 0;
	for (; i < nr_linfo; i++)
		if (linfo[i].insn_off < off + cnt)
			l_cnt++;
		else
			break;

	/* If the first live insn doesn't match the first live linfo, it needs
	 * to "inherit" the last removed linfo. prog is already modified, so
	 * prog->len == off means there are no live instructions after (the
	 * tail of the program was removed).
	 */
	if (prog->len != off && l_cnt &&
	    (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
		l_cnt--;
		linfo[--i].insn_off = off + cnt;
	}

	/* remove the line info which refers to the removed instructions */
	if (l_cnt) {
		memmove(linfo + l_off, linfo + i,
			sizeof(*linfo) * (nr_linfo - i));

		prog->aux->nr_linfo -= l_cnt;
		nr_linfo = prog->aux->nr_linfo;
	}

	/* pull all linfo[i].insn_off >= off + cnt in by cnt */
	for (i = l_off; i < nr_linfo; i++)
		linfo[i].insn_off -= cnt;

	/* fix up all subprogs (incl. 'exit') which start >= off */
	for (i = 0; i <= env->subprog_cnt; i++)
		if (env->subprog_info[i].linfo_idx > l_off) {
			/* program may have started in the removed region but
			 * may not be fully removed
			 */
			if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
				env->subprog_info[i].linfo_idx -= l_cnt;
			else
				env->subprog_info[i].linfo_idx = l_off;
		}

	return 0;
}

static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	unsigned int orig_prog_len = env->prog->len;
	int err;

	if (bpf_prog_is_offloaded(env->prog->aux))
		bpf_prog_offload_remove_insns(env, off, cnt);

	err = bpf_remove_insns(env->prog, off, cnt);
	if (err)
		return err;

	err = adjust_subprog_starts_after_remove(env, off, cnt);
	if (err)
		return err;

	err = bpf_adj_linfo_after_remove(env, off, cnt);
	if (err)
		return err;

	memmove(aux_data + off, aux_data + off + cnt,
		sizeof(*aux_data) * (orig_prog_len - off - cnt));

	return 0;
}

/* The verifier does more data flow analysis than llvm and will not
 * explore branches that are dead at run time. Malicious programs can
 * have dead code too. Therefore replace all dead at-run-time code
 * with 'ja -1'.
 *
 * Plain nops are not optimal: if they sat at the end of the program
 * and, through another bug, we managed to jump there, we would execute
 * beyond program memory. Returning an exception code also wouldn't
 * work, since the dead code can be located in subprogs.
 */
static void sanitize_dead_code(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
	struct bpf_insn *insn = env->prog->insnsi;
	const int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++) {
		if (aux_data[i].seen)
			continue;
		memcpy(insn + i, &trap, sizeof(trap));
		aux_data[i].zext_dst = false;
	}
}
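
/* Illustrative only: 'ja -1' branches to itself (next pc = pc + off + 1),
 * so if a bug ever did transfer control to a dead insn, execution would
 * spin in place instead of running off the end of the program:
 *
 *	4: r0 = 0
 *	5: exit
 *	6: ja -1	<-- unreachable insn, now a self-loop trap
 */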

static bool insn_is_cond_jump(u8 code)
{
	u8 op;

	op = BPF_OP(code);
	if (BPF_CLASS(code) == BPF_JMP32)
		return op != BPF_JA;

	if (BPF_CLASS(code) != BPF_JMP)
		return false;

	return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
}

static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
	struct bpf_insn *insn = env->prog->insnsi;
	const int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++) {
		if (!insn_is_cond_jump(insn->code))
			continue;

		if (!aux_data[i + 1].seen)
			ja.off = insn->off;
		else if (!aux_data[i + 1 + insn->off].seen)
			ja.off = 0;
		else
			continue;

		if (bpf_prog_is_offloaded(env->prog->aux))
			bpf_prog_offload_replace_insn(env, i, &ja);

		memcpy(insn, &ja, sizeof(ja));
	}
}
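
/* Illustrative only: if the verifier proved the fall-through successor
 * is never reached (always-taken branch), the conditional hard-wires
 * into an unconditional jump:
 *
 *	3: if r1 != 0 goto +4	->	3: ja +4
 *
 * Conversely, a never-taken branch becomes 'ja +0', a nop that
 * opt_remove_nops() below then deletes.
 */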

static int opt_remove_dead_code(struct bpf_verifier_env *env)
{
	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
	int insn_cnt = env->prog->len;
	int i, err;

	for (i = 0; i < insn_cnt; i++) {
		int j;

		j = 0;
		while (i + j < insn_cnt && !aux_data[i + j].seen)
			j++;
		if (!j)
			continue;

		err = verifier_remove_insns(env, i, j);
		if (err)
			return err;
		insn_cnt = env->prog->len;
	}

	return 0;
}

static int opt_remove_nops(struct bpf_verifier_env *env)
{
	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i, err;

	for (i = 0; i < insn_cnt; i++) {
		if (memcmp(&insn[i], &ja, sizeof(ja)))
			continue;

		err = verifier_remove_insns(env, i, 1);
		if (err)
			return err;
		insn_cnt--;
		i--;
	}

	return 0;
}

static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
					 const union bpf_attr *attr)
{
	struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
	struct bpf_insn_aux_data *aux = env->insn_aux_data;
	int i, patch_len, delta = 0, len = env->prog->len;
	struct bpf_insn *insns = env->prog->insnsi;
	struct bpf_prog *new_prog;
	bool rnd_hi32;

	rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
	zext_patch[1] = BPF_ZEXT_REG(0);
	rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
	rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
	rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
	for (i = 0; i < len; i++) {
		int adj_idx = i + delta;
		struct bpf_insn insn;
		int load_reg;

		insn = insns[adj_idx];
		load_reg = insn_def_regno(&insn);
		if (!aux[adj_idx].zext_dst) {
			u8 code, class;
			u32 imm_rnd;

			if (!rnd_hi32)
				continue;

			code = insn.code;
			class = BPF_CLASS(code);
			if (load_reg == -1)
				continue;

			/* NOTE: arg "reg" (the fourth one) is only used for
			 * BPF_STX + SRC_OP, so it is safe to pass NULL
			 * here.
			 */
			if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
				if (class == BPF_LD &&
				    BPF_MODE(code) == BPF_IMM)
					i++;
				continue;
			}

			/* ctx load could be transformed into wider load. */
			if (class == BPF_LDX &&
			    aux[adj_idx].ptr_type == PTR_TO_CTX)
				continue;

			imm_rnd = get_random_u32();
			rnd_hi32_patch[0] = insn;
			rnd_hi32_patch[1].imm = imm_rnd;
			rnd_hi32_patch[3].dst_reg = load_reg;
			patch = rnd_hi32_patch;
			patch_len = 4;
			goto apply_patch_buffer;
		}

		/* Add in a zero-extend instruction if a) the JIT has requested
		 * it or b) it's a CMPXCHG.
		 *
		 * The latter is because: BPF_CMPXCHG always loads a value into
		 * R0, therefore always zero-extends. However some archs'
		 * equivalent instruction only does this load when the
		 * comparison is successful. This detail of CMPXCHG is
		 * orthogonal to the general zero-extension behaviour of the
		 * CPU, so it's treated independently of bpf_jit_needs_zext.
		 */
		if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
			continue;

		/* Zero-extension is done by the caller. */
		if (bpf_pseudo_kfunc_call(&insn))
			continue;

		if (WARN_ON(load_reg == -1)) {
			verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
			return -EFAULT;
		}

		zext_patch[0] = insn;
		zext_patch[1].dst_reg = load_reg;
		zext_patch[1].src_reg = load_reg;
		patch = zext_patch;
		patch_len = 2;
apply_patch_buffer:
		new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
		if (!new_prog)
			return -ENOMEM;
		env->prog = new_prog;
		insns = new_prog->insnsi;
		aux = env->insn_aux_data;
		delta += patch_len - 1;
	}

	return 0;
}
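
/* Illustrative only: on a JIT that reports bpf_jit_needs_zext(), a
 * 32-bit def such as 'w2 = w3' is followed by an explicit
 * zero-extension of the same register:
 *
 *	w2 = w3
 *	w2 = w2		(BPF_ZEXT_REG: a mov32 flagged as zext)
 *
 * whereas BPF_F_TEST_RND_HI32 instead ORs random upper bits into
 * registers *not* marked zext_dst, to flush out code that wrongly
 * relies on the high 32 bits being zero.
 */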

/* convert load instructions that access fields of a context type into a
 * sequence of instructions that access fields of the underlying structure:
 *     struct __sk_buff    -> struct sk_buff
 *     struct bpf_sock_ops -> struct sock
 */
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
	const struct bpf_verifier_ops *ops = env->ops;
	int i, cnt, size, ctx_field_size, delta = 0;
	const int insn_cnt = env->prog->len;
	struct bpf_insn insn_buf[16], *insn;
	u32 target_size, size_default, off;
	struct bpf_prog *new_prog;
	enum bpf_access_type type;
	bool is_narrower_load;

	if (ops->gen_prologue || env->seen_direct_write) {
		if (!ops->gen_prologue) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		}
		cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
					env->prog);
		if (cnt >= ARRAY_SIZE(insn_buf)) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		} else if (cnt) {
			new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			env->prog = new_prog;
			delta += cnt - 1;
		}
	}

	if (bpf_prog_is_offloaded(env->prog->aux))
		return 0;

	insn = env->prog->insnsi + delta;

	for (i = 0; i < insn_cnt; i++, insn++) {
		bpf_convert_ctx_access_t convert_ctx_access;
		u8 mode;

		if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
		    insn->code == (BPF_LDX | BPF_MEM | BPF_DW) ||
		    insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) ||
		    insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) ||
		    insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) {
			type = BPF_READ;
		} else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
			   insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
			   insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
			   insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
			   insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
			   insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
			   insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
			   insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
			type = BPF_WRITE;
		} else {
			continue;
		}

		if (type == BPF_WRITE &&
		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
			struct bpf_insn patch[] = {
				*insn,
				BPF_ST_NOSPEC(),
			};

			cnt = ARRAY_SIZE(patch);
			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		switch ((int)env->insn_aux_data[i + delta].ptr_type) {
		case PTR_TO_CTX:
			if (!ops->convert_ctx_access)
				continue;
			convert_ctx_access = ops->convert_ctx_access;
			break;
		case PTR_TO_SOCKET:
		case PTR_TO_SOCK_COMMON:
			convert_ctx_access = bpf_sock_convert_ctx_access;
			break;
		case PTR_TO_TCP_SOCK:
			convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
			break;
		case PTR_TO_XDP_SOCK:
			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
			break;
		case PTR_TO_BTF_ID:
		case PTR_TO_BTF_ID | PTR_UNTRUSTED:
		/* PTR_TO_BTF_ID | MEM_ALLOC always has a valid lifetime, unlike
		 * PTR_TO_BTF_ID, and an active ref_obj_id, but the same cannot
		 * be said once it is marked PTR_UNTRUSTED, hence we must handle
		 * any faults for loads into such types. BPF_WRITE is disallowed
		 * for this case.
		 */
		case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
			if (type == BPF_READ) {
				if (BPF_MODE(insn->code) == BPF_MEM)
					insn->code = BPF_LDX | BPF_PROBE_MEM |
						     BPF_SIZE((insn)->code);
				else
					insn->code = BPF_LDX | BPF_PROBE_MEMSX |
						     BPF_SIZE((insn)->code);
				env->prog->aux->num_exentries++;
			}
			continue;
		default:
			continue;
		}

		ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
		size = BPF_LDST_BYTES(insn);
		mode = BPF_MODE(insn->code);

		/* If the read access is a narrower load of the field,
		 * convert to a 4/8-byte load, to minimize program type
		 * specific convert_ctx_access changes. If conversion is
		 * successful, we will apply proper mask to the result.
		 */
		is_narrower_load = size < ctx_field_size;
		size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
		off = insn->off;
		if (is_narrower_load) {
			u8 size_code;

			if (type == BPF_WRITE) {
				verbose(env, "bpf verifier narrow ctx access misconfigured\n");
				return -EINVAL;
			}

			size_code = BPF_H;
			if (ctx_field_size == 4)
				size_code = BPF_W;
			else if (ctx_field_size == 8)
				size_code = BPF_DW;

			insn->off = off & ~(size_default - 1);
			insn->code = BPF_LDX | BPF_MEM | size_code;
		}

		target_size = 0;
		cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
					 &target_size);
		if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
		    (ctx_field_size && !target_size)) {
			verbose(env, "bpf verifier is misconfigured\n");
			return -EINVAL;
		}

		if (is_narrower_load && size < target_size) {
			u8 shift = bpf_ctx_narrow_access_offset(
				off, size, size_default) * 8;
			if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
				verbose(env, "bpf verifier narrow ctx load misconfigured\n");
				return -EINVAL;
			}
			if (ctx_field_size <= 4) {
				if (shift)
					insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
									insn->dst_reg,
									shift);
				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
								(1 << size * 8) - 1);
			} else {
				if (shift)
					insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
									insn->dst_reg,
									shift);
				insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
								(1ULL << size * 8) - 1);
			}
		}
		if (mode == BPF_MEMSX)
			insn_buf[cnt++] = BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_X,
						       insn->dst_reg, insn->dst_reg,
						       size * 8, 0);

		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
		if (!new_prog)
			return -ENOMEM;

		delta += cnt - 1;

		/* keep walking new program and skip insns we just inserted */
		env->prog = new_prog;
		insn = new_prog->insnsi + i + delta;
	}

	return 0;
}
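
/* Illustrative only: a 1-byte read of a 4-byte ctx field, e.g.
 *
 *	r0 = *(u8 *)(r1 + offsetof(struct __sk_buff, len))
 *
 * is first widened to the field's full size, converted by the program
 * type's convert_ctx_access() into a real 'struct sk_buff' access, and
 * then shifted/masked back down so only the requested byte remains in
 * the destination register.
 */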

static int jit_subprogs(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog, **func, *tmp;
	int i, j, subprog_start, subprog_end = 0, len, subprog;
	struct bpf_map *map_ptr;
	struct bpf_insn *insn;
	void *old_bpf_func;
	int err, num_exentries;

	if (env->subprog_cnt <= 1)
		return 0;

	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
			continue;

		/* Upon error here we cannot fall back to interpreter but
		 * need a hard reject of the program. Thus -EFAULT is
		 * propagated in any case.
		 */
		subprog = find_subprog(env, i + insn->imm + 1);
		if (subprog < 0) {
			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
				  i + insn->imm + 1);
			return -EFAULT;
		}
		/* temporarily remember subprog id inside insn instead of
		 * aux_data, since next loop will split up all insns into funcs
		 */
		insn->off = subprog;
		/* remember original imm in case JIT fails and fallback
		 * to interpreter will be needed
		 */
		env->insn_aux_data[i].call_imm = insn->imm;
		/* point imm to __bpf_call_base+1 from JITs point of view */
		insn->imm = 1;
		if (bpf_pseudo_func(insn))
			/* jit (e.g. x86_64) may emit fewer instructions
			 * if it learns a u32 imm is the same as a u64 imm.
			 * Force a non-zero value here.
			 */
			insn[1].imm = 1;
	}

	err = bpf_prog_alloc_jited_linfo(prog);
	if (err)
		goto out_undo_insn;

	err = -ENOMEM;
	func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
	if (!func)
		goto out_undo_insn;

	for (i = 0; i < env->subprog_cnt; i++) {
		subprog_start = subprog_end;
		subprog_end = env->subprog_info[i + 1].start;

		len = subprog_end - subprog_start;
		/* bpf_prog_run() doesn't call subprogs directly,
		 * hence main prog stats include the runtime of subprogs.
		 * subprogs don't have IDs and are not reachable via
		 * prog_get_next_id. func[i]->stats will never be accessed
		 * and stays NULL
		 */
		func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
		if (!func[i])
			goto out_free;
		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
		       len * sizeof(struct bpf_insn));
		func[i]->type = prog->type;
		func[i]->len = len;
		if (bpf_prog_calc_tag(func[i]))
			goto out_free;
		func[i]->is_func = 1;
		func[i]->aux->func_idx = i;
		/* Below members will be freed only at prog->aux */
		func[i]->aux->btf = prog->aux->btf;
		func[i]->aux->func_info = prog->aux->func_info;
		func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
		func[i]->aux->poke_tab = prog->aux->poke_tab;
		func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;

		for (j = 0; j < prog->aux->size_poke_tab; j++) {
			struct bpf_jit_poke_descriptor *poke;

			poke = &prog->aux->poke_tab[j];
			if (poke->insn_idx < subprog_end &&
			    poke->insn_idx >= subprog_start)
				poke->aux = func[i]->aux;
		}

		func[i]->aux->name[0] = 'F';
		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
		func[i]->jit_requested = 1;
		func[i]->blinding_requested = prog->blinding_requested;
		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
		func[i]->aux->linfo = prog->aux->linfo;
		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
		num_exentries = 0;
		insn = func[i]->insnsi;
		for (j = 0; j < func[i]->len; j++, insn++) {
			if (BPF_CLASS(insn->code) == BPF_LDX &&
			    (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
			     BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
				num_exentries++;
		}
		func[i]->aux->num_exentries = num_exentries;
		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
		func[i] = bpf_int_jit_compile(func[i]);
		if (!func[i]->jited) {
			err = -ENOTSUPP;
			goto out_free;
		}
		cond_resched();
	}

	/* at this point all bpf functions were successfully JITed
	 * now populate all bpf_calls with correct addresses and
	 * run last pass of JIT
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		insn = func[i]->insnsi;
		for (j = 0; j < func[i]->len; j++, insn++) {
			if (bpf_pseudo_func(insn)) {
				subprog = insn->off;
				insn[0].imm = (u32)(long)func[subprog]->bpf_func;
				insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
				continue;
			}
			if (!bpf_pseudo_call(insn))
				continue;
			subprog = insn->off;
			insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
		}

		/* we use the aux data to keep a list of the start addresses
		 * of the JITed images for each function in the program
		 *
		 * for some architectures, such as powerpc64, the imm field
		 * might not be large enough to hold the offset of the start
		 * address of the callee's JITed image from __bpf_call_base
		 *
		 * in such cases, we can lookup the start address of a callee
		 * by using its subprog id, available from the off field of
		 * the call instruction, as an index for this list
		 */
		func[i]->aux->func = func;
		func[i]->aux->func_cnt = env->subprog_cnt;
	}
	for (i = 0; i < env->subprog_cnt; i++) {
		old_bpf_func = func[i]->bpf_func;
		tmp = bpf_int_jit_compile(func[i]);
		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
			err = -ENOTSUPP;
			goto out_free;
		}
		cond_resched();
	}

	/* finally lock prog and jit images for all functions and
	 * populate kallsyms. Begin at the first subprogram, since
	 * bpf_prog_load will add the kallsyms for the main program.
	 */
	for (i = 1; i < env->subprog_cnt; i++) {
		bpf_prog_lock_ro(func[i]);
		bpf_prog_kallsyms_add(func[i]);
	}

	/* Last step: make now unused interpreter insns from main
	 * prog consistent for later dump requests, so they can
	 * later look the same as if they were interpreted only.
	 */
	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (bpf_pseudo_func(insn)) {
			insn[0].imm = env->insn_aux_data[i].call_imm;
			insn[1].imm = insn->off;
			insn->off = 0;
			continue;
		}
		if (!bpf_pseudo_call(insn))
			continue;
		insn->off = env->insn_aux_data[i].call_imm;
		subprog = find_subprog(env, i + insn->off + 1);
		insn->imm = subprog;
	}

	prog->jited = 1;
	prog->bpf_func = func[0]->bpf_func;
	prog->jited_len = func[0]->jited_len;
	prog->aux->extable = func[0]->aux->extable;
	prog->aux->num_exentries = func[0]->aux->num_exentries;
	prog->aux->func = func;
	prog->aux->func_cnt = env->subprog_cnt;
	bpf_prog_jit_attempt_done(prog);
	return 0;
out_free:
	/* We failed JIT'ing, so at this point we need to unregister poke
	 * descriptors from subprogs, so that the kernel does not attempt
	 * to patch them anymore as we're freeing the subprog JIT memory.
	 */
	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		map_ptr = prog->aux->poke_tab[i].tail_call.map;
		map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
	}
	/* At this point we're guaranteed that poke descriptors are not
	 * live anymore. We can just unlink each subprog's descriptor table
	 * as it's released with the main prog.
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		if (!func[i])
			continue;
		func[i]->aux->poke_tab = NULL;
		bpf_jit_free(func[i]);
	}
	kfree(func);
out_undo_insn:
	/* cleanup main prog to be interpreted */
	prog->jit_requested = 0;
	prog->blinding_requested = 0;
	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
		if (!bpf_pseudo_call(insn))
			continue;
		insn->off = 0;
		insn->imm = env->insn_aux_data[i].call_imm;
	}
	bpf_prog_jit_attempt_done(prog);
	return err;
}

static int fixup_call_args(struct bpf_verifier_env *env)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	struct bpf_prog *prog = env->prog;
	struct bpf_insn *insn = prog->insnsi;
	bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
	int i, depth;
#endif
	int err = 0;

	if (env->prog->jit_requested &&
	    !bpf_prog_is_offloaded(env->prog->aux)) {
		err = jit_subprogs(env);
		if (err == 0)
			return 0;
		if (err == -EFAULT)
			return err;
	}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	if (has_kfunc_call) {
		verbose(env, "calling kernel functions is not allowed in non-JITed programs\n");
		return -EINVAL;
	}
	if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
		/* When JIT fails the progs with bpf2bpf calls and tail_calls
		 * have to be rejected, since interpreter doesn't support them yet.
		 */
		verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
		return -EINVAL;
	}
	for (i = 0; i < prog->len; i++, insn++) {
		if (bpf_pseudo_func(insn)) {
			/* When JIT fails the progs with callback calls
			 * have to be rejected, since interpreter doesn't support them yet.
			 */
			verbose(env, "callbacks are not allowed in non-JITed programs\n");
			return -EINVAL;
		}

		if (!bpf_pseudo_call(insn))
			continue;
		depth = get_callee_stack_depth(env, insn, i);
		if (depth < 0)
			return depth;
		bpf_patch_call_args(insn, depth);
	}
	err = 0;
#endif
	return err;
}

/* replace a generic kfunc with a specialized version if necessary */
static void specialize_kfunc(struct bpf_verifier_env *env,
			     u32 func_id, u16 offset, unsigned long *addr)
{
	struct bpf_prog *prog = env->prog;
	bool seen_direct_write;
	void *xdp_kfunc;
	bool is_rdonly;

	if (bpf_dev_bound_kfunc_id(func_id)) {
		xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id);
		if (xdp_kfunc) {
			*addr = (unsigned long)xdp_kfunc;
			return;
		}
		/* fallback to default kfunc when not supported by netdev */
	}

	if (offset)
		return;

	if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
		seen_direct_write = env->seen_direct_write;
		is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE);

		if (is_rdonly)
			*addr = (unsigned long)bpf_dynptr_from_skb_rdonly;

		/* restore env->seen_direct_write to its original value, since
		 * may_access_direct_pkt_data mutates it
		 */
		env->seen_direct_write = seen_direct_write;
	}
}

static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux,
					    u16 struct_meta_reg,
					    u16 node_offset_reg,
					    struct bpf_insn *insn,
					    struct bpf_insn *insn_buf,
					    int *cnt)
{
	struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta;
	struct bpf_insn addr[2] = { BPF_LD_IMM64(struct_meta_reg, (long)kptr_struct_meta) };

	insn_buf[0] = addr[0];
	insn_buf[1] = addr[1];
	insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off);
	insn_buf[3] = *insn;
	*cnt = 4;
}

static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			    struct bpf_insn *insn_buf, int insn_idx, int *cnt)
{
	const struct bpf_kfunc_desc *desc;

	if (!insn->imm) {
		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
		return -EINVAL;
	}

	*cnt = 0;

	/* insn->imm has the btf func_id. Replace it with an offset relative to
	 * __bpf_call_base, unless the JIT needs to call functions that are
	 * further than 32 bits away (bpf_jit_supports_far_kfunc_call()).
	 */
	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
	if (!desc) {
		verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
			insn->imm);
		return -EFAULT;
	}

	if (!bpf_jit_supports_far_kfunc_call())
		insn->imm = BPF_CALL_IMM(desc->addr);
	if (insn->off)
		return 0;
	if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
		u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size;

		insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size);
		insn_buf[1] = addr[0];
		insn_buf[2] = addr[1];
		insn_buf[3] = *insn;
		*cnt = 4;
	} else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] ||
		   desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) {
		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
		struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };

		insn_buf[0] = addr[0];
		insn_buf[1] = addr[1];
		insn_buf[2] = *insn;
		*cnt = 3;
	} else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
		   desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
		   desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
		int struct_meta_reg = BPF_REG_3;
		int node_offset_reg = BPF_REG_4;

		/* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
		if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
			struct_meta_reg = BPF_REG_4;
			node_offset_reg = BPF_REG_5;
		}

		__fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg,
						node_offset_reg, insn, insn_buf, cnt);
	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
		*cnt = 1;
	}
	return 0;
}
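
/* Illustrative only: a program's 'call bpf_obj_new_impl' reaches the
 * verifier with its hidden arguments still unset. The fixup above
 * materializes them right before the call:
 *
 *	r1 = <obj_new_size>		// size of the allocated type
 *	r2 = <btf_struct_meta *>	// ld_imm64 of kptr_struct_meta
 *	call bpf_obj_new_impl
 */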

/* Do various post-verification rewrites in a single program pass.
 * These rewrites simplify JIT and interpreter implementations.
 */
static int do_misc_fixups(struct bpf_verifier_env *env)
{
	struct bpf_prog *prog = env->prog;
	enum bpf_attach_type eatype = prog->expected_attach_type;
	enum bpf_prog_type prog_type = resolve_prog_type(prog);
	struct bpf_insn *insn = prog->insnsi;
	const struct bpf_func_proto *fn;
	const int insn_cnt = prog->len;
	const struct bpf_map_ops *ops;
	struct bpf_insn_aux_data *aux;
	struct bpf_insn insn_buf[16];
	struct bpf_prog *new_prog;
	struct bpf_map *map_ptr;
	int i, ret, cnt, delta = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		/* Make divide-by-zero exceptions impossible. */
		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
		    insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
			bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
			bool isdiv = BPF_OP(insn->code) == BPF_DIV;
			struct bpf_insn *patchlet;
			struct bpf_insn chk_and_div[] = {
				/* [R,W]x div 0 -> 0 */
				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
					     BPF_JNE | BPF_K, insn->src_reg,
					     0, 2, 0),
				BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
				*insn,
			};
			struct bpf_insn chk_and_mod[] = {
				/* [R,W]x mod 0 -> [R,W]x */
				BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
					     BPF_JEQ | BPF_K, insn->src_reg,
					     0, 1 + (is64 ? 0 : 1), 0),
				*insn,
				BPF_JMP_IMM(BPF_JA, 0, 0, 1),
				BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
			};

			patchlet = isdiv ? chk_and_div : chk_and_mod;
			cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
				      ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);

			new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}
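
		/* Illustrative only: 'r2 /= r3' expands via chk_and_div to
		 *
		 *	if r3 != 0 goto +2
		 *	w2 ^= w2	// dividend forced to 0
		 *	ja +1		// skip the real division
		 *	r2 /= r3
		 *
		 * so a zero divisor yields 0 instead of a CPU exception.
		 */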

		/* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
		if (BPF_CLASS(insn->code) == BPF_LD &&
		    (BPF_MODE(insn->code) == BPF_ABS ||
		     BPF_MODE(insn->code) == BPF_IND)) {
			cnt = env->ops->gen_ld_abs(insn, insn_buf);
			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
				verbose(env, "bpf verifier is misconfigured\n");
				return -EINVAL;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		/* Rewrite pointer arithmetic to mitigate speculation attacks. */
		if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
		    insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
			const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
			struct bpf_insn *patch = &insn_buf[0];
			bool issrc, isneg, isimm;
			u32 off_reg;

			aux = &env->insn_aux_data[i + delta];
			if (!aux->alu_state ||
			    aux->alu_state == BPF_ALU_NON_POINTER)
				continue;

			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
				BPF_ALU_SANITIZE_SRC;
			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;

			off_reg = issrc ? insn->src_reg : insn->dst_reg;
			if (isimm) {
				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
			} else {
				if (isneg)
					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
				*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
				*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
			}
			if (!issrc)
				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
			insn->src_reg = BPF_REG_AX;
			if (isneg)
				insn->code = insn->code == code_add ?
					     code_sub : code_add;
			*patch++ = *insn;
			if (issrc && isneg && !isimm)
				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
			cnt = patch - insn_buf;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		if (insn->code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn->src_reg == BPF_PSEUDO_CALL)
			continue;
		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
			ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt);
			if (ret)
				return ret;
			if (cnt == 0)
				continue;

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

		if (insn->imm == BPF_FUNC_get_route_realm)
			prog->dst_needed = 1;
		if (insn->imm == BPF_FUNC_get_prandom_u32)
			bpf_user_rnd_init_once();
		if (insn->imm == BPF_FUNC_override_return)
			prog->kprobe_override = 1;
		if (insn->imm == BPF_FUNC_tail_call) {
			/* If we tail call into other programs, we
			 * cannot make any assumptions since they can
			 * be replaced dynamically during runtime in
			 * the program array.
			 */
			prog->cb_access = 1;
			if (!allow_tail_call_in_subprogs(env))
				prog->aux->stack_depth = MAX_BPF_STACK;
			prog->aux->max_pkt_offset = MAX_PACKET_OFF;

			/* mark bpf_tail_call as a different opcode to avoid
			 * conditional branch in the interpreter for every normal
			 * call and to prevent accidental JITing by a JIT compiler
			 * that doesn't support bpf_tail_call yet
			 */
			insn->imm = 0;
			insn->code = BPF_JMP | BPF_TAIL_CALL;

			aux = &env->insn_aux_data[i + delta];
			if (env->bpf_capable && !prog->blinding_requested &&
			    prog->jit_requested &&
			    !bpf_map_key_poisoned(aux) &&
			    !bpf_map_ptr_poisoned(aux) &&
			    !bpf_map_ptr_unpriv(aux)) {
				struct bpf_jit_poke_descriptor desc = {
					.reason = BPF_POKE_REASON_TAIL_CALL,
					.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
					.tail_call.key = bpf_map_key_immediate(aux),
					.insn_idx = i + delta,
				};

				ret = bpf_jit_add_poke_descriptor(prog, &desc);
				if (ret < 0) {
					verbose(env, "adding tail call poke descriptor failed\n");
					return ret;
				}

				insn->imm = ret + 1;
				continue;
			}

			if (!bpf_map_ptr_unpriv(aux))
				continue;

			/* instead of changing every JIT dealing with tail_call
			 * emit two extra insns:
			 * if (index >= max_entries) goto out;
			 * index &= array->index_mask;
			 * to avoid out-of-bounds cpu speculation
			 */
			if (bpf_map_ptr_poisoned(aux)) {
				verbose(env, "tail_call abusing map_ptr\n");
				return -EINVAL;
			}

			map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
						  map_ptr->max_entries, 2);
			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
						    container_of(map_ptr,
								 struct bpf_array,
								 map)->index_mask);
			insn_buf[2] = *insn;
			cnt = 3;
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}
e245c5c6 18539
b00628b1
AS
18540 if (insn->imm == BPF_FUNC_timer_set_callback) {
18541 /* The verifier will process callback_fn as many times as necessary
18542 * with different maps and the register states prepared by
18543 * set_timer_callback_state will be accurate.
18544 *
18545 * The following use case is valid:
18546 * map1 is shared by prog1, prog2, prog3.
18547 * prog1 calls bpf_timer_init for some map1 elements
18548 * prog2 calls bpf_timer_set_callback for some map1 elements.
18549 * Those that were not bpf_timer_init-ed will return -EINVAL.
18550 * prog3 calls bpf_timer_start for some map1 elements.
18551 * Those that were not both bpf_timer_init-ed and
18552 * bpf_timer_set_callback-ed will return -EINVAL.
18553 */
18554 struct bpf_insn ld_addrs[2] = {
18555 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
18556 };
18557
18558 insn_buf[0] = ld_addrs[0];
18559 insn_buf[1] = ld_addrs[1];
18560 insn_buf[2] = *insn;
18561 cnt = 3;
18562
18563 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
18564 if (!new_prog)
18565 return -ENOMEM;
18566
18567 delta += cnt - 1;
18568 env->prog = prog = new_prog;
18569 insn = new_prog->insnsi + i + delta;
18570 goto patch_call_imm;
18571 }
18572
9bb00b28
YS
18573 if (is_storage_get_function(insn->imm)) {
18574 if (!env->prog->aux->sleepable ||
18575 env->insn_aux_data[i + delta].storage_get_func_atomic)
d56c9fe6 18576 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
9bb00b28
YS
18577 else
18578 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
b00fa38a
JK
18579 insn_buf[1] = *insn;
18580 cnt = 2;
18581
18582 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
18583 if (!new_prog)
18584 return -ENOMEM;
18585
18586 delta += cnt - 1;
18587 env->prog = prog = new_prog;
18588 insn = new_prog->insnsi + i + delta;
18589 goto patch_call_imm;
18590 }
18591
89c63074 18592 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
09772d92
DB
18593 * and other inlining handlers are currently limited to 64 bit
18594 * only.
89c63074 18595 */
60b58afc 18596 if (prog->jit_requested && BITS_PER_LONG == 64 &&
09772d92
DB
18597 (insn->imm == BPF_FUNC_map_lookup_elem ||
18598 insn->imm == BPF_FUNC_map_update_elem ||
84430d42
DB
18599 insn->imm == BPF_FUNC_map_delete_elem ||
18600 insn->imm == BPF_FUNC_map_push_elem ||
18601 insn->imm == BPF_FUNC_map_pop_elem ||
e6a4750f 18602 insn->imm == BPF_FUNC_map_peek_elem ||
0640c77c 18603 insn->imm == BPF_FUNC_redirect_map ||
07343110
FZ
18604 insn->imm == BPF_FUNC_for_each_map_elem ||
18605 insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
c93552c4
DB
18606 aux = &env->insn_aux_data[i + delta];
18607 if (bpf_map_ptr_poisoned(aux))
18608 goto patch_call_imm;
18609
d2e4c1e6 18610 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
09772d92
DB
18611 ops = map_ptr->ops;
18612 if (insn->imm == BPF_FUNC_map_lookup_elem &&
18613 ops->map_gen_lookup) {
18614 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
4a8f87e6
DB
18615 if (cnt == -EOPNOTSUPP)
18616 goto patch_map_ops_generic;
18617 if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
09772d92
DB
18618 verbose(env, "bpf verifier is misconfigured\n");
18619 return -EINVAL;
18620 }
81ed18ab 18621
09772d92
DB
18622 new_prog = bpf_patch_insn_data(env, i + delta,
18623 insn_buf, cnt);
18624 if (!new_prog)
18625 return -ENOMEM;
81ed18ab 18626
09772d92
DB
18627 delta += cnt - 1;
18628 env->prog = prog = new_prog;
18629 insn = new_prog->insnsi + i + delta;
18630 continue;
18631 }
81ed18ab 18632
09772d92
DB
18633 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
18634 (void *(*)(struct bpf_map *map, void *key))NULL));
18635 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
d7ba4cc9 18636 (long (*)(struct bpf_map *map, void *key))NULL));
09772d92 18637 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
d7ba4cc9 18638 (long (*)(struct bpf_map *map, void *key, void *value,
09772d92 18639 u64 flags))NULL));
84430d42 18640 BUILD_BUG_ON(!__same_type(ops->map_push_elem,
d7ba4cc9 18641 (long (*)(struct bpf_map *map, void *value,
84430d42
DB
18642 u64 flags))NULL));
18643 BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
d7ba4cc9 18644 (long (*)(struct bpf_map *map, void *value))NULL));
84430d42 18645 BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
d7ba4cc9 18646 (long (*)(struct bpf_map *map, void *value))NULL));
e6a4750f 18647 BUILD_BUG_ON(!__same_type(ops->map_redirect,
d7ba4cc9 18648 (long (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
0640c77c 18649 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
d7ba4cc9 18650 (long (*)(struct bpf_map *map,
0640c77c
AI
18651 bpf_callback_t callback_fn,
18652 void *callback_ctx,
18653 u64 flags))NULL));
07343110
FZ
18654 BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
18655 (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
e6a4750f 18656
4a8f87e6 18657patch_map_ops_generic:
09772d92
DB
18658 switch (insn->imm) {
18659 case BPF_FUNC_map_lookup_elem:
3d717fad 18660 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
09772d92
DB
18661 continue;
18662 case BPF_FUNC_map_update_elem:
3d717fad 18663 insn->imm = BPF_CALL_IMM(ops->map_update_elem);
09772d92
DB
18664 continue;
18665 case BPF_FUNC_map_delete_elem:
3d717fad 18666 insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
09772d92 18667 continue;
84430d42 18668 case BPF_FUNC_map_push_elem:
3d717fad 18669 insn->imm = BPF_CALL_IMM(ops->map_push_elem);
84430d42
DB
18670 continue;
18671 case BPF_FUNC_map_pop_elem:
3d717fad 18672 insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
84430d42
DB
18673 continue;
18674 case BPF_FUNC_map_peek_elem:
3d717fad 18675 insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
84430d42 18676 continue;
e6a4750f 18677 case BPF_FUNC_redirect_map:
3d717fad 18678 insn->imm = BPF_CALL_IMM(ops->map_redirect);
e6a4750f 18679 continue;
0640c77c
AI
18680 case BPF_FUNC_for_each_map_elem:
18681 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
e6a4750f 18682 continue;
07343110
FZ
18683 case BPF_FUNC_map_lookup_percpu_elem:
18684 insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
18685 continue;
09772d92 18686 }
81ed18ab 18687
09772d92 18688 goto patch_call_imm;
81ed18ab
AS
18689 }
18690
e6ac5933 18691 /* Implement bpf_jiffies64 inline. */
5576b991
MKL
18692 if (prog->jit_requested && BITS_PER_LONG == 64 &&
18693 insn->imm == BPF_FUNC_jiffies64) {
18694 struct bpf_insn ld_jiffies_addr[2] = {
18695 BPF_LD_IMM64(BPF_REG_0,
18696 (unsigned long)&jiffies),
18697 };
18698
18699 insn_buf[0] = ld_jiffies_addr[0];
18700 insn_buf[1] = ld_jiffies_addr[1];
18701 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
18702 BPF_REG_0, 0);
18703 cnt = 3;
18704
18705 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
18706 cnt);
18707 if (!new_prog)
18708 return -ENOMEM;
18709
18710 delta += cnt - 1;
18711 env->prog = prog = new_prog;
18712 insn = new_prog->insnsi + i + delta;
18713 continue;
18714 }
18715
f92c1e18
JO
18716 /* Implement bpf_get_func_arg inline. */
18717 if (prog_type == BPF_PROG_TYPE_TRACING &&
18718 insn->imm == BPF_FUNC_get_func_arg) {
18719 /* Load nr_args from ctx - 8 */
18720 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
18721 insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
18722 insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
18723 insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
18724 insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
18725 insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
18726 insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
18727 insn_buf[7] = BPF_JMP_A(1);
18728 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
18729 cnt = 9;
18730
18731 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
18732 if (!new_prog)
18733 return -ENOMEM;
18734
18735 delta += cnt - 1;
18736 env->prog = prog = new_prog;
18737 insn = new_prog->insnsi + i + delta;
18738 continue;
18739 }
18740
18741 /* Implement bpf_get_func_ret inline. */
18742 if (prog_type == BPF_PROG_TYPE_TRACING &&
18743 insn->imm == BPF_FUNC_get_func_ret) {
18744 if (eatype == BPF_TRACE_FEXIT ||
18745 eatype == BPF_MODIFY_RETURN) {
18746 /* Load nr_args from ctx - 8 */
18747 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
18748 insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
18749 insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
18750 insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
18751 insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
18752 insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
18753 cnt = 6;
18754 } else {
18755 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
18756 cnt = 1;
18757 }
18758
18759 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
18760 if (!new_prog)
18761 return -ENOMEM;
18762
18763 delta += cnt - 1;
18764 env->prog = prog = new_prog;
18765 insn = new_prog->insnsi + i + delta;
18766 continue;
18767 }
18768
18769 /* Implement get_func_arg_cnt inline. */
18770 if (prog_type == BPF_PROG_TYPE_TRACING &&
18771 insn->imm == BPF_FUNC_get_func_arg_cnt) {
18772 /* Load nr_args from ctx - 8 */
18773 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
18774
18775 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
18776 if (!new_prog)
18777 return -ENOMEM;
18778
18779 env->prog = prog = new_prog;
18780 insn = new_prog->insnsi + i + delta;
18781 continue;
18782 }
18783
f705ec76 18784 /* Implement bpf_get_func_ip inline. */
9b99edca
JO
18785 if (prog_type == BPF_PROG_TYPE_TRACING &&
18786 insn->imm == BPF_FUNC_get_func_ip) {
f92c1e18
JO
18787 /* Load IP address from ctx - 16 */
18788 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
9b99edca
JO
18789
18790 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
18791 if (!new_prog)
18792 return -ENOMEM;
18793
18794 env->prog = prog = new_prog;
18795 insn = new_prog->insnsi + i + delta;
18796 continue;
18797 }
18798
81ed18ab 18799patch_call_imm:
5e43f899 18800 fn = env->ops->get_func_proto(insn->imm, env->prog);
79741b3b
AS
18801 /* all functions that have prototype and verifier allowed
18802 * programs to call them, must be real in-kernel functions
18803 */
18804 if (!fn->func) {
61bd5218
JK
18805 verbose(env,
18806 "kernel subsystem misconfigured func %s#%d\n",
79741b3b
AS
18807 func_id_name(insn->imm), insn->imm);
18808 return -EFAULT;
e245c5c6 18809 }
79741b3b 18810 insn->imm = fn->func - __bpf_call_base;
e245c5c6 18811 }
e245c5c6 18812
18813 /* Since poke tab is now finalized, publish aux to tracker. */
18814 for (i = 0; i < prog->aux->size_poke_tab; i++) {
18815 map_ptr = prog->aux->poke_tab[i].tail_call.map;
18816 if (!map_ptr->ops->map_poke_track ||
18817 !map_ptr->ops->map_poke_untrack ||
18818 !map_ptr->ops->map_poke_run) {
18819 verbose(env, "bpf verifier is misconfigured\n");
18820 return -EINVAL;
18821 }
18822
18823 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
18824 if (ret < 0) {
18825 verbose(env, "tracking tail call prog failed\n");
18826 return ret;
18827 }
18828 }
18829
1cf3bfc6 18830 sort_kfunc_descs_by_imm_off(env->prog);
e6ac2450 18831
18832 return 0;
18833}
e245c5c6 18834
18835static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
18836 int position,
18837 s32 stack_base,
18838 u32 callback_subprogno,
18839 u32 *cnt)
18840{
18841 s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
18842 s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
18843 s32 r8_offset = stack_base + 2 * BPF_REG_SIZE;
18844 int reg_loop_max = BPF_REG_6;
18845 int reg_loop_cnt = BPF_REG_7;
18846 int reg_loop_ctx = BPF_REG_8;
18847
18848 struct bpf_prog *new_prog;
18849 u32 callback_start;
18850 u32 call_insn_offset;
18851 s32 callback_offset;
18852
 18853 /* This represents an inlined version of bpf_iter.c:bpf_loop;
 18854 * be careful to keep the two in sync when modifying either one.
 18855 */
18856 struct bpf_insn insn_buf[] = {
 18857 /* Return error and jump to the end of the patch if
 18858 * the expected number of iterations is too big.
 18859 */
18860 BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2),
18861 BPF_MOV32_IMM(BPF_REG_0, -E2BIG),
18862 BPF_JMP_IMM(BPF_JA, 0, 0, 16),
18863 /* spill R6, R7, R8 to use these as loop vars */
18864 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset),
18865 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset),
18866 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset),
18867 /* initialize loop vars */
18868 BPF_MOV64_REG(reg_loop_max, BPF_REG_1),
18869 BPF_MOV32_IMM(reg_loop_cnt, 0),
18870 BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3),
18871 /* loop header,
18872 * if reg_loop_cnt >= reg_loop_max skip the loop body
18873 */
18874 BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5),
 18875 /* callback call,
 18876 * the correct callback offset will be set after patching
 18877 */
18878 BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt),
18879 BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx),
18880 BPF_CALL_REL(0),
18881 /* increment loop counter */
18882 BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1),
18883 /* jump to loop header if callback returned 0 */
18884 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6),
18885 /* return value of bpf_loop,
18886 * set R0 to the number of iterations
18887 */
18888 BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt),
18889 /* restore original values of R6, R7, R8 */
18890 BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset),
18891 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset),
18892 BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset),
18893 };
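 /* As a sketch, the sequence above behaves like the C loop below
  * (i lives in reg_loop_cnt and nr_loops in reg_loop_max):
  *
  *	if (nr_loops > BPF_MAX_LOOPS)
  *		return -E2BIG;
  *	for (i = 0; i < nr_loops; i++)
  *		if (callback_fn(i, callback_ctx))
  *			return i + 1;
  *	return i;
  */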
18894
18895 *cnt = ARRAY_SIZE(insn_buf);
18896 new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt);
18897 if (!new_prog)
18898 return new_prog;
18899
18900 /* callback start is known only after patching */
18901 callback_start = env->subprog_info[callback_subprogno].start;
 18902 /* Note: insn_buf[12] is the offset of the BPF_CALL_REL instruction */
18903 call_insn_offset = position + 12;
18904 callback_offset = callback_start - call_insn_offset - 1;
fb4e3b33 18905 new_prog->insnsi[call_insn_offset].imm = callback_offset;
18906
18907 return new_prog;
18908}
18909
18910static bool is_bpf_loop_call(struct bpf_insn *insn)
18911{
18912 return insn->code == (BPF_JMP | BPF_CALL) &&
18913 insn->src_reg == 0 &&
18914 insn->imm == BPF_FUNC_loop;
18915}
18916
18917/* For all sub-programs in the program (including main), check
18918 * insn_aux_data to see if there are bpf_loop calls that require
18919 * inlining. If such calls are found, they are replaced with the
18920 * sequence of instructions produced by `inline_bpf_loop` and the
18921 * subprog stack_depth is increased by the size of 3 registers.
18922 * This stack space is used to spill the values of R6, R7 and R8;
18923 * these registers are used to store the loop bound, counter and
18924 * context variables.
18925 */
18926static int optimize_bpf_loop(struct bpf_verifier_env *env)
18927{
18928 struct bpf_subprog_info *subprogs = env->subprog_info;
18929 int i, cur_subprog = 0, cnt, delta = 0;
18930 struct bpf_insn *insn = env->prog->insnsi;
18931 int insn_cnt = env->prog->len;
18932 u16 stack_depth = subprogs[cur_subprog].stack_depth;
18933 u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
18934 u16 stack_depth_extra = 0;
18935
18936 for (i = 0; i < insn_cnt; i++, insn++) {
18937 struct bpf_loop_inline_state *inline_state =
18938 &env->insn_aux_data[i + delta].loop_inline_state;
18939
18940 if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) {
18941 struct bpf_prog *new_prog;
18942
18943 stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup;
18944 new_prog = inline_bpf_loop(env,
18945 i + delta,
18946 -(stack_depth + stack_depth_extra),
18947 inline_state->callback_subprogno,
18948 &cnt);
18949 if (!new_prog)
18950 return -ENOMEM;
18951
18952 delta += cnt - 1;
18953 env->prog = new_prog;
18954 insn = new_prog->insnsi + i + delta;
18955 }
18956
18957 if (subprogs[cur_subprog + 1].start == i + delta + 1) {
18958 subprogs[cur_subprog].stack_depth += stack_depth_extra;
18959 cur_subprog++;
18960 stack_depth = subprogs[cur_subprog].stack_depth;
18961 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
18962 stack_depth_extra = 0;
18963 }
18964 }
18965
18966 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
18967
18968 return 0;
18969}
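 /* An example of a program this pass can rewrite, as an illustrative
  * sketch in BPF C (hypothetical names, assuming libbpf's
  * bpf_helpers.h); with a known-zero flags argument and a static
  * callback the call can be marked fit_for_inline:
  *
  *	static int sum_cb(u32 i, void *ctx)
  *	{
  *		*(u64 *)ctx += i;
  *		return 0;
  *	}
  *
  *	SEC("tc")
  *	int sum_prog(struct __sk_buff *skb)
  *	{
  *		u64 sum = 0;
  *
  *		bpf_loop(100, sum_cb, &sum, 0);
  *		return sum == 4950 ? 1 : 0;
  *	}
  */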
18970
58e2af8b 18971static void free_states(struct bpf_verifier_env *env)
f1bca824 18972{
58e2af8b 18973 struct bpf_verifier_state_list *sl, *sln;
18974 int i;
18975
18976 sl = env->free_list;
18977 while (sl) {
18978 sln = sl->next;
18979 free_verifier_state(&sl->state, false);
18980 kfree(sl);
18981 sl = sln;
18982 }
51c39bb1 18983 env->free_list = NULL;
9f4686c4 18984
18985 if (!env->explored_states)
18986 return;
18987
dc2a4ebc 18988 for (i = 0; i < state_htab_size(env); i++) {
18989 sl = env->explored_states[i];
18990
18991 while (sl) {
18992 sln = sl->next;
18993 free_verifier_state(&sl->state, false);
18994 kfree(sl);
18995 sl = sln;
18996 }
51c39bb1 18997 env->explored_states[i] = NULL;
f1bca824 18998 }
51c39bb1 18999}
f1bca824 19000
19001static int do_check_common(struct bpf_verifier_env *env, int subprog)
19002{
6f8a57cc 19003 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
19004 struct bpf_verifier_state *state;
19005 struct bpf_reg_state *regs;
19006 int ret, i;
19007
19008 env->prev_linfo = NULL;
19009 env->pass_cnt++;
19010
19011 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
19012 if (!state)
19013 return -ENOMEM;
19014 state->curframe = 0;
19015 state->speculative = false;
19016 state->branches = 1;
19017 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
19018 if (!state->frame[0]) {
19019 kfree(state);
19020 return -ENOMEM;
19021 }
19022 env->cur_state = state;
19023 init_func_state(env, state->frame[0],
19024 BPF_MAIN_FUNC /* callsite */,
19025 0 /* frameno */,
19026 subprog);
19027 state->first_insn_idx = env->subprog_info[subprog].start;
19028 state->last_insn_idx = -1;
19029
19030 regs = state->frame[state->curframe]->regs;
be8704ff 19031 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
19032 ret = btf_prepare_func_args(env, subprog, regs);
19033 if (ret)
19034 goto out;
19035 for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
19036 if (regs[i].type == PTR_TO_CTX)
19037 mark_reg_known_zero(env, regs, i);
19038 else if (regs[i].type == SCALAR_VALUE)
19039 mark_reg_unknown(env, regs, i);
cf9f2f8d 19040 else if (base_type(regs[i].type) == PTR_TO_MEM) {
19041 const u32 mem_size = regs[i].mem_size;
19042
19043 mark_reg_known_zero(env, regs, i);
19044 regs[i].mem_size = mem_size;
19045 regs[i].id = ++env->id_gen;
19046 }
19047 }
19048 } else {
19049 /* 1st arg to a function */
19050 regs[BPF_REG_1].type = PTR_TO_CTX;
19051 mark_reg_known_zero(env, regs, BPF_REG_1);
34747c41 19052 ret = btf_check_subprog_arg_match(env, subprog, regs);
19053 if (ret == -EFAULT)
 19054 /* unlikely verifier bug. abort.
 19055 * ret == 0 and ret < 0 are sadly acceptable for
 19056 * the main() function due to backward compatibility.
 19057 * For example, a socket filter program may be written as:
 19058 * int bpf_prog(struct pt_regs *ctx)
 19059 * and never dereference that ctx in the program.
 19060 * 'struct pt_regs' is a type mismatch for a socket
 19061 * filter, which should be using 'struct __sk_buff'.
 19062 */
19063 goto out;
19064 }
19065
19066 ret = do_check(env);
19067out:
19068 /* check for NULL is necessary, since cur_state can be freed inside
19069 * do_check() under memory pressure.
19070 */
19071 if (env->cur_state) {
19072 free_verifier_state(env->cur_state, true);
19073 env->cur_state = NULL;
19074 }
19075 while (!pop_stack(env, NULL, NULL, false));
19076 if (!ret && pop_log)
19077 bpf_vlog_reset(&env->log, 0);
51c39bb1 19078 free_states(env);
19079 return ret;
19080}
19081
19082/* Verify all global functions in a BPF program one by one based on their BTF.
19083 * All global functions must pass verification. Otherwise the whole program is rejected.
19084 * Consider:
19085 * int bar(int);
19086 * int foo(int f)
19087 * {
19088 * return bar(f);
19089 * }
19090 * int bar(int b)
19091 * {
19092 * ...
19093 * }
 19094 * foo() will be verified first for R1=any_scalar_value. During verification it
 19095 * will be assumed that bar() has already been verified successfully, and the
 19096 * call to bar() from foo() will be checked for a type match only. Later bar()
 19097 * will be verified independently to check that it's safe for R1=any_scalar_value.
19098 */
19099static int do_check_subprogs(struct bpf_verifier_env *env)
19100{
19101 struct bpf_prog_aux *aux = env->prog->aux;
19102 int i, ret;
19103
19104 if (!aux->func_info)
19105 return 0;
19106
19107 for (i = 1; i < env->subprog_cnt; i++) {
19108 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
19109 continue;
19110 env->insn_idx = env->subprog_info[i].start;
19111 WARN_ON_ONCE(env->insn_idx == 0);
19112 ret = do_check_common(env, i);
19113 if (ret) {
19114 return ret;
19115 } else if (env->log.level & BPF_LOG_LEVEL) {
19116 verbose(env,
19117 "Func#%d is safe for any args that match its prototype\n",
19118 i);
19119 }
19120 }
19121 return 0;
19122}
19123
19124static int do_check_main(struct bpf_verifier_env *env)
19125{
19126 int ret;
19127
19128 env->insn_idx = 0;
19129 ret = do_check_common(env, 0);
19130 if (!ret)
19131 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
19132 return ret;
19133}
19134
19135
19136static void print_verification_stats(struct bpf_verifier_env *env)
19137{
19138 int i;
19139
19140 if (env->log.level & BPF_LOG_STATS) {
19141 verbose(env, "verification time %lld usec\n",
19142 div_u64(env->verification_time, 1000));
19143 verbose(env, "stack depth ");
19144 for (i = 0; i < env->subprog_cnt; i++) {
19145 u32 depth = env->subprog_info[i].stack_depth;
19146
19147 verbose(env, "%d", depth);
19148 if (i + 1 < env->subprog_cnt)
19149 verbose(env, "+");
19150 }
19151 verbose(env, "\n");
19152 }
19153 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
19154 "total_states %d peak_states %d mark_read %d\n",
19155 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
19156 env->max_states_per_insn, env->total_states,
19157 env->peak_states, env->longest_mark_read_walk);
19158}
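 /* The two stats lines are printed only with BPF_LOG_STATS set;
  * the summary line is printed unconditionally. The tail of the
  * log then looks roughly like (numbers purely illustrative):
  *
  *	verification time 1200 usec
  *	stack depth 64+0
  *	processed 180 insns (limit 1000000) max_states_per_insn 2
  *	total_states 12 peak_states 12 mark_read 3
  */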
19159
19160static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
19161{
19162 const struct btf_type *t, *func_proto;
19163 const struct bpf_struct_ops *st_ops;
19164 const struct btf_member *member;
19165 struct bpf_prog *prog = env->prog;
19166 u32 btf_id, member_idx;
19167 const char *mname;
19168
19169 if (!prog->gpl_compatible) {
19170 verbose(env, "struct ops programs must have a GPL compatible license\n");
19171 return -EINVAL;
19172 }
19173
19174 btf_id = prog->aux->attach_btf_id;
19175 st_ops = bpf_struct_ops_find(btf_id);
19176 if (!st_ops) {
19177 verbose(env, "attach_btf_id %u is not a supported struct\n",
19178 btf_id);
19179 return -ENOTSUPP;
19180 }
19181
19182 t = st_ops->type;
19183 member_idx = prog->expected_attach_type;
19184 if (member_idx >= btf_type_vlen(t)) {
19185 verbose(env, "attach to invalid member idx %u of struct %s\n",
19186 member_idx, st_ops->name);
19187 return -EINVAL;
19188 }
19189
19190 member = &btf_type_member(t)[member_idx];
19191 mname = btf_name_by_offset(btf_vmlinux, member->name_off);
19192 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
19193 NULL);
19194 if (!func_proto) {
19195 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
19196 mname, member_idx, st_ops->name);
19197 return -EINVAL;
19198 }
19199
19200 if (st_ops->check_member) {
51a52a29 19201 int err = st_ops->check_member(t, member, prog);
19202
19203 if (err) {
19204 verbose(env, "attach to unsupported member %s of struct %s\n",
19205 mname, st_ops->name);
19206 return err;
19207 }
19208 }
19209
19210 prog->aux->attach_func_proto = func_proto;
19211 prog->aux->attach_func_name = mname;
19212 env->ops = st_ops->verifier_ops;
19213
19214 return 0;
19215}
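 /* For reference, a struct_ops program on the BPF side looks
  * roughly like this (an illustrative sketch with hypothetical
  * names, assuming libbpf's BPF_PROG macro; registration is
  * driven by the .struct_ops map):
  *
  *	SEC("struct_ops/init")
  *	void BPF_PROG(init, struct sock *sk)
  *	{
  *	}
  *
  *	SEC(".struct_ops")
  *	struct tcp_congestion_ops my_ops = {
  *		.init = (void *)init,
  *		.name = "my_ops",
  *	};
  *
  * expected_attach_type carries the index of the member being
  * implemented, which is validated above.
  */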
19216#define SECURITY_PREFIX "security_"
19217
f7b12b6f 19218static int check_attach_modify_return(unsigned long addr, const char *func_name)
6ba43b76 19219{
69191754 19220 if (within_error_injection_list(addr) ||
f7b12b6f 19221 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
6ba43b76 19222 return 0;
6ba43b76 19223
19224 return -EINVAL;
19225}
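 /* E.g. the following attach target passes this check because
  * bpf_modify_return_test() is on the error injection list (an
  * illustrative sketch, assuming libbpf's BPF_PROG macro):
  *
  *	SEC("fmod_ret/bpf_modify_return_test")
  *	int BPF_PROG(test, int a, int *b, int ret)
  *	{
  *		return ret ? 0 : -EPERM;
  *	}
  */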
27ae7997 19226
19227/* list of non-sleepable functions that are otherwise on
19228 * ALLOW_ERROR_INJECTION list
19229 */
19230BTF_SET_START(btf_non_sleepable_error_inject)
19231/* The three functions below can be called from both sleepable and non-sleepable
19232 * context. Assume they are non-sleepable from the bpf safety point of view.
19233 */
9dd3d069 19234BTF_ID(func, __filemap_add_folio)
19235BTF_ID(func, should_fail_alloc_page)
19236BTF_ID(func, should_failslab)
19237BTF_SET_END(btf_non_sleepable_error_inject)
19238
19239static int check_non_sleepable_error_inject(u32 btf_id)
19240{
19241 return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
19242}
19243
19244int bpf_check_attach_target(struct bpf_verifier_log *log,
19245 const struct bpf_prog *prog,
19246 const struct bpf_prog *tgt_prog,
19247 u32 btf_id,
19248 struct bpf_attach_target_info *tgt_info)
38207291 19249{
be8704ff 19250 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
f1b9509c 19251 const char prefix[] = "btf_trace_";
5b92a28a 19252 int ret = 0, subprog = -1, i;
38207291 19253 const struct btf_type *t;
5b92a28a 19254 bool conservative = true;
38207291 19255 const char *tname;
5b92a28a 19256 struct btf *btf;
f7b12b6f 19257 long addr = 0;
31bf1dbc 19258 struct module *mod = NULL;
38207291 19259
f1b9509c 19260 if (!btf_id) {
efc68158 19261 bpf_log(log, "Tracing programs must provide btf_id\n");
19262 return -EINVAL;
19263 }
22dc4a0f 19264 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
5b92a28a 19265 if (!btf) {
efc68158 19266 bpf_log(log,
19267 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
19268 return -EINVAL;
19269 }
19270 t = btf_type_by_id(btf, btf_id);
f1b9509c 19271 if (!t) {
efc68158 19272 bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
19273 return -EINVAL;
19274 }
5b92a28a 19275 tname = btf_name_by_offset(btf, t->name_off);
f1b9509c 19276 if (!tname) {
efc68158 19277 bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
19278 return -EINVAL;
19279 }
19280 if (tgt_prog) {
19281 struct bpf_prog_aux *aux = tgt_prog->aux;
19282
19283 if (bpf_prog_is_dev_bound(prog->aux) &&
19284 !bpf_prog_dev_bound_match(prog, tgt_prog)) {
19285 bpf_log(log, "Target program bound device mismatch");
19286 return -EINVAL;
19287 }
19288
19289 for (i = 0; i < aux->func_info_cnt; i++)
19290 if (aux->func_info[i].type_id == btf_id) {
19291 subprog = i;
19292 break;
19293 }
19294 if (subprog == -1) {
efc68158 19295 bpf_log(log, "Subprog %s doesn't exist\n", tname);
19296 return -EINVAL;
19297 }
19298 conservative = aux->func_info_aux[subprog].unreliable;
19299 if (prog_extension) {
19300 if (conservative) {
efc68158 19301 bpf_log(log,
19302 "Cannot replace static functions\n");
19303 return -EINVAL;
19304 }
19305 if (!prog->jit_requested) {
efc68158 19306 bpf_log(log,
19307 "Extension programs should be JITed\n");
19308 return -EINVAL;
19309 }
19310 }
19311 if (!tgt_prog->jited) {
efc68158 19312 bpf_log(log, "Can attach to only JITed progs\n");
19313 return -EINVAL;
19314 }
19315 if (tgt_prog->type == prog->type) {
19316 /* Cannot fentry/fexit another fentry/fexit program.
19317 * Cannot attach program extension to another extension.
19318 * It's ok to attach fentry/fexit to extension program.
19319 */
efc68158 19320 bpf_log(log, "Cannot recursively attach\n");
19321 return -EINVAL;
19322 }
19323 if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
19324 prog_extension &&
19325 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
19326 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
 19327 /* Program extensions can extend all program types
 19328 * except fentry/fexit. The reason is the following.
 19329 * The fentry/fexit programs are used for performance
 19330 * analysis, stats and can be attached to any program
 19331 * type except themselves. When an extension program
 19332 * replaces an XDP function, it is necessary to allow
 19333 * performance analysis of all functions: both the
 19334 * original XDP program and its program extension.
 19335 * Hence attaching fentry/fexit to BPF_PROG_TYPE_EXT
 19336 * is allowed. If extending fentry/fexit were allowed,
 19337 * it would be possible to create a long call chain
 19338 * fentry->extension->fentry->extension beyond
 19339 * reasonable stack size. Hence extending fentry is
 19340 * not allowed.
 19341 */
efc68158 19342 bpf_log(log, "Cannot extend fentry/fexit\n");
19343 return -EINVAL;
19344 }
5b92a28a 19345 } else {
be8704ff 19346 if (prog_extension) {
efc68158 19347 bpf_log(log, "Cannot replace kernel functions\n");
19348 return -EINVAL;
19349 }
5b92a28a 19350 }
19351
19352 switch (prog->expected_attach_type) {
19353 case BPF_TRACE_RAW_TP:
5b92a28a 19354 if (tgt_prog) {
efc68158 19355 bpf_log(log,
19356 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
19357 return -EINVAL;
19358 }
38207291 19359 if (!btf_type_is_typedef(t)) {
efc68158 19360 bpf_log(log, "attach_btf_id %u is not a typedef\n",
19361 btf_id);
19362 return -EINVAL;
19363 }
f1b9509c 19364 if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
efc68158 19365 bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
19366 btf_id, tname);
19367 return -EINVAL;
19368 }
19369 tname += sizeof(prefix) - 1;
5b92a28a 19370 t = btf_type_by_id(btf, t->type);
19371 if (!btf_type_is_ptr(t))
19372 /* should never happen in valid vmlinux build */
19373 return -EINVAL;
5b92a28a 19374 t = btf_type_by_id(btf, t->type);
19375 if (!btf_type_is_func_proto(t))
19376 /* should never happen in valid vmlinux build */
19377 return -EINVAL;
19378
f7b12b6f 19379 break;
19380 case BPF_TRACE_ITER:
19381 if (!btf_type_is_func(t)) {
efc68158 19382 bpf_log(log, "attach_btf_id %u is not a function\n",
19383 btf_id);
19384 return -EINVAL;
19385 }
19386 t = btf_type_by_id(btf, t->type);
19387 if (!btf_type_is_func_proto(t))
19388 return -EINVAL;
19389 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
19390 if (ret)
19391 return ret;
19392 break;
19393 default:
19394 if (!prog_extension)
19395 return -EINVAL;
df561f66 19396 fallthrough;
ae240823 19397 case BPF_MODIFY_RETURN:
9e4e01df 19398 case BPF_LSM_MAC:
69fd337a 19399 case BPF_LSM_CGROUP:
19400 case BPF_TRACE_FENTRY:
19401 case BPF_TRACE_FEXIT:
19402 if (!btf_type_is_func(t)) {
efc68158 19403 bpf_log(log, "attach_btf_id %u is not a function\n",
19404 btf_id);
19405 return -EINVAL;
19406 }
be8704ff 19407 if (prog_extension &&
efc68158 19408 btf_check_type_match(log, prog, btf, t))
be8704ff 19409 return -EINVAL;
5b92a28a 19410 t = btf_type_by_id(btf, t->type);
19411 if (!btf_type_is_func_proto(t))
19412 return -EINVAL;
f7b12b6f 19413
19414 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
19415 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
19416 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
19417 return -EINVAL;
19418
f7b12b6f 19419 if (tgt_prog && conservative)
5b92a28a 19420 t = NULL;
19421
19422 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
fec56f58 19423 if (ret < 0)
19424 return ret;
19425
5b92a28a 19426 if (tgt_prog) {
19427 if (subprog == 0)
19428 addr = (long) tgt_prog->bpf_func;
19429 else
19430 addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
5b92a28a 19431 } else {
19432 if (btf_is_module(btf)) {
19433 mod = btf_try_get_module(btf);
19434 if (mod)
19435 addr = find_kallsyms_symbol_value(mod, tname);
19436 else
19437 addr = 0;
19438 } else {
19439 addr = kallsyms_lookup_name(tname);
19440 }
5b92a28a 19441 if (!addr) {
31bf1dbc 19442 module_put(mod);
efc68158 19443 bpf_log(log,
19444 "The address of function %s cannot be found\n",
19445 tname);
f7b12b6f 19446 return -ENOENT;
5b92a28a 19447 }
fec56f58 19448 }
18644cec 19449
19450 if (prog->aux->sleepable) {
19451 ret = -EINVAL;
19452 switch (prog->type) {
19453 case BPF_PROG_TYPE_TRACING:
19454
19455 /* fentry/fexit/fmod_ret progs can be sleepable if they are
19456 * attached to ALLOW_ERROR_INJECTION and are not in denylist.
19457 */
19458 if (!check_non_sleepable_error_inject(btf_id) &&
19459 within_error_injection_list(addr))
19460 ret = 0;
19461 /* fentry/fexit/fmod_ret progs can also be sleepable if they are
19462 * in the fmodret id set with the KF_SLEEPABLE flag.
19463 */
19464 else {
19465 u32 *flags = btf_kfunc_is_modify_return(btf, btf_id,
19466 prog);
19467
19468 if (flags && (*flags & KF_SLEEPABLE))
19469 ret = 0;
19470 }
19471 break;
19472 case BPF_PROG_TYPE_LSM:
19473 /* LSM progs check that they are attached to bpf_lsm_*() funcs.
19474 * Only some of them are sleepable.
19475 */
423f1610 19476 if (bpf_lsm_is_sleepable_hook(btf_id))
19477 ret = 0;
19478 break;
19479 default:
19480 break;
19481 }
f7b12b6f 19482 if (ret) {
31bf1dbc 19483 module_put(mod);
19484 bpf_log(log, "%s is not sleepable\n", tname);
19485 return ret;
19486 }
1e6c62a8 19487 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
1af9270e 19488 if (tgt_prog) {
31bf1dbc 19489 module_put(mod);
efc68158 19490 bpf_log(log, "can't modify return codes of BPF programs\n");
19491 return -EINVAL;
19492 }
5b481aca 19493 ret = -EINVAL;
e924e80e 19494 if (btf_kfunc_is_modify_return(btf, btf_id, prog) ||
19495 !check_attach_modify_return(addr, tname))
19496 ret = 0;
f7b12b6f 19497 if (ret) {
31bf1dbc 19498 module_put(mod);
19499 bpf_log(log, "%s() is not modifiable\n", tname);
19500 return ret;
1af9270e 19501 }
18644cec 19502 }
19503
19504 break;
19505 }
19506 tgt_info->tgt_addr = addr;
19507 tgt_info->tgt_name = tname;
19508 tgt_info->tgt_type = t;
31bf1dbc 19509 tgt_info->tgt_mod = mod;
19510 return 0;
19511}
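 /* E.g. a program with SEC("tp_btf/sched_switch") carries the
  * btf_id of the typedef btf_trace_sched_switch; the
  * BPF_TRACE_RAW_TP case above strips the "btf_trace_" prefix and
  * resolves the typedef to the traced function's prototype. An
  * illustrative sketch (the exact argument list depends on the
  * kernel version):
  *
  *	SEC("tp_btf/sched_switch")
  *	int BPF_PROG(on_switch, bool preempt,
  *		     struct task_struct *prev, struct task_struct *next)
  *	{
  *		return 0;
  *	}
  */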
19512
19513BTF_SET_START(btf_id_deny)
19514BTF_ID_UNUSED
19515#ifdef CONFIG_SMP
19516BTF_ID(func, migrate_disable)
19517BTF_ID(func, migrate_enable)
19518#endif
19519#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
19520BTF_ID(func, rcu_read_unlock_strict)
19521#endif
19522#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
19523BTF_ID(func, preempt_count_add)
19524BTF_ID(func, preempt_count_sub)
19525#endif
19526#ifdef CONFIG_PREEMPT_RCU
19527BTF_ID(func, __rcu_read_lock)
19528BTF_ID(func, __rcu_read_unlock)
19529#endif
19530BTF_SET_END(btf_id_deny)
19531
19532static bool can_be_sleepable(struct bpf_prog *prog)
19533{
19534 if (prog->type == BPF_PROG_TYPE_TRACING) {
19535 switch (prog->expected_attach_type) {
19536 case BPF_TRACE_FENTRY:
19537 case BPF_TRACE_FEXIT:
19538 case BPF_MODIFY_RETURN:
19539 case BPF_TRACE_ITER:
19540 return true;
19541 default:
19542 return false;
19543 }
19544 }
19545 return prog->type == BPF_PROG_TYPE_LSM ||
19546 prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ ||
19547 prog->type == BPF_PROG_TYPE_STRUCT_OPS;
19548}
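 /* On the libbpf side the sleepable variants are requested with
  * an ".s" section suffix, e.g. (illustrative):
  *
  *	SEC("fentry.s/security_file_open")
  *	SEC("lsm.s/file_open")
  */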
19549
19550static int check_attach_btf_id(struct bpf_verifier_env *env)
19551{
19552 struct bpf_prog *prog = env->prog;
3aac1ead 19553 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
f7b12b6f
THJ
19554 struct bpf_attach_target_info tgt_info = {};
19555 u32 btf_id = prog->aux->attach_btf_id;
19556 struct bpf_trampoline *tr;
19557 int ret;
19558 u64 key;
19559
19560 if (prog->type == BPF_PROG_TYPE_SYSCALL) {
19561 if (prog->aux->sleepable)
19562 /* attach_btf_id checked to be zero already */
19563 return 0;
19564 verbose(env, "Syscall programs can only be sleepable\n");
19565 return -EINVAL;
19566 }
19567
700e6f85 19568 if (prog->aux->sleepable && !can_be_sleepable(prog)) {
1e12d3ef 19569 verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
19570 return -EINVAL;
19571 }
19572
19573 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
19574 return check_struct_ops_btf_id(env);
19575
19576 if (prog->type != BPF_PROG_TYPE_TRACING &&
19577 prog->type != BPF_PROG_TYPE_LSM &&
19578 prog->type != BPF_PROG_TYPE_EXT)
19579 return 0;
19580
19581 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
19582 if (ret)
fec56f58 19583 return ret;
19584
19585 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
 19586 /* to make freplace programs equivalent to their targets, they
 19587 * need to inherit env->ops and expected_attach_type for the rest
 19588 * of the verification
 19589 */
19590 env->ops = bpf_verifier_ops[tgt_prog->type];
19591 prog->expected_attach_type = tgt_prog->expected_attach_type;
19592 }
19593
19594 /* store info about the attachment target that will be used later */
19595 prog->aux->attach_func_proto = tgt_info.tgt_type;
19596 prog->aux->attach_func_name = tgt_info.tgt_name;
31bf1dbc 19597 prog->aux->mod = tgt_info.tgt_mod;
f7b12b6f 19598
19599 if (tgt_prog) {
19600 prog->aux->saved_dst_prog_type = tgt_prog->type;
19601 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
19602 }
19603
19604 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
19605 prog->aux->attach_btf_trace = true;
19606 return 0;
19607 } else if (prog->expected_attach_type == BPF_TRACE_ITER) {
19608 if (!bpf_iter_prog_supported(prog))
19609 return -EINVAL;
19610 return 0;
19611 }
19612
19613 if (prog->type == BPF_PROG_TYPE_LSM) {
19614 ret = bpf_lsm_verify_prog(&env->log, prog);
19615 if (ret < 0)
19616 return ret;
19617 } else if (prog->type == BPF_PROG_TYPE_TRACING &&
19618 btf_id_set_contains(&btf_id_deny, btf_id)) {
19619 return -EINVAL;
38207291 19620 }
f7b12b6f 19621
22dc4a0f 19622 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
19623 tr = bpf_trampoline_get(key, &tgt_info);
19624 if (!tr)
19625 return -ENOMEM;
19626
3aac1ead 19627 prog->aux->dst_trampoline = tr;
f7b12b6f 19628 return 0;
19629}
19630
19631struct btf *bpf_get_btf_vmlinux(void)
19632{
19633 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
19634 mutex_lock(&bpf_verifier_lock);
19635 if (!btf_vmlinux)
19636 btf_vmlinux = btf_parse_vmlinux();
19637 mutex_unlock(&bpf_verifier_lock);
19638 }
19639 return btf_vmlinux;
19640}
19641
47a71c1f 19642int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
51580e79 19643{
06ee7115 19644 u64 start_time = ktime_get_ns();
58e2af8b 19645 struct bpf_verifier_env *env;
19646 int i, len, ret = -EINVAL, err;
19647 u32 log_true_size;
e2ae4ca2 19648 bool is_priv;
51580e79 19649
19650 /* no program is valid */
19651 if (ARRAY_SIZE(bpf_verifier_ops) == 0)
19652 return -EINVAL;
19653
58e2af8b 19654 /* 'struct bpf_verifier_env' can be global, but since it's not small,
19655 * allocate/free it every time bpf_check() is called
19656 */
58e2af8b 19657 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
19658 if (!env)
19659 return -ENOMEM;
19660
19661 env->bt.env = env;
19662
9e4c24e7 19663 len = (*prog)->len;
fad953ce 19664 env->insn_aux_data =
9e4c24e7 19665 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
19666 ret = -ENOMEM;
19667 if (!env->insn_aux_data)
19668 goto err_free_env;
19669 for (i = 0; i < len; i++)
19670 env->insn_aux_data[i].orig_idx = i;
9bac3d6d 19671 env->prog = *prog;
00176a34 19672 env->ops = bpf_verifier_ops[env->prog->type];
387544bf 19673 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
2c78ee89 19674 is_priv = bpf_capable();
0246e64d 19675
76654e67 19676 bpf_get_btf_vmlinux();
8580ac94 19677
cbd35700 19678 /* grab the mutex to protect few globals used by verifier */
19679 if (!is_priv)
19680 mutex_lock(&bpf_verifier_lock);
cbd35700 19681
19682 /* user could have requested verbose verifier output
19683 * and supplied buffer to store the verification trace
19684 */
19685 ret = bpf_vlog_init(&env->log, attr->log_level,
19686 (char __user *) (unsigned long) attr->log_buf,
19687 attr->log_size);
19688 if (ret)
19689 goto err_unlock;
1ad2f583 19690
19691 mark_verifier_state_clean(env);
19692
19693 if (IS_ERR(btf_vmlinux)) {
 19694 /* Either gcc, pahole or the kernel is broken. */
19695 verbose(env, "in-kernel BTF is malformed\n");
19696 ret = PTR_ERR(btf_vmlinux);
38207291 19697 goto skip_full_check;
19698 }
19699
19700 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
19701 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
e07b98d9 19702 env->strict_alignment = true;
19703 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
19704 env->strict_alignment = false;
cbd35700 19705
2c78ee89 19706 env->allow_ptr_leaks = bpf_allow_ptr_leaks();
01f810ac 19707 env->allow_uninit_stack = bpf_allow_uninit_stack();
19708 env->bypass_spec_v1 = bpf_bypass_spec_v1();
19709 env->bypass_spec_v4 = bpf_bypass_spec_v4();
19710 env->bpf_capable = bpf_capable();
e2ae4ca2 19711
19712 if (is_priv)
19713 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
19714
dc2a4ebc 19715 env->explored_states = kvcalloc(state_htab_size(env),
58e2af8b 19716 sizeof(struct bpf_verifier_state_list *),
19717 GFP_USER);
19718 ret = -ENOMEM;
19719 if (!env->explored_states)
19720 goto skip_full_check;
19721
19722 ret = add_subprog_and_kfunc(env);
19723 if (ret < 0)
19724 goto skip_full_check;
19725
d9762e84 19726 ret = check_subprogs(env);
19727 if (ret < 0)
19728 goto skip_full_check;
19729
c454a46b 19730 ret = check_btf_info(env, attr, uattr);
19731 if (ret < 0)
19732 goto skip_full_check;
19733
19734 ret = check_attach_btf_id(env);
19735 if (ret)
19736 goto skip_full_check;
19737
19738 ret = resolve_pseudo_ldimm64(env);
19739 if (ret < 0)
19740 goto skip_full_check;
19741
9d03ebc7 19742 if (bpf_prog_is_offloaded(env->prog->aux)) {
19743 ret = bpf_prog_offload_verifier_prep(env->prog);
19744 if (ret)
19745 goto skip_full_check;
19746 }
19747
19748 ret = check_cfg(env);
19749 if (ret < 0)
19750 goto skip_full_check;
19751
19752 ret = do_check_subprogs(env);
19753 ret = ret ?: do_check_main(env);
cbd35700 19754
9d03ebc7 19755 if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux))
19756 ret = bpf_prog_offload_finalize(env);
19757
0246e64d 19758skip_full_check:
51c39bb1 19759 kvfree(env->explored_states);
0246e64d 19760
c131187d 19761 if (ret == 0)
9b38c405 19762 ret = check_max_stack_depth(env);
c131187d 19763
9b38c405 19764 /* instruction rewrites happen after this point */
19765 if (ret == 0)
19766 ret = optimize_bpf_loop(env);
19767
19768 if (is_priv) {
19769 if (ret == 0)
19770 opt_hard_wire_dead_code_branches(env);
19771 if (ret == 0)
19772 ret = opt_remove_dead_code(env);
19773 if (ret == 0)
19774 ret = opt_remove_nops(env);
19775 } else {
19776 if (ret == 0)
19777 sanitize_dead_code(env);
19778 }
19779
19780 if (ret == 0)
19781 /* program is valid, convert *(u32*)(ctx + off) accesses */
19782 ret = convert_ctx_accesses(env);
19783
e245c5c6 19784 if (ret == 0)
e6ac5933 19785 ret = do_misc_fixups(env);
e245c5c6 19786
19787 /* do 32-bit optimization after insn patching has done so those patched
19788 * insns could be handled correctly.
19789 */
9d03ebc7 19790 if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) {
19791 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
19792 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
19793 : false;
19794 }
19795
19796 if (ret == 0)
19797 ret = fixup_call_args(env);
19798
19799 env->verification_time = ktime_get_ns() - start_time;
19800 print_verification_stats(env);
aba64c7d 19801 env->prog->aux->verified_insns = env->insn_processed;
06ee7115 19802
19803 /* preserve original error even if log finalization is successful */
19804 err = bpf_vlog_finalize(&env->log, &log_true_size);
19805 if (err)
19806 ret = err;
19807
19808 if (uattr_size >= offsetofend(union bpf_attr, log_true_size) &&
19809 copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, log_true_size),
bdcab414 19810 &log_true_size, sizeof(log_true_size))) {
19811 ret = -EFAULT;
19812 goto err_release_maps;
19813 }
cbd35700 19814
19815 if (ret)
19816 goto err_release_maps;
19817
19818 if (env->used_map_cnt) {
0246e64d 19819 /* if program passed verifier, update used_maps in bpf_prog_info */
19820 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
19821 sizeof(env->used_maps[0]),
19822 GFP_KERNEL);
0246e64d 19823
9bac3d6d 19824 if (!env->prog->aux->used_maps) {
0246e64d 19825 ret = -ENOMEM;
a2a7d570 19826 goto err_release_maps;
19827 }
19828
9bac3d6d 19829 memcpy(env->prog->aux->used_maps, env->used_maps,
0246e64d 19830 sizeof(env->used_maps[0]) * env->used_map_cnt);
9bac3d6d 19831 env->prog->aux->used_map_cnt = env->used_map_cnt;
19832 }
19833 if (env->used_btf_cnt) {
19834 /* if program passed verifier, update used_btfs in bpf_prog_aux */
19835 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
19836 sizeof(env->used_btfs[0]),
19837 GFP_KERNEL);
19838 if (!env->prog->aux->used_btfs) {
19839 ret = -ENOMEM;
19840 goto err_release_maps;
19841 }
0246e64d 19842
19843 memcpy(env->prog->aux->used_btfs, env->used_btfs,
19844 sizeof(env->used_btfs[0]) * env->used_btf_cnt);
19845 env->prog->aux->used_btf_cnt = env->used_btf_cnt;
19846 }
19847 if (env->used_map_cnt || env->used_btf_cnt) {
19848 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
19849 * bpf_ld_imm64 instructions
19850 */
19851 convert_pseudo_ld_imm64(env);
19852 }
cbd35700 19853
541c3bad 19854 adjust_btf_func(env);
ba64e7d8 19855
a2a7d570 19856err_release_maps:
9bac3d6d 19857 if (!env->prog->aux->used_maps)
0246e64d 19858 /* if we didn't copy map pointers into bpf_prog_info, release
ab7f5bf0 19859 * them now. Otherwise free_used_maps() will release them.
19860 */
19861 release_maps(env);
19862 if (!env->prog->aux->used_btfs)
19863 release_btfs(env);
19864
19865 /* extension progs temporarily inherit the attach_type of their targets
19866 for verification purposes, so set it back to zero before returning
19867 */
19868 if (env->prog->type == BPF_PROG_TYPE_EXT)
19869 env->prog->expected_attach_type = 0;
19870
9bac3d6d 19871 *prog = env->prog;
3df126f3 19872err_unlock:
19873 if (!is_priv)
19874 mutex_unlock(&bpf_verifier_lock);
19875 vfree(env->insn_aux_data);
19876err_free_env:
19877 kfree(env);
19878 return ret;
19879}
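 /* From userspace bpf_check() is reached via the BPF_PROG_LOAD
  * command; an illustrative sketch using libbpf's low-level
  * loader (buf and insns are caller-provided):
  *
  *	LIBBPF_OPTS(bpf_prog_load_opts, opts,
  *		.log_buf = buf,
  *		.log_size = sizeof(buf),
  *		.log_level = 1 | 4,		// LEVEL1 | STATS
  *	);
  *	int fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "prog",
  *			       "GPL", insns, insn_cnt, &opts);
  *
  * On failure, buf holds the verbose() output produced above.
  */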