Merge branch 'Support stashing local kptrs with bpf_kptr_xchg'
[linux-block.git] / kernel / bpf / verifier.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>
#include <linux/poison.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * the four pointer types recognized by the check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
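
/* For example (an illustrative sketch, not part of the original comment),
 * a balanced acquire/release pair looks like:
 *
 *	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
 *				// R0 is PTR_TO_SOCKET_OR_NULL with a fresh reference id
 *	if (sk)			// true branch: the register becomes PTR_TO_SOCKET
 *		bpf_sk_release(sk);	// the reference id is released here
 *				// reaching bpf_exit with the id still live is rejected
 */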

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
static int ref_set_non_owning(struct bpf_verifier_env *env,
			      struct bpf_reg_state *reg);

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
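
/* Worked example (illustrative, not part of the original source): the two
 * top bits of map_key_state are flags, the low bits hold a constant
 * key/index. After bpf_map_key_store(aux, 4) on the first sighting of a
 * helper whose key/index argument is the constant 4,
 * bpf_map_key_immediate(aux) returns 4 and bpf_map_key_unseen(aux) is
 * false. If another path later supplies a different or unknown key, the
 * caller stores BPF_MAP_KEY_POISON and bpf_map_key_poisoned() then blocks
 * any constant-key specialization of the call.
 */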

static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}

static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}
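
/* Example (sketch): a BPF-to-BPF subprogram call is emitted as
 *	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, insn_delta)
 * and matches bpf_pseudo_call(); a kfunc call instead carries
 * BPF_PSEUDO_KFUNC_CALL in src_reg and the kfunc's BTF id in imm. A plain
 * helper call keeps src_reg == 0 and matches neither.
 */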

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	u8 release_regno;
	int regno;
	int access_size;
	int mem_size;
	u64 msize_max_value;
	int ref_obj_id;
	int dynptr_id;
	int map_uid;
	int func_id;
	struct btf *btf;
	u32 btf_id;
	struct btf *ret_btf;
	u32 ret_btf_id;
	u32 subprogno;
	struct btf_field *kptr_field;
};

struct bpf_kfunc_call_arg_meta {
	/* In parameters */
	struct btf *btf;
	u32 func_id;
	u32 kfunc_flags;
	const struct btf_type *func_proto;
	const char *func_name;
	/* Out parameters */
	u32 ref_obj_id;
	u8 release_regno;
	bool r0_rdonly;
	u32 ret_btf_id;
	u64 r0_size;
	u32 subprogno;
	struct {
		u64 value;
		bool found;
	} arg_constant;
	struct {
		struct btf *btf;
		u32 btf_id;
	} arg_obj_drop;
	struct {
		struct btf_field *field;
	} arg_list_head;
	struct {
		struct btf_field *field;
	} arg_rbtree_root;
	struct {
		enum bpf_dynptr_type type;
		u32 id;
	} initialized_dynptr;
	struct {
		u8 spi;
		u8 frameno;
	} iter;
	u64 mem_size;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	if (log->level == BPF_LOG_KERNEL) {
		bool newline = n > 0 && log->kbuf[n - 1] == '\n';

		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
		return;
	}

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
{
	char zero = 0;

	if (!bpf_verifier_log_needed(log))
		return;

	log->len_used = new_pos;
	if (put_user(zero, log->ubuf + new_pos))
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_log);

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static void verbose_invalid_scalar(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg,
				   struct tnum *range, const char *ctx,
				   const char *reg_name)
{
	char tn_buf[48];

	verbose(env, "At %s the register %s ", ctx, reg_name);
	if (!tnum_is_unknown(reg->var_off)) {
		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "has value %s", tn_buf);
	} else {
		verbose(env, "has unknown scalar value");
	}
	tnum_strn(tn_buf, sizeof(tn_buf), *range);
	verbose(env, " should have been in %s\n", tn_buf);
}
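
/* Example output (illustrative): for a program that must exit with 0 or 1
 * but returns the constant 2, this prints
 *	At program exit the register R0 has value (0x2; 0x0) should have been in (0x0; 0x1)
 * in tnum's "(value; mask)" notation.
 */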

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	type = base_type(type);
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCK_COMMON ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_XDP_SOCK;
}

static bool reg_type_not_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_MAP_VALUE ||
		type == PTR_TO_MAP_KEY ||
		type == PTR_TO_SOCK_COMMON ||
		type == PTR_TO_MEM;
}

static bool type_is_ptr_alloc_obj(u32 type)
{
	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
}

static bool type_is_non_owning_ref(u32 type)
{
	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
}

static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
{
	struct btf_record *rec = NULL;
	struct btf_struct_meta *meta;

	if (reg->type == PTR_TO_MAP_VALUE) {
		rec = reg->map_ptr->record;
	} else if (type_is_ptr_alloc_obj(reg->type)) {
		meta = btf_find_struct_meta(reg->btf, reg->btf_id);
		if (meta)
			rec = meta->record;
	}
	return rec;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
}

static bool type_is_rdonly_mem(u32 type)
{
	return type & MEM_RDONLY;
}

static bool type_may_be_null(u32 type)
{
	return type & PTR_MAYBE_NULL;
}

static bool is_acquire_function(enum bpf_func_id func_id,
				const struct bpf_map *map)
{
	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;

	if (func_id == BPF_FUNC_sk_lookup_tcp ||
	    func_id == BPF_FUNC_sk_lookup_udp ||
	    func_id == BPF_FUNC_skc_lookup_tcp ||
	    func_id == BPF_FUNC_ringbuf_reserve ||
	    func_id == BPF_FUNC_kptr_xchg)
		return true;

	if (func_id == BPF_FUNC_map_lookup_elem &&
	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
	     map_type == BPF_MAP_TYPE_SOCKHASH))
		return true;

	return false;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
		func_id == BPF_FUNC_sk_fullsock ||
		func_id == BPF_FUNC_skc_to_tcp_sock ||
		func_id == BPF_FUNC_skc_to_tcp6_sock ||
		func_id == BPF_FUNC_skc_to_udp6_sock ||
		func_id == BPF_FUNC_skc_to_mptcp_sock ||
		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
		func_id == BPF_FUNC_skc_to_tcp_request_sock;
}

static bool is_dynptr_ref_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_dynptr_data;
}

static bool is_callback_calling_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_for_each_map_elem ||
	       func_id == BPF_FUNC_timer_set_callback ||
	       func_id == BPF_FUNC_find_vma ||
	       func_id == BPF_FUNC_loop ||
	       func_id == BPF_FUNC_user_ringbuf_drain;
}

static bool is_storage_get_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_storage_get ||
	       func_id == BPF_FUNC_inode_storage_get ||
	       func_id == BPF_FUNC_task_storage_get ||
	       func_id == BPF_FUNC_cgrp_storage_get;
}

static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id,
					const struct bpf_map *map)
{
	int ref_obj_uses = 0;

	if (is_ptr_cast_function(func_id))
		ref_obj_uses++;
	if (is_acquire_function(func_id, map))
		ref_obj_uses++;
	if (is_dynptr_ref_function(func_id))
		ref_obj_uses++;

	return ref_obj_uses > 1;
}

static bool is_cmpxchg_insn(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_STX &&
	       BPF_MODE(insn->code) == BPF_ATOMIC &&
	       insn->imm == BPF_CMPXCHG;
}
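
/* Example (sketch): BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_1, BPF_REG_2, 0)
 * encodes class BPF_STX, mode BPF_ATOMIC and imm BPF_CMPXCHG (with R0
 * implicitly holding the value to compare against), so it matches
 * is_cmpxchg_insn().
 */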

/* string representation of 'enum bpf_reg_type'
 *
 * Note that reg_type_str() can not appear more than once in a single verbose()
 * statement.
 */
static const char *reg_type_str(struct bpf_verifier_env *env,
				enum bpf_reg_type type)
{
	char postfix[16] = {0}, prefix[64] = {0};
	static const char * const str[] = {
		[NOT_INIT]		= "?",
		[SCALAR_VALUE]		= "scalar",
		[PTR_TO_CTX]		= "ctx",
		[CONST_PTR_TO_MAP]	= "map_ptr",
		[PTR_TO_MAP_VALUE]	= "map_value",
		[PTR_TO_STACK]		= "fp",
		[PTR_TO_PACKET]		= "pkt",
		[PTR_TO_PACKET_META]	= "pkt_meta",
		[PTR_TO_PACKET_END]	= "pkt_end",
		[PTR_TO_FLOW_KEYS]	= "flow_keys",
		[PTR_TO_SOCKET]		= "sock",
		[PTR_TO_SOCK_COMMON]	= "sock_common",
		[PTR_TO_TCP_SOCK]	= "tcp_sock",
		[PTR_TO_TP_BUFFER]	= "tp_buffer",
		[PTR_TO_XDP_SOCK]	= "xdp_sock",
		[PTR_TO_BTF_ID]		= "ptr_",
		[PTR_TO_MEM]		= "mem",
		[PTR_TO_BUF]		= "buf",
		[PTR_TO_FUNC]		= "func",
		[PTR_TO_MAP_KEY]	= "map_key",
		[CONST_PTR_TO_DYNPTR]	= "dynptr_ptr",
	};

	if (type & PTR_MAYBE_NULL) {
		if (base_type(type) == PTR_TO_BTF_ID)
			strncpy(postfix, "or_null_", 16);
		else
			strncpy(postfix, "_or_null", 16);
	}

	snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
		 type & MEM_RDONLY ? "rdonly_" : "",
		 type & MEM_RINGBUF ? "ringbuf_" : "",
		 type & MEM_USER ? "user_" : "",
		 type & MEM_PERCPU ? "percpu_" : "",
		 type & MEM_RCU ? "rcu_" : "",
		 type & PTR_UNTRUSTED ? "untrusted_" : "",
		 type & PTR_TRUSTED ? "trusted_" : ""
	);

	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
		 prefix, str[base_type(type)], postfix);
	return env->type_str_buf;
}
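
/* Examples of rendered type strings (illustrative): PTR_TO_MAP_VALUE |
 * PTR_MAYBE_NULL prints as "map_value_or_null", while PTR_TO_BTF_ID |
 * MEM_RCU | PTR_MAYBE_NULL prints as "rcu_ptr_or_null_"; callers such as
 * print_verifier_state() append the BTF type name after the "ptr_" form.
 */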

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
	[STACK_DYNPTR]	= 'd',
	[STACK_ITER]	= 'i',
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}

static int __get_spi(s32 off)
{
	return (-off - 1) / BPF_REG_SIZE;
}
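
/* Example: stack offsets grow downwards from the frame pointer, so
 * off in [-8, -1] maps to spi 0 and off in [-16, -9] maps to spi 1:
 * __get_spi(-8) == 0, __get_spi(-16) == 1.
 */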

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
{
	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;

	/* We need to check that slots between [spi - nr_slots + 1, spi] are
	 * within [0, allocated_stack).
	 *
	 * Please note that the spi grows downwards. For example, a dynptr
	 * takes the size of two stack slots; the first slot will be at
	 * spi and the second slot will be at spi - 1.
	 */
	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
}
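
/* Worked example: with allocated_stack == 32 (4 slots), a dynptr at
 * spi == 1 with nr_slots == 2 occupies slots 1 and 0, both inside [0, 4),
 * so the check passes; spi == 0 with nr_slots == 2 would need slot -1 and
 * fails.
 */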

static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				  const char *obj_kind, int nr_slots)
{
	int off, spi;

	if (!tnum_is_const(reg->var_off)) {
		verbose(env, "%s has to be at a constant offset\n", obj_kind);
		return -EINVAL;
	}

	off = reg->off + reg->var_off.value;
	if (off % BPF_REG_SIZE) {
		verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off);
		return -EINVAL;
	}

	spi = __get_spi(off);
	if (spi + 1 < nr_slots) {
		verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off);
		return -EINVAL;
	}

	if (!is_spi_bounds_valid(func(env, reg), spi, nr_slots))
		return -ERANGE;
	return spi;
}

static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	return stack_slot_obj_get_spi(env, reg, "dynptr", BPF_DYNPTR_NR_SLOTS);
}

static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots)
{
	return stack_slot_obj_get_spi(env, reg, "iter", nr_slots);
}

static const char *btf_type_name(const struct btf *btf, u32 id)
{
	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}

static const char *dynptr_type_str(enum bpf_dynptr_type type)
{
	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
		return "local";
	case BPF_DYNPTR_TYPE_RINGBUF:
		return "ringbuf";
	case BPF_DYNPTR_TYPE_SKB:
		return "skb";
	case BPF_DYNPTR_TYPE_XDP:
		return "xdp";
	case BPF_DYNPTR_TYPE_INVALID:
		return "<invalid>";
	default:
		WARN_ONCE(1, "unknown dynptr type %d\n", type);
		return "<unknown>";
	}
}

static const char *iter_type_str(const struct btf *btf, u32 btf_id)
{
	if (!btf || btf_id == 0)
		return "<invalid>";

	/* we already validated that type is valid and has conforming name */
	return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1;
}

static const char *iter_state_str(enum bpf_iter_state state)
{
	switch (state) {
	case BPF_ITER_STATE_ACTIVE:
		return "active";
	case BPF_ITER_STATE_DRAINED:
		return "drained";
	case BPF_ITER_STATE_INVALID:
		return "<invalid>";
	default:
		WARN_ONCE(1, "unknown iter state %d\n", state);
		return "<unknown>";
	}
}

static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
	env->scratched_regs |= 1U << regno;
}

static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
	env->scratched_stack_slots |= 1ULL << spi;
}

static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
	return (env->scratched_regs >> regno) & 1;
}

static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
	return (env->scratched_stack_slots >> regno) & 1;
}

static bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
	return env->scratched_regs || env->scratched_stack_slots;
}

static void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
	env->scratched_regs = 0U;
	env->scratched_stack_slots = 0ULL;
}

/* Used for printing the entire verifier state. */
static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
	env->scratched_regs = ~0U;
	env->scratched_stack_slots = ~0ULL;
}

static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
{
	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
	case DYNPTR_TYPE_LOCAL:
		return BPF_DYNPTR_TYPE_LOCAL;
	case DYNPTR_TYPE_RINGBUF:
		return BPF_DYNPTR_TYPE_RINGBUF;
	case DYNPTR_TYPE_SKB:
		return BPF_DYNPTR_TYPE_SKB;
	case DYNPTR_TYPE_XDP:
		return BPF_DYNPTR_TYPE_XDP;
	default:
		return BPF_DYNPTR_TYPE_INVALID;
	}
}

static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type)
{
	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
		return DYNPTR_TYPE_LOCAL;
	case BPF_DYNPTR_TYPE_RINGBUF:
		return DYNPTR_TYPE_RINGBUF;
	case BPF_DYNPTR_TYPE_SKB:
		return DYNPTR_TYPE_SKB;
	case BPF_DYNPTR_TYPE_XDP:
		return DYNPTR_TYPE_XDP;
	default:
		return 0;
	}
}

static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
{
	return type == BPF_DYNPTR_TYPE_RINGBUF;
}

static void __mark_dynptr_reg(struct bpf_reg_state *reg,
			      enum bpf_dynptr_type type,
			      bool first_slot, int dynptr_id);

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

static void mark_dynptr_stack_regs(struct bpf_verifier_env *env,
				   struct bpf_reg_state *sreg1,
				   struct bpf_reg_state *sreg2,
				   enum bpf_dynptr_type type)
{
	int id = ++env->id_gen;

	__mark_dynptr_reg(sreg1, type, true, id);
	__mark_dynptr_reg(sreg2, type, false, id);
}

static void mark_dynptr_cb_reg(struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg,
			       enum bpf_dynptr_type type)
{
	__mark_dynptr_reg(reg, type, true, ++env->id_gen);
}

static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
					struct bpf_func_state *state, int spi);

static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				   enum bpf_arg_type arg_type, int insn_idx)
{
	struct bpf_func_state *state = func(env, reg);
	enum bpf_dynptr_type type;
	int spi, i, id, err;

	spi = dynptr_get_spi(env, reg);
	if (spi < 0)
		return spi;

	/* We cannot assume both spi and spi - 1 belong to the same dynptr,
	 * hence we need to call destroy_if_dynptr_stack_slot twice for both,
	 * to ensure that for the following example:
	 * [d1][d1][d2][d2]
	 * spi   3    2    1    0
	 * So marking spi = 2 should lead to destruction of both d1 and d2. In
	 * case they do belong to same dynptr, second call won't see slot_type
	 * as STACK_DYNPTR and will simply skip destruction.
	 */
	err = destroy_if_dynptr_stack_slot(env, state, spi);
	if (err)
		return err;
	err = destroy_if_dynptr_stack_slot(env, state, spi - 1);
	if (err)
		return err;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_DYNPTR;
		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
	}

	type = arg_to_dynptr_type(arg_type);
	if (type == BPF_DYNPTR_TYPE_INVALID)
		return -EINVAL;

	mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr,
			       &state->stack[spi - 1].spilled_ptr, type);

	if (dynptr_type_refcounted(type)) {
		/* The id is used to track proper releasing */
		id = acquire_reference_state(env, insn_idx);
		if (id < 0)
			return id;

		state->stack[spi].spilled_ptr.ref_obj_id = id;
		state->stack[spi - 1].spilled_ptr.ref_obj_id = id;
	}

	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;

	return 0;
}
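
/* Example (illustrative): after bpf_dynptr_from_mem() succeeds with the
 * dynptr object at fp-16, slots spi=1 and spi=0 are all-STACK_DYNPTR and
 * the state dump shows " fp-16=dynptr_local"; for a refcounted ringbuf
 * dynptr both spilled_ptr regs share one ref_obj_id, printed as
 * "(ref_id=N)".
 */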

static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i;

	spi = dynptr_get_spi(env, reg);
	if (spi < 0)
		return spi;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_INVALID;
		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
	}

	/* Invalidate any slices associated with this dynptr */
	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type))
		WARN_ON_ONCE(release_reference(env, state->stack[spi].spilled_ptr.ref_obj_id));

	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);

	/* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot?
	 *
	 * While we don't allow reading STACK_INVALID, it is still possible to
	 * do <8 byte writes marking some but not all slots as STACK_MISC. Then,
	 * helpers or insns can do partial read of that part without failing,
	 * but check_stack_range_initialized, check_stack_read_var_off, and
	 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of
	 * the slot conservatively. Hence we need to prevent those liveness
	 * marking walks.
	 *
	 * This was not a problem before because STACK_INVALID is only set by
	 * default (where the default reg state has its reg->parent as NULL), or
	 * in clean_live_states after REG_LIVE_DONE (at which point
	 * mark_reg_read won't walk reg->parent chain), but not randomly during
	 * verifier state exploration (like we did above). Hence, for our case
	 * parentage chain will still be live (i.e. reg->parent may be
	 * non-NULL), while earlier reg->parent was NULL, so we need
	 * REG_LIVE_WRITTEN to screen off read marker propagation when it is
	 * done later on reads or by mark_dynptr_read as well to unnecessary
	 * mark registers in verifier state.
	 */
	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;

	return 0;
}

static void __mark_reg_unknown(const struct bpf_verifier_env *env,
			       struct bpf_reg_state *reg);

static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	if (!env->allow_ptr_leaks)
		__mark_reg_not_init(env, reg);
	else
		__mark_reg_unknown(env, reg);
}

static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
					struct bpf_func_state *state, int spi)
{
	struct bpf_func_state *fstate;
	struct bpf_reg_state *dreg;
	int i, dynptr_id;

	/* We always ensure that STACK_DYNPTR is never set partially,
	 * hence just checking for slot_type[0] is enough. This is
	 * different for STACK_SPILL, where it may be only set for
	 * 1 byte, so code has to use is_spilled_reg.
	 */
	if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
		return 0;

	/* Reposition spi to first slot */
	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
		spi = spi + 1;

	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
		verbose(env, "cannot overwrite referenced dynptr\n");
		return -EINVAL;
	}

	mark_stack_slot_scratched(env, spi);
	mark_stack_slot_scratched(env, spi - 1);

	/* Writing partially to one dynptr stack slot destroys both. */
	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_INVALID;
		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
	}

	dynptr_id = state->stack[spi].spilled_ptr.id;
	/* Invalidate any slices associated with this dynptr */
	bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({
		/* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */
		if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM)
			continue;
		if (dreg->dynptr_id == dynptr_id)
			mark_reg_invalid(env, dreg);
	}));

	/* Do not release reference state, we are destroying dynptr on stack,
	 * not using some helper to release it. Just reset register.
	 */
	__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
	__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);

	/* Same reason as unmark_stack_slots_dynptr above */
	state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
	state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;

	return 0;
}

static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	int spi;

	if (reg->type == CONST_PTR_TO_DYNPTR)
		return false;

	spi = dynptr_get_spi(env, reg);

	/* -ERANGE (i.e. spi not falling into allocated stack slots) isn't an
	 * error because this just means the stack state hasn't been updated yet.
	 * We will do check_mem_access to check and update stack bounds later.
	 */
	if (spi < 0 && spi != -ERANGE)
		return false;

	/* We don't need to check if the stack slots are marked by previous
	 * dynptr initializations because we allow overwriting existing unreferenced
	 * STACK_DYNPTR slots, see mark_stack_slots_dynptr which calls
	 * destroy_if_dynptr_stack_slot to ensure dynptr objects at the slots we are
	 * touching are completely destructed before we reinitialize them for a new
	 * one. For referenced ones, destroy_if_dynptr_stack_slot returns an error early
	 * instead of delaying it until the end where the user will get "Unreleased
	 * reference" error.
	 */
	return true;
}

static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct bpf_func_state *state = func(env, reg);
	int i, spi;

	/* This already represents first slot of initialized bpf_dynptr.
	 *
	 * CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to
	 * check_func_arg_reg_off's logic, so we don't need to check its
	 * offset and alignment.
	 */
	if (reg->type == CONST_PTR_TO_DYNPTR)
		return true;

	spi = dynptr_get_spi(env, reg);
	if (spi < 0)
		return false;
	if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
		return false;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
			return false;
	}

	return true;
}

static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				    enum bpf_arg_type arg_type)
{
	struct bpf_func_state *state = func(env, reg);
	enum bpf_dynptr_type dynptr_type;
	int spi;

	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
	if (arg_type == ARG_PTR_TO_DYNPTR)
		return true;

	dynptr_type = arg_to_dynptr_type(arg_type);
	if (reg->type == CONST_PTR_TO_DYNPTR) {
		return reg->dynptr.type == dynptr_type;
	} else {
		spi = dynptr_get_spi(env, reg);
		if (spi < 0)
			return false;
		return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type;
	}
}

static void __mark_reg_known_zero(struct bpf_reg_state *reg);

static int mark_stack_slots_iter(struct bpf_verifier_env *env,
				 struct bpf_reg_state *reg, int insn_idx,
				 struct btf *btf, u32 btf_id, int nr_slots)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i, j, id;

	spi = iter_get_spi(env, reg, nr_slots);
	if (spi < 0)
		return spi;

	id = acquire_reference_state(env, insn_idx);
	if (id < 0)
		return id;

	for (i = 0; i < nr_slots; i++) {
		struct bpf_stack_state *slot = &state->stack[spi - i];
		struct bpf_reg_state *st = &slot->spilled_ptr;

		__mark_reg_known_zero(st);
		st->type = PTR_TO_STACK; /* we don't have dedicated reg type */
		st->live |= REG_LIVE_WRITTEN;
		st->ref_obj_id = i == 0 ? id : 0;
		st->iter.btf = btf;
		st->iter.btf_id = btf_id;
		st->iter.state = BPF_ITER_STATE_ACTIVE;
		st->iter.depth = 0;

		for (j = 0; j < BPF_REG_SIZE; j++)
			slot->slot_type[j] = STACK_ITER;

		mark_stack_slot_scratched(env, spi - i);
	}

	return 0;
}
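
/* Example (sketch): for the kfunc call bpf_iter_num_new(&it, 0, 10) with
 * 'it' on the stack, every byte of the slots covering
 * sizeof(struct bpf_iter_num) becomes STACK_ITER, the first slot's
 * spilled_ptr holds the acquired ref_obj_id, and the iterator starts as
 * BPF_ITER_STATE_ACTIVE with depth 0.
 */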

static int unmark_stack_slots_iter(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg, int nr_slots)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i, j;

	spi = iter_get_spi(env, reg, nr_slots);
	if (spi < 0)
		return spi;

	for (i = 0; i < nr_slots; i++) {
		struct bpf_stack_state *slot = &state->stack[spi - i];
		struct bpf_reg_state *st = &slot->spilled_ptr;

		if (i == 0)
			WARN_ON_ONCE(release_reference(env, st->ref_obj_id));

		__mark_reg_not_init(env, st);

		/* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */
		st->live |= REG_LIVE_WRITTEN;

		for (j = 0; j < BPF_REG_SIZE; j++)
			slot->slot_type[j] = STACK_INVALID;

		mark_stack_slot_scratched(env, spi - i);
	}

	return 0;
}

static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env,
				     struct bpf_reg_state *reg, int nr_slots)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i, j;

	/* For -ERANGE (i.e. spi not falling into allocated stack slots), we
	 * will do check_mem_access to check and update stack bounds later, so
	 * return true for that case.
	 */
	spi = iter_get_spi(env, reg, nr_slots);
	if (spi == -ERANGE)
		return true;
	if (spi < 0)
		return false;

	for (i = 0; i < nr_slots; i++) {
		struct bpf_stack_state *slot = &state->stack[spi - i];

		for (j = 0; j < BPF_REG_SIZE; j++)
			if (slot->slot_type[j] == STACK_ITER)
				return false;
	}

	return true;
}

static bool is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				   struct btf *btf, u32 btf_id, int nr_slots)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i, j;

	spi = iter_get_spi(env, reg, nr_slots);
	if (spi < 0)
		return false;

	for (i = 0; i < nr_slots; i++) {
		struct bpf_stack_state *slot = &state->stack[spi - i];
		struct bpf_reg_state *st = &slot->spilled_ptr;

		/* only main (first) slot has ref_obj_id set */
		if (i == 0 && !st->ref_obj_id)
			return false;
		if (i != 0 && st->ref_obj_id)
			return false;
		if (st->iter.btf != btf || st->iter.btf_id != btf_id)
			return false;

		for (j = 0; j < BPF_REG_SIZE; j++)
			if (slot->slot_type[j] != STACK_ITER)
				return false;
	}

	return true;
}

/* Check if given stack slot is "special":
 * - spilled register state (STACK_SPILL);
 * - dynptr state (STACK_DYNPTR);
 * - iter state (STACK_ITER).
 */
static bool is_stack_slot_special(const struct bpf_stack_state *stack)
{
	enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1];

	switch (type) {
	case STACK_SPILL:
	case STACK_DYNPTR:
	case STACK_ITER:
		return true;
	case STACK_INVALID:
	case STACK_MISC:
	case STACK_ZERO:
		return false;
	default:
		WARN_ONCE(1, "unknown stack slot type %d\n", type);
		return true;
	}
}

/* The reg state of a pointer or a bounded scalar was saved when
 * it was spilled to the stack.
 */
static bool is_spilled_reg(const struct bpf_stack_state *stack)
{
	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}

static void scrub_spilled_slot(u8 *stype)
{
	if (*stype != STACK_INVALID)
		*stype = STACK_MISC;
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state,
				 bool print_all)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		if (!print_all && !reg_scratched(env, i))
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=");
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			const char *sep = "";

			verbose(env, "%s", reg_type_str(env, t));
			if (base_type(t) == PTR_TO_BTF_ID)
				verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id));
			verbose(env, "(");
/*
 * _a stands for append, was shortened to avoid multiline statements below.
 * This macro is used to output a comma separated list of attributes.
 */
#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })

			if (reg->id)
				verbose_a("id=%d", reg->id);
			if (reg->ref_obj_id)
				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
			if (type_is_non_owning_ref(reg->type))
				verbose_a("%s", "non_own_ref");
			if (t != SCALAR_VALUE)
				verbose_a("off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose_a("r=%d", reg->range);
			else if (base_type(t) == CONST_PTR_TO_MAP ||
				 base_type(t) == PTR_TO_MAP_KEY ||
				 base_type(t) == PTR_TO_MAP_VALUE)
				verbose_a("ks=%d,vs=%d",
					  reg->map_ptr->key_size,
					  reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose_a("imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose_a("smin=%lld", (long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose_a("smax=%lld", (long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose_a("var_off=%s", tn_buf);
				}
				if (reg->s32_min_value != reg->smin_value &&
				    reg->s32_min_value != S32_MIN)
					verbose_a("s32_min=%d", (int)(reg->s32_min_value));
				if (reg->s32_max_value != reg->smax_value &&
				    reg->s32_max_value != S32_MAX)
					verbose_a("s32_max=%d", (int)(reg->s32_max_value));
				if (reg->u32_min_value != reg->umin_value &&
				    reg->u32_min_value != U32_MIN)
					verbose_a("u32_min=%d", (int)(reg->u32_min_value));
				if (reg->u32_max_value != reg->umax_value &&
				    reg->u32_max_value != U32_MAX)
					verbose_a("u32_max=%d", (int)(reg->u32_max_value));
			}
#undef verbose_a

			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		if (!print_all && !stack_slot_scratched(env, i))
			continue;
		switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) {
		case STACK_SPILL:
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
			if (t == SCALAR_VALUE && reg->precise)
				verbose(env, "P");
			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
				verbose(env, "%lld", reg->var_off.value + reg->off);
			break;
		case STACK_DYNPTR:
			i += BPF_DYNPTR_NR_SLOTS - 1;
			reg = &state->stack[i].spilled_ptr;

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=dynptr_%s", dynptr_type_str(reg->dynptr.type));
			if (reg->ref_obj_id)
				verbose(env, "(ref_id=%d)", reg->ref_obj_id);
			break;
		case STACK_ITER:
			/* only main slot has ref_obj_id set; skip others */
			reg = &state->stack[i].spilled_ptr;
			if (!reg->ref_obj_id)
				continue;

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)",
				iter_type_str(reg->iter.btf, reg->iter.btf_id),
				reg->ref_obj_id, iter_state_str(reg->iter.state),
				reg->iter.depth);
			break;
		case STACK_MISC:
		case STACK_ZERO:
		default:
			reg = &state->stack[i].spilled_ptr;

			for (j = 0; j < BPF_REG_SIZE; j++)
				types_buf[j] = slot_type_char[state->stack[i].slot_type[j]];
			types_buf[BPF_REG_SIZE] = 0;

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=%s", types_buf);
			break;
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	if (state->in_callback_fn)
		verbose(env, " cb");
	if (state->in_async_callback_fn)
		verbose(env, " async_cb");
	verbose(env, "\n");
	mark_verifier_state_clean(env);
}
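
/* Example output (illustrative): a printed state line looks like
 *	frame1: R1=ctx(off=0,imm=0) R6_w=scalar(umax=255) R10=fp0 fp-16=dynptr_local refs=2
 * where "_w" is liveness (written), "fp-16" is a stack slot and "refs"
 * lists the acquired reference ids.
 */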

static inline u32 vlog_alignment(u32 pos)
{
	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
}

static void print_insn_state(struct bpf_verifier_env *env,
			     const struct bpf_func_state *state)
{
	if (env->prev_log_len && env->prev_log_len == env->log.len_used) {
		/* remove new line character */
		bpf_vlog_reset(&env->log, env->prev_log_len - 1);
		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_len), ' ');
	} else {
		verbose(env, "%d:", env->insn_idx);
	}
	print_verifier_state(env, state, false);
}

/* copy array src of length n * size bytes to dst. dst is reallocated if it's too
 * small to hold src. This is different from krealloc since we don't want to preserve
 * the contents of dst.
 *
 * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
 * not be allocated.
 */
static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
{
	size_t alloc_bytes;
	void *orig = dst;
	size_t bytes;

	if (ZERO_OR_NULL_PTR(src))
		goto out;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes));
	dst = krealloc(orig, alloc_bytes, flags);
	if (!dst) {
		kfree(orig);
		return NULL;
	}

	memcpy(dst, src, bytes);
out:
	return dst ? dst : ZERO_SIZE_PTR;
}
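
/* Example: copy_array(NULL, src, 4, sizeof(u32), GFP_KERNEL) allocates at
 * least 16 bytes and copies src into them, while copy_array(dst, NULL, 0,
 * sizeof(u32), GFP_KERNEL) leaves dst's contents alone and returns dst
 * (or ZERO_SIZE_PTR if dst was NULL).
 */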

/* resize an array from old_n items to new_n items. the array is reallocated if it's too
 * small to hold new_n items. new items are zeroed out if the array grows.
 *
 * Contrary to krealloc_array, does not free arr if new_n is zero.
 */
static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
{
	size_t alloc_size;
	void *new_arr;

	if (!new_n || old_n == new_n)
		goto out;

	alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
	new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
	if (!new_arr) {
		kfree(arr);
		return NULL;
	}
	arr = new_arr;

	if (new_n > old_n)
		memset(arr + old_n * size, 0, (new_n - old_n) * size);

out:
	return arr ? arr : ZERO_SIZE_PTR;
}
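
/* Example: realloc_array(arr, 2, 4, sizeof(*arr)) grows the array and
 * zeroes the two new items; realloc_array(arr, 2, 0, sizeof(*arr))
 * returns arr unchanged instead of freeing it.
 */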

static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
			       sizeof(struct bpf_reference_state), GFP_KERNEL);
	if (!dst->refs)
		return -ENOMEM;

	dst->acquired_refs = src->acquired_refs;
	return 0;
}

static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	size_t n = src->allocated_stack / BPF_REG_SIZE;

	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
				GFP_KERNEL);
	if (!dst->stack)
		return -ENOMEM;

	dst->allocated_stack = src->allocated_stack;
	return 0;
}

static int resize_reference_state(struct bpf_func_state *state, size_t n)
{
	state->refs = realloc_array(state->refs, state->acquired_refs, n,
				    sizeof(struct bpf_reference_state));
	if (!state->refs)
		return -ENOMEM;

	state->acquired_refs = n;
	return 0;
}

static int grow_stack_state(struct bpf_func_state *state, int size)
{
	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;

	if (old_n >= n)
		return 0;

	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
	if (!state->stack)
		return -ENOMEM;

	state->allocated_stack = size;
	return 0;
}
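
/* Example (illustrative): after a store at fp-40, callers grow the stack
 * with something like grow_stack_state(state, round_up(40, BPF_REG_SIZE)):
 * allocated_stack goes from e.g. 16 to 40 and realloc_array() zeroes the
 * three newly added slots.
 */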

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = resize_reference_state(state, state->acquired_refs + 1);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;
	state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0;

	return id;
}
1646
1647/* release function corresponding to acquire_reference_state(). Idempotent. */
46f8bc92 1648static int release_reference_state(struct bpf_func_state *state, int ptr_id)
fd978bf7
JS
1649{
1650 int i, last_idx;
1651
fd978bf7
JS
1652 last_idx = state->acquired_refs - 1;
1653 for (i = 0; i < state->acquired_refs; i++) {
1654 if (state->refs[i].id == ptr_id) {
9d9d00ac
KKD
1655 /* Cannot release caller references in callbacks */
1656 if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
1657 return -EINVAL;
fd978bf7
JS
1658 if (last_idx && i != last_idx)
1659 memcpy(&state->refs[i], &state->refs[last_idx],
1660 sizeof(*state->refs));
1661 memset(&state->refs[last_idx], 0, sizeof(*state->refs));
1662 state->acquired_refs--;
638f5b90 1663 return 0;
638f5b90 1664 }
638f5b90 1665 }
46f8bc92 1666 return -EINVAL;
fd978bf7
JS
1667}
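
/* Editor's illustrative sketch (hypothetical flow): how the two helpers
 * above pair up. A helper call that returns a referenced pointer acquires
 * an id and tags the return register with it; verifying the matching
 * release helper later drops that same id. The real verifier's register
 * bookkeeping is more involved; this only shows the pairing.
 */
static int example_track_reference(struct bpf_verifier_env *env,
				   struct bpf_reg_state *regs, int insn_idx)
{
	int id = acquire_reference_state(env, insn_idx);

	if (id < 0)
		return id;
	regs[BPF_REG_0].ref_obj_id = id;	/* returned pointer owns the reference */
	/* ... later, when the release helper consumes the pointer ... */
	return release_reference_state(cur_func(env), id);
}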
1668
f4d7e40a
AS
1669static void free_func_state(struct bpf_func_state *state)
1670{
5896351e
AS
1671 if (!state)
1672 return;
fd978bf7 1673 kfree(state->refs);
f4d7e40a
AS
1674 kfree(state->stack);
1675 kfree(state);
1676}
1677
b5dc0163
AS
1678static void clear_jmp_history(struct bpf_verifier_state *state)
1679{
1680 kfree(state->jmp_history);
1681 state->jmp_history = NULL;
1682 state->jmp_history_cnt = 0;
1683}
1684
1969db47
AS
1685static void free_verifier_state(struct bpf_verifier_state *state,
1686 bool free_self)
638f5b90 1687{
f4d7e40a
AS
1688 int i;
1689
1690 for (i = 0; i <= state->curframe; i++) {
1691 free_func_state(state->frame[i]);
1692 state->frame[i] = NULL;
1693 }
b5dc0163 1694 clear_jmp_history(state);
1969db47
AS
1695 if (free_self)
1696 kfree(state);
638f5b90
AS
1697}
1698
1699/* copy verifier state from src to dst growing dst stack space
1700 * when necessary to accommodate larger src stack
1701 */
f4d7e40a
AS
1702static int copy_func_state(struct bpf_func_state *dst,
1703 const struct bpf_func_state *src)
638f5b90
AS
1704{
1705 int err;
1706
fd978bf7
JS
1707 memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
1708 err = copy_reference_state(dst, src);
638f5b90
AS
1709 if (err)
1710 return err;
638f5b90
AS
1711 return copy_stack_state(dst, src);
1712}
1713
f4d7e40a
AS
1714static int copy_verifier_state(struct bpf_verifier_state *dst_state,
1715 const struct bpf_verifier_state *src)
1716{
1717 struct bpf_func_state *dst;
1718 int i, err;
1719
06ab6a50
LB
1720 dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
1721 src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
1722 GFP_USER);
1723 if (!dst_state->jmp_history)
1724 return -ENOMEM;
b5dc0163
AS
1725 dst_state->jmp_history_cnt = src->jmp_history_cnt;
1726
f4d7e40a
AS
1727 /* if dst has more stack frames than src, free the extra ones */
1728 for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
1729 free_func_state(dst_state->frame[i]);
1730 dst_state->frame[i] = NULL;
1731 }
979d63d5 1732 dst_state->speculative = src->speculative;
9bb00b28 1733 dst_state->active_rcu_lock = src->active_rcu_lock;
f4d7e40a 1734 dst_state->curframe = src->curframe;
d0d78c1d
KKD
1735 dst_state->active_lock.ptr = src->active_lock.ptr;
1736 dst_state->active_lock.id = src->active_lock.id;
2589726d
AS
1737 dst_state->branches = src->branches;
1738 dst_state->parent = src->parent;
b5dc0163
AS
1739 dst_state->first_insn_idx = src->first_insn_idx;
1740 dst_state->last_insn_idx = src->last_insn_idx;
f4d7e40a
AS
1741 for (i = 0; i <= src->curframe; i++) {
1742 dst = dst_state->frame[i];
1743 if (!dst) {
1744 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
1745 if (!dst)
1746 return -ENOMEM;
1747 dst_state->frame[i] = dst;
1748 }
1749 err = copy_func_state(dst, src->frame[i]);
1750 if (err)
1751 return err;
1752 }
1753 return 0;
1754}
1755
2589726d
AS
1756static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
1757{
1758 while (st) {
1759 u32 br = --st->branches;
1760
1761 /* WARN_ON(br > 1) technically makes sense here,
1762 * but see comment in push_stack(), hence:
1763 */
1764 WARN_ONCE((int)br < 0,
1765 "BUG update_branch_counts:branches_to_explore=%d\n",
1766 br);
1767 if (br)
1768 break;
1769 st = st->parent;
1770 }
1771}
1772
638f5b90 1773static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
6f8a57cc 1774 int *insn_idx, bool pop_log)
638f5b90
AS
1775{
1776 struct bpf_verifier_state *cur = env->cur_state;
1777 struct bpf_verifier_stack_elem *elem, *head = env->head;
1778 int err;
17a52670
AS
1779
1780 if (env->head == NULL)
638f5b90 1781 return -ENOENT;
17a52670 1782
638f5b90
AS
1783 if (cur) {
1784 err = copy_verifier_state(cur, &head->st);
1785 if (err)
1786 return err;
1787 }
6f8a57cc
AN
1788 if (pop_log)
1789 bpf_vlog_reset(&env->log, head->log_pos);
638f5b90
AS
1790 if (insn_idx)
1791 *insn_idx = head->insn_idx;
17a52670 1792 if (prev_insn_idx)
638f5b90
AS
1793 *prev_insn_idx = head->prev_insn_idx;
1794 elem = head->next;
1969db47 1795 free_verifier_state(&head->st, false);
638f5b90 1796 kfree(head);
17a52670
AS
1797 env->head = elem;
1798 env->stack_size--;
638f5b90 1799 return 0;
17a52670
AS
1800}
1801
58e2af8b 1802static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
979d63d5
DB
1803 int insn_idx, int prev_insn_idx,
1804 bool speculative)
17a52670 1805{
638f5b90 1806 struct bpf_verifier_state *cur = env->cur_state;
58e2af8b 1807 struct bpf_verifier_stack_elem *elem;
638f5b90 1808 int err;
17a52670 1809
638f5b90 1810 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
17a52670
AS
1811 if (!elem)
1812 goto err;
1813
17a52670
AS
1814 elem->insn_idx = insn_idx;
1815 elem->prev_insn_idx = prev_insn_idx;
1816 elem->next = env->head;
6f8a57cc 1817 elem->log_pos = env->log.len_used;
17a52670
AS
1818 env->head = elem;
1819 env->stack_size++;
1969db47
AS
1820 err = copy_verifier_state(&elem->st, cur);
1821 if (err)
1822 goto err;
979d63d5 1823 elem->st.speculative |= speculative;
b285fcb7
AS
1824 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1825 verbose(env, "The sequence of %d jumps is too complex.\n",
1826 env->stack_size);
17a52670
AS
1827 goto err;
1828 }
2589726d
AS
1829 if (elem->st.parent) {
1830 ++elem->st.parent->branches;
1831 /* WARN_ON(branches > 2) technically makes sense here,
1832 * but
1833 * 1. speculative states will bump 'branches' for non-branch
1834 * instructions
1835 * 2. is_state_visited() heuristics may decide not to create
1836 * a new state for a sequence of branches and all such current
1837 * and cloned states will be pointing to a single parent state
1838 * which might have large 'branches' count.
1839 */
1840 }
17a52670
AS
1841 return &elem->st;
1842err:
5896351e
AS
1843 free_verifier_state(env->cur_state, true);
1844 env->cur_state = NULL;
17a52670 1845 /* pop all elements and return */
6f8a57cc 1846 while (!pop_stack(env, NULL, NULL, false));
17a52670
AS
1847 return NULL;
1848}
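
/* Editor's sketch: how branch exploration uses the pair above. When the
 * main verification loop (not shown) meets a conditional jump, it pushes
 * one path for later and keeps verifying the other; exhausted paths are
 * eventually popped via pop_stack(). The wrapper below is hypothetical.
 */
static int example_explore_branch(struct bpf_verifier_env *env,
				  int insn_idx, int target_idx)
{
	struct bpf_verifier_state *other;

	/* queue the fall-through path for later ... */
	other = push_stack(env, insn_idx + 1, insn_idx, false);
	if (!other)
		return -ENOMEM;
	/* ... and continue down the jump target now */
	env->insn_idx = target_idx;
	return 0;
}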
1849
1850#define CALLER_SAVED_REGS 6
1851static const int caller_saved[CALLER_SAVED_REGS] = {
1852 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
1853};
1854
e688c3db
AS
1855/* This helper doesn't clear reg->id */
1856static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
b03c9f9f 1857{
b03c9f9f
EC
1858 reg->var_off = tnum_const(imm);
1859 reg->smin_value = (s64)imm;
1860 reg->smax_value = (s64)imm;
1861 reg->umin_value = imm;
1862 reg->umax_value = imm;
3f50f132
JF
1863
1864 reg->s32_min_value = (s32)imm;
1865 reg->s32_max_value = (s32)imm;
1866 reg->u32_min_value = (u32)imm;
1867 reg->u32_max_value = (u32)imm;
1868}
1869
e688c3db
AS
1870/* Mark the unknown part of a register (variable offset or scalar value) as
1871 * known to have the value @imm.
1872 */
1873static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
1874{
a73bf9f2 1875 /* Clear off and union(map_ptr, range) */
e688c3db
AS
1876 memset(((u8 *)reg) + sizeof(reg->type), 0,
1877 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
a73bf9f2
AN
1878 reg->id = 0;
1879 reg->ref_obj_id = 0;
e688c3db
AS
1880 ___mark_reg_known(reg, imm);
1881}
1882
3f50f132
JF
1883static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
1884{
1885 reg->var_off = tnum_const_subreg(reg->var_off, imm);
1886 reg->s32_min_value = (s32)imm;
1887 reg->s32_max_value = (s32)imm;
1888 reg->u32_min_value = (u32)imm;
1889 reg->u32_max_value = (u32)imm;
b03c9f9f
EC
1890}
1891
f1174f77
EC
1892/* Mark the 'variable offset' part of a register as zero. This should be
1893 * used only on registers holding a pointer type.
1894 */
1895static void __mark_reg_known_zero(struct bpf_reg_state *reg)
a9789ef9 1896{
b03c9f9f 1897 __mark_reg_known(reg, 0);
f1174f77 1898}
a9789ef9 1899
cc2b14d5
AS
1900static void __mark_reg_const_zero(struct bpf_reg_state *reg)
1901{
1902 __mark_reg_known(reg, 0);
cc2b14d5
AS
1903 reg->type = SCALAR_VALUE;
1904}
1905
61bd5218
JK
1906static void mark_reg_known_zero(struct bpf_verifier_env *env,
1907 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
1908{
1909 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 1910 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
f1174f77
EC
1911 /* Something bad happened, let's kill all regs */
1912 for (regno = 0; regno < MAX_BPF_REG; regno++)
f54c7898 1913 __mark_reg_not_init(env, regs + regno);
f1174f77
EC
1914 return;
1915 }
1916 __mark_reg_known_zero(regs + regno);
1917}
1918
27060531 1919static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type,
f8064ab9 1920 bool first_slot, int dynptr_id)
27060531
KKD
1921{
1922 /* reg->type has no meaning for STACK_DYNPTR, but when we set reg for
1923 * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply
1924 * set it unconditionally as it is ignored for STACK_DYNPTR anyway.
1925 */
1926 __mark_reg_known_zero(reg);
1927 reg->type = CONST_PTR_TO_DYNPTR;
f8064ab9
KKD
1928 /* Give each dynptr a unique id to uniquely associate slices with it. */
1929 reg->id = dynptr_id;
27060531
KKD
1930 reg->dynptr.type = type;
1931 reg->dynptr.first_slot = first_slot;
1932}
1933
4ddb7416
DB
1934static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
1935{
c25b2ae1 1936 if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
4ddb7416
DB
1937 const struct bpf_map *map = reg->map_ptr;
1938
1939 if (map->inner_map_meta) {
1940 reg->type = CONST_PTR_TO_MAP;
1941 reg->map_ptr = map->inner_map_meta;
3e8ce298
AS
1942 /* transfer reg's id which is unique for every map_lookup_elem
1943 * as UID of the inner map.
1944 */
db559117 1945 if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER))
34d11a44 1946 reg->map_uid = reg->id;
4ddb7416
DB
1947 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
1948 reg->type = PTR_TO_XDP_SOCK;
1949 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
1950 map->map_type == BPF_MAP_TYPE_SOCKHASH) {
1951 reg->type = PTR_TO_SOCKET;
1952 } else {
1953 reg->type = PTR_TO_MAP_VALUE;
1954 }
c25b2ae1 1955 return;
4ddb7416 1956 }
c25b2ae1
HL
1957
1958 reg->type &= ~PTR_MAYBE_NULL;
4ddb7416
DB
1959}
1960
5d92ddc3
DM
1961static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
1962 struct btf_field_graph_root *ds_head)
1963{
1964 __mark_reg_known_zero(&regs[regno]);
1965 regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC;
1966 regs[regno].btf = ds_head->btf;
1967 regs[regno].btf_id = ds_head->value_btf_id;
1968 regs[regno].off = ds_head->node_offset;
1969}
1970
de8f3a83
DB
1971static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
1972{
1973 return type_is_pkt_pointer(reg->type);
1974}
1975
1976static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
1977{
1978 return reg_is_pkt_pointer(reg) ||
1979 reg->type == PTR_TO_PACKET_END;
1980}
1981
66e3a13e
JK
1982static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg)
1983{
1984 return base_type(reg->type) == PTR_TO_MEM &&
1985 (reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP);
1986}
1987
de8f3a83
DB
1988/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
1989static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
1990 enum bpf_reg_type which)
1991{
1992 /* The register can already have a range from prior markings.
1993 * This is fine as long as it hasn't been advanced from its
1994 * origin.
1995 */
1996 return reg->type == which &&
1997 reg->id == 0 &&
1998 reg->off == 0 &&
1999 tnum_equals_const(reg->var_off, 0);
2000}
2001
3f50f132
JF
2002/* Reset the min/max bounds of a register */
2003static void __mark_reg_unbounded(struct bpf_reg_state *reg)
2004{
2005 reg->smin_value = S64_MIN;
2006 reg->smax_value = S64_MAX;
2007 reg->umin_value = 0;
2008 reg->umax_value = U64_MAX;
2009
2010 reg->s32_min_value = S32_MIN;
2011 reg->s32_max_value = S32_MAX;
2012 reg->u32_min_value = 0;
2013 reg->u32_max_value = U32_MAX;
2014}
2015
2016static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
2017{
2018 reg->smin_value = S64_MIN;
2019 reg->smax_value = S64_MAX;
2020 reg->umin_value = 0;
2021 reg->umax_value = U64_MAX;
2022}
2023
2024static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
2025{
2026 reg->s32_min_value = S32_MIN;
2027 reg->s32_max_value = S32_MAX;
2028 reg->u32_min_value = 0;
2029 reg->u32_max_value = U32_MAX;
2030}
2031
2032static void __update_reg32_bounds(struct bpf_reg_state *reg)
2033{
2034 struct tnum var32_off = tnum_subreg(reg->var_off);
2035
2036 /* min signed is max(sign bit) | min(other bits) */
2037 reg->s32_min_value = max_t(s32, reg->s32_min_value,
2038 var32_off.value | (var32_off.mask & S32_MIN));
2039 /* max signed is min(sign bit) | max(other bits) */
2040 reg->s32_max_value = min_t(s32, reg->s32_max_value,
2041 var32_off.value | (var32_off.mask & S32_MAX));
2042 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
2043 reg->u32_max_value = min(reg->u32_max_value,
2044 (u32)(var32_off.value | var32_off.mask));
2045}
2046
2047static void __update_reg64_bounds(struct bpf_reg_state *reg)
b03c9f9f
EC
2048{
2049 /* min signed is max(sign bit) | min(other bits) */
2050 reg->smin_value = max_t(s64, reg->smin_value,
2051 reg->var_off.value | (reg->var_off.mask & S64_MIN));
2052 /* max signed is min(sign bit) | max(other bits) */
2053 reg->smax_value = min_t(s64, reg->smax_value,
2054 reg->var_off.value | (reg->var_off.mask & S64_MAX));
2055 reg->umin_value = max(reg->umin_value, reg->var_off.value);
2056 reg->umax_value = min(reg->umax_value,
2057 reg->var_off.value | reg->var_off.mask);
2058}
2059
3f50f132
JF
2060static void __update_reg_bounds(struct bpf_reg_state *reg)
2061{
2062 __update_reg32_bounds(reg);
2063 __update_reg64_bounds(reg);
2064}
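
/* Editor's worked example (hypothetical values): how var_off knowledge
 * tightens the numeric bounds. A tnum that says "only the low two bits
 * are unknown" clamps an otherwise unbounded scalar to [0, 3] in all
 * four bound domains.
 */
static void example_tnum_to_bounds(struct bpf_reg_state *reg)
{
	__mark_reg_unbounded(reg);
	reg->var_off = tnum_range(0, 3);	/* value = 0, mask = 0x3 */
	__update_reg_bounds(reg);		/* s/u 32- and 64-bit bounds are now [0, 3] */
}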
2065
b03c9f9f 2066/* Uses signed min/max values to inform unsigned, and vice-versa */
3f50f132
JF
2067static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
2068{
2069 /* Learn sign from signed bounds.
2070 * If we cannot cross the sign boundary, then signed and unsigned bounds
2071 * are the same, so combine. This works even in the negative case, e.g.
2072 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
2073 */
2074 if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
2075 reg->s32_min_value = reg->u32_min_value =
2076 max_t(u32, reg->s32_min_value, reg->u32_min_value);
2077 reg->s32_max_value = reg->u32_max_value =
2078 min_t(u32, reg->s32_max_value, reg->u32_max_value);
2079 return;
2080 }
2081 /* Learn sign from unsigned bounds. Signed bounds cross the sign
2082 * boundary, so we must be careful.
2083 */
2084 if ((s32)reg->u32_max_value >= 0) {
2085 /* Positive. We can't learn anything from the smin, but smax
2086 * is positive, hence safe.
2087 */
2088 reg->s32_min_value = reg->u32_min_value;
2089 reg->s32_max_value = reg->u32_max_value =
2090 min_t(u32, reg->s32_max_value, reg->u32_max_value);
2091 } else if ((s32)reg->u32_min_value < 0) {
2092 /* Negative. We can't learn anything from the smax, but smin
2093 * is negative, hence safe.
2094 */
2095 reg->s32_min_value = reg->u32_min_value =
2096 max_t(u32, reg->s32_min_value, reg->u32_min_value);
2097 reg->s32_max_value = reg->u32_max_value;
2098 }
2099}
2100
2101static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
b03c9f9f
EC
2102{
2103 /* Learn sign from signed bounds.
2104 * If we cannot cross the sign boundary, then signed and unsigned bounds
2105 * are the same, so combine. This works even in the negative case, e.g.
2106 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
2107 */
2108 if (reg->smin_value >= 0 || reg->smax_value < 0) {
2109 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
2110 reg->umin_value);
2111 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
2112 reg->umax_value);
2113 return;
2114 }
2115 /* Learn sign from unsigned bounds. Signed bounds cross the sign
2116 * boundary, so we must be careful.
2117 */
2118 if ((s64)reg->umax_value >= 0) {
2119 /* Positive. We can't learn anything from the smin, but smax
2120 * is positive, hence safe.
2121 */
2122 reg->smin_value = reg->umin_value;
2123 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
2124 reg->umax_value);
2125 } else if ((s64)reg->umin_value < 0) {
2126 /* Negative. We can't learn anything from the smax, but smin
2127 * is negative, hence safe.
2128 */
2129 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
2130 reg->umin_value);
2131 reg->smax_value = reg->umax_value;
2132 }
2133}
2134
3f50f132
JF
2135static void __reg_deduce_bounds(struct bpf_reg_state *reg)
2136{
2137 __reg32_deduce_bounds(reg);
2138 __reg64_deduce_bounds(reg);
2139}
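
/* Editor's worked example (hypothetical values): sign learning in the
 * negative case called out in the comments above. With s64 bounds of
 * [-3, -1] the register cannot cross the sign boundary, so the unsigned
 * bounds collapse to [0xfffffffffffffffd, 0xffffffffffffffff].
 */
static void example_deduce_negative(struct bpf_reg_state *reg)
{
	__mark_reg64_unbounded(reg);
	reg->smin_value = -3;
	reg->smax_value = -1;
	__reg64_deduce_bounds(reg);
	/* now umin_value == (u64)-3 and umax_value == (u64)-1 */
}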
2140
b03c9f9f
EC
2141/* Attempts to improve var_off based on unsigned min/max information */
2142static void __reg_bound_offset(struct bpf_reg_state *reg)
2143{
3f50f132
JF
2144 struct tnum var64_off = tnum_intersect(reg->var_off,
2145 tnum_range(reg->umin_value,
2146 reg->umax_value));
2147 struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
2148 tnum_range(reg->u32_min_value,
2149 reg->u32_max_value));
2150
2151 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
b03c9f9f
EC
2152}
2153
3844d153
DB
2154static void reg_bounds_sync(struct bpf_reg_state *reg)
2155{
2156 /* We might have learned new bounds from the var_off. */
2157 __update_reg_bounds(reg);
2158 /* We might have learned something about the sign bit. */
2159 __reg_deduce_bounds(reg);
2160 /* We might have learned some bits from the bounds. */
2161 __reg_bound_offset(reg);
2162 /* Intersecting with the old var_off might have improved our bounds
2163 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
2164 * then new var_off is (0; 0x7f...fc) which improves our umax.
2165 */
2166 __update_reg_bounds(reg);
2167}
2168
e572ff80
DB
2169static bool __reg32_bound_s64(s32 a)
2170{
2171 return a >= 0 && a <= S32_MAX;
2172}
2173
3f50f132 2174static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
b03c9f9f 2175{
3f50f132
JF
2176 reg->umin_value = reg->u32_min_value;
2177 reg->umax_value = reg->u32_max_value;
e572ff80
DB
2178
2179 /* Attempt to pull 32-bit signed bounds into 64-bit bounds but must
2180 * be positive otherwise set to worse case bounds and refine later
2181 * from tnum.
3f50f132 2182 */
e572ff80
DB
2183 if (__reg32_bound_s64(reg->s32_min_value) &&
2184 __reg32_bound_s64(reg->s32_max_value)) {
3a71dc36 2185 reg->smin_value = reg->s32_min_value;
e572ff80
DB
2186 reg->smax_value = reg->s32_max_value;
2187 } else {
3a71dc36 2188 reg->smin_value = 0;
e572ff80
DB
2189 reg->smax_value = U32_MAX;
2190 }
3f50f132
JF
2191}
2192
2193static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
2194{
2195 /* special case when the 64-bit register has its upper 32 bits
2196 * zeroed. Typically happens after a zext or <<32, >>32 sequence,
2197 * allowing us to use the 32-bit bounds directly.
2198 */
2199 if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
2200 __reg_assign_32_into_64(reg);
2201 } else {
2202 /* Otherwise the best we can do is push lower 32bit known and
2203 * unknown bits into register (var_off set from jmp logic)
2204 * then learn as much as possible from the 64-bit tnum
2205 * known and unknown bits. The previous smin/smax bounds are
2206 * invalid here because of jmp32 compare so mark them unknown
2207 * so they do not impact tnum bounds calculation.
2208 */
2209 __mark_reg64_unbounded(reg);
3f50f132 2210 }
3844d153 2211 reg_bounds_sync(reg);
3f50f132
JF
2212}
2213
2214static bool __reg64_bound_s32(s64 a)
2215{
388e2c0b 2216 return a >= S32_MIN && a <= S32_MAX;
3f50f132
JF
2217}
2218
2219static bool __reg64_bound_u32(u64 a)
2220{
b9979db8 2221 return a >= U32_MIN && a <= U32_MAX;
3f50f132
JF
2222}
2223
2224static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
2225{
2226 __mark_reg32_unbounded(reg);
b0270958 2227 if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
3f50f132 2228 reg->s32_min_value = (s32)reg->smin_value;
3f50f132 2229 reg->s32_max_value = (s32)reg->smax_value;
b0270958 2230 }
10bf4e83 2231 if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
3f50f132 2232 reg->u32_min_value = (u32)reg->umin_value;
3f50f132 2233 reg->u32_max_value = (u32)reg->umax_value;
10bf4e83 2234 }
3844d153 2235 reg_bounds_sync(reg);
b03c9f9f
EC
2236}
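
/* Editor's illustrative sketch (hypothetical values): the 64->32 direction.
 * Once the 64-bit bounds fit into 32 bits they are copied into the
 * subregister bounds, and reg_bounds_sync() re-tightens everything against
 * var_off.
 */
static void example_combine_64_into_32(struct bpf_reg_state *reg)
{
	__mark_reg_unbounded(reg);
	reg->var_off = tnum_range(0, 10);	/* value = 0, mask = 0xf */
	__update_reg64_bounds(reg);		/* 64-bit bounds: [0, 15] */
	__reg_combine_64_into_32(reg);		/* 32-bit bounds also become [0, 15] */
}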
2237
f1174f77 2238/* Mark a register as having a completely unknown (scalar) value. */
f54c7898
DB
2239static void __mark_reg_unknown(const struct bpf_verifier_env *env,
2240 struct bpf_reg_state *reg)
f1174f77 2241{
a9c676bc 2242 /*
a73bf9f2 2243 * Clear type, off, and union(map_ptr, range) and
a9c676bc
AS
2244 * padding between 'type' and union
2245 */
2246 memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
f1174f77 2247 reg->type = SCALAR_VALUE;
a73bf9f2
AN
2248 reg->id = 0;
2249 reg->ref_obj_id = 0;
f1174f77 2250 reg->var_off = tnum_unknown;
f4d7e40a 2251 reg->frameno = 0;
be2ef816 2252 reg->precise = !env->bpf_capable;
b03c9f9f 2253 __mark_reg_unbounded(reg);
f1174f77
EC
2254}
2255
61bd5218
JK
2256static void mark_reg_unknown(struct bpf_verifier_env *env,
2257 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
2258{
2259 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 2260 verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
19ceb417
AS
2261 /* Something bad happened, let's kill all regs except FP */
2262 for (regno = 0; regno < BPF_REG_FP; regno++)
f54c7898 2263 __mark_reg_not_init(env, regs + regno);
f1174f77
EC
2264 return;
2265 }
f54c7898 2266 __mark_reg_unknown(env, regs + regno);
f1174f77
EC
2267}
2268
f54c7898
DB
2269static void __mark_reg_not_init(const struct bpf_verifier_env *env,
2270 struct bpf_reg_state *reg)
f1174f77 2271{
f54c7898 2272 __mark_reg_unknown(env, reg);
f1174f77
EC
2273 reg->type = NOT_INIT;
2274}
2275
61bd5218
JK
2276static void mark_reg_not_init(struct bpf_verifier_env *env,
2277 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
2278{
2279 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 2280 verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
19ceb417
AS
2281 /* Something bad happened, let's kill all regs except FP */
2282 for (regno = 0; regno < BPF_REG_FP; regno++)
f54c7898 2283 __mark_reg_not_init(env, regs + regno);
f1174f77
EC
2284 return;
2285 }
f54c7898 2286 __mark_reg_not_init(env, regs + regno);
a9789ef9
DB
2287}
2288
41c48f3a
AI
2289static void mark_btf_ld_reg(struct bpf_verifier_env *env,
2290 struct bpf_reg_state *regs, u32 regno,
22dc4a0f 2291 enum bpf_reg_type reg_type,
c6f1bfe8
YS
2292 struct btf *btf, u32 btf_id,
2293 enum bpf_type_flag flag)
41c48f3a
AI
2294{
2295 if (reg_type == SCALAR_VALUE) {
2296 mark_reg_unknown(env, regs, regno);
2297 return;
2298 }
2299 mark_reg_known_zero(env, regs, regno);
c6f1bfe8 2300 regs[regno].type = PTR_TO_BTF_ID | flag;
22dc4a0f 2301 regs[regno].btf = btf;
41c48f3a
AI
2302 regs[regno].btf_id = btf_id;
2303}
2304
5327ed3d 2305#define DEF_NOT_SUBREG (0)
61bd5218 2306static void init_reg_state(struct bpf_verifier_env *env,
f4d7e40a 2307 struct bpf_func_state *state)
17a52670 2308{
f4d7e40a 2309 struct bpf_reg_state *regs = state->regs;
17a52670
AS
2310 int i;
2311
dc503a8a 2312 for (i = 0; i < MAX_BPF_REG; i++) {
61bd5218 2313 mark_reg_not_init(env, regs, i);
dc503a8a 2314 regs[i].live = REG_LIVE_NONE;
679c782d 2315 regs[i].parent = NULL;
5327ed3d 2316 regs[i].subreg_def = DEF_NOT_SUBREG;
dc503a8a 2317 }
17a52670
AS
2318
2319 /* frame pointer */
f1174f77 2320 regs[BPF_REG_FP].type = PTR_TO_STACK;
61bd5218 2321 mark_reg_known_zero(env, regs, BPF_REG_FP);
f4d7e40a 2322 regs[BPF_REG_FP].frameno = state->frameno;
6760bf2d
DB
2323}
2324
f4d7e40a
AS
2325#define BPF_MAIN_FUNC (-1)
2326static void init_func_state(struct bpf_verifier_env *env,
2327 struct bpf_func_state *state,
2328 int callsite, int frameno, int subprogno)
2329{
2330 state->callsite = callsite;
2331 state->frameno = frameno;
2332 state->subprogno = subprogno;
1bfe26fb 2333 state->callback_ret_range = tnum_range(0, 0);
f4d7e40a 2334 init_reg_state(env, state);
0f55f9ed 2335 mark_verifier_state_scratched(env);
f4d7e40a
AS
2336}
2337
bfc6bb74
AS
2338/* Similar to push_stack(), but for async callbacks */
2339static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
2340 int insn_idx, int prev_insn_idx,
2341 int subprog)
2342{
2343 struct bpf_verifier_stack_elem *elem;
2344 struct bpf_func_state *frame;
2345
2346 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
2347 if (!elem)
2348 goto err;
2349
2350 elem->insn_idx = insn_idx;
2351 elem->prev_insn_idx = prev_insn_idx;
2352 elem->next = env->head;
2353 elem->log_pos = env->log.len_used;
2354 env->head = elem;
2355 env->stack_size++;
2356 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
2357 verbose(env,
2358 "The sequence of %d jumps is too complex for async cb.\n",
2359 env->stack_size);
2360 goto err;
2361 }
2362 /* Unlike push_stack() do not copy_verifier_state().
2363 * The caller state doesn't matter.
2364 * This is an async callback. It starts with a fresh stack.
2365 * Initialize it similarly to do_check_common().
2366 */
2367 elem->st.branches = 1;
2368 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
2369 if (!frame)
2370 goto err;
2371 init_func_state(env, frame,
2372 BPF_MAIN_FUNC /* callsite */,
2373 0 /* frameno within this callchain */,
2374 subprog /* subprog number within this prog */);
2375 elem->st.frame[0] = frame;
2376 return &elem->st;
2377err:
2378 free_verifier_state(env->cur_state, true);
2379 env->cur_state = NULL;
2380 /* pop all elements and return */
2381 while (!pop_stack(env, NULL, NULL, false));
2382 return NULL;
2383}
2384
2385
17a52670
AS
2386enum reg_arg_type {
2387 SRC_OP, /* register is used as source operand */
2388 DST_OP, /* register is used as destination operand */
2389 DST_OP_NO_MARK /* same as above, check only, don't mark */
2390};
2391
cc8b0b92
AS
2392static int cmp_subprogs(const void *a, const void *b)
2393{
9c8105bd
JW
2394 return ((struct bpf_subprog_info *)a)->start -
2395 ((struct bpf_subprog_info *)b)->start;
cc8b0b92
AS
2396}
2397
2398static int find_subprog(struct bpf_verifier_env *env, int off)
2399{
9c8105bd 2400 struct bpf_subprog_info *p;
cc8b0b92 2401
9c8105bd
JW
2402 p = bsearch(&off, env->subprog_info, env->subprog_cnt,
2403 sizeof(env->subprog_info[0]), cmp_subprogs);
cc8b0b92
AS
2404 if (!p)
2405 return -ENOENT;
9c8105bd 2406 return p - env->subprog_info;
cc8b0b92
AS
2407
2408}
2409
2410static int add_subprog(struct bpf_verifier_env *env, int off)
2411{
2412 int insn_cnt = env->prog->len;
2413 int ret;
2414
2415 if (off >= insn_cnt || off < 0) {
2416 verbose(env, "call to invalid destination\n");
2417 return -EINVAL;
2418 }
2419 ret = find_subprog(env, off);
2420 if (ret >= 0)
282a0f46 2421 return ret;
4cb3d99c 2422 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
cc8b0b92
AS
2423 verbose(env, "too many subprograms\n");
2424 return -E2BIG;
2425 }
e6ac2450 2426 /* determine subprog starts. The end is one before the next starts */
9c8105bd
JW
2427 env->subprog_info[env->subprog_cnt++].start = off;
2428 sort(env->subprog_info, env->subprog_cnt,
2429 sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
282a0f46 2430 return env->subprog_cnt - 1;
cc8b0b92
AS
2431}
2432
2357672c
KKD
2433#define MAX_KFUNC_DESCS 256
2434#define MAX_KFUNC_BTFS 256
2435
e6ac2450
MKL
2436struct bpf_kfunc_desc {
2437 struct btf_func_model func_model;
2438 u32 func_id;
2439 s32 imm;
2357672c
KKD
2440 u16 offset;
2441};
2442
2443struct bpf_kfunc_btf {
2444 struct btf *btf;
2445 struct module *module;
2446 u16 offset;
e6ac2450
MKL
2447};
2448
e6ac2450
MKL
2449struct bpf_kfunc_desc_tab {
2450 struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
2451 u32 nr_descs;
2452};
2453
2357672c
KKD
2454struct bpf_kfunc_btf_tab {
2455 struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
2456 u32 nr_descs;
2457};
2458
2459static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
e6ac2450
MKL
2460{
2461 const struct bpf_kfunc_desc *d0 = a;
2462 const struct bpf_kfunc_desc *d1 = b;
2463
2464 /* func_id is not greater than BTF_MAX_TYPE */
2357672c
KKD
2465 return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
2466}
2467
2468static int kfunc_btf_cmp_by_off(const void *a, const void *b)
2469{
2470 const struct bpf_kfunc_btf *d0 = a;
2471 const struct bpf_kfunc_btf *d1 = b;
2472
2473 return d0->offset - d1->offset;
e6ac2450
MKL
2474}
2475
2476static const struct bpf_kfunc_desc *
2357672c 2477find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
e6ac2450
MKL
2478{
2479 struct bpf_kfunc_desc desc = {
2480 .func_id = func_id,
2357672c 2481 .offset = offset,
e6ac2450
MKL
2482 };
2483 struct bpf_kfunc_desc_tab *tab;
2484
2485 tab = prog->aux->kfunc_tab;
2486 return bsearch(&desc, tab->descs, tab->nr_descs,
2357672c
KKD
2487 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
2488}
2489
2490static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
b202d844 2491 s16 offset)
2357672c
KKD
2492{
2493 struct bpf_kfunc_btf kf_btf = { .offset = offset };
2494 struct bpf_kfunc_btf_tab *tab;
2495 struct bpf_kfunc_btf *b;
2496 struct module *mod;
2497 struct btf *btf;
2498 int btf_fd;
2499
2500 tab = env->prog->aux->kfunc_btf_tab;
2501 b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
2502 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
2503 if (!b) {
2504 if (tab->nr_descs == MAX_KFUNC_BTFS) {
2505 verbose(env, "too many different module BTFs\n");
2506 return ERR_PTR(-E2BIG);
2507 }
2508
2509 if (bpfptr_is_null(env->fd_array)) {
2510 verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
2511 return ERR_PTR(-EPROTO);
2512 }
2513
2514 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
2515 offset * sizeof(btf_fd),
2516 sizeof(btf_fd)))
2517 return ERR_PTR(-EFAULT);
2518
2519 btf = btf_get_by_fd(btf_fd);
588cd7ef
KKD
2520 if (IS_ERR(btf)) {
2521 verbose(env, "invalid module BTF fd specified\n");
2357672c 2522 return btf;
588cd7ef 2523 }
2357672c
KKD
2524
2525 if (!btf_is_module(btf)) {
2526 verbose(env, "BTF fd for kfunc is not a module BTF\n");
2527 btf_put(btf);
2528 return ERR_PTR(-EINVAL);
2529 }
2530
2531 mod = btf_try_get_module(btf);
2532 if (!mod) {
2533 btf_put(btf);
2534 return ERR_PTR(-ENXIO);
2535 }
2536
2537 b = &tab->descs[tab->nr_descs++];
2538 b->btf = btf;
2539 b->module = mod;
2540 b->offset = offset;
2541
2542 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2543 kfunc_btf_cmp_by_off, NULL);
2544 }
2357672c 2545 return b->btf;
e6ac2450
MKL
2546}
2547
2357672c
KKD
2548void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
2549{
2550 if (!tab)
2551 return;
2552
2553 while (tab->nr_descs--) {
2554 module_put(tab->descs[tab->nr_descs].module);
2555 btf_put(tab->descs[tab->nr_descs].btf);
2556 }
2557 kfree(tab);
2558}
2559
43bf0878 2560static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
2357672c 2561{
2357672c
KKD
2562 if (offset) {
2563 if (offset < 0) {
2564 /* In the future, this can be allowed to increase limit
2565 * of fd index into fd_array, interpreted as u16.
2566 */
2567 verbose(env, "negative offset disallowed for kernel module function call\n");
2568 return ERR_PTR(-EINVAL);
2569 }
2570
b202d844 2571 return __find_kfunc_desc_btf(env, offset);
2357672c
KKD
2572 }
2573 return btf_vmlinux ?: ERR_PTR(-ENOENT);
e6ac2450
MKL
2574}
2575
2357672c 2576static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
e6ac2450
MKL
2577{
2578 const struct btf_type *func, *func_proto;
2357672c 2579 struct bpf_kfunc_btf_tab *btf_tab;
e6ac2450
MKL
2580 struct bpf_kfunc_desc_tab *tab;
2581 struct bpf_prog_aux *prog_aux;
2582 struct bpf_kfunc_desc *desc;
2583 const char *func_name;
2357672c 2584 struct btf *desc_btf;
8cbf062a 2585 unsigned long call_imm;
e6ac2450
MKL
2586 unsigned long addr;
2587 int err;
2588
2589 prog_aux = env->prog->aux;
2590 tab = prog_aux->kfunc_tab;
2357672c 2591 btf_tab = prog_aux->kfunc_btf_tab;
e6ac2450
MKL
2592 if (!tab) {
2593 if (!btf_vmlinux) {
2594 verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
2595 return -ENOTSUPP;
2596 }
2597
2598 if (!env->prog->jit_requested) {
2599 verbose(env, "JIT is required for calling kernel function\n");
2600 return -ENOTSUPP;
2601 }
2602
2603 if (!bpf_jit_supports_kfunc_call()) {
2604 verbose(env, "JIT does not support calling kernel function\n");
2605 return -ENOTSUPP;
2606 }
2607
2608 if (!env->prog->gpl_compatible) {
2609 verbose(env, "cannot call kernel function from non-GPL compatible program\n");
2610 return -EINVAL;
2611 }
2612
2613 tab = kzalloc(sizeof(*tab), GFP_KERNEL);
2614 if (!tab)
2615 return -ENOMEM;
2616 prog_aux->kfunc_tab = tab;
2617 }
2618
a5d82727
KKD
2619 /* func_id == 0 is always invalid, but instead of returning an error, be
2620 * conservative and wait until the code elimination pass before returning
2621 * an error, so that invalid calls that get pruned out can be in BPF programs
2622 * loaded from userspace. It is also required that offset be untouched
2623 * for such calls.
2624 */
2625 if (!func_id && !offset)
2626 return 0;
2627
2357672c
KKD
2628 if (!btf_tab && offset) {
2629 btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
2630 if (!btf_tab)
2631 return -ENOMEM;
2632 prog_aux->kfunc_btf_tab = btf_tab;
2633 }
2634
43bf0878 2635 desc_btf = find_kfunc_desc_btf(env, offset);
2357672c
KKD
2636 if (IS_ERR(desc_btf)) {
2637 verbose(env, "failed to find BTF for kernel function\n");
2638 return PTR_ERR(desc_btf);
2639 }
2640
2641 if (find_kfunc_desc(env->prog, func_id, offset))
e6ac2450
MKL
2642 return 0;
2643
2644 if (tab->nr_descs == MAX_KFUNC_DESCS) {
2645 verbose(env, "too many different kernel function calls\n");
2646 return -E2BIG;
2647 }
2648
2357672c 2649 func = btf_type_by_id(desc_btf, func_id);
e6ac2450
MKL
2650 if (!func || !btf_type_is_func(func)) {
2651 verbose(env, "kernel btf_id %u is not a function\n",
2652 func_id);
2653 return -EINVAL;
2654 }
2357672c 2655 func_proto = btf_type_by_id(desc_btf, func->type);
e6ac2450
MKL
2656 if (!func_proto || !btf_type_is_func_proto(func_proto)) {
2657 verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
2658 func_id);
2659 return -EINVAL;
2660 }
2661
2357672c 2662 func_name = btf_name_by_offset(desc_btf, func->name_off);
e6ac2450
MKL
2663 addr = kallsyms_lookup_name(func_name);
2664 if (!addr) {
2665 verbose(env, "cannot find address for kernel function %s\n",
2666 func_name);
2667 return -EINVAL;
2668 }
2669
8cbf062a
HT
2670 call_imm = BPF_CALL_IMM(addr);
2671 /* Check whether or not the relative offset overflows desc->imm */
2672 if ((unsigned long)(s32)call_imm != call_imm) {
2673 verbose(env, "address of kernel function %s is out of range\n",
2674 func_name);
2675 return -EINVAL;
2676 }
2677
3d76a4d3
SF
2678 if (bpf_dev_bound_kfunc_id(func_id)) {
2679 err = bpf_dev_bound_kfunc_check(&env->log, prog_aux);
2680 if (err)
2681 return err;
2682 }
2683
e6ac2450
MKL
2684 desc = &tab->descs[tab->nr_descs++];
2685 desc->func_id = func_id;
8cbf062a 2686 desc->imm = call_imm;
2357672c
KKD
2687 desc->offset = offset;
2688 err = btf_distill_func_proto(&env->log, desc_btf,
e6ac2450
MKL
2689 func_proto, func_name,
2690 &desc->func_model);
2691 if (!err)
2692 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2357672c 2693 kfunc_desc_cmp_by_id_off, NULL);
e6ac2450
MKL
2694 return err;
2695}
2696
2697static int kfunc_desc_cmp_by_imm(const void *a, const void *b)
2698{
2699 const struct bpf_kfunc_desc *d0 = a;
2700 const struct bpf_kfunc_desc *d1 = b;
2701
2702 if (d0->imm > d1->imm)
2703 return 1;
2704 else if (d0->imm < d1->imm)
2705 return -1;
2706 return 0;
2707}
2708
2709static void sort_kfunc_descs_by_imm(struct bpf_prog *prog)
2710{
2711 struct bpf_kfunc_desc_tab *tab;
2712
2713 tab = prog->aux->kfunc_tab;
2714 if (!tab)
2715 return;
2716
2717 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2718 kfunc_desc_cmp_by_imm, NULL);
2719}
2720
2721bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2722{
2723 return !!prog->aux->kfunc_tab;
2724}
2725
2726const struct btf_func_model *
2727bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2728 const struct bpf_insn *insn)
2729{
2730 const struct bpf_kfunc_desc desc = {
2731 .imm = insn->imm,
2732 };
2733 const struct bpf_kfunc_desc *res;
2734 struct bpf_kfunc_desc_tab *tab;
2735
2736 tab = prog->aux->kfunc_tab;
2737 res = bsearch(&desc, tab->descs, tab->nr_descs,
2738 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm);
2739
2740 return res ? &res->func_model : NULL;
2741}
2742
2743static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
cc8b0b92 2744{
9c8105bd 2745 struct bpf_subprog_info *subprog = env->subprog_info;
cc8b0b92 2746 struct bpf_insn *insn = env->prog->insnsi;
e6ac2450 2747 int i, ret, insn_cnt = env->prog->len;
cc8b0b92 2748
f910cefa
JW
2749 /* Add entry function. */
2750 ret = add_subprog(env, 0);
e6ac2450 2751 if (ret)
f910cefa
JW
2752 return ret;
2753
e6ac2450
MKL
2754 for (i = 0; i < insn_cnt; i++, insn++) {
2755 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
2756 !bpf_pseudo_kfunc_call(insn))
cc8b0b92 2757 continue;
e6ac2450 2758
2c78ee89 2759 if (!env->bpf_capable) {
e6ac2450 2760 verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
cc8b0b92
AS
2761 return -EPERM;
2762 }
e6ac2450 2763
3990ed4c 2764 if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
e6ac2450 2765 ret = add_subprog(env, i + insn->imm + 1);
3990ed4c 2766 else
2357672c 2767 ret = add_kfunc_call(env, insn->imm, insn->off);
e6ac2450 2768
cc8b0b92
AS
2769 if (ret < 0)
2770 return ret;
2771 }
2772
4cb3d99c
JW
2773 /* Add a fake 'exit' subprog which could simplify subprog iteration
2774 * logic. 'subprog_cnt' should not be increased.
2775 */
2776 subprog[env->subprog_cnt].start = insn_cnt;
2777
06ee7115 2778 if (env->log.level & BPF_LOG_LEVEL2)
cc8b0b92 2779 for (i = 0; i < env->subprog_cnt; i++)
9c8105bd 2780 verbose(env, "func#%d @%d\n", i, subprog[i].start);
cc8b0b92 2781
e6ac2450
MKL
2782 return 0;
2783}
2784
2785static int check_subprogs(struct bpf_verifier_env *env)
2786{
2787 int i, subprog_start, subprog_end, off, cur_subprog = 0;
2788 struct bpf_subprog_info *subprog = env->subprog_info;
2789 struct bpf_insn *insn = env->prog->insnsi;
2790 int insn_cnt = env->prog->len;
2791
cc8b0b92 2792 /* now check that all jumps are within the same subprog */
4cb3d99c
JW
2793 subprog_start = subprog[cur_subprog].start;
2794 subprog_end = subprog[cur_subprog + 1].start;
cc8b0b92
AS
2795 for (i = 0; i < insn_cnt; i++) {
2796 u8 code = insn[i].code;
2797
7f6e4312 2798 if (code == (BPF_JMP | BPF_CALL) &&
df2ccc18
IL
2799 insn[i].src_reg == 0 &&
2800 insn[i].imm == BPF_FUNC_tail_call)
7f6e4312 2801 subprog[cur_subprog].has_tail_call = true;
09b28d76
AS
2802 if (BPF_CLASS(code) == BPF_LD &&
2803 (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
2804 subprog[cur_subprog].has_ld_abs = true;
092ed096 2805 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
cc8b0b92
AS
2806 goto next;
2807 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2808 goto next;
2809 off = i + insn[i].off + 1;
2810 if (off < subprog_start || off >= subprog_end) {
2811 verbose(env, "jump out of range from insn %d to %d\n", i, off);
2812 return -EINVAL;
2813 }
2814next:
2815 if (i == subprog_end - 1) {
2816 /* to avoid fall-through from one subprog into another
2817 * the last insn of the subprog should be either exit
2818 * or unconditional jump back
2819 */
2820 if (code != (BPF_JMP | BPF_EXIT) &&
2821 code != (BPF_JMP | BPF_JA)) {
2822 verbose(env, "last insn is not an exit or jmp\n");
2823 return -EINVAL;
2824 }
2825 subprog_start = subprog_end;
4cb3d99c
JW
2826 cur_subprog++;
2827 if (cur_subprog < env->subprog_cnt)
9c8105bd 2828 subprog_end = subprog[cur_subprog + 1].start;
cc8b0b92
AS
2829 }
2830 }
2831 return 0;
2832}
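
/* Editor's illustrative sketch (hypothetical program): the containment rule
 * enforced above. Insn 1 jumps to insn 4, which lives inside the callee
 * starting at insn 3, so check_subprogs() rejects the program with
 * "jump out of range from insn 1 to 4".
 */
static const struct bpf_insn example_cross_subprog_jump[] = {
	BPF_CALL_REL(2),		/* 0: call the subprog at insn 3 */
	BPF_JMP_A(2),			/* 1: jumps into the other subprog */
	BPF_EXIT_INSN(),		/* 2: last insn of subprog 0 */
	BPF_MOV64_IMM(BPF_REG_0, 0),	/* 3: subprog 1 starts here */
	BPF_EXIT_INSN(),		/* 4 */
};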
2833
679c782d
EC
2834/* Parentage chain of this register (or stack slot) should take care of all
2835 * issues like callee-saved registers, stack slot allocation time, etc.
2836 */
f4d7e40a 2837static int mark_reg_read(struct bpf_verifier_env *env,
679c782d 2838 const struct bpf_reg_state *state,
5327ed3d 2839 struct bpf_reg_state *parent, u8 flag)
f4d7e40a
AS
2840{
2841 bool writes = parent == state->parent; /* Observe write marks */
06ee7115 2842 int cnt = 0;
dc503a8a
EC
2843
2844 while (parent) {
2845 /* if read wasn't screened by an earlier write ... */
679c782d 2846 if (writes && state->live & REG_LIVE_WRITTEN)
dc503a8a 2847 break;
9242b5f5
AS
2848 if (parent->live & REG_LIVE_DONE) {
2849 verbose(env, "verifier BUG type %s var_off %lld off %d\n",
c25b2ae1 2850 reg_type_str(env, parent->type),
9242b5f5
AS
2851 parent->var_off.value, parent->off);
2852 return -EFAULT;
2853 }
5327ed3d
JW
2854 /* The first condition is more likely to be true than the
2855 * second, so check it first.
2856 */
2857 if ((parent->live & REG_LIVE_READ) == flag ||
2858 parent->live & REG_LIVE_READ64)
25af32da
AS
2859 /* The parentage chain never changes and
2860 * this parent was already marked as LIVE_READ.
2861 * There is no need to keep walking the chain again and
2862 * keep re-marking all parents as LIVE_READ.
2863 * This case happens when the same register is read
2864 * multiple times without writes into it in-between.
5327ed3d
JW
2865 * Also, if parent has the stronger REG_LIVE_READ64 set,
2866 * then no need to set the weak REG_LIVE_READ32.
25af32da
AS
2867 */
2868 break;
dc503a8a 2869 /* ... then we depend on parent's value */
5327ed3d
JW
2870 parent->live |= flag;
2871 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
2872 if (flag == REG_LIVE_READ64)
2873 parent->live &= ~REG_LIVE_READ32;
dc503a8a
EC
2874 state = parent;
2875 parent = state->parent;
f4d7e40a 2876 writes = true;
06ee7115 2877 cnt++;
dc503a8a 2878 }
06ee7115
AS
2879
2880 if (env->longest_mark_read_walk < cnt)
2881 env->longest_mark_read_walk = cnt;
f4d7e40a 2882 return 0;
dc503a8a
EC
2883}
2884
d6fefa11
KKD
2885static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
2886{
2887 struct bpf_func_state *state = func(env, reg);
2888 int spi, ret;
2889
2890 /* For CONST_PTR_TO_DYNPTR, it must have already been done by
2891 * check_reg_arg in check_helper_call and mark_btf_func_reg_size in
2892 * check_kfunc_call.
2893 */
2894 if (reg->type == CONST_PTR_TO_DYNPTR)
2895 return 0;
79168a66
KKD
2896 spi = dynptr_get_spi(env, reg);
2897 if (spi < 0)
2898 return spi;
d6fefa11
KKD
2899 /* Caller ensures dynptr is valid and initialized, which means spi is in
2900 * bounds and spi is the first dynptr slot. Simply mark stack slot as
2901 * read.
2902 */
2903 ret = mark_reg_read(env, &state->stack[spi].spilled_ptr,
2904 state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64);
2905 if (ret)
2906 return ret;
2907 return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr,
2908 state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64);
2909}
2910
06accc87
AN
2911static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
2912 int spi, int nr_slots)
2913{
2914 struct bpf_func_state *state = func(env, reg);
2915 int err, i;
2916
2917 for (i = 0; i < nr_slots; i++) {
2918 struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr;
2919
2920 err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64);
2921 if (err)
2922 return err;
2923
2924 mark_stack_slot_scratched(env, spi - i);
2925 }
2926
2927 return 0;
2928}
2929
5327ed3d
JW
2930/* This function is supposed to be used by the following 32-bit optimization
2931 * code only. It returns TRUE if the source or destination register operates
2932 * on 64 bits, otherwise it returns FALSE.
2933 */
2934static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
2935 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
2936{
2937 u8 code, class, op;
2938
2939 code = insn->code;
2940 class = BPF_CLASS(code);
2941 op = BPF_OP(code);
2942 if (class == BPF_JMP) {
2943 /* BPF_EXIT for "main" will reach here. Return TRUE
2944 * conservatively.
2945 */
2946 if (op == BPF_EXIT)
2947 return true;
2948 if (op == BPF_CALL) {
2949 /* BPF to BPF call will reach here because of marking
2950 * caller saved clobber with DST_OP_NO_MARK for which we
2951 * don't care about the register def because they are
2952 * already marked as NOT_INIT anyway.
2953 */
2954 if (insn->src_reg == BPF_PSEUDO_CALL)
2955 return false;
2956 /* Helper call will reach here because of arg type
2957 * check, conservatively return TRUE.
2958 */
2959 if (t == SRC_OP)
2960 return true;
2961
2962 return false;
2963 }
2964 }
2965
2966 if (class == BPF_ALU64 || class == BPF_JMP ||
2967 /* BPF_END always use BPF_ALU class. */
2968 (class == BPF_ALU && op == BPF_END && insn->imm == 64))
2969 return true;
2970
2971 if (class == BPF_ALU || class == BPF_JMP32)
2972 return false;
2973
2974 if (class == BPF_LDX) {
2975 if (t != SRC_OP)
2976 return BPF_SIZE(code) == BPF_DW;
2977 /* LDX source must be ptr. */
2978 return true;
2979 }
2980
2981 if (class == BPF_STX) {
83a28819
IL
2982 /* BPF_STX (including atomic variants) has multiple source
2983 * operands, one of which is a ptr. Check whether the caller is
2984 * asking about it.
2985 */
2986 if (t == SRC_OP && reg->type != SCALAR_VALUE)
5327ed3d
JW
2987 return true;
2988 return BPF_SIZE(code) == BPF_DW;
2989 }
2990
2991 if (class == BPF_LD) {
2992 u8 mode = BPF_MODE(code);
2993
2994 /* LD_IMM64 */
2995 if (mode == BPF_IMM)
2996 return true;
2997
2998 /* Both LD_IND and LD_ABS return 32-bit data. */
2999 if (t != SRC_OP)
3000 return false;
3001
3002 /* Implicit ctx ptr. */
3003 if (regno == BPF_REG_6)
3004 return true;
3005
3006 /* Explicit source could be any width. */
3007 return true;
3008 }
3009
3010 if (class == BPF_ST)
3011 /* The only source register for BPF_ST is a ptr. */
3012 return true;
3013
3014 /* Conservatively return true by default. */
3015 return true;
3016}
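
/* Editor's sketch: a sample classification by is_reg64(). A BPF_ALU64
 * immediate add defines all 64 bits of its destination, so no zero
 * extension needs to be tracked for it (contrast with BPF_ALU, which
 * writes a 32-bit subregister).
 */
static bool example_alu64_defines_64bit(struct bpf_verifier_env *env)
{
	struct bpf_insn insn = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1);

	return is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP);	/* true */
}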
3017
83a28819
IL
3018/* Return the regno defined by the insn, or -1. */
3019static int insn_def_regno(const struct bpf_insn *insn)
b325fbca 3020{
83a28819
IL
3021 switch (BPF_CLASS(insn->code)) {
3022 case BPF_JMP:
3023 case BPF_JMP32:
3024 case BPF_ST:
3025 return -1;
3026 case BPF_STX:
3027 if (BPF_MODE(insn->code) == BPF_ATOMIC &&
3028 (insn->imm & BPF_FETCH)) {
3029 if (insn->imm == BPF_CMPXCHG)
3030 return BPF_REG_0;
3031 else
3032 return insn->src_reg;
3033 } else {
3034 return -1;
3035 }
3036 default:
3037 return insn->dst_reg;
3038 }
b325fbca
JW
3039}
3040
3041/* Return TRUE if INSN has defined any 32-bit value explicitly. */
3042static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
3043{
83a28819
IL
3044 int dst_reg = insn_def_regno(insn);
3045
3046 if (dst_reg == -1)
b325fbca
JW
3047 return false;
3048
83a28819 3049 return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
b325fbca
JW
3050}
3051
5327ed3d
JW
3052static void mark_insn_zext(struct bpf_verifier_env *env,
3053 struct bpf_reg_state *reg)
3054{
3055 s32 def_idx = reg->subreg_def;
3056
3057 if (def_idx == DEF_NOT_SUBREG)
3058 return;
3059
3060 env->insn_aux_data[def_idx - 1].zext_dst = true;
3061 /* The dst will be zero extended, so won't be sub-register anymore. */
3062 reg->subreg_def = DEF_NOT_SUBREG;
3063}
3064
dc503a8a 3065static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
17a52670
AS
3066 enum reg_arg_type t)
3067{
f4d7e40a
AS
3068 struct bpf_verifier_state *vstate = env->cur_state;
3069 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5327ed3d 3070 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
c342dc10 3071 struct bpf_reg_state *reg, *regs = state->regs;
5327ed3d 3072 bool rw64;
dc503a8a 3073
17a52670 3074 if (regno >= MAX_BPF_REG) {
61bd5218 3075 verbose(env, "R%d is invalid\n", regno);
17a52670
AS
3076 return -EINVAL;
3077 }
3078
0f55f9ed
CL
3079 mark_reg_scratched(env, regno);
3080
c342dc10 3081 reg = &regs[regno];
5327ed3d 3082 rw64 = is_reg64(env, insn, regno, reg, t);
17a52670
AS
3083 if (t == SRC_OP) {
3084 /* check whether register used as source operand can be read */
c342dc10 3085 if (reg->type == NOT_INIT) {
61bd5218 3086 verbose(env, "R%d !read_ok\n", regno);
17a52670
AS
3087 return -EACCES;
3088 }
679c782d 3089 /* We don't need to worry about FP liveness because it's read-only */
c342dc10
JW
3090 if (regno == BPF_REG_FP)
3091 return 0;
3092
5327ed3d
JW
3093 if (rw64)
3094 mark_insn_zext(env, reg);
3095
3096 return mark_reg_read(env, reg, reg->parent,
3097 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
17a52670
AS
3098 } else {
3099 /* check whether register used as dest operand can be written to */
3100 if (regno == BPF_REG_FP) {
61bd5218 3101 verbose(env, "frame pointer is read only\n");
17a52670
AS
3102 return -EACCES;
3103 }
c342dc10 3104 reg->live |= REG_LIVE_WRITTEN;
5327ed3d 3105 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
17a52670 3106 if (t == DST_OP)
61bd5218 3107 mark_reg_unknown(env, regs, regno);
17a52670
AS
3108 }
3109 return 0;
3110}
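
/* Editor's illustrative sketch (not the exact MOV handling): the typical
 * read-then-write sequence for an insn that reads r2 and fully redefines
 * r3. DST_OP also marks the destination as an unknown scalar.
 */
static int example_check_src_then_dst(struct bpf_verifier_env *env)
{
	int err;

	err = check_reg_arg(env, BPF_REG_2, SRC_OP);	/* r2 must be readable */
	if (err)
		return err;
	return check_reg_arg(env, BPF_REG_3, DST_OP);	/* r3 becomes writable/unknown */
}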
3111
bffdeaa8
AN
3112static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
3113{
3114 env->insn_aux_data[idx].jmp_point = true;
3115}
3116
3117static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
3118{
3119 return env->insn_aux_data[insn_idx].jmp_point;
3120}
3121
b5dc0163
AS
3122/* for any branch, call, exit record the history of jmps in the given state */
3123static int push_jmp_history(struct bpf_verifier_env *env,
3124 struct bpf_verifier_state *cur)
3125{
3126 u32 cnt = cur->jmp_history_cnt;
3127 struct bpf_idx_pair *p;
ceb35b66 3128 size_t alloc_size;
b5dc0163 3129
bffdeaa8
AN
3130 if (!is_jmp_point(env, env->insn_idx))
3131 return 0;
3132
b5dc0163 3133 cnt++;
ceb35b66
KC
3134 alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
3135 p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
b5dc0163
AS
3136 if (!p)
3137 return -ENOMEM;
3138 p[cnt - 1].idx = env->insn_idx;
3139 p[cnt - 1].prev_idx = env->prev_insn_idx;
3140 cur->jmp_history = p;
3141 cur->jmp_history_cnt = cnt;
3142 return 0;
3143}
3144
3145/* Backtrack one insn at a time. If idx is not at the top of the recorded
3146 * history, then the previous instruction came from straight-line execution.
3147 */
3148static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
3149 u32 *history)
3150{
3151 u32 cnt = *history;
3152
3153 if (cnt && st->jmp_history[cnt - 1].idx == i) {
3154 i = st->jmp_history[cnt - 1].prev_idx;
3155 (*history)--;
3156 } else {
3157 i--;
3158 }
3159 return i;
3160}
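
/* Editor's sketch of the consumer side: walking a state's instructions
 * backwards, letting the recorded jump history redirect the walk at
 * branch points instead of plain i - 1 decrements. The loop bounds are
 * hypothetical.
 */
static void example_walk_insns_backwards(struct bpf_verifier_state *st,
					 int last_idx, int first_idx)
{
	u32 history = st->jmp_history_cnt;
	int i = last_idx;

	for (;;) {
		/* ... inspect insn i here ... */
		if (i == first_idx)
			break;
		i = get_prev_insn_idx(st, i, &history);
	}
}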
3161
e6ac2450
MKL
3162static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
3163{
3164 const struct btf_type *func;
2357672c 3165 struct btf *desc_btf;
e6ac2450
MKL
3166
3167 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
3168 return NULL;
3169
43bf0878 3170 desc_btf = find_kfunc_desc_btf(data, insn->off);
2357672c
KKD
3171 if (IS_ERR(desc_btf))
3172 return "<error>";
3173
3174 func = btf_type_by_id(desc_btf, insn->imm);
3175 return btf_name_by_offset(desc_btf, func->name_off);
e6ac2450
MKL
3176}
3177
b5dc0163
AS
3178/* For a given verifier state, backtrack_insn() is called from the last insn to
3179 * the first insn. Its purpose is to compute a bitmask of registers and
3180 * stack slots that need precision in the parent verifier state.
3181 */
3182static int backtrack_insn(struct bpf_verifier_env *env, int idx,
3183 u32 *reg_mask, u64 *stack_mask)
3184{
3185 const struct bpf_insn_cbs cbs = {
e6ac2450 3186 .cb_call = disasm_kfunc_name,
b5dc0163
AS
3187 .cb_print = verbose,
3188 .private_data = env,
3189 };
3190 struct bpf_insn *insn = env->prog->insnsi + idx;
3191 u8 class = BPF_CLASS(insn->code);
3192 u8 opcode = BPF_OP(insn->code);
3193 u8 mode = BPF_MODE(insn->code);
3194 u32 dreg = 1u << insn->dst_reg;
3195 u32 sreg = 1u << insn->src_reg;
3196 u32 spi;
3197
3198 if (insn->code == 0)
3199 return 0;
496f3324 3200 if (env->log.level & BPF_LOG_LEVEL2) {
b5dc0163
AS
3201 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
3202 verbose(env, "%d: ", idx);
3203 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
3204 }
3205
3206 if (class == BPF_ALU || class == BPF_ALU64) {
3207 if (!(*reg_mask & dreg))
3208 return 0;
3209 if (opcode == BPF_MOV) {
3210 if (BPF_SRC(insn->code) == BPF_X) {
3211 /* dreg = sreg
3212 * dreg needs precision after this insn
3213 * sreg needs precision before this insn
3214 */
3215 *reg_mask &= ~dreg;
3216 *reg_mask |= sreg;
3217 } else {
3218 /* dreg = K
3219 * dreg needs precision after this insn.
3220 * Corresponding register is already marked
3221 * as precise=true in this verifier state.
3222 * No further markings in parent are necessary
3223 */
3224 *reg_mask &= ~dreg;
3225 }
3226 } else {
3227 if (BPF_SRC(insn->code) == BPF_X) {
3228 /* dreg += sreg
3229 * both dreg and sreg need precision
3230 * before this insn
3231 */
3232 *reg_mask |= sreg;
3233 } /* else dreg += K
3234 * dreg still needs precision before this insn
3235 */
3236 }
3237 } else if (class == BPF_LDX) {
3238 if (!(*reg_mask & dreg))
3239 return 0;
3240 *reg_mask &= ~dreg;
3241
3242 /* scalars can only be spilled into stack w/o losing precision.
3243 * Load from any other memory can be zero extended.
3244 * The desire to keep that precision is already indicated
3245 * by 'precise' mark in corresponding register of this state.
3246 * No further tracking necessary.
3247 */
3248 if (insn->src_reg != BPF_REG_FP)
3249 return 0;
b5dc0163
AS
3250
3251 /* dreg = *(u64 *)[fp - off] was a fill from the stack.
3252 * that [fp - off] slot contains scalar that needs to be
3253 * tracked with precision
3254 */
3255 spi = (-insn->off - 1) / BPF_REG_SIZE;
3256 if (spi >= 64) {
3257 verbose(env, "BUG spi %d\n", spi);
3258 WARN_ONCE(1, "verifier backtracking bug");
3259 return -EFAULT;
3260 }
3261 *stack_mask |= 1ull << spi;
b3b50f05 3262 } else if (class == BPF_STX || class == BPF_ST) {
b5dc0163 3263 if (*reg_mask & dreg)
b3b50f05 3264 /* stx & st shouldn't be using _scalar_ dst_reg
b5dc0163
AS
3265 * to access memory. It means backtracking
3266 * encountered a case of pointer subtraction.
3267 */
3268 return -ENOTSUPP;
3269 /* scalars can only be spilled into stack */
3270 if (insn->dst_reg != BPF_REG_FP)
3271 return 0;
b5dc0163
AS
3272 spi = (-insn->off - 1) / BPF_REG_SIZE;
3273 if (spi >= 64) {
3274 verbose(env, "BUG spi %d\n", spi);
3275 WARN_ONCE(1, "verifier backtracking bug");
3276 return -EFAULT;
3277 }
3278 if (!(*stack_mask & (1ull << spi)))
3279 return 0;
3280 *stack_mask &= ~(1ull << spi);
b3b50f05
AN
3281 if (class == BPF_STX)
3282 *reg_mask |= sreg;
b5dc0163
AS
3283 } else if (class == BPF_JMP || class == BPF_JMP32) {
3284 if (opcode == BPF_CALL) {
3285 if (insn->src_reg == BPF_PSEUDO_CALL)
3286 return -ENOTSUPP;
be2ef816
AN
3287 /* BPF helpers that invoke callback subprogs are
3288 * equivalent to BPF_PSEUDO_CALL above
3289 */
3290 if (insn->src_reg == 0 && is_callback_calling_function(insn->imm))
3291 return -ENOTSUPP;
d3178e8a
HS
3292 /* kfunc with imm==0 is invalid and fixup_kfunc_call will
3293 * catch this error later. Make backtracking conservative
3294 * with ENOTSUPP.
3295 */
3296 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
3297 return -ENOTSUPP;
b5dc0163
AS
3298 /* regular helper call sets R0 */
3299 *reg_mask &= ~1;
3300 if (*reg_mask & 0x3f) {
3301 /* if backtracing was looking for registers R1-R5
3302 * they should have been found already.
3303 */
3304 verbose(env, "BUG regs %x\n", *reg_mask);
3305 WARN_ONCE(1, "verifier backtracking bug");
3306 return -EFAULT;
3307 }
3308 } else if (opcode == BPF_EXIT) {
3309 return -ENOTSUPP;
3310 }
3311 } else if (class == BPF_LD) {
3312 if (!(*reg_mask & dreg))
3313 return 0;
3314 *reg_mask &= ~dreg;
3315 /* It's ld_imm64 or ld_abs or ld_ind.
3316 * For ld_imm64 no further tracking of precision
3317 * into parent is necessary
3318 */
3319 if (mode == BPF_IND || mode == BPF_ABS)
3320 /* to be analyzed */
3321 return -ENOTSUPP;
3322 }
3323 return 0;
3324}
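
/* Editor's illustrative sketch (added; not from the original source).
 * Suppose precision was requested for r6, so reg_mask = BIT(6), and the
 * program in forward order was:
 *
 *   r7 = 42                 // visited last: BPF_MOV|BPF_K clears BIT(7)
 *   r6 = *(u64 *)(r10 - 8)  // visited 2nd: clears BIT(6), sets bit 0 of
 *                           // stack_mask (the spi of fp-8)
 *   r6 += r7                // visited 1st: BPF_ALU|BPF_X adds BIT(7)
 *
 * backtrack_insn() is called on each insn bottom-up; the walk can stop
 * once reg_mask and stack_mask are both empty (here only after the fp-8
 * slot is also resolved via a spill in a parent state).
 */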
3325
3326/* the scalar precision tracking algorithm:
3327 * . at the start all registers have precise=false.
3328 * . scalar ranges are tracked as normal through alu and jmp insns.
3329 * . once precise value of the scalar register is used in:
3330 * . ptr + scalar alu
3331 * . if (scalar cond K|scalar)
3332 * . helper_call(.., scalar, ...) where ARG_CONST is expected
3333 * backtrack through the verifier states and mark all registers and
3334 * stack slots with spilled constants that contributed to the value
3335 * of these scalar registers as precise.
3336 * . during state pruning two registers (or spilled stack slots)
3337 * are equivalent if both are not precise.
3338 *
3339 * Note the verifier cannot simply walk register parentage chain,
3340 * since many different registers and stack slots could have been
3341 * used to compute single precise scalar.
3342 *
3343 * The approach of starting with precise=true for all registers and then
3344 * backtrack to mark a register as not precise when the verifier detects
3345 * that program doesn't care about specific value (e.g., when helper
3346 * takes register as ARG_ANYTHING parameter) is not safe.
3347 *
3348 * It's ok to walk single parentage chain of the verifier states.
3349 * It's possible that this backtracking will go all the way till 1st insn.
3350 * All other branches will be explored for needing precision later.
3351 *
3352 * The backtracking needs to deal with cases like:
3353 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
3354 * r9 -= r8
3355 * r5 = r9
3356 * if r5 > 0x79f goto pc+7
3357 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
3358 * r5 += 1
3359 * ...
3360 * call bpf_perf_event_output#25
3361 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
3362 *
3363 * and this case:
3364 * r6 = 1
3365 * call foo // uses callee's r6 inside to compute r0
3366 * r0 += r6
3367 * if r0 == 0 goto
3368 *
3369 * to track above reg_mask/stack_mask needs to be independent for each frame.
3370 *
3371 * Also if parent's curframe > frame where backtracking started,
3372 * the verifier needs to mark registers in both frames, otherwise callees
3373 * may incorrectly prune callers. This is similar to
3374 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
3375 *
3376 * For now backtracking falls back into conservative marking.
3377 */
3378static void mark_all_scalars_precise(struct bpf_verifier_env *env,
3379 struct bpf_verifier_state *st)
3380{
3381 struct bpf_func_state *func;
3382 struct bpf_reg_state *reg;
3383 int i, j;
3384
3385 /* big hammer: mark all scalars precise in this path.
3386 * pop_stack may still get !precise scalars.
3387 * We also skip current state and go straight to first parent state,
3388 * because precision markings in current non-checkpointed state are
3389 * not needed. See why in the comment in __mark_chain_precision below.
b5dc0163 3390 */
f63181b6 3391 for (st = st->parent; st; st = st->parent) {
3392 for (i = 0; i <= st->curframe; i++) {
3393 func = st->frame[i];
3394 for (j = 0; j < BPF_REG_FP; j++) {
3395 reg = &func->regs[j];
3396 if (reg->type != SCALAR_VALUE)
3397 continue;
3398 reg->precise = true;
3399 }
3400 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
27113c59 3401 if (!is_spilled_reg(&func->stack[j]))
3402 continue;
3403 reg = &func->stack[j].spilled_ptr;
3404 if (reg->type != SCALAR_VALUE)
3405 continue;
3406 reg->precise = true;
3407 }
3408 }
f63181b6 3409 }
3410}
3411
3412static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
3413{
3414 struct bpf_func_state *func;
3415 struct bpf_reg_state *reg;
3416 int i, j;
3417
3418 for (i = 0; i <= st->curframe; i++) {
3419 func = st->frame[i];
3420 for (j = 0; j < BPF_REG_FP; j++) {
3421 reg = &func->regs[j];
3422 if (reg->type != SCALAR_VALUE)
3423 continue;
3424 reg->precise = false;
3425 }
3426 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
3427 if (!is_spilled_reg(&func->stack[j]))
3428 continue;
3429 reg = &func->stack[j].spilled_ptr;
3430 if (reg->type != SCALAR_VALUE)
3431 continue;
3432 reg->precise = false;
3433 }
3434 }
3435}
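
/* Editor's note with a hypothetical example (added): forgetting inherited
 * precision right before checkpointing makes the saved state more generic.
 * If a parent needed r6 precise only to validate an earlier "r1 += r6",
 * a checkpoint that never uses r6's exact value again can keep r6
 * imprecise, so a later path reaching the same insn with r6=[0,100]
 * instead of r6=7 can still be pruned by regsafe().
 */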
3436
3437/*
3438 * __mark_chain_precision() backtracks BPF program instruction sequence and
3439 * chain of verifier states making sure that register *regno* (if regno >= 0)
3440 * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked
3441 * SCALARS, as well as any other registers and slots that contribute to
3442 * a tracked state of given registers/stack slots, depending on specific BPF
3443 * assembly instructions (see backtrack_insns() for exact instruction handling
3444 * logic). This backtracking relies on recorded jmp_history and is able to
3445 * traverse entire chain of parent states. This process ends only when all the
3446 * necessary registers/slots and their transitive dependencies are marked as
3447 * precise.
3448 *
3449 * One important and subtle aspect is that precise marks *do not matter* in
3450 * the currently verified state (current state). It is important to understand
3451 * why this is the case.
3452 *
3453 * First, note that current state is the state that is not yet "checkpointed",
3454 * i.e., it is not yet put into env->explored_states, and it has no children
3455 * states as well. It's ephemeral, and can end up either a) being discarded if
3456 * compatible explored state is found at some point or BPF_EXIT instruction is
3457 * reached or b) checkpointed and put into env->explored_states, branching out
3458 * into one or more children states.
3459 *
3460 * In the former case, precise markings in current state are completely
3461 * ignored by state comparison code (see regsafe() for details). Only
3462 * checkpointed ("old") state precise markings are important, and if old
3463 * state's register/slot is precise, regsafe() assumes current state's
3464 * register/slot as precise and checks value ranges exactly and precisely. If
3465 * states turn out to be compatible, current state's necessary precise
3466 * markings and any required parent states' precise markings are enforced
3467 * after the fact with propagate_precision() logic. But it's
3468 * important to realize that in this case, even after marking current state
3469 * registers/slots as precise, we immediately discard current state. So what
3470 * actually matters is any of the precise markings propagated into current
3471 * state's parent states, which are always checkpointed (due to b) case above).
3472 * As such, for scenario a) it doesn't matter if current state has precise
3473 * markings set or not.
3474 *
3475 * Now, for the scenario b), checkpointing and forking into child(ren)
3476 * state(s). Note that before current state gets to checkpointing step, any
3477 * processed instruction always assumes precise SCALAR register/slot
3478 * knowledge: if precise value or range is useful to prune jump branch, BPF
3479 * verifier takes this opportunity enthusiastically. Similarly, when
3480 * register's value is used to calculate offset or memory address, exact
3481 * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
3482 * what we mentioned above about state comparison ignoring precise markings,
3483 * the BPF verifier ignores and also assumes precise markings *at will*
3484 * during the instruction verification process. But as the verifier
3485 * assumes precision, it also propagates any precision dependencies across
3486 * parent states, which are not yet finalized, so can be further restricted
3487 * based on new knowledge gained from restrictions enforced by their children
3488 * states. This is so that once those parent states are finalized, i.e., when
3489 * they have no more active children state, state comparison logic in
3490 * is_state_visited() would enforce strict and precise SCALAR ranges, if
3491 * required for correctness.
3492 *
3493 * To build a bit more intuition, note also that once a state is checkpointed,
3494 * the path we took to get to that state is not important. This is crucial
3495 * property for state pruning. When state is checkpointed and finalized at
3496 * some instruction index, it can be correctly and safely used to "short
3497 * circuit" any *compatible* state that reaches exactly the same instruction
3498 * index. I.e., if we jumped to that instruction from a completely different
3499 * code path than original finalized state was derived from, it doesn't
3500 * matter, current state can be discarded because from that instruction
3501 * forward having a compatible state will ensure we will safely reach the
3502 * exit. States describe preconditions for further exploration, but completely
3503 * forget the history of how we got here.
3504 *
3505 * This also means that even if we needed precise SCALAR range to get to
3506 * finalized state, but from that point forward *that same* SCALAR register is
3507 * never used in a precise context (i.e., its precise value is not needed for
3508 * correctness), it's correct and safe to mark such register as "imprecise"
3509 * (i.e., precise marking set to false). This is what we rely on when we do
3510 * not set precise marking in current state. If no child state requires
3511 * precision for any given SCALAR register, it's safe to dictate that it can
3512 * be imprecise. If any child state does require this register to be precise,
3513 * we'll mark it precise later retroactively during precise markings
3514 * propagation from child state to parent states.
3515 *
3516 * Skipping precise marking setting in current state is a mild version of
3517 * relying on the above observation. But we can utilize this property even
3518 * more aggressively by proactively forgetting any precise marking in the
3519 * current state (which we inherited from the parent state), right before we
3520 * checkpoint it and branch off into new child state. This is done by
3521 * mark_all_scalars_imprecise() to hopefully get more permissive and generic
3522 * finalized states which help in short circuiting more future states.
f63181b6 3523 */
529409ea 3524static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
a3ce685d 3525 int spi)
3526{
3527 struct bpf_verifier_state *st = env->cur_state;
3528 int first_idx = st->first_insn_idx;
3529 int last_idx = env->insn_idx;
3530 struct bpf_func_state *func;
3531 struct bpf_reg_state *reg;
3532 u32 reg_mask = regno >= 0 ? 1u << regno : 0;
3533 u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
b5dc0163 3534 bool skip_first = true;
a3ce685d 3535 bool new_marks = false;
3536 int i, err;
3537
2c78ee89 3538 if (!env->bpf_capable)
3539 return 0;
3540
3541 /* Do sanity checks against current state of register and/or stack
3542 * slot, but don't set precise flag in current state, as precision
3543 * tracking in the current state is unnecessary.
3544 */
529409ea 3545 func = st->frame[frame];
3546 if (regno >= 0) {
3547 reg = &func->regs[regno];
3548 if (reg->type != SCALAR_VALUE) {
3549 WARN_ONCE(1, "backtracing misuse");
3550 return -EFAULT;
3551 }
f63181b6 3552 new_marks = true;
b5dc0163 3553 }
b5dc0163 3554
a3ce685d 3555 while (spi >= 0) {
27113c59 3556 if (!is_spilled_reg(&func->stack[spi])) {
3557 stack_mask = 0;
3558 break;
3559 }
3560 reg = &func->stack[spi].spilled_ptr;
3561 if (reg->type != SCALAR_VALUE) {
3562 stack_mask = 0;
3563 break;
3564 }
f63181b6 3565 new_marks = true;
3566 break;
3567 }
3568
3569 if (!new_marks)
3570 return 0;
3571 if (!reg_mask && !stack_mask)
3572 return 0;
be2ef816 3573
3574 for (;;) {
3575 DECLARE_BITMAP(mask, 64);
3576 u32 history = st->jmp_history_cnt;
3577
496f3324 3578 if (env->log.level & BPF_LOG_LEVEL2)
b5dc0163 3579 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
3580
3581 if (last_idx < 0) {
3582 /* we are at the entry into subprog, which
3583 * is expected for global funcs, but only if
3584 * requested precise registers are R1-R5
3585 * (which are global func's input arguments)
3586 */
3587 if (st->curframe == 0 &&
3588 st->frame[0]->subprogno > 0 &&
3589 st->frame[0]->callsite == BPF_MAIN_FUNC &&
3590 stack_mask == 0 && (reg_mask & ~0x3e) == 0) {
3591 bitmap_from_u64(mask, reg_mask);
3592 for_each_set_bit(i, mask, 32) {
3593 reg = &st->frame[0]->regs[i];
3594 if (reg->type != SCALAR_VALUE) {
3595 reg_mask &= ~(1u << i);
3596 continue;
3597 }
3598 reg->precise = true;
3599 }
3600 return 0;
3601 }
3602
3603 verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n",
3604 st->frame[0]->subprogno, reg_mask, stack_mask);
3605 WARN_ONCE(1, "verifier backtracking bug");
3606 return -EFAULT;
3607 }
3608
3609 for (i = last_idx;;) {
3610 if (skip_first) {
3611 err = 0;
3612 skip_first = false;
3613 } else {
3614 err = backtrack_insn(env, i, &reg_mask, &stack_mask);
3615 }
3616 if (err == -ENOTSUPP) {
3617 mark_all_scalars_precise(env, st);
3618 return 0;
3619 } else if (err) {
3620 return err;
3621 }
3622 if (!reg_mask && !stack_mask)
3623 /* Found assignment(s) into tracked register in this state.
3624 * Since this state is already marked, just return.
3625 * Nothing to be tracked further in the parent state.
3626 */
3627 return 0;
3628 if (i == first_idx)
3629 break;
3630 i = get_prev_insn_idx(st, i, &history);
3631 if (i >= env->prog->len) {
3632 /* This can happen if backtracking reached insn 0
3633 * and there are still reg_mask or stack_mask
3634 * to backtrack.
3635 * It means the backtracking missed the spot where
3636 * particular register was initialized with a constant.
3637 */
3638 verbose(env, "BUG backtracking idx %d\n", i);
3639 WARN_ONCE(1, "verifier backtracking bug");
3640 return -EFAULT;
3641 }
3642 }
3643 st = st->parent;
3644 if (!st)
3645 break;
3646
a3ce685d 3647 new_marks = false;
529409ea 3648 func = st->frame[frame];
3649 bitmap_from_u64(mask, reg_mask);
3650 for_each_set_bit(i, mask, 32) {
3651 reg = &func->regs[i];
3652 if (reg->type != SCALAR_VALUE) {
3653 reg_mask &= ~(1u << i);
b5dc0163 3654 continue;
a3ce685d 3655 }
3656 if (!reg->precise)
3657 new_marks = true;
3658 reg->precise = true;
3659 }
3660
3661 bitmap_from_u64(mask, stack_mask);
3662 for_each_set_bit(i, mask, 64) {
3663 if (i >= func->allocated_stack / BPF_REG_SIZE) {
3664 /* the sequence of instructions:
3665 * 2: (bf) r3 = r10
3666 * 3: (7b) *(u64 *)(r3 -8) = r0
3667 * 4: (79) r4 = *(u64 *)(r10 -8)
3668 * doesn't contain jmps. It's backtracked
3669 * as a single block.
3670 * During backtracking insn 3 is not recognized as
3671 * stack access, so at the end of backtracking
3672 * stack slot fp-8 is still marked in stack_mask.
3673 * However the parent state may not have accessed
3674 * fp-8 and it's "unallocated" stack space.
3675 * In such a case, fall back to conservative marking.
b5dc0163 3676 */
3677 mark_all_scalars_precise(env, st);
3678 return 0;
3679 }
3680
27113c59 3681 if (!is_spilled_reg(&func->stack[i])) {
a3ce685d 3682 stack_mask &= ~(1ull << i);
b5dc0163 3683 continue;
a3ce685d 3684 }
b5dc0163 3685 reg = &func->stack[i].spilled_ptr;
3686 if (reg->type != SCALAR_VALUE) {
3687 stack_mask &= ~(1ull << i);
b5dc0163 3688 continue;
a3ce685d 3689 }
3690 if (!reg->precise)
3691 new_marks = true;
3692 reg->precise = true;
3693 }
496f3324 3694 if (env->log.level & BPF_LOG_LEVEL2) {
2e576648 3695 verbose(env, "parent %s regs=%x stack=%llx marks:",
3696 new_marks ? "didn't have" : "already had",
3697 reg_mask, stack_mask);
2e576648 3698 print_verifier_state(env, func, true);
3699 }
3700
3701 if (!reg_mask && !stack_mask)
3702 break;
3703 if (!new_marks)
3704 break;
3705
3706 last_idx = st->last_insn_idx;
3707 first_idx = st->first_insn_idx;
3708 }
3709 return 0;
3710}
3711
eb1f7f71 3712int mark_chain_precision(struct bpf_verifier_env *env, int regno)
a3ce685d 3713{
529409ea 3714 return __mark_chain_precision(env, env->cur_state->curframe, regno, -1);
3715}
3716
529409ea 3717static int mark_chain_precision_frame(struct bpf_verifier_env *env, int frame, int regno)
a3ce685d 3718{
529409ea 3719 return __mark_chain_precision(env, frame, regno, -1);
3720}
3721
529409ea 3722static int mark_chain_precision_stack_frame(struct bpf_verifier_env *env, int frame, int spi)
a3ce685d 3723{
529409ea 3724 return __mark_chain_precision(env, frame, -1, spi);
a3ce685d 3725}
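
/* Editor's usage sketch (hypothetical call site, for illustration only):
 * when an insn consumes a scalar's exact value, e.g. a helper argument
 * of type ARG_CONST_SIZE, the caller requests precision for it:
 *
 *   err = mark_chain_precision(env, regno);  // regno holds the size
 *   if (err)
 *           return err;
 *
 * The frame-aware variants (mark_chain_precision_frame(),
 * mark_chain_precision_stack_frame()) are for scalars living in a frame
 * other than the current one, e.g. around callback-calling helpers.
 */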
b5dc0163 3726
3727static bool is_spillable_regtype(enum bpf_reg_type type)
3728{
c25b2ae1 3729 switch (base_type(type)) {
1be7f75d 3730 case PTR_TO_MAP_VALUE:
3731 case PTR_TO_STACK:
3732 case PTR_TO_CTX:
969bf05e 3733 case PTR_TO_PACKET:
de8f3a83 3734 case PTR_TO_PACKET_META:
969bf05e 3735 case PTR_TO_PACKET_END:
d58e468b 3736 case PTR_TO_FLOW_KEYS:
1be7f75d 3737 case CONST_PTR_TO_MAP:
c64b7983 3738 case PTR_TO_SOCKET:
46f8bc92 3739 case PTR_TO_SOCK_COMMON:
655a51e5 3740 case PTR_TO_TCP_SOCK:
fada7fdc 3741 case PTR_TO_XDP_SOCK:
65726b5b 3742 case PTR_TO_BTF_ID:
20b2aff4 3743 case PTR_TO_BUF:
744ea4e3 3744 case PTR_TO_MEM:
3745 case PTR_TO_FUNC:
3746 case PTR_TO_MAP_KEY:
3747 return true;
3748 default:
3749 return false;
3750 }
3751}
3752
3753/* Does this register contain a constant zero? */
3754static bool register_is_null(struct bpf_reg_state *reg)
3755{
3756 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
3757}
3758
3759static bool register_is_const(struct bpf_reg_state *reg)
3760{
3761 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
3762}
3763
3764static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
3765{
3766 return tnum_is_unknown(reg->var_off) &&
3767 reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
3768 reg->umin_value == 0 && reg->umax_value == U64_MAX &&
3769 reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
3770 reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
3771}
3772
3773static bool register_is_bounded(struct bpf_reg_state *reg)
3774{
3775 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
3776}
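
/* Editor's illustrative values (added; not from the original source):
 * a register known to be exactly 42 has var_off = (value=42, mask=0),
 * so register_is_const() holds; a register that only went through
 * "r1 &= 0xff" has var_off = (value=0, mask=0xff), which is not const,
 * but register_is_bounded() holds since its umax/u32 max bounds are 255
 * rather than the U64_MAX/U32_MAX extremes required for "unbounded".
 */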
3777
3778static bool __is_pointer_value(bool allow_ptr_leaks,
3779 const struct bpf_reg_state *reg)
3780{
3781 if (allow_ptr_leaks)
3782 return false;
3783
3784 return reg->type != SCALAR_VALUE;
3785}
3786
3787/* Copy src state preserving dst->parent and dst->live fields */
3788static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
3789{
3790 struct bpf_reg_state *parent = dst->parent;
3791 enum bpf_reg_liveness live = dst->live;
3792
3793 *dst = *src;
3794 dst->parent = parent;
3795 dst->live = live;
3796}
3797
f7cf25b2 3798static void save_register_state(struct bpf_func_state *state,
3799 int spi, struct bpf_reg_state *reg,
3800 int size)
3801{
3802 int i;
3803
71f656a5 3804 copy_register_state(&state->stack[spi].spilled_ptr, reg);
3805 if (size == BPF_REG_SIZE)
3806 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
f7cf25b2 3807
3808 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
3809 state->stack[spi].slot_type[i - 1] = STACK_SPILL;
f7cf25b2 3810
3811 /* size < 8 bytes spill */
3812 for (; i; i--)
3813 scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
3814}
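
/* Editor's illustration of the resulting slot layout (added; assumed
 * example): a 4-byte spill into an 8-byte slot leaves
 *
 *   slot_type[7..4] = STACK_SPILL   // bytes covered by the write
 *   slot_type[3..0]   scrubbed      // STACK_MISC if previously SPILL
 *
 * and REG_LIVE_WRITTEN is set only for full BPF_REG_SIZE spills, so a
 * partial spill does not terminate read propagation into parents.
 */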
3815
3816static bool is_bpf_st_mem(struct bpf_insn *insn)
3817{
3818 return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
3819}
3820
01f810ac 3821/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
3822 * stack boundary and alignment are checked in check_mem_access()
3823 */
3824static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
3825 /* stack frame we're writing to */
3826 struct bpf_func_state *state,
3827 int off, int size, int value_regno,
3828 int insn_idx)
17a52670 3829{
f4d7e40a 3830 struct bpf_func_state *cur; /* state of the current function */
638f5b90 3831 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
ecdf985d 3832 struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
f7cf25b2 3833 struct bpf_reg_state *reg = NULL;
ecdf985d 3834 u32 dst_reg = insn->dst_reg;
638f5b90 3835
c69431aa 3836 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
3837 if (err)
3838 return err;
3839 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
3840 * so it's aligned access and [off, off + size) are within stack limits
3841 */
3842 if (!env->allow_ptr_leaks &&
3843 state->stack[spi].slot_type[0] == STACK_SPILL &&
3844 size != BPF_REG_SIZE) {
3845 verbose(env, "attempt to corrupt spilled pointer on stack\n");
3846 return -EACCES;
3847 }
17a52670 3848
f4d7e40a 3849 cur = env->cur_state->frame[env->cur_state->curframe];
3850 if (value_regno >= 0)
3851 reg = &cur->regs[value_regno];
3852 if (!env->bypass_spec_v4) {
3853 bool sanitize = reg && is_spillable_regtype(reg->type);
3854
3855 for (i = 0; i < size; i++) {
3856 u8 type = state->stack[spi].slot_type[i];
3857
3858 if (type != STACK_MISC && type != STACK_ZERO) {
3859 sanitize = true;
3860 break;
3861 }
3862 }
3863
3864 if (sanitize)
3865 env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
3866 }
17a52670 3867
3868 err = destroy_if_dynptr_stack_slot(env, state, spi);
3869 if (err)
3870 return err;
3871
0f55f9ed 3872 mark_stack_slot_scratched(env, spi);
354e8f19 3873 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
2c78ee89 3874 !register_is_null(reg) && env->bpf_capable) {
3875 if (dst_reg != BPF_REG_FP) {
3876 /* The backtracking logic can only recognize explicit
3877 * stack slot address like [fp - 8]. Other spill of
8fb33b60 3878 * scalar via different register has to be conservative.
3879 * Backtrack from here and mark all registers as precise
3880 * that contributed into 'reg' being a constant.
3881 */
3882 err = mark_chain_precision(env, value_regno);
3883 if (err)
3884 return err;
3885 }
354e8f19 3886 save_register_state(state, spi, reg, size);
3887 } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
3888 insn->imm != 0 && env->bpf_capable) {
3889 struct bpf_reg_state fake_reg = {};
3890
3891 __mark_reg_known(&fake_reg, (u32)insn->imm);
3892 fake_reg.type = SCALAR_VALUE;
3893 save_register_state(state, spi, &fake_reg, size);
f7cf25b2 3894 } else if (reg && is_spillable_regtype(reg->type)) {
17a52670 3895 /* register containing pointer is being spilled into stack */
9c399760 3896 if (size != BPF_REG_SIZE) {
f7cf25b2 3897 verbose_linfo(env, insn_idx, "; ");
61bd5218 3898 verbose(env, "invalid size of register spill\n");
3899 return -EACCES;
3900 }
f7cf25b2 3901 if (state != cur && reg->type == PTR_TO_STACK) {
3902 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
3903 return -EINVAL;
3904 }
354e8f19 3905 save_register_state(state, spi, reg, size);
9c399760 3906 } else {
3907 u8 type = STACK_MISC;
3908
3909 /* regular write of data into stack destroys any spilled ptr */
3910 state->stack[spi].spilled_ptr.type = NOT_INIT;
3911 /* Mark slots as STACK_MISC if they belonged to spilled ptr/dynptr/iter. */
3912 if (is_stack_slot_special(&state->stack[spi]))
0bae2d4d 3913 for (i = 0; i < BPF_REG_SIZE; i++)
354e8f19 3914 scrub_spilled_slot(&state->stack[spi].slot_type[i]);
9c399760 3915
3916 /* only mark the slot as written if all 8 bytes were written
3917 * otherwise read propagation may incorrectly stop too soon
3918 * when stack slots are partially written.
3919 * This heuristic means that read propagation will be
3920 * conservative, since it will add reg_live_read marks
3921 * to stack slots all the way to the first state when a program
3922 * writes+reads less than 8 bytes
3923 */
3924 if (size == BPF_REG_SIZE)
3925 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3926
3927 /* when we zero initialize stack slots mark them as such */
3928 if ((reg && register_is_null(reg)) ||
3929 (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
3930 /* backtracking doesn't work for STACK_ZERO yet. */
3931 err = mark_chain_precision(env, value_regno);
3932 if (err)
3933 return err;
cc2b14d5 3934 type = STACK_ZERO;
b5dc0163 3935 }
cc2b14d5 3936
0bae2d4d 3937 /* Mark slots affected by this stack write. */
9c399760 3938 for (i = 0; i < size; i++)
638f5b90 3939 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
cc2b14d5 3940 type;
3941 }
3942 return 0;
3943}
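
/* Editor's illustrative sequence (added; not from the original source):
 *
 *   r0 = 5                   // known, bounded scalar
 *   *(u64 *)(r10 - 8) = r0   // fixed-off write, dst_reg == BPF_REG_FP
 *
 * takes the save_register_state() path above: fp-8 becomes STACK_SPILL
 * holding the known scalar 5, which a later fill restores exactly. The
 * same store through a copied pointer (r3 = r10; *(u64 *)(r3 - 8) = r0)
 * first forces mark_chain_precision() on r0, since backtracking only
 * recognizes spills addressed directly via the frame pointer.
 */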
3944
3945/* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
3946 * known to contain a variable offset.
3947 * This function checks whether the write is permitted and conservatively
3948 * tracks the effects of the write, considering that each stack slot in the
3949 * dynamic range is potentially written to.
3950 *
3951 * 'off' includes 'regno->off'.
3952 * 'value_regno' can be -1, meaning that an unknown value is being written to
3953 * the stack.
3954 *
3955 * Spilled pointers in range are not marked as written because we don't know
3956 * what's going to be actually written. This means that read propagation for
3957 * future reads cannot be terminated by this write.
3958 *
3959 * For privileged programs, uninitialized stack slots are considered
3960 * initialized by this write (even though we don't know exactly what offsets
3961 * are going to be written to). The idea is that we don't want the verifier to
3962 * reject future reads that access slots written to through variable offsets.
3963 */
3964static int check_stack_write_var_off(struct bpf_verifier_env *env,
3965 /* func where register points to */
3966 struct bpf_func_state *state,
3967 int ptr_regno, int off, int size,
3968 int value_regno, int insn_idx)
3969{
3970 struct bpf_func_state *cur; /* state of the current function */
3971 int min_off, max_off;
3972 int i, err;
3973 struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
31ff2135 3974 struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
3975 bool writing_zero = false;
3976 /* set if the fact that we're writing a zero is used to let any
3977 * stack slots remain STACK_ZERO
3978 */
3979 bool zero_used = false;
3980
3981 cur = env->cur_state->frame[env->cur_state->curframe];
3982 ptr_reg = &cur->regs[ptr_regno];
3983 min_off = ptr_reg->smin_value + off;
3984 max_off = ptr_reg->smax_value + off + size;
3985 if (value_regno >= 0)
3986 value_reg = &cur->regs[value_regno];
3987 if ((value_reg && register_is_null(value_reg)) ||
3988 (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0))
3989 writing_zero = true;
3990
c69431aa 3991 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
3992 if (err)
3993 return err;
3994
3995 for (i = min_off; i < max_off; i++) {
3996 int spi;
3997
3998 spi = __get_spi(i);
3999 err = destroy_if_dynptr_stack_slot(env, state, spi);
4000 if (err)
4001 return err;
4002 }
4003
4004 /* Variable offset writes destroy any spilled pointers in range. */
4005 for (i = min_off; i < max_off; i++) {
4006 u8 new_type, *stype;
4007 int slot, spi;
4008
4009 slot = -i - 1;
4010 spi = slot / BPF_REG_SIZE;
4011 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
0f55f9ed 4012 mark_stack_slot_scratched(env, spi);
01f810ac 4013
4014 if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) {
4015 /* Reject the write if the range we may write to has not
4016 * been initialized beforehand. If we didn't reject
4017 * here, the ptr status would be erased below (even
4018 * though not all slots are actually overwritten),
4019 * possibly opening the door to leaks.
4020 *
4021 * We do however catch STACK_INVALID case below, and
4022 * only allow reading possibly uninitialized memory
4023 * later for CAP_PERFMON, as the write may not happen to
4024 * that slot.
4025 */
4026 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
4027 insn_idx, i);
4028 return -EINVAL;
4029 }
4030
4031 /* Erase all spilled pointers. */
4032 state->stack[spi].spilled_ptr.type = NOT_INIT;
4033
4034 /* Update the slot type. */
4035 new_type = STACK_MISC;
4036 if (writing_zero && *stype == STACK_ZERO) {
4037 new_type = STACK_ZERO;
4038 zero_used = true;
4039 }
4040 /* If the slot is STACK_INVALID, we check whether it's OK to
4041 * pretend that it will be initialized by this write. The slot
4042 * might not actually be written to, and so if we mark it as
4043 * initialized future reads might leak uninitialized memory.
4044 * For privileged programs, we will accept such reads to slots
4045 * that may or may not be written because, if we were to reject
4046 * them, the error would be too confusing.
4047 */
4048 if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
4049 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
4050 insn_idx, i);
4051 return -EINVAL;
4052 }
4053 *stype = new_type;
4054 }
4055 if (zero_used) {
4056 /* backtracking doesn't work for STACK_ZERO yet. */
4057 err = mark_chain_precision(env, value_regno);
4058 if (err)
4059 return err;
4060 }
4061 return 0;
4062}
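
/* Editor's illustrative case (hypothetical ranges, added): a write via
 *
 *   r2 = r10
 *   r2 += r4                 // r4 known to be in [-16, -8]
 *   *(u64 *)(r2 + 0) = r3
 *
 * gives min_off = -16 and max_off = 0, so every byte from fp-16 up to
 * fp-1 is conservatively treated as potentially written: spilled
 * pointers there are erased and the bytes become STACK_MISC (or remain
 * STACK_ZERO when a zero is being written).
 */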
4063
4064/* When register 'dst_regno' is assigned some values from stack[min_off,
4065 * max_off), we set the register's type according to the types of the
4066 * respective stack slots. If all the stack values are known to be zeros, then
4067 * so is the destination reg. Otherwise, the register is considered to be
4068 * SCALAR. This function does not deal with register filling; the caller must
4069 * ensure that all spilled registers in the stack range have been marked as
4070 * read.
4071 */
4072static void mark_reg_stack_read(struct bpf_verifier_env *env,
4073 /* func where src register points to */
4074 struct bpf_func_state *ptr_state,
4075 int min_off, int max_off, int dst_regno)
4076{
4077 struct bpf_verifier_state *vstate = env->cur_state;
4078 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4079 int i, slot, spi;
4080 u8 *stype;
4081 int zeros = 0;
4082
4083 for (i = min_off; i < max_off; i++) {
4084 slot = -i - 1;
4085 spi = slot / BPF_REG_SIZE;
4086 stype = ptr_state->stack[spi].slot_type;
4087 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
4088 break;
4089 zeros++;
4090 }
4091 if (zeros == max_off - min_off) {
4092 /* any access_size read into register is zero extended,
4093 * so the whole register == const_zero
4094 */
4095 __mark_reg_const_zero(&state->regs[dst_regno]);
4096 /* backtracking doesn't support STACK_ZERO yet,
4097 * so mark it precise here, so that later
4098 * backtracking can stop here.
4099 * Backtracking may not need this if this register
4100 * doesn't participate in pointer adjustment.
4101 * Forward propagation of precise flag is not
4102 * necessary either. This mark is only to stop
4103 * backtracking. Any register that contributed
4104 * to const 0 was marked precise before spill.
4105 */
4106 state->regs[dst_regno].precise = true;
4107 } else {
4108 /* have read misc data from the stack */
4109 mark_reg_unknown(env, state->regs, dst_regno);
4110 }
4111 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4112}
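
/* Editor's illustration (added): reading 4 bytes where every byte in
 * stack[min_off, max_off) is STACK_ZERO gives zeros == max_off - min_off,
 * so dst_regno becomes a known const 0 (and is marked precise to stop
 * backtracking); a single STACK_MISC byte instead yields an unknown
 * scalar via mark_reg_unknown().
 */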
4113
4114/* Read the stack at 'off' and put the results into the register indicated by
4115 * 'dst_regno'. It handles reg filling if the addressed stack slot is a
4116 * spilled reg.
4117 *
4118 * 'dst_regno' can be -1, meaning that the read value is not going to a
4119 * register.
4120 *
4121 * The access is assumed to be within the current stack bounds.
4122 */
4123static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
4124 /* func where src register points to */
4125 struct bpf_func_state *reg_state,
4126 int off, int size, int dst_regno)
17a52670 4127{
4128 struct bpf_verifier_state *vstate = env->cur_state;
4129 struct bpf_func_state *state = vstate->frame[vstate->curframe];
638f5b90 4130 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
f7cf25b2 4131 struct bpf_reg_state *reg;
354e8f19 4132 u8 *stype, type;
17a52670 4133
f4d7e40a 4134 stype = reg_state->stack[spi].slot_type;
f7cf25b2 4135 reg = &reg_state->stack[spi].spilled_ptr;
17a52670 4136
27113c59 4137 if (is_spilled_reg(&reg_state->stack[spi])) {
4138 u8 spill_size = 1;
4139
4140 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
4141 spill_size++;
354e8f19 4142
f30d4968 4143 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
4144 if (reg->type != SCALAR_VALUE) {
4145 verbose_linfo(env, env->insn_idx, "; ");
4146 verbose(env, "invalid size of register fill\n");
4147 return -EACCES;
4148 }
4149
4150 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4151 if (dst_regno < 0)
4152 return 0;
4153
f30d4968 4154 if (!(off % BPF_REG_SIZE) && size == spill_size) {
4155 /* The earlier check_reg_arg() has decided the
4156 * subreg_def for this insn. Save it first.
4157 */
4158 s32 subreg_def = state->regs[dst_regno].subreg_def;
4159
71f656a5 4160 copy_register_state(&state->regs[dst_regno], reg);
4161 state->regs[dst_regno].subreg_def = subreg_def;
4162 } else {
4163 for (i = 0; i < size; i++) {
4164 type = stype[(slot - i) % BPF_REG_SIZE];
4165 if (type == STACK_SPILL)
4166 continue;
4167 if (type == STACK_MISC)
4168 continue;
4169 if (type == STACK_INVALID && env->allow_uninit_stack)
4170 continue;
4171 verbose(env, "invalid read from stack off %d+%d size %d\n",
4172 off, i, size);
4173 return -EACCES;
4174 }
01f810ac 4175 mark_reg_unknown(env, state->regs, dst_regno);
f7cf25b2 4176 }
354e8f19 4177 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
f7cf25b2 4178 return 0;
17a52670 4179 }
17a52670 4180
01f810ac 4181 if (dst_regno >= 0) {
17a52670 4182 /* restore register state from stack */
71f656a5 4183 copy_register_state(&state->regs[dst_regno], reg);
4184 /* mark reg as written since spilled pointer state likely
4185 * has its liveness marks cleared by is_state_visited()
4186 * which resets stack/reg liveness for state transitions
4187 */
01f810ac 4188 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
6e7e63cb 4189 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
01f810ac 4190 /* If dst_regno==-1, the caller is asking us whether
4191 * it is acceptable to use this value as a SCALAR_VALUE
4192 * (e.g. for XADD).
4193 * We must not allow unprivileged callers to do that
4194 * with spilled pointers.
4195 */
4196 verbose(env, "leaking pointer from stack off %d\n",
4197 off);
4198 return -EACCES;
dc503a8a 4199 }
f7cf25b2 4200 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4201 } else {
4202 for (i = 0; i < size; i++) {
4203 type = stype[(slot - i) % BPF_REG_SIZE];
4204 if (type == STACK_MISC)
cc2b14d5 4205 continue;
01f810ac 4206 if (type == STACK_ZERO)
cc2b14d5 4207 continue;
4208 if (type == STACK_INVALID && env->allow_uninit_stack)
4209 continue;
4210 verbose(env, "invalid read from stack off %d+%d size %d\n",
4211 off, i, size);
4212 return -EACCES;
4213 }
f7cf25b2 4214 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4215 if (dst_regno >= 0)
4216 mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
17a52670 4217 }
f7cf25b2 4218 return 0;
4219}
4220
61df10c7 4221enum bpf_access_src {
4222 ACCESS_DIRECT = 1, /* the access is performed by an instruction */
4223 ACCESS_HELPER = 2, /* the access is performed by a helper */
4224};
4225
4226static int check_stack_range_initialized(struct bpf_verifier_env *env,
4227 int regno, int off, int access_size,
4228 bool zero_size_allowed,
61df10c7 4229 enum bpf_access_src type,
4230 struct bpf_call_arg_meta *meta);
4231
4232static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
4233{
4234 return cur_regs(env) + regno;
4235}
4236
4237/* Read the stack at 'ptr_regno + off' and put the result into the register
4238 * 'dst_regno'.
4239 * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
4240 * but not its variable offset.
4241 * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
4242 *
4243 * As opposed to check_stack_read_fixed_off, this function doesn't deal with
4244 * filling registers (i.e. reads of spilled register cannot be detected when
4245 * the offset is not fixed). We conservatively mark 'dst_regno' as containing
4246 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
4247 * offset; for a fixed offset check_stack_read_fixed_off should be used
4248 * instead.
4249 */
4250static int check_stack_read_var_off(struct bpf_verifier_env *env,
4251 int ptr_regno, int off, int size, int dst_regno)
e4298d25 4252{
4253 /* The state of the source register. */
4254 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
4255 struct bpf_func_state *ptr_state = func(env, reg);
4256 int err;
4257 int min_off, max_off;
4258
4259 /* Note that we pass a NULL meta, so raw access will not be permitted.
e4298d25 4260 */
4261 err = check_stack_range_initialized(env, ptr_regno, off, size,
4262 false, ACCESS_DIRECT, NULL);
4263 if (err)
4264 return err;
4265
4266 min_off = reg->smin_value + off;
4267 max_off = reg->smax_value + off;
4268 mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
4269 return 0;
4270}
4271
4272/* check_stack_read dispatches to check_stack_read_fixed_off or
4273 * check_stack_read_var_off.
4274 *
4275 * The caller must ensure that the offset falls within the allocated stack
4276 * bounds.
4277 *
4278 * 'dst_regno' is a register which will receive the value from the stack. It
4279 * can be -1, meaning that the read value is not going to a register.
4280 */
4281static int check_stack_read(struct bpf_verifier_env *env,
4282 int ptr_regno, int off, int size,
4283 int dst_regno)
4284{
4285 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
4286 struct bpf_func_state *state = func(env, reg);
4287 int err;
4288 /* Some accesses are only permitted with a static offset. */
4289 bool var_off = !tnum_is_const(reg->var_off);
4290
4291 /* The offset is required to be static when reads don't go to a
4292 * register, in order to not leak pointers (see
4293 * check_stack_read_fixed_off).
4294 */
4295 if (dst_regno < 0 && var_off) {
e4298d25
DB
4296 char tn_buf[48];
4297
4298 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
01f810ac 4299 verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
4300 tn_buf, off, size);
4301 return -EACCES;
4302 }
4303 /* Variable offset is prohibited for unprivileged mode for simplicity
4304 * since it requires corresponding support in Spectre masking for stack
4305 * ALU. See also retrieve_ptr_limit().
4306 */
4307 if (!env->bypass_spec_v1 && var_off) {
4308 char tn_buf[48];
e4298d25 4309
4310 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4311 verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
4312 ptr_regno, tn_buf);
4313 return -EACCES;
4314 }
4315
4316 if (!var_off) {
4317 off += reg->var_off.value;
4318 err = check_stack_read_fixed_off(env, state, off, size,
4319 dst_regno);
4320 } else {
4321 /* Variable offset stack reads need more conservative handling
4322 * than fixed offset ones. Note that dst_regno >= 0 on this
4323 * branch.
4324 */
4325 err = check_stack_read_var_off(env, ptr_regno, off, size,
4326 dst_regno);
4327 }
4328 return err;
4329}
4330
4331
4332/* check_stack_write dispatches to check_stack_write_fixed_off or
4333 * check_stack_write_var_off.
4334 *
4335 * 'ptr_regno' is the register used as a pointer into the stack.
4336 * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
4337 * 'value_regno' is the register whose value we're writing to the stack. It can
4338 * be -1, meaning that we're not writing from a register.
4339 *
4340 * The caller must ensure that the offset falls within the maximum stack size.
4341 */
4342static int check_stack_write(struct bpf_verifier_env *env,
4343 int ptr_regno, int off, int size,
4344 int value_regno, int insn_idx)
4345{
4346 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
4347 struct bpf_func_state *state = func(env, reg);
4348 int err;
4349
4350 if (tnum_is_const(reg->var_off)) {
4351 off += reg->var_off.value;
4352 err = check_stack_write_fixed_off(env, state, off, size,
4353 value_regno, insn_idx);
4354 } else {
4355 /* Variable offset stack writes need more conservative handling
4356 * than fixed offset ones.
4357 */
4358 err = check_stack_write_var_off(env, state,
4359 ptr_regno, off, size,
4360 value_regno, insn_idx);
4361 }
4362 return err;
4363}
4364
4365static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
4366 int off, int size, enum bpf_access_type type)
4367{
4368 struct bpf_reg_state *regs = cur_regs(env);
4369 struct bpf_map *map = regs[regno].map_ptr;
4370 u32 cap = bpf_map_flags_to_cap(map);
4371
4372 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
4373 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
4374 map->value_size, off, size);
4375 return -EACCES;
4376 }
4377
4378 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
4379 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
4380 map->value_size, off, size);
4381 return -EACCES;
4382 }
4383
4384 return 0;
4385}
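
/* Editor's sketch of the user-visible effect (hypothetical map, BPF C):
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_ARRAY);
 *           __uint(map_flags, BPF_F_RDONLY_PROG);
 *           __uint(max_entries, 1);
 *           __type(key, __u32);
 *           __type(value, __u64);
 *   } ro_map SEC(".maps");
 *
 * BPF_F_RDONLY_PROG clears BPF_MAP_CAN_WRITE in bpf_map_flags_to_cap(),
 * so any BPF_WRITE into this map's values is rejected above with
 * "write into map forbidden".
 */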
4386
4387/* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
4388static int __check_mem_access(struct bpf_verifier_env *env, int regno,
4389 int off, int size, u32 mem_size,
4390 bool zero_size_allowed)
17a52670 4391{
4392 bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
4393 struct bpf_reg_state *reg;
4394
4395 if (off >= 0 && size_ok && (u64)off + size <= mem_size)
4396 return 0;
17a52670 4397
4398 reg = &cur_regs(env)[regno];
4399 switch (reg->type) {
4400 case PTR_TO_MAP_KEY:
4401 verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
4402 mem_size, off, size);
4403 break;
457f4436 4404 case PTR_TO_MAP_VALUE:
61bd5218 4405 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
4406 mem_size, off, size);
4407 break;
4408 case PTR_TO_PACKET:
4409 case PTR_TO_PACKET_META:
4410 case PTR_TO_PACKET_END:
4411 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
4412 off, size, regno, reg->id, off, mem_size);
4413 break;
4414 case PTR_TO_MEM:
4415 default:
4416 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
4417 mem_size, off, size);
17a52670 4418 }
4419
4420 return -EACCES;
4421}
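
/* Editor's worked numbers (illustrative, added): with mem_size = 8, an
 * access off = 4, size = 4 passes since 4 + 4 <= 8, while off = 6,
 * size = 4 fails (6 + 4 > 8) and is reported with the register-type
 * specific message above. The (u64) cast keeps "off + size" from
 * wrapping for large offsets.
 */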
4422
4423/* check read/write into a memory region with possible variable offset */
4424static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
4425 int off, int size, u32 mem_size,
4426 bool zero_size_allowed)
dbcfe5f7 4427{
4428 struct bpf_verifier_state *vstate = env->cur_state;
4429 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4430 struct bpf_reg_state *reg = &state->regs[regno];
4431 int err;
4432
457f4436 4433 /* We may have adjusted the register pointing to memory region, so we
4434 * need to try adding each of min_value and max_value to off
4435 * to make sure our theoretical access will be safe.
4436 *
4437 * The minimum value is only important with signed
4438 * comparisons where we can't assume the floor of a
4439 * value is 0. If we are using signed variables for our
4440 * indexes we need to make sure that whatever we use
4441 * will have a set floor within our range.
4442 */
4443 if (reg->smin_value < 0 &&
4444 (reg->smin_value == S64_MIN ||
4445 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
4446 reg->smin_value + off < 0)) {
61bd5218 4447 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
4448 regno);
4449 return -EACCES;
4450 }
4451 err = __check_mem_access(env, regno, reg->smin_value + off, size,
4452 mem_size, zero_size_allowed);
dbcfe5f7 4453 if (err) {
457f4436 4454 verbose(env, "R%d min value is outside of the allowed memory range\n",
61bd5218 4455 regno);
4456 return err;
4457 }
4458
4459 /* If we haven't set a max value then we need to bail since we can't be
4460 * sure we won't do bad things.
4461 * If reg->umax_value + off could overflow, treat that as unbounded too.
dbcfe5f7 4462 */
b03c9f9f 4463 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
457f4436 4464 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
4465 regno);
4466 return -EACCES;
4467 }
4468 err = __check_mem_access(env, regno, reg->umax_value + off, size,
4469 mem_size, zero_size_allowed);
4470 if (err) {
4471 verbose(env, "R%d max value is outside of the allowed memory range\n",
61bd5218 4472 regno);
4473 return err;
4474 }
4475
4476 return 0;
4477}
d83525ca 4478
4479static int __check_ptr_off_reg(struct bpf_verifier_env *env,
4480 const struct bpf_reg_state *reg, int regno,
4481 bool fixed_off_ok)
4482{
4483 /* Access to this pointer-typed register or passing it to a helper
4484 * is only allowed in its original, unmodified form.
4485 */
4486
4487 if (reg->off < 0) {
4488 verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
4489 reg_type_str(env, reg->type), regno, reg->off);
4490 return -EACCES;
4491 }
4492
4493 if (!fixed_off_ok && reg->off) {
4494 verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
4495 reg_type_str(env, reg->type), regno, reg->off);
4496 return -EACCES;
4497 }
4498
4499 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4500 char tn_buf[48];
4501
4502 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4503 verbose(env, "variable %s access var_off=%s disallowed\n",
4504 reg_type_str(env, reg->type), tn_buf);
4505 return -EACCES;
4506 }
4507
4508 return 0;
4509}
4510
4511int check_ptr_off_reg(struct bpf_verifier_env *env,
4512 const struct bpf_reg_state *reg, int regno)
4513{
4514 return __check_ptr_off_reg(env, reg, regno, false);
4515}
4516
61df10c7 4517static int map_kptr_match_type(struct bpf_verifier_env *env,
aa3496ac 4518 struct btf_field *kptr_field,
4519 struct bpf_reg_state *reg, u32 regno)
4520{
b32a5dae 4521 const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id);
20c09d92 4522 int perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU;
4523 const char *reg_name = "";
4524
6efe152d 4525 /* Only unreferenced case accepts untrusted pointers */
aa3496ac 4526 if (kptr_field->type == BPF_KPTR_UNREF)
4527 perm_flags |= PTR_UNTRUSTED;
4528
4529 if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
4530 goto bad_type;
4531
4532 if (!btf_is_kernel(reg->btf)) {
4533 verbose(env, "R%d must point to kernel BTF\n", regno);
4534 return -EINVAL;
4535 }
4536 /* We need to verify reg->type and reg->btf, before accessing reg->btf */
b32a5dae 4537 reg_name = btf_type_name(reg->btf, reg->btf_id);
61df10c7 4538
4539 /* For ref_ptr case, release function check should ensure we get one
4540 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the
4541 * normal store of unreferenced kptr, we must ensure var_off is zero.
4542 * Since ref_ptr cannot be accessed directly by BPF insns, checks for
4543 * reg->off and reg->ref_obj_id are not needed here.
4544 */
4545 if (__check_ptr_off_reg(env, reg, regno, true))
4546 return -EACCES;
4547
4548 /* A full type match is needed, as BTF can be vmlinux or module BTF, and
4549 * we also need to take into account the reg->off.
4550 *
4551 * We want to support cases like:
4552 *
4553 * struct foo {
4554 * struct bar br;
4555 * struct baz bz;
4556 * };
4557 *
4558 * struct foo *v;
4559 * v = func(); // PTR_TO_BTF_ID
4560 * val->foo = v; // reg->off is zero, btf and btf_id match type
4561 * val->bar = &v->br; // reg->off is still zero, but we need to retry with
4562 * // first member type of struct after comparison fails
4563 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
4564 * // to match type
4565 *
4566 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off
4567 * is zero. We must also ensure that btf_struct_ids_match does not walk
4568 * the struct to match type against first member of struct, i.e. reject
4569 * second case from above. Hence, when type is BPF_KPTR_REF, we set
4570 * strict mode to true for type match.
4571 */
4572 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
4573 kptr_field->kptr.btf, kptr_field->kptr.btf_id,
4574 kptr_field->type == BPF_KPTR_REF))
4575 goto bad_type;
4576 return 0;
4577bad_type:
4578 verbose(env, "invalid kptr access, R%d type=%s%s ", regno,
4579 reg_type_str(env, reg->type), reg_name);
6efe152d 4580 verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
aa3496ac 4581 if (kptr_field->type == BPF_KPTR_UNREF)
4582 verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED),
4583 targ_name);
4584 else
4585 verbose(env, "\n");
4586 return -EINVAL;
4587}
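
/* Editor's sketch of a matching store (hypothetical types; the exact
 * BTF type tag spelling varies across kernel versions):
 *
 *   struct map_value {
 *           struct task_struct __kptr *task;  // referenced kptr field
 *   };
 *   ...
 *   old = bpf_kptr_xchg(&v->task, acquired_task);
 *
 * The exchanged pointer must be a PTR_TO_BTF_ID of task_struct with a
 * zero offset; an unrelated type (or a pointer walked into an inner
 * struct) fails the btf_struct_ids_match() check above and triggers
 * the "invalid kptr access" message.
 */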
4588
4589/* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
4590 * can dereference RCU protected pointers and result is PTR_TRUSTED.
4591 */
4592static bool in_rcu_cs(struct bpf_verifier_env *env)
4593{
4594 return env->cur_state->active_rcu_lock || !env->prog->aux->sleepable;
4595}
4596
4597/* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
4598BTF_SET_START(rcu_protected_types)
4599BTF_ID(struct, prog_test_ref_kfunc)
4600BTF_ID(struct, cgroup)
4601BTF_SET_END(rcu_protected_types)
4602
4603static bool rcu_protected_object(const struct btf *btf, u32 btf_id)
4604{
4605 if (!btf_is_kernel(btf))
4606 return false;
4607 return btf_id_set_contains(&rcu_protected_types, btf_id);
4608}
4609
4610static bool rcu_safe_kptr(const struct btf_field *field)
4611{
4612 const struct btf_field_kptr *kptr = &field->kptr;
4613
4614 return field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id);
4615}
4616
4617static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
4618 int value_regno, int insn_idx,
aa3496ac 4619 struct btf_field *kptr_field)
4620{
4621 struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
4622 int class = BPF_CLASS(insn->code);
4623 struct bpf_reg_state *val_reg;
4624
4625 /* Things we already checked for in check_map_access and caller:
4626 * - Reject cases where variable offset may touch kptr
4627 * - size of access (must be BPF_DW)
4628 * - tnum_is_const(reg->var_off)
aa3496ac 4629 * - kptr_field->offset == off + reg->var_off.value
4630 */
4631 /* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */
4632 if (BPF_MODE(insn->code) != BPF_MEM) {
4633 verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n");
4634 return -EACCES;
4635 }
4636
4637 /* We only allow loading referenced kptr, since it will be marked as
4638 * untrusted, similar to unreferenced kptr.
4639 */
aa3496ac 4640 if (class != BPF_LDX && kptr_field->type == BPF_KPTR_REF) {
6efe152d 4641 verbose(env, "store to referenced kptr disallowed\n");
4642 return -EACCES;
4643 }
4644
4645 if (class == BPF_LDX) {
4646 val_reg = reg_state(env, value_regno);
4647 /* We can simply mark the value_regno receiving the pointer
4648 * value from map as PTR_TO_BTF_ID, with the correct type.
4649 */
aa3496ac 4650 mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf,
4651 kptr_field->kptr.btf_id,
4652 rcu_safe_kptr(kptr_field) && in_rcu_cs(env) ?
4653 PTR_MAYBE_NULL | MEM_RCU :
4654 PTR_MAYBE_NULL | PTR_UNTRUSTED);
4655 /* For mark_ptr_or_null_reg */
4656 val_reg->id = ++env->id_gen;
4657 } else if (class == BPF_STX) {
4658 val_reg = reg_state(env, value_regno);
4659 if (!register_is_null(val_reg) &&
aa3496ac 4660 map_kptr_match_type(env, kptr_field, val_reg, value_regno))
61df10c7
KKD
4661 return -EACCES;
4662 } else if (class == BPF_ST) {
4663 if (insn->imm) {
4664 verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n",
aa3496ac 4665 kptr_field->offset);
61df10c7
KKD
4666 return -EACCES;
4667 }
4668 } else {
4669 verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n");
4670 return -EACCES;
4671 }
4672 return 0;
4673}
4674
4675/* check read/write into a map element with possible variable offset */
4676static int check_map_access(struct bpf_verifier_env *env, u32 regno,
4677 int off, int size, bool zero_size_allowed,
4678 enum bpf_access_src src)
4679{
4680 struct bpf_verifier_state *vstate = env->cur_state;
4681 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4682 struct bpf_reg_state *reg = &state->regs[regno];
4683 struct bpf_map *map = reg->map_ptr;
4684 struct btf_record *rec;
4685 int err, i;
4686
4687 err = check_mem_region_access(env, regno, off, size, map->value_size,
4688 zero_size_allowed);
4689 if (err)
4690 return err;
4691
4692 if (IS_ERR_OR_NULL(map->record))
4693 return 0;
4694 rec = map->record;
4695 for (i = 0; i < rec->cnt; i++) {
4696 struct btf_field *field = &rec->fields[i];
4697 u32 p = field->offset;
d83525ca 4698
4699 /* If any part of a field can be touched by load/store, reject
4700 * this program. To check that [x1, x2) overlaps with [y1, y2),
4701 * it is sufficient to check x1 < y2 && y1 < x2.
4702 */
4703 if (reg->smin_value + off < p + btf_field_type_size(field->type) &&
4704 p < reg->umax_value + off + size) {
4705 switch (field->type) {
4706 case BPF_KPTR_UNREF:
4707 case BPF_KPTR_REF:
4708 if (src != ACCESS_DIRECT) {
4709 verbose(env, "kptr cannot be accessed indirectly by helper\n");
4710 return -EACCES;
4711 }
4712 if (!tnum_is_const(reg->var_off)) {
4713 verbose(env, "kptr access cannot have variable offset\n");
4714 return -EACCES;
4715 }
4716 if (p != off + reg->var_off.value) {
4717 verbose(env, "kptr access misaligned expected=%u off=%llu\n",
4718 p, off + reg->var_off.value);
4719 return -EACCES;
4720 }
4721 if (size != bpf_size_to_bytes(BPF_DW)) {
4722 verbose(env, "kptr access size must be BPF_DW\n");
4723 return -EACCES;
4724 }
4725 break;
aa3496ac 4726 default:
4727 verbose(env, "%s cannot be accessed directly by load/store\n",
4728 btf_field_type_name(field->type));
aa3496ac 4729 return -EACCES;
4730 }
4731 }
4732 }
aa3496ac 4733 return 0;
4734}
4735
4736#define MAX_PACKET_OFF 0xffff
4737
58e2af8b 4738static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
4739 const struct bpf_call_arg_meta *meta,
4740 enum bpf_access_type t)
4acf6c0b 4741{
4742 enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
4743
4744 switch (prog_type) {
5d66fa7d 4745 /* Program types only with direct read access go here! */
4746 case BPF_PROG_TYPE_LWT_IN:
4747 case BPF_PROG_TYPE_LWT_OUT:
004d4b27 4748 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2dbb9b9e 4749 case BPF_PROG_TYPE_SK_REUSEPORT:
5d66fa7d 4750 case BPF_PROG_TYPE_FLOW_DISSECTOR:
d5563d36 4751 case BPF_PROG_TYPE_CGROUP_SKB:
4752 if (t == BPF_WRITE)
4753 return false;
8731745e 4754 fallthrough;
4755
4756 /* Program types with direct read + write access go here! */
4757 case BPF_PROG_TYPE_SCHED_CLS:
4758 case BPF_PROG_TYPE_SCHED_ACT:
4acf6c0b 4759 case BPF_PROG_TYPE_XDP:
3a0af8fd 4760 case BPF_PROG_TYPE_LWT_XMIT:
8a31db56 4761 case BPF_PROG_TYPE_SK_SKB:
4f738adb 4762 case BPF_PROG_TYPE_SK_MSG:
4763 if (meta)
4764 return meta->pkt_access;
4765
4766 env->seen_direct_write = true;
4acf6c0b 4767 return true;
4768
4769 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4770 if (t == BPF_WRITE)
4771 env->seen_direct_write = true;
4772
4773 return true;
4774
4acf6c0b
BB
4775 default:
4776 return false;
4777 }
4778}
4779
f1174f77 4780static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 4781 int size, bool zero_size_allowed)
f1174f77 4782{
638f5b90 4783 struct bpf_reg_state *regs = cur_regs(env);
f1174f77
EC
4784 struct bpf_reg_state *reg = &regs[regno];
4785 int err;
4786
4787 /* We may have added a variable offset to the packet pointer; but any
4788 * reg->range we have comes after that. We are only checking the fixed
4789 * offset.
4790 */
4791
4792 /* We don't allow negative numbers, because we aren't tracking enough
4793 * detail to prove they're safe.
4794 */
b03c9f9f 4795 if (reg->smin_value < 0) {
61bd5218 4796		verbose(env, "R%d min value is negative, either use unsigned index or do an if (index >= 0) check.\n",
f1174f77
EC
4797 regno);
4798 return -EACCES;
4799 }
6d94e741
AS
4800
4801 err = reg->range < 0 ? -EINVAL :
4802 __check_mem_access(env, regno, off, size, reg->range,
457f4436 4803 zero_size_allowed);
f1174f77 4804 if (err) {
61bd5218 4805 verbose(env, "R%d offset is outside of the packet\n", regno);
f1174f77
EC
4806 return err;
4807 }
e647815a 4808
457f4436 4809 /* __check_mem_access has made sure "off + size - 1" is within u16.
e647815a
JW
4810 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
4811 * otherwise find_good_pkt_pointers would have refused to set range info
457f4436 4812 * that __check_mem_access would have rejected this pkt access.
e647815a
JW
4813 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
4814 */
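	/* For example: the check above caps "off + size - 1" at 0xffff, and
	 * reg->umax_value is itself capped at MAX_PACKET_OFF (0xffff), so
	 * the sum below is bounded by roughly 2 * 0xffff and fits in u32
	 * with plenty of headroom.
	 */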
4815 env->prog->aux->max_pkt_offset =
4816 max_t(u32, env->prog->aux->max_pkt_offset,
4817 off + reg->umax_value + size - 1);
4818
f1174f77
EC
4819 return err;
4820}
4821
4822/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
31fd8581 4823static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
9e15db66 4824 enum bpf_access_type t, enum bpf_reg_type *reg_type,
22dc4a0f 4825 struct btf **btf, u32 *btf_id)
17a52670 4826{
f96da094
DB
4827 struct bpf_insn_access_aux info = {
4828 .reg_type = *reg_type,
9e15db66 4829 .log = &env->log,
f96da094 4830 };
31fd8581 4831
4f9218aa 4832 if (env->ops->is_valid_access &&
5e43f899 4833 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
f96da094
DB
 4834		/* A non-zero info.ctx_field_size indicates that this field is a
4835 * candidate for later verifier transformation to load the whole
4836 * field and then apply a mask when accessed with a narrower
4837 * access than actual ctx access size. A zero info.ctx_field_size
4838 * will only allow for whole field access and rejects any other
4839 * type of narrower access.
31fd8581 4840 */
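		/* Illustrative example: for a 4-byte ctx field reported with
		 * ctx_field_size = 4, a 1-byte load is accepted here and
		 * later converted into a 4-byte load plus shift/mask, while
		 * ctx_field_size = 0 would only accept a load of the exact
		 * field size.
		 */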
23994631 4841 *reg_type = info.reg_type;
31fd8581 4842
c25b2ae1 4843 if (base_type(*reg_type) == PTR_TO_BTF_ID) {
22dc4a0f 4844 *btf = info.btf;
9e15db66 4845 *btf_id = info.btf_id;
22dc4a0f 4846 } else {
9e15db66 4847 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
22dc4a0f 4848 }
32bbe007
AS
4849 /* remember the offset of last byte accessed in ctx */
4850 if (env->prog->aux->max_ctx_offset < off + size)
4851 env->prog->aux->max_ctx_offset = off + size;
17a52670 4852 return 0;
32bbe007 4853 }
17a52670 4854
61bd5218 4855 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
17a52670
AS
4856 return -EACCES;
4857}
4858
d58e468b
PP
4859static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
4860 int size)
4861{
4862 if (size < 0 || off < 0 ||
4863 (u64)off + size > sizeof(struct bpf_flow_keys)) {
4864 verbose(env, "invalid access to flow keys off=%d size=%d\n",
4865 off, size);
4866 return -EACCES;
4867 }
4868 return 0;
4869}
4870
5f456649
MKL
4871static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
4872 u32 regno, int off, int size,
4873 enum bpf_access_type t)
c64b7983
JS
4874{
4875 struct bpf_reg_state *regs = cur_regs(env);
4876 struct bpf_reg_state *reg = &regs[regno];
5f456649 4877 struct bpf_insn_access_aux info = {};
46f8bc92 4878 bool valid;
c64b7983
JS
4879
4880 if (reg->smin_value < 0) {
 4881		verbose(env, "R%d min value is negative, either use unsigned index or do an if (index >= 0) check.\n",
4882 regno);
4883 return -EACCES;
4884 }
4885
46f8bc92
MKL
4886 switch (reg->type) {
4887 case PTR_TO_SOCK_COMMON:
4888 valid = bpf_sock_common_is_valid_access(off, size, t, &info);
4889 break;
4890 case PTR_TO_SOCKET:
4891 valid = bpf_sock_is_valid_access(off, size, t, &info);
4892 break;
655a51e5
MKL
4893 case PTR_TO_TCP_SOCK:
4894 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
4895 break;
fada7fdc
JL
4896 case PTR_TO_XDP_SOCK:
4897 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
4898 break;
46f8bc92
MKL
4899 default:
4900 valid = false;
c64b7983
JS
4901 }
4902
5f456649 4903
46f8bc92
MKL
4904 if (valid) {
4905 env->insn_aux_data[insn_idx].ctx_field_size =
4906 info.ctx_field_size;
4907 return 0;
4908 }
4909
4910 verbose(env, "R%d invalid %s access off=%d size=%d\n",
c25b2ae1 4911 regno, reg_type_str(env, reg->type), off, size);
46f8bc92
MKL
4912
4913 return -EACCES;
c64b7983
JS
4914}
4915
4cabc5b1
DB
4916static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
4917{
2a159c6f 4918 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4cabc5b1
DB
4919}
4920
f37a8cb8
DB
4921static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
4922{
2a159c6f 4923 const struct bpf_reg_state *reg = reg_state(env, regno);
f37a8cb8 4924
46f8bc92
MKL
4925 return reg->type == PTR_TO_CTX;
4926}
4927
4928static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
4929{
4930 const struct bpf_reg_state *reg = reg_state(env, regno);
4931
4932 return type_is_sk_pointer(reg->type);
f37a8cb8
DB
4933}
4934
ca369602
DB
4935static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
4936{
2a159c6f 4937 const struct bpf_reg_state *reg = reg_state(env, regno);
ca369602
DB
4938
4939 return type_is_pkt_pointer(reg->type);
4940}
4941
4b5defde
DB
4942static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
4943{
4944 const struct bpf_reg_state *reg = reg_state(env, regno);
4945
4946 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
4947 return reg->type == PTR_TO_FLOW_KEYS;
4948}
4949
9bb00b28
YS
4950static bool is_trusted_reg(const struct bpf_reg_state *reg)
4951{
4952 /* A referenced register is always trusted. */
4953 if (reg->ref_obj_id)
4954 return true;
4955
4956 /* If a register is not referenced, it is trusted if it has the
fca1aa75 4957 * MEM_ALLOC or PTR_TRUSTED type modifiers, and no others. Some of the
9bb00b28
YS
4958 * other type modifiers may be safe, but we elect to take an opt-in
4959 * approach here as some (e.g. PTR_UNTRUSTED and PTR_MAYBE_NULL) are
4960 * not.
4961 *
4962 * Eventually, we should make PTR_TRUSTED the single source of truth
4963 * for whether a register is trusted.
4964 */
4965 return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS &&
4966 !bpf_type_has_unsafe_modifiers(reg->type);
4967}
4968
fca1aa75
YS
4969static bool is_rcu_reg(const struct bpf_reg_state *reg)
4970{
4971 return reg->type & MEM_RCU;
4972}
4973
61bd5218
JK
4974static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
4975 const struct bpf_reg_state *reg,
d1174416 4976 int off, int size, bool strict)
969bf05e 4977{
f1174f77 4978 struct tnum reg_off;
e07b98d9 4979 int ip_align;
d1174416
DM
4980
4981 /* Byte size accesses are always allowed. */
4982 if (!strict || size == 1)
4983 return 0;
4984
e4eda884
DM
4985 /* For platforms that do not have a Kconfig enabling
4986 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
4987 * NET_IP_ALIGN is universally set to '2'. And on platforms
4988 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
4989 * to this code only in strict mode where we want to emulate
4990 * the NET_IP_ALIGN==2 checking. Therefore use an
4991 * unconditional IP align value of '2'.
e07b98d9 4992 */
e4eda884 4993 ip_align = 2;
f1174f77
EC
4994
4995 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
4996 if (!tnum_is_aligned(reg_off, size)) {
4997 char tn_buf[48];
4998
4999 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218
JK
5000 verbose(env,
5001 "misaligned packet access off %d+%s+%d+%d size %d\n",
f1174f77 5002 ip_align, tn_buf, reg->off, off, size);
969bf05e
AS
5003 return -EACCES;
5004 }
79adffcd 5005
969bf05e
AS
5006 return 0;
5007}
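
/* Worked example of the strict-mode check above: with ip_align = 2, a
 * 4-byte load at packet offset 0 (constant var_off, reg->off = 0) gives
 * reg_off = 2, which is not 4-byte aligned and is rejected; the same
 * load at offset 2 gives reg_off = 4 and passes. This mirrors how
 * drivers keep the IP header 4-byte aligned by starting 2 bytes into
 * the buffer.
 */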
5008
61bd5218
JK
5009static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
5010 const struct bpf_reg_state *reg,
f1174f77
EC
5011 const char *pointer_desc,
5012 int off, int size, bool strict)
79adffcd 5013{
f1174f77
EC
5014 struct tnum reg_off;
5015
5016 /* Byte size accesses are always allowed. */
5017 if (!strict || size == 1)
5018 return 0;
5019
5020 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
5021 if (!tnum_is_aligned(reg_off, size)) {
5022 char tn_buf[48];
5023
5024 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 5025 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
f1174f77 5026 pointer_desc, tn_buf, reg->off, off, size);
79adffcd
DB
5027 return -EACCES;
5028 }
5029
969bf05e
AS
5030 return 0;
5031}
5032
e07b98d9 5033static int check_ptr_alignment(struct bpf_verifier_env *env,
ca369602
DB
5034 const struct bpf_reg_state *reg, int off,
5035 int size, bool strict_alignment_once)
79adffcd 5036{
ca369602 5037 bool strict = env->strict_alignment || strict_alignment_once;
f1174f77 5038 const char *pointer_desc = "";
d1174416 5039
79adffcd
DB
5040 switch (reg->type) {
5041 case PTR_TO_PACKET:
de8f3a83
DB
5042 case PTR_TO_PACKET_META:
5043 /* Special case, because of NET_IP_ALIGN. Given metadata sits
5044 * right in front, treat it the very same way.
5045 */
61bd5218 5046 return check_pkt_ptr_alignment(env, reg, off, size, strict);
d58e468b
PP
5047 case PTR_TO_FLOW_KEYS:
5048 pointer_desc = "flow keys ";
5049 break;
69c087ba
YS
5050 case PTR_TO_MAP_KEY:
5051 pointer_desc = "key ";
5052 break;
f1174f77
EC
5053 case PTR_TO_MAP_VALUE:
5054 pointer_desc = "value ";
5055 break;
5056 case PTR_TO_CTX:
5057 pointer_desc = "context ";
5058 break;
5059 case PTR_TO_STACK:
5060 pointer_desc = "stack ";
01f810ac
AM
5061 /* The stack spill tracking logic in check_stack_write_fixed_off()
5062 * and check_stack_read_fixed_off() relies on stack accesses being
a5ec6ae1
JH
5063 * aligned.
5064 */
5065 strict = true;
f1174f77 5066 break;
c64b7983
JS
5067 case PTR_TO_SOCKET:
5068 pointer_desc = "sock ";
5069 break;
46f8bc92
MKL
5070 case PTR_TO_SOCK_COMMON:
5071 pointer_desc = "sock_common ";
5072 break;
655a51e5
MKL
5073 case PTR_TO_TCP_SOCK:
5074 pointer_desc = "tcp_sock ";
5075 break;
fada7fdc
JL
5076 case PTR_TO_XDP_SOCK:
5077 pointer_desc = "xdp_sock ";
5078 break;
79adffcd 5079 default:
f1174f77 5080 break;
79adffcd 5081 }
61bd5218
JK
5082 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
5083 strict);
79adffcd
DB
5084}
5085
f4d7e40a
AS
5086static int update_stack_depth(struct bpf_verifier_env *env,
5087 const struct bpf_func_state *func,
5088 int off)
5089{
9c8105bd 5090 u16 stack = env->subprog_info[func->subprogno].stack_depth;
f4d7e40a
AS
5091
5092 if (stack >= -off)
5093 return 0;
5094
5095 /* update known max for given subprogram */
9c8105bd 5096 env->subprog_info[func->subprogno].stack_depth = -off;
70a87ffe
AS
5097 return 0;
5098}
f4d7e40a 5099
70a87ffe
AS
 5100/* starting from the main bpf function walk all instructions of the function
 5101 * and recursively walk all callees that the given function can call.
5102 * Ignore jump and exit insns.
5103 * Since recursion is prevented by check_cfg() this algorithm
5104 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
5105 */
5106static int check_max_stack_depth(struct bpf_verifier_env *env)
5107{
9c8105bd
JW
5108 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
5109 struct bpf_subprog_info *subprog = env->subprog_info;
70a87ffe 5110 struct bpf_insn *insn = env->prog->insnsi;
ebf7d1f5 5111 bool tail_call_reachable = false;
70a87ffe
AS
5112 int ret_insn[MAX_CALL_FRAMES];
5113 int ret_prog[MAX_CALL_FRAMES];
ebf7d1f5 5114 int j;
f4d7e40a 5115
70a87ffe 5116process_func:
7f6e4312
MF
5117 /* protect against potential stack overflow that might happen when
5118 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
 5119	 * depth for such a case down to 256 so that the worst case scenario
5120 * would result in 8k stack size (32 which is tailcall limit * 256 =
5121 * 8k).
5122 *
5123 * To get the idea what might happen, see an example:
5124 * func1 -> sub rsp, 128
5125 * subfunc1 -> sub rsp, 256
5126 * tailcall1 -> add rsp, 256
5127 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
5128 * subfunc2 -> sub rsp, 64
5129 * subfunc22 -> sub rsp, 128
5130 * tailcall2 -> add rsp, 128
5131 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
5132 *
5133 * tailcall will unwind the current stack frame but it will not get rid
5134 * of caller's stack as shown on the example above.
5135 */
5136 if (idx && subprog[idx].has_tail_call && depth >= 256) {
5137 verbose(env,
5138 "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
5139 depth);
5140 return -EACCES;
5141 }
70a87ffe
AS
 5142	/* round up to 32 bytes, since this is the granularity
 5143	 * of the interpreter stack size
5144 */
9c8105bd 5145 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe 5146 if (depth > MAX_BPF_STACK) {
f4d7e40a 5147 verbose(env, "combined stack size of %d calls is %d. Too large\n",
70a87ffe 5148 frame + 1, depth);
f4d7e40a
AS
5149 return -EACCES;
5150 }
70a87ffe 5151continue_func:
4cb3d99c 5152 subprog_end = subprog[idx + 1].start;
70a87ffe 5153 for (; i < subprog_end; i++) {
7ddc80a4
AS
5154 int next_insn;
5155
69c087ba 5156 if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
70a87ffe
AS
5157 continue;
5158 /* remember insn and function to return to */
5159 ret_insn[frame] = i + 1;
9c8105bd 5160 ret_prog[frame] = idx;
70a87ffe
AS
5161
5162 /* find the callee */
7ddc80a4
AS
5163 next_insn = i + insn[i].imm + 1;
5164 idx = find_subprog(env, next_insn);
9c8105bd 5165 if (idx < 0) {
70a87ffe 5166 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
7ddc80a4 5167 next_insn);
70a87ffe
AS
5168 return -EFAULT;
5169 }
7ddc80a4
AS
5170 if (subprog[idx].is_async_cb) {
5171 if (subprog[idx].has_tail_call) {
5172 verbose(env, "verifier bug. subprog has tail_call and async cb\n");
5173 return -EFAULT;
5174 }
5175 /* async callbacks don't increase bpf prog stack size */
5176 continue;
5177 }
5178 i = next_insn;
ebf7d1f5
MF
5179
5180 if (subprog[idx].has_tail_call)
5181 tail_call_reachable = true;
5182
70a87ffe
AS
5183 frame++;
5184 if (frame >= MAX_CALL_FRAMES) {
927cb781
PC
 5185			verbose(env, "the call stack of %d frames is too deep!\n",
5186 frame);
5187 return -E2BIG;
70a87ffe
AS
5188 }
5189 goto process_func;
5190 }
ebf7d1f5
MF
5191 /* if tail call got detected across bpf2bpf calls then mark each of the
5192 * currently present subprog frames as tail call reachable subprogs;
5193 * this info will be utilized by JIT so that we will be preserving the
5194 * tail call counter throughout bpf2bpf calls combined with tailcalls
5195 */
5196 if (tail_call_reachable)
5197 for (j = 0; j < frame; j++)
5198 subprog[ret_prog[j]].tail_call_reachable = true;
5dd0a6b8
DB
5199 if (subprog[0].tail_call_reachable)
5200 env->prog->aux->tail_call_reachable = true;
ebf7d1f5 5201
70a87ffe
AS
5202 /* end of for() loop means the last insn of the 'subprog'
5203 * was reached. Doesn't matter whether it was JA or EXIT
5204 */
5205 if (frame == 0)
5206 return 0;
9c8105bd 5207 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe
AS
5208 frame--;
5209 i = ret_insn[frame];
9c8105bd 5210 idx = ret_prog[frame];
70a87ffe 5211 goto continue_func;
f4d7e40a
AS
5212}
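
/* A worked example of the walk above, with MAX_BPF_STACK = 512: a main
 * function using 100 bytes of stack (rounded up to 128) calling a
 * subprog using 40 bytes (rounded up to 64) accumulates a depth of
 * 128 + 64 = 192 and is accepted, while three nested frames of
 * 200 bytes each round up to 3 * 224 = 672 > 512 and are rejected.
 */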
5213
19d28fbd 5214#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
5215static int get_callee_stack_depth(struct bpf_verifier_env *env,
5216 const struct bpf_insn *insn, int idx)
5217{
5218 int start = idx + insn->imm + 1, subprog;
5219
5220 subprog = find_subprog(env, start);
5221 if (subprog < 0) {
5222 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
5223 start);
5224 return -EFAULT;
5225 }
9c8105bd 5226 return env->subprog_info[subprog].stack_depth;
1ea47e01 5227}
19d28fbd 5228#endif
1ea47e01 5229
afbf21dc
YS
5230static int __check_buffer_access(struct bpf_verifier_env *env,
5231 const char *buf_info,
5232 const struct bpf_reg_state *reg,
5233 int regno, int off, int size)
9df1c28b
MM
5234{
5235 if (off < 0) {
5236 verbose(env,
4fc00b79 5237 "R%d invalid %s buffer access: off=%d, size=%d\n",
afbf21dc 5238 regno, buf_info, off, size);
9df1c28b
MM
5239 return -EACCES;
5240 }
5241 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
5242 char tn_buf[48];
5243
5244 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5245 verbose(env,
4fc00b79 5246 "R%d invalid variable buffer offset: off=%d, var_off=%s\n",
9df1c28b
MM
5247 regno, off, tn_buf);
5248 return -EACCES;
5249 }
afbf21dc
YS
5250
5251 return 0;
5252}
5253
5254static int check_tp_buffer_access(struct bpf_verifier_env *env,
5255 const struct bpf_reg_state *reg,
5256 int regno, int off, int size)
5257{
5258 int err;
5259
5260 err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
5261 if (err)
5262 return err;
5263
9df1c28b
MM
5264 if (off + size > env->prog->aux->max_tp_access)
5265 env->prog->aux->max_tp_access = off + size;
5266
5267 return 0;
5268}
5269
afbf21dc
YS
5270static int check_buffer_access(struct bpf_verifier_env *env,
5271 const struct bpf_reg_state *reg,
5272 int regno, int off, int size,
5273 bool zero_size_allowed,
afbf21dc
YS
5274 u32 *max_access)
5275{
44e9a741 5276 const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
afbf21dc
YS
5277 int err;
5278
5279 err = __check_buffer_access(env, buf_info, reg, regno, off, size);
5280 if (err)
5281 return err;
5282
5283 if (off + size > *max_access)
5284 *max_access = off + size;
5285
5286 return 0;
5287}
5288
3f50f132
JF
 5289/* BPF architecture zero-extends alu32 ops into 64-bit registers */
5290static void zext_32_to_64(struct bpf_reg_state *reg)
5291{
5292 reg->var_off = tnum_subreg(reg->var_off);
5293 __reg_assign_32_into_64(reg);
5294}
9df1c28b 5295
0c17d1d2
JH
5296/* truncate register to smaller size (in bytes)
5297 * must be called with size < BPF_REG_SIZE
5298 */
5299static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
5300{
5301 u64 mask;
5302
5303 /* clear high bits in bit representation */
5304 reg->var_off = tnum_cast(reg->var_off, size);
5305
5306 /* fix arithmetic bounds */
5307 mask = ((u64)1 << (size * 8)) - 1;
5308 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
5309 reg->umin_value &= mask;
5310 reg->umax_value &= mask;
5311 } else {
5312 reg->umin_value = 0;
5313 reg->umax_value = mask;
5314 }
5315 reg->smin_value = reg->umin_value;
5316 reg->smax_value = reg->umax_value;
3f50f132
JF
5317
5318 /* If size is smaller than 32bit register the 32bit register
5319 * values are also truncated so we push 64-bit bounds into
5320 * 32-bit bounds. Above were truncated < 32-bits already.
5321 */
5322 if (size >= 4)
5323 return;
5324 __reg_combine_64_into_32(reg);
0c17d1d2
JH
5325}
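
/* For example, coercing a register with bounds [0x100, 0x1ff] to
 * size = 1 gives mask = 0xff; both bounds share the same upper bits
 * (0x100), so the bounds are simply masked down to [0x00, 0xff]. With
 * bounds [0xff, 0x100] the upper bits differ, and the range collapses
 * to the conservative [0, 0xff].
 */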
5326
a23740ec
AN
5327static bool bpf_map_is_rdonly(const struct bpf_map *map)
5328{
353050be
DB
 5329	/* A map is considered read-only if the following conditions are true:
5330 *
5331 * 1) BPF program side cannot change any of the map content. The
 5332	 *    BPF_F_RDONLY_PROG flag is set at map creation time and holds
 5333	 *    throughout the lifetime of the map.
5334 * 2) The map value(s) have been initialized from user space by a
5335 * loader and then "frozen", such that no new map update/delete
5336 * operations from syscall side are possible for the rest of
5337 * the map's lifetime from that point onwards.
5338 * 3) Any parallel/pending map update/delete operations from syscall
5339 * side have been completed. Only after that point, it's safe to
5340 * assume that map value(s) are immutable.
5341 */
5342 return (map->map_flags & BPF_F_RDONLY_PROG) &&
5343 READ_ONCE(map->frozen) &&
5344 !bpf_map_write_active(map);
a23740ec
AN
5345}
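
/* A minimal user-space sketch of how a map becomes read-only for the
 * check above (assumes libbpf >= 0.7; error handling omitted):
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .map_flags = BPF_F_RDONLY_PROG);
 *	char value[32] = {};
 *	__u32 key = 0;
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "rodata",
 *				sizeof(key), sizeof(value), 1, &opts);
 *
 *	bpf_map_update_elem(fd, &key, value, BPF_ANY);
 *	bpf_map_freeze(fd);
 *
 * After the freeze, once any in-flight syscall writers drain, direct
 * value loads from the map can be tracked as known scalars (see
 * bpf_map_direct_read() below).
 */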
5346
5347static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
5348{
5349 void *ptr;
5350 u64 addr;
5351 int err;
5352
5353 err = map->ops->map_direct_value_addr(map, &addr, off);
5354 if (err)
5355 return err;
2dedd7d2 5356 ptr = (void *)(long)addr + off;
a23740ec
AN
5357
5358 switch (size) {
5359 case sizeof(u8):
5360 *val = (u64)*(u8 *)ptr;
5361 break;
5362 case sizeof(u16):
5363 *val = (u64)*(u16 *)ptr;
5364 break;
5365 case sizeof(u32):
5366 *val = (u64)*(u32 *)ptr;
5367 break;
5368 case sizeof(u64):
5369 *val = *(u64 *)ptr;
5370 break;
5371 default:
5372 return -EINVAL;
5373 }
5374 return 0;
5375}
5376
6fcd486b
AS
5377#define BTF_TYPE_SAFE_RCU(__type) __PASTE(__type, __safe_rcu)
5378#define BTF_TYPE_SAFE_TRUSTED(__type) __PASTE(__type, __safe_trusted)
57539b1c 5379
6fcd486b
AS
5380/*
 5381 * Allow-list a few fields as RCU trusted or fully trusted.
 5382 * This logic doesn't allow mixed tagging and will be removed once GCC supports
5383 * btf_type_tag.
5384 */
5385
5386/* RCU trusted: these fields are trusted in RCU CS and never NULL */
5387BTF_TYPE_SAFE_RCU(struct task_struct) {
57539b1c 5388 const cpumask_t *cpus_ptr;
8d093b4e 5389 struct css_set __rcu *cgroups;
6fcd486b
AS
5390 struct task_struct __rcu *real_parent;
5391 struct task_struct *group_leader;
8d093b4e
AS
5392};
5393
6fcd486b 5394BTF_TYPE_SAFE_RCU(struct css_set) {
8d093b4e 5395 struct cgroup *dfl_cgrp;
57539b1c
DV
5396};
5397
6fcd486b
AS
5398/* full trusted: these fields are trusted even outside of RCU CS and never NULL */
5399BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) {
5400 __bpf_md_ptr(struct seq_file *, seq);
5401};
5402
5403BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) {
5404 __bpf_md_ptr(struct bpf_iter_meta *, meta);
5405 __bpf_md_ptr(struct task_struct *, task);
5406};
5407
5408BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) {
5409 struct file *file;
5410};
5411
5412BTF_TYPE_SAFE_TRUSTED(struct file) {
5413 struct inode *f_inode;
5414};
5415
5416BTF_TYPE_SAFE_TRUSTED(struct dentry) {
5417 /* no negative dentry-s in places where bpf can see it */
5418 struct inode *d_inode;
5419};
5420
5421BTF_TYPE_SAFE_TRUSTED(struct socket) {
5422 struct sock *sk;
5423};
5424
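/* Illustrative BPF-side use of the allow list above (a sketch, not
 * taken from the tree): under the RCU read lock, walking a tagged
 * field keeps the result usable as an RCU-trusted pointer:
 *
 *	struct task_struct *task = bpf_get_current_task_btf();
 *	const struct cpumask *mask;
 *
 *	bpf_rcu_read_lock();
 *	mask = task->cpus_ptr;		// tagged __safe_rcu above
 *	...use mask while still inside the critical section...
 *	bpf_rcu_read_unlock();
 *
 * Walking a field on neither list instead yields PTR_UNTRUSTED (see
 * check_ptr_to_btf_access() below).
 */
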
5425static bool type_is_rcu(struct bpf_verifier_env *env,
5426 struct bpf_reg_state *reg,
5427 int off)
57539b1c 5428{
6fcd486b
AS
5429 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct task_struct));
5430 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct css_set));
57539b1c 5431
6fcd486b
AS
5432 return btf_nested_type_is_trusted(&env->log, reg, off, "__safe_rcu");
5433}
57539b1c 5434
6fcd486b
AS
5435static bool type_is_trusted(struct bpf_verifier_env *env,
5436 struct bpf_reg_state *reg,
5437 int off)
5438{
5439 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta));
5440 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task));
5441 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm));
5442 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file));
5443 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry));
5444 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct socket));
5445
5446 return btf_nested_type_is_trusted(&env->log, reg, off, "__safe_trusted");
57539b1c
DV
5447}
5448
9e15db66
AS
5449static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
5450 struct bpf_reg_state *regs,
5451 int regno, int off, int size,
5452 enum bpf_access_type atype,
5453 int value_regno)
5454{
5455 struct bpf_reg_state *reg = regs + regno;
22dc4a0f
AN
5456 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
5457 const char *tname = btf_name_by_offset(reg->btf, t->name_off);
c6f1bfe8 5458 enum bpf_type_flag flag = 0;
9e15db66
AS
5459 u32 btf_id;
5460 int ret;
5461
c67cae55
AS
5462 if (!env->allow_ptr_leaks) {
5463 verbose(env,
5464 "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
5465 tname);
5466 return -EPERM;
5467 }
5468 if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) {
5469 verbose(env,
5470 "Cannot access kernel 'struct %s' from non-GPL compatible program\n",
5471 tname);
5472 return -EINVAL;
5473 }
9e15db66
AS
5474 if (off < 0) {
5475 verbose(env,
5476 "R%d is ptr_%s invalid negative access: off=%d\n",
5477 regno, tname, off);
5478 return -EACCES;
5479 }
5480 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
5481 char tn_buf[48];
5482
5483 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5484 verbose(env,
5485 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
5486 regno, tname, off, tn_buf);
5487 return -EACCES;
5488 }
5489
c6f1bfe8
YS
5490 if (reg->type & MEM_USER) {
5491 verbose(env,
5492 "R%d is ptr_%s access user memory: off=%d\n",
5493 regno, tname, off);
5494 return -EACCES;
5495 }
5496
5844101a
HL
5497 if (reg->type & MEM_PERCPU) {
5498 verbose(env,
5499 "R%d is ptr_%s access percpu memory: off=%d\n",
5500 regno, tname, off);
5501 return -EACCES;
5502 }
5503
282de143
KKD
5504 if (env->ops->btf_struct_access && !type_is_alloc(reg->type)) {
5505 if (!btf_is_kernel(reg->btf)) {
5506 verbose(env, "verifier internal error: reg->btf must be kernel btf\n");
5507 return -EFAULT;
5508 }
6728aea7 5509 ret = env->ops->btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag);
27ae7997 5510 } else {
282de143
KKD
5511 /* Writes are permitted with default btf_struct_access for
5512 * program allocated objects (which always have ref_obj_id > 0),
5513 * but not for untrusted PTR_TO_BTF_ID | MEM_ALLOC.
5514 */
5515 if (atype != BPF_READ && reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
27ae7997
MKL
5516 verbose(env, "only read is supported\n");
5517 return -EACCES;
5518 }
5519
6a3cd331
DM
5520 if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) &&
5521 !reg->ref_obj_id) {
282de143
KKD
5522 verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
5523 return -EFAULT;
5524 }
5525
6728aea7 5526 ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag);
27ae7997
MKL
5527 }
5528
9e15db66
AS
5529 if (ret < 0)
5530 return ret;
5531
6fcd486b
AS
5532 if (ret != PTR_TO_BTF_ID) {
5533 /* just mark; */
6efe152d 5534
6fcd486b
AS
5535 } else if (type_flag(reg->type) & PTR_UNTRUSTED) {
5536 /* If this is an untrusted pointer, all pointers formed by walking it
5537 * also inherit the untrusted flag.
5538 */
5539 flag = PTR_UNTRUSTED;
5540
5541 } else if (is_trusted_reg(reg) || is_rcu_reg(reg)) {
5542 /* By default any pointer obtained from walking a trusted pointer is no
5543 * longer trusted, unless the field being accessed has explicitly been
5544 * marked as inheriting its parent's state of trust (either full or RCU).
5545 * For example:
5546 * 'cgroups' pointer is untrusted if task->cgroups dereference
5547 * happened in a sleepable program outside of bpf_rcu_read_lock()
5548 * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU).
5549 * Note bpf_rcu_read_unlock() converts MEM_RCU pointers to PTR_UNTRUSTED.
5550 *
5551 * A regular RCU-protected pointer with __rcu tag can also be deemed
5552 * trusted if we are in an RCU CS. Such pointer can be NULL.
20c09d92 5553 */
6fcd486b
AS
5554 if (type_is_trusted(env, reg, off)) {
5555 flag |= PTR_TRUSTED;
5556 } else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) {
5557 if (type_is_rcu(env, reg, off)) {
5558 /* ignore __rcu tag and mark it MEM_RCU */
5559 flag |= MEM_RCU;
5560 } else if (flag & MEM_RCU) {
5561 /* __rcu tagged pointers can be NULL */
5562 flag |= PTR_MAYBE_NULL;
5563 } else if (flag & (MEM_PERCPU | MEM_USER)) {
5564 /* keep as-is */
5565 } else {
5566 /* walking unknown pointers yields untrusted pointer */
5567 flag = PTR_UNTRUSTED;
5568 }
5569 } else {
5570 /*
5571 * If not in RCU CS or MEM_RCU pointer can be NULL then
5572 * aggressively mark as untrusted otherwise such
5573 * pointers will be plain PTR_TO_BTF_ID without flags
5574 * and will be allowed to be passed into helpers for
5575 * compat reasons.
5576 */
5577 flag = PTR_UNTRUSTED;
5578 }
20c09d92 5579 } else {
6fcd486b 5580 /* Old compat. Deprecated */
57539b1c 5581 flag &= ~PTR_TRUSTED;
20c09d92 5582 }
3f00c523 5583
41c48f3a 5584 if (atype == BPF_READ && value_regno >= 0)
c6f1bfe8 5585 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
41c48f3a
AI
5586
5587 return 0;
5588}
5589
5590static int check_ptr_to_map_access(struct bpf_verifier_env *env,
5591 struct bpf_reg_state *regs,
5592 int regno, int off, int size,
5593 enum bpf_access_type atype,
5594 int value_regno)
5595{
5596 struct bpf_reg_state *reg = regs + regno;
5597 struct bpf_map *map = reg->map_ptr;
6728aea7 5598 struct bpf_reg_state map_reg;
c6f1bfe8 5599 enum bpf_type_flag flag = 0;
41c48f3a
AI
5600 const struct btf_type *t;
5601 const char *tname;
5602 u32 btf_id;
5603 int ret;
5604
5605 if (!btf_vmlinux) {
5606 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
5607 return -ENOTSUPP;
5608 }
5609
5610 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
5611 verbose(env, "map_ptr access not supported for map type %d\n",
5612 map->map_type);
5613 return -ENOTSUPP;
5614 }
5615
5616 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
5617 tname = btf_name_by_offset(btf_vmlinux, t->name_off);
5618
c67cae55 5619 if (!env->allow_ptr_leaks) {
41c48f3a 5620 verbose(env,
c67cae55 5621 "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
41c48f3a
AI
5622 tname);
5623 return -EPERM;
9e15db66 5624 }
27ae7997 5625
41c48f3a
AI
5626 if (off < 0) {
5627 verbose(env, "R%d is %s invalid negative access: off=%d\n",
5628 regno, tname, off);
5629 return -EACCES;
5630 }
5631
5632 if (atype != BPF_READ) {
5633 verbose(env, "only read from %s is supported\n", tname);
5634 return -EACCES;
5635 }
5636
6728aea7
KKD
5637 /* Simulate access to a PTR_TO_BTF_ID */
5638 memset(&map_reg, 0, sizeof(map_reg));
5639 mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0);
5640 ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag);
41c48f3a
AI
5641 if (ret < 0)
5642 return ret;
5643
5644 if (value_regno >= 0)
c6f1bfe8 5645 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
41c48f3a 5646
9e15db66
AS
5647 return 0;
5648}
5649
01f810ac
AM
5650/* Check that the stack access at the given offset is within bounds. The
5651 * maximum valid offset is -1.
5652 *
5653 * The minimum valid offset is -MAX_BPF_STACK for writes, and
5654 * -state->allocated_stack for reads.
5655 */
5656static int check_stack_slot_within_bounds(int off,
5657 struct bpf_func_state *state,
5658 enum bpf_access_type t)
5659{
5660 int min_valid_off;
5661
5662 if (t == BPF_WRITE)
5663 min_valid_off = -MAX_BPF_STACK;
5664 else
5665 min_valid_off = -state->allocated_stack;
5666
5667 if (off < min_valid_off || off > -1)
5668 return -EACCES;
5669 return 0;
5670}
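
/* For example, with state->allocated_stack == 16, a read at off == -24
 * is rejected (it is below -allocated_stack), while a write at the same
 * offset is accepted and will grow the tracked stack, as long as it
 * stays within -MAX_BPF_STACK; off == 0 is always out of bounds since
 * the maximum valid offset is -1.
 */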
5671
5672/* Check that the stack access at 'regno + off' falls within the maximum stack
5673 * bounds.
5674 *
5675 * 'off' includes `regno->offset`, but not its dynamic part (if any).
5676 */
5677static int check_stack_access_within_bounds(
5678 struct bpf_verifier_env *env,
5679 int regno, int off, int access_size,
61df10c7 5680 enum bpf_access_src src, enum bpf_access_type type)
01f810ac
AM
5681{
5682 struct bpf_reg_state *regs = cur_regs(env);
5683 struct bpf_reg_state *reg = regs + regno;
5684 struct bpf_func_state *state = func(env, reg);
5685 int min_off, max_off;
5686 int err;
5687 char *err_extra;
5688
5689 if (src == ACCESS_HELPER)
5690 /* We don't know if helpers are reading or writing (or both). */
5691 err_extra = " indirect access to";
5692 else if (type == BPF_READ)
5693 err_extra = " read from";
5694 else
5695 err_extra = " write to";
5696
5697 if (tnum_is_const(reg->var_off)) {
5698 min_off = reg->var_off.value + off;
5699 if (access_size > 0)
5700 max_off = min_off + access_size - 1;
5701 else
5702 max_off = min_off;
5703 } else {
5704 if (reg->smax_value >= BPF_MAX_VAR_OFF ||
5705 reg->smin_value <= -BPF_MAX_VAR_OFF) {
5706 verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
5707 err_extra, regno);
5708 return -EACCES;
5709 }
5710 min_off = reg->smin_value + off;
5711 if (access_size > 0)
5712 max_off = reg->smax_value + off + access_size - 1;
5713 else
5714 max_off = min_off;
5715 }
5716
5717 err = check_stack_slot_within_bounds(min_off, state, type);
5718 if (!err)
5719 err = check_stack_slot_within_bounds(max_off, state, type);
5720
5721 if (err) {
5722 if (tnum_is_const(reg->var_off)) {
5723 verbose(env, "invalid%s stack R%d off=%d size=%d\n",
5724 err_extra, regno, off, access_size);
5725 } else {
5726 char tn_buf[48];
5727
5728 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5729 verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
5730 err_extra, regno, tn_buf, access_size);
5731 }
5732 }
5733 return err;
5734}
41c48f3a 5735
17a52670
AS
5736/* check whether memory at (regno + off) is accessible for t = (read | write)
5737 * if t==write, value_regno is a register which value is stored into memory
5738 * if t==read, value_regno is a register which will receive the value from memory
5739 * if t==write && value_regno==-1, some unknown value is stored into memory
5740 * if t==read && value_regno==-1, don't care what we read from memory
5741 */
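/* For example (illustrative): BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 8)
 * is checked with t == BPF_READ and value_regno == 0, while
 * BPF_ST_MEM(BPF_W, BPF_REG_1, 8, 42) stores an immediate and is
 * checked with t == BPF_WRITE and value_regno == -1.
 */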
ca369602
DB
5742static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
5743 int off, int bpf_size, enum bpf_access_type t,
5744 int value_regno, bool strict_alignment_once)
17a52670 5745{
638f5b90
AS
5746 struct bpf_reg_state *regs = cur_regs(env);
5747 struct bpf_reg_state *reg = regs + regno;
f4d7e40a 5748 struct bpf_func_state *state;
17a52670
AS
5749 int size, err = 0;
5750
5751 size = bpf_size_to_bytes(bpf_size);
5752 if (size < 0)
5753 return size;
5754
f1174f77 5755 /* alignment checks will add in reg->off themselves */
ca369602 5756 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
969bf05e
AS
5757 if (err)
5758 return err;
17a52670 5759
f1174f77
EC
5760 /* for access checks, reg->off is just part of off */
5761 off += reg->off;
5762
69c087ba
YS
5763 if (reg->type == PTR_TO_MAP_KEY) {
5764 if (t == BPF_WRITE) {
5765 verbose(env, "write to change key R%d not allowed\n", regno);
5766 return -EACCES;
5767 }
5768
5769 err = check_mem_region_access(env, regno, off, size,
5770 reg->map_ptr->key_size, false);
5771 if (err)
5772 return err;
5773 if (value_regno >= 0)
5774 mark_reg_unknown(env, regs, value_regno);
5775 } else if (reg->type == PTR_TO_MAP_VALUE) {
aa3496ac 5776 struct btf_field *kptr_field = NULL;
61df10c7 5777
1be7f75d
AS
5778 if (t == BPF_WRITE && value_regno >= 0 &&
5779 is_pointer_value(env, value_regno)) {
61bd5218 5780 verbose(env, "R%d leaks addr into map\n", value_regno);
1be7f75d
AS
5781 return -EACCES;
5782 }
591fe988
DB
5783 err = check_map_access_type(env, regno, off, size, t);
5784 if (err)
5785 return err;
61df10c7
KKD
5786 err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
5787 if (err)
5788 return err;
5789 if (tnum_is_const(reg->var_off))
aa3496ac
KKD
5790 kptr_field = btf_record_find(reg->map_ptr->record,
5791 off + reg->var_off.value, BPF_KPTR);
5792 if (kptr_field) {
5793 err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field);
61df10c7 5794 } else if (t == BPF_READ && value_regno >= 0) {
a23740ec
AN
5795 struct bpf_map *map = reg->map_ptr;
5796
5797 /* if map is read-only, track its contents as scalars */
5798 if (tnum_is_const(reg->var_off) &&
5799 bpf_map_is_rdonly(map) &&
5800 map->ops->map_direct_value_addr) {
5801 int map_off = off + reg->var_off.value;
5802 u64 val = 0;
5803
5804 err = bpf_map_direct_read(map, map_off, size,
5805 &val);
5806 if (err)
5807 return err;
5808
5809 regs[value_regno].type = SCALAR_VALUE;
5810 __mark_reg_known(&regs[value_regno], val);
5811 } else {
5812 mark_reg_unknown(env, regs, value_regno);
5813 }
5814 }
34d3a78c
HL
5815 } else if (base_type(reg->type) == PTR_TO_MEM) {
5816 bool rdonly_mem = type_is_rdonly_mem(reg->type);
5817
5818 if (type_may_be_null(reg->type)) {
5819 verbose(env, "R%d invalid mem access '%s'\n", regno,
5820 reg_type_str(env, reg->type));
5821 return -EACCES;
5822 }
5823
5824 if (t == BPF_WRITE && rdonly_mem) {
5825 verbose(env, "R%d cannot write into %s\n",
5826 regno, reg_type_str(env, reg->type));
5827 return -EACCES;
5828 }
5829
457f4436
AN
5830 if (t == BPF_WRITE && value_regno >= 0 &&
5831 is_pointer_value(env, value_regno)) {
5832 verbose(env, "R%d leaks addr into mem\n", value_regno);
5833 return -EACCES;
5834 }
34d3a78c 5835
457f4436
AN
5836 err = check_mem_region_access(env, regno, off, size,
5837 reg->mem_size, false);
34d3a78c 5838 if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
457f4436 5839 mark_reg_unknown(env, regs, value_regno);
1a0dc1ac 5840 } else if (reg->type == PTR_TO_CTX) {
f1174f77 5841 enum bpf_reg_type reg_type = SCALAR_VALUE;
22dc4a0f 5842 struct btf *btf = NULL;
9e15db66 5843 u32 btf_id = 0;
19de99f7 5844
1be7f75d
AS
5845 if (t == BPF_WRITE && value_regno >= 0 &&
5846 is_pointer_value(env, value_regno)) {
61bd5218 5847 verbose(env, "R%d leaks addr into ctx\n", value_regno);
1be7f75d
AS
5848 return -EACCES;
5849 }
f1174f77 5850
be80a1d3 5851 err = check_ptr_off_reg(env, reg, regno);
58990d1f
DB
5852 if (err < 0)
5853 return err;
5854
c6f1bfe8
YS
5855 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
5856 &btf_id);
9e15db66
AS
5857 if (err)
5858 verbose_linfo(env, insn_idx, "; ");
969bf05e 5859 if (!err && t == BPF_READ && value_regno >= 0) {
f1174f77 5860 /* ctx access returns either a scalar, or a
de8f3a83
DB
5861 * PTR_TO_PACKET[_META,_END]. In the latter
5862 * case, we know the offset is zero.
f1174f77 5863 */
46f8bc92 5864 if (reg_type == SCALAR_VALUE) {
638f5b90 5865 mark_reg_unknown(env, regs, value_regno);
46f8bc92 5866 } else {
638f5b90 5867 mark_reg_known_zero(env, regs,
61bd5218 5868 value_regno);
c25b2ae1 5869 if (type_may_be_null(reg_type))
46f8bc92 5870 regs[value_regno].id = ++env->id_gen;
5327ed3d
JW
5871 /* A load of ctx field could have different
5872 * actual load size with the one encoded in the
5873 * insn. When the dst is PTR, it is for sure not
5874 * a sub-register.
5875 */
5876 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
c25b2ae1 5877 if (base_type(reg_type) == PTR_TO_BTF_ID) {
22dc4a0f 5878 regs[value_regno].btf = btf;
9e15db66 5879 regs[value_regno].btf_id = btf_id;
22dc4a0f 5880 }
46f8bc92 5881 }
638f5b90 5882 regs[value_regno].type = reg_type;
969bf05e 5883 }
17a52670 5884
f1174f77 5885 } else if (reg->type == PTR_TO_STACK) {
01f810ac
AM
5886 /* Basic bounds checks. */
5887 err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
e4298d25
DB
5888 if (err)
5889 return err;
8726679a 5890
f4d7e40a
AS
5891 state = func(env, reg);
5892 err = update_stack_depth(env, state, off);
5893 if (err)
5894 return err;
8726679a 5895
01f810ac
AM
5896 if (t == BPF_READ)
5897 err = check_stack_read(env, regno, off, size,
61bd5218 5898 value_regno);
01f810ac
AM
5899 else
5900 err = check_stack_write(env, regno, off, size,
5901 value_regno, insn_idx);
de8f3a83 5902 } else if (reg_is_pkt_pointer(reg)) {
3a0af8fd 5903 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
61bd5218 5904 verbose(env, "cannot write into packet\n");
969bf05e
AS
5905 return -EACCES;
5906 }
4acf6c0b
BB
5907 if (t == BPF_WRITE && value_regno >= 0 &&
5908 is_pointer_value(env, value_regno)) {
61bd5218
JK
5909 verbose(env, "R%d leaks addr into packet\n",
5910 value_regno);
4acf6c0b
BB
5911 return -EACCES;
5912 }
9fd29c08 5913 err = check_packet_access(env, regno, off, size, false);
969bf05e 5914 if (!err && t == BPF_READ && value_regno >= 0)
638f5b90 5915 mark_reg_unknown(env, regs, value_regno);
d58e468b
PP
5916 } else if (reg->type == PTR_TO_FLOW_KEYS) {
5917 if (t == BPF_WRITE && value_regno >= 0 &&
5918 is_pointer_value(env, value_regno)) {
5919 verbose(env, "R%d leaks addr into flow keys\n",
5920 value_regno);
5921 return -EACCES;
5922 }
5923
5924 err = check_flow_keys_access(env, off, size);
5925 if (!err && t == BPF_READ && value_regno >= 0)
5926 mark_reg_unknown(env, regs, value_regno);
46f8bc92 5927 } else if (type_is_sk_pointer(reg->type)) {
c64b7983 5928 if (t == BPF_WRITE) {
46f8bc92 5929 verbose(env, "R%d cannot write into %s\n",
c25b2ae1 5930 regno, reg_type_str(env, reg->type));
c64b7983
JS
5931 return -EACCES;
5932 }
5f456649 5933 err = check_sock_access(env, insn_idx, regno, off, size, t);
c64b7983
JS
5934 if (!err && value_regno >= 0)
5935 mark_reg_unknown(env, regs, value_regno);
9df1c28b
MM
5936 } else if (reg->type == PTR_TO_TP_BUFFER) {
5937 err = check_tp_buffer_access(env, reg, regno, off, size);
5938 if (!err && t == BPF_READ && value_regno >= 0)
5939 mark_reg_unknown(env, regs, value_regno);
bff61f6f
HL
5940 } else if (base_type(reg->type) == PTR_TO_BTF_ID &&
5941 !type_may_be_null(reg->type)) {
9e15db66
AS
5942 err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
5943 value_regno);
41c48f3a
AI
5944 } else if (reg->type == CONST_PTR_TO_MAP) {
5945 err = check_ptr_to_map_access(env, regs, regno, off, size, t,
5946 value_regno);
20b2aff4
HL
5947 } else if (base_type(reg->type) == PTR_TO_BUF) {
5948 bool rdonly_mem = type_is_rdonly_mem(reg->type);
20b2aff4
HL
5949 u32 *max_access;
5950
5951 if (rdonly_mem) {
5952 if (t == BPF_WRITE) {
5953 verbose(env, "R%d cannot write into %s\n",
5954 regno, reg_type_str(env, reg->type));
5955 return -EACCES;
5956 }
20b2aff4
HL
5957 max_access = &env->prog->aux->max_rdonly_access;
5958 } else {
20b2aff4 5959 max_access = &env->prog->aux->max_rdwr_access;
afbf21dc 5960 }
20b2aff4 5961
f6dfbe31 5962 err = check_buffer_access(env, reg, regno, off, size, false,
44e9a741 5963 max_access);
20b2aff4
HL
5964
5965 if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
afbf21dc 5966 mark_reg_unknown(env, regs, value_regno);
17a52670 5967 } else {
61bd5218 5968 verbose(env, "R%d invalid mem access '%s'\n", regno,
c25b2ae1 5969 reg_type_str(env, reg->type));
17a52670
AS
5970 return -EACCES;
5971 }
969bf05e 5972
f1174f77 5973 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
638f5b90 5974 regs[value_regno].type == SCALAR_VALUE) {
f1174f77 5975 /* b/h/w load zero-extends, mark upper bits as known 0 */
0c17d1d2 5976 coerce_reg_to_size(&regs[value_regno], size);
969bf05e 5977 }
17a52670
AS
5978 return err;
5979}
5980
91c960b0 5981static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
17a52670 5982{
5ffa2550 5983 int load_reg;
17a52670
AS
5984 int err;
5985
5ca419f2
BJ
5986 switch (insn->imm) {
5987 case BPF_ADD:
5988 case BPF_ADD | BPF_FETCH:
981f94c3
BJ
5989 case BPF_AND:
5990 case BPF_AND | BPF_FETCH:
5991 case BPF_OR:
5992 case BPF_OR | BPF_FETCH:
5993 case BPF_XOR:
5994 case BPF_XOR | BPF_FETCH:
5ffa2550
BJ
5995 case BPF_XCHG:
5996 case BPF_CMPXCHG:
5ca419f2
BJ
5997 break;
5998 default:
91c960b0
BJ
5999 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
6000 return -EINVAL;
6001 }
6002
6003 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
6004 verbose(env, "invalid atomic operand size\n");
17a52670
AS
6005 return -EINVAL;
6006 }
6007
6008 /* check src1 operand */
dc503a8a 6009 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
6010 if (err)
6011 return err;
6012
6013 /* check src2 operand */
dc503a8a 6014 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
6015 if (err)
6016 return err;
6017
5ffa2550
BJ
6018 if (insn->imm == BPF_CMPXCHG) {
6019 /* Check comparison of R0 with memory location */
a82fe085
DB
6020 const u32 aux_reg = BPF_REG_0;
6021
6022 err = check_reg_arg(env, aux_reg, SRC_OP);
5ffa2550
BJ
6023 if (err)
6024 return err;
a82fe085
DB
6025
6026 if (is_pointer_value(env, aux_reg)) {
6027 verbose(env, "R%d leaks addr into mem\n", aux_reg);
6028 return -EACCES;
6029 }
5ffa2550
BJ
6030 }
6031
6bdf6abc 6032 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 6033 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
6bdf6abc
DB
6034 return -EACCES;
6035 }
6036
ca369602 6037 if (is_ctx_reg(env, insn->dst_reg) ||
4b5defde 6038 is_pkt_reg(env, insn->dst_reg) ||
46f8bc92
MKL
6039 is_flow_key_reg(env, insn->dst_reg) ||
6040 is_sk_reg(env, insn->dst_reg)) {
91c960b0 6041 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
2a159c6f 6042 insn->dst_reg,
c25b2ae1 6043 reg_type_str(env, reg_state(env, insn->dst_reg)->type));
f37a8cb8
DB
6044 return -EACCES;
6045 }
6046
37086bfd
BJ
6047 if (insn->imm & BPF_FETCH) {
6048 if (insn->imm == BPF_CMPXCHG)
6049 load_reg = BPF_REG_0;
6050 else
6051 load_reg = insn->src_reg;
6052
6053 /* check and record load of old value */
6054 err = check_reg_arg(env, load_reg, DST_OP);
6055 if (err)
6056 return err;
6057 } else {
6058 /* This instruction accesses a memory location but doesn't
6059 * actually load it into a register.
6060 */
6061 load_reg = -1;
6062 }
6063
7d3baf0a
DB
6064 /* Check whether we can read the memory, with second call for fetch
6065 * case to simulate the register fill.
6066 */
31fd8581 6067 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
7d3baf0a
DB
6068 BPF_SIZE(insn->code), BPF_READ, -1, true);
6069 if (!err && load_reg >= 0)
6070 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
6071 BPF_SIZE(insn->code), BPF_READ, load_reg,
6072 true);
17a52670
AS
6073 if (err)
6074 return err;
6075
7d3baf0a 6076 /* Check whether we can write into the same memory. */
5ca419f2
BJ
6077 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
6078 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
6079 if (err)
6080 return err;
6081
5ca419f2 6082 return 0;
17a52670
AS
6083}
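
/* An illustrative instruction accepted by this path:
 *
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8)
 *
 * atomically adds R1 to the 8-byte stack slot at fp-8 and, due to
 * BPF_FETCH, loads the old value back into R1; the second
 * check_mem_access() call above simulates exactly that register fill.
 */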
6084
01f810ac
AM
6085/* When register 'regno' is used to read the stack (either directly or through
6086 * a helper function) make sure that it's within stack boundary and, depending
6087 * on the access type, that all elements of the stack are initialized.
6088 *
6089 * 'off' includes 'regno->off', but not its dynamic part (if any).
6090 *
6091 * All registers that have been spilled on the stack in the slots within the
6092 * read offsets are marked as read.
6093 */
6094static int check_stack_range_initialized(
6095 struct bpf_verifier_env *env, int regno, int off,
6096 int access_size, bool zero_size_allowed,
61df10c7 6097 enum bpf_access_src type, struct bpf_call_arg_meta *meta)
2011fccf
AI
6098{
6099 struct bpf_reg_state *reg = reg_state(env, regno);
01f810ac
AM
6100 struct bpf_func_state *state = func(env, reg);
6101 int err, min_off, max_off, i, j, slot, spi;
6102 char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
6103 enum bpf_access_type bounds_check_type;
6104 /* Some accesses can write anything into the stack, others are
6105 * read-only.
6106 */
6107 bool clobber = false;
2011fccf 6108
01f810ac
AM
6109 if (access_size == 0 && !zero_size_allowed) {
6110 verbose(env, "invalid zero-sized read\n");
2011fccf
AI
6111 return -EACCES;
6112 }
2011fccf 6113
01f810ac
AM
6114 if (type == ACCESS_HELPER) {
6115 /* The bounds checks for writes are more permissive than for
6116 * reads. However, if raw_mode is not set, we'll do extra
6117 * checks below.
6118 */
6119 bounds_check_type = BPF_WRITE;
6120 clobber = true;
6121 } else {
6122 bounds_check_type = BPF_READ;
6123 }
6124 err = check_stack_access_within_bounds(env, regno, off, access_size,
6125 type, bounds_check_type);
6126 if (err)
6127 return err;
6128
17a52670 6129
2011fccf 6130 if (tnum_is_const(reg->var_off)) {
01f810ac 6131 min_off = max_off = reg->var_off.value + off;
2011fccf 6132 } else {
088ec26d
AI
6133 /* Variable offset is prohibited for unprivileged mode for
6134 * simplicity since it requires corresponding support in
6135 * Spectre masking for stack ALU.
6136 * See also retrieve_ptr_limit().
6137 */
2c78ee89 6138 if (!env->bypass_spec_v1) {
088ec26d 6139 char tn_buf[48];
f1174f77 6140
088ec26d 6141 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
01f810ac
AM
6142 verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
6143 regno, err_extra, tn_buf);
088ec26d
AI
6144 return -EACCES;
6145 }
f2bcd05e
AI
6146 /* Only initialized buffer on stack is allowed to be accessed
 6147	 * with variable offset. With an uninitialized buffer it's hard to
 6148	 * guarantee that the whole memory is marked as initialized on
 6149	 * helper return, since the specific bounds are unknown, which may
 6150	 * cause uninitialized stack leaking.
6151 */
6152 if (meta && meta->raw_mode)
6153 meta = NULL;
6154
01f810ac
AM
6155 min_off = reg->smin_value + off;
6156 max_off = reg->smax_value + off;
17a52670
AS
6157 }
6158
435faee1 6159 if (meta && meta->raw_mode) {
ef8fc7a0
KKD
6160 /* Ensure we won't be overwriting dynptrs when simulating byte
6161 * by byte access in check_helper_call using meta.access_size.
6162 * This would be a problem if we have a helper in the future
6163 * which takes:
6164 *
6165 * helper(uninit_mem, len, dynptr)
6166 *
 6167		 * Now, uninit_mem may overlap with the dynptr pointer. Hence, it
6168 * may end up writing to dynptr itself when touching memory from
6169 * arg 1. This can be relaxed on a case by case basis for known
 6170		 * safe cases, but reject due to the possibility of aliasing by
6171 * default.
6172 */
6173 for (i = min_off; i < max_off + access_size; i++) {
6174 int stack_off = -i - 1;
6175
6176 spi = __get_spi(i);
6177 /* raw_mode may write past allocated_stack */
6178 if (state->allocated_stack <= stack_off)
6179 continue;
6180 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) {
6181 verbose(env, "potential write to dynptr at off=%d disallowed\n", i);
6182 return -EACCES;
6183 }
6184 }
435faee1
DB
6185 meta->access_size = access_size;
6186 meta->regno = regno;
6187 return 0;
6188 }
6189
2011fccf 6190 for (i = min_off; i < max_off + access_size; i++) {
cc2b14d5
AS
6191 u8 *stype;
6192
2011fccf 6193 slot = -i - 1;
638f5b90 6194 spi = slot / BPF_REG_SIZE;
cc2b14d5
AS
6195 if (state->allocated_stack <= slot)
6196 goto err;
6197 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
6198 if (*stype == STACK_MISC)
6199 goto mark;
6715df8d
EZ
6200 if ((*stype == STACK_ZERO) ||
6201 (*stype == STACK_INVALID && env->allow_uninit_stack)) {
01f810ac
AM
6202 if (clobber) {
6203 /* helper can write anything into the stack */
6204 *stype = STACK_MISC;
6205 }
cc2b14d5 6206 goto mark;
17a52670 6207 }
1d68f22b 6208
27113c59 6209 if (is_spilled_reg(&state->stack[spi]) &&
cd17d38f
YS
6210 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
6211 env->allow_ptr_leaks)) {
01f810ac
AM
6212 if (clobber) {
6213 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
6214 for (j = 0; j < BPF_REG_SIZE; j++)
354e8f19 6215 scrub_spilled_slot(&state->stack[spi].slot_type[j]);
01f810ac 6216 }
f7cf25b2
AS
6217 goto mark;
6218 }
6219
cc2b14d5 6220err:
2011fccf 6221 if (tnum_is_const(reg->var_off)) {
01f810ac
AM
6222 verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
6223 err_extra, regno, min_off, i - min_off, access_size);
2011fccf
AI
6224 } else {
6225 char tn_buf[48];
6226
6227 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
01f810ac
AM
6228 verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
6229 err_extra, regno, tn_buf, i - min_off, access_size);
2011fccf 6230 }
cc2b14d5
AS
6231 return -EACCES;
6232mark:
6233 /* reading any byte out of 8-byte 'spill_slot' will cause
6234 * the whole slot to be marked as 'read'
6235 */
679c782d 6236 mark_reg_read(env, &state->stack[spi].spilled_ptr,
5327ed3d
JW
6237 state->stack[spi].spilled_ptr.parent,
6238 REG_LIVE_READ64);
261f4664
KKD
 6239		/* We do not set REG_LIVE_WRITTEN for the stack slot, as we cannot
 6240		 * be sure whether the stack slot is written to or not. Hence,
6241 * we must still conservatively propagate reads upwards even if
6242 * helper may write to the entire memory range.
6243 */
17a52670 6244 }
2011fccf 6245 return update_stack_depth(env, state, min_off);
17a52670
AS
6246}
6247
06c1c049
GB
6248static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
6249 int access_size, bool zero_size_allowed,
6250 struct bpf_call_arg_meta *meta)
6251{
638f5b90 6252 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
20b2aff4 6253 u32 *max_access;
06c1c049 6254
20b2aff4 6255 switch (base_type(reg->type)) {
06c1c049 6256 case PTR_TO_PACKET:
de8f3a83 6257 case PTR_TO_PACKET_META:
9fd29c08
YS
6258 return check_packet_access(env, regno, reg->off, access_size,
6259 zero_size_allowed);
69c087ba 6260 case PTR_TO_MAP_KEY:
7b3552d3
KKD
6261 if (meta && meta->raw_mode) {
6262 verbose(env, "R%d cannot write into %s\n", regno,
6263 reg_type_str(env, reg->type));
6264 return -EACCES;
6265 }
69c087ba
YS
6266 return check_mem_region_access(env, regno, reg->off, access_size,
6267 reg->map_ptr->key_size, false);
06c1c049 6268 case PTR_TO_MAP_VALUE:
591fe988
DB
6269 if (check_map_access_type(env, regno, reg->off, access_size,
6270 meta && meta->raw_mode ? BPF_WRITE :
6271 BPF_READ))
6272 return -EACCES;
9fd29c08 6273 return check_map_access(env, regno, reg->off, access_size,
61df10c7 6274 zero_size_allowed, ACCESS_HELPER);
457f4436 6275 case PTR_TO_MEM:
97e6d7da
KKD
6276 if (type_is_rdonly_mem(reg->type)) {
6277 if (meta && meta->raw_mode) {
6278 verbose(env, "R%d cannot write into %s\n", regno,
6279 reg_type_str(env, reg->type));
6280 return -EACCES;
6281 }
6282 }
457f4436
AN
6283 return check_mem_region_access(env, regno, reg->off,
6284 access_size, reg->mem_size,
6285 zero_size_allowed);
20b2aff4
HL
6286 case PTR_TO_BUF:
6287 if (type_is_rdonly_mem(reg->type)) {
97e6d7da
KKD
6288 if (meta && meta->raw_mode) {
6289 verbose(env, "R%d cannot write into %s\n", regno,
6290 reg_type_str(env, reg->type));
20b2aff4 6291 return -EACCES;
97e6d7da 6292 }
20b2aff4 6293
20b2aff4
HL
6294 max_access = &env->prog->aux->max_rdonly_access;
6295 } else {
20b2aff4
HL
6296 max_access = &env->prog->aux->max_rdwr_access;
6297 }
afbf21dc
YS
6298 return check_buffer_access(env, reg, regno, reg->off,
6299 access_size, zero_size_allowed,
44e9a741 6300 max_access);
0d004c02 6301 case PTR_TO_STACK:
01f810ac
AM
6302 return check_stack_range_initialized(
6303 env,
6304 regno, reg->off, access_size,
6305 zero_size_allowed, ACCESS_HELPER, meta);
15baa55f
BT
6306 case PTR_TO_CTX:
6307 /* in case the function doesn't know how to access the context,
6308 * (because we are in a program of type SYSCALL for example), we
6309 * can not statically check its size.
6310 * Dynamically check it now.
6311 */
6312 if (!env->ops->convert_ctx_access) {
6313 enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
6314 int offset = access_size - 1;
6315
6316 /* Allow zero-byte read from PTR_TO_CTX */
6317 if (access_size == 0)
6318 return zero_size_allowed ? 0 : -EACCES;
6319
6320 return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
6321 atype, -1, false);
6322 }
6323
6324 fallthrough;
0d004c02
LB
6325 default: /* scalar_value or invalid ptr */
6326 /* Allow zero-byte read from NULL, regardless of pointer type */
6327 if (zero_size_allowed && access_size == 0 &&
6328 register_is_null(reg))
6329 return 0;
6330
c25b2ae1
HL
6331 verbose(env, "R%d type=%s ", regno,
6332 reg_type_str(env, reg->type));
6333 verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
0d004c02 6334 return -EACCES;
06c1c049
GB
6335 }
6336}
6337
d583691c
KKD
6338static int check_mem_size_reg(struct bpf_verifier_env *env,
6339 struct bpf_reg_state *reg, u32 regno,
6340 bool zero_size_allowed,
6341 struct bpf_call_arg_meta *meta)
6342{
6343 int err;
6344
6345 /* This is used to refine r0 return value bounds for helpers
6346 * that enforce this value as an upper bound on return values.
6347 * See do_refine_retval_range() for helpers that can refine
 6348	 * the return value. The C type of the helper is u32 so we pull the
 6349	 * register bound from umax_value; however, if it is negative the
 6350	 * verifier errors out. Only upper bounds can be learned because retval is an
6351 * int type and negative retvals are allowed.
6352 */
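	/* For example (illustrative): bpf_get_stack(ctx, buf, sz, flags)
	 * returns at most sz bytes, so the umax_value recorded below lets
	 * do_refine_retval_range() clamp the upper bound of r0, allowing a
	 * later bounds check against buf to succeed.
	 */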
be77354a 6353 meta->msize_max_value = reg->umax_value;
d583691c
KKD
6354
6355 /* The register is SCALAR_VALUE; the access check
6356 * happens using its boundaries.
6357 */
6358 if (!tnum_is_const(reg->var_off))
6359 /* For unprivileged variable accesses, disable raw
6360 * mode so that the program is required to
6361 * initialize all the memory that the helper could
6362 * just partially fill up.
6363 */
6364 meta = NULL;
6365
6366 if (reg->smin_value < 0) {
6367 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
6368 regno);
6369 return -EACCES;
6370 }
6371
6372 if (reg->umin_value == 0) {
6373 err = check_helper_mem_access(env, regno - 1, 0,
6374 zero_size_allowed,
6375 meta);
6376 if (err)
6377 return err;
6378 }
6379
6380 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
6381 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
6382 regno);
6383 return -EACCES;
6384 }
6385 err = check_helper_mem_access(env, regno - 1,
6386 reg->umax_value,
6387 zero_size_allowed, meta);
6388 if (!err)
6389 err = mark_chain_precision(env, regno);
6390 return err;
6391}
6392
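/* Sketch for illustration, not part of this file: the BPF-program-side
 * pattern that check_mem_size_reg() above validates. The size register
 * needs a non-negative smin_value and a bounded umax_value, which programs
 * typically establish with an explicit bounds check (get_len() and src are
 * illustrative names):
 *
 *	char buf[64];
 *	int len = get_len();			// some scalar value
 *
 *	if (len < 0 || len > sizeof(buf))	// bounds smin/umax
 *		return 0;
 *	bpf_probe_read_kernel(buf, len, src);	// umax_value <= 64: accepted
 */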
e5069b9c
DB
6393int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
6394 u32 regno, u32 mem_size)
6395{
be77354a
KKD
6396 bool may_be_null = type_may_be_null(reg->type);
6397 struct bpf_reg_state saved_reg;
6398 struct bpf_call_arg_meta meta;
6399 int err;
6400
e5069b9c
DB
6401 if (register_is_null(reg))
6402 return 0;
6403
be77354a
KKD
6404 memset(&meta, 0, sizeof(meta));
6405	/* Assuming that the register contains a value, check if the memory
6406 * access is safe. Temporarily save and restore the register's state as
6407 * the conversion shouldn't be visible to a caller.
6408 */
6409 if (may_be_null) {
6410 saved_reg = *reg;
e5069b9c 6411 mark_ptr_not_null_reg(reg);
e5069b9c
DB
6412 }
6413
be77354a
KKD
6414 err = check_helper_mem_access(env, regno, mem_size, true, &meta);
6415 /* Check access for BPF_WRITE */
6416 meta.raw_mode = true;
6417 err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
6418
6419 if (may_be_null)
6420 *reg = saved_reg;
6421
6422 return err;
e5069b9c
DB
6423}
6424
00b85860
KKD
6425static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
6426 u32 regno)
d583691c
KKD
6427{
6428 struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
6429 bool may_be_null = type_may_be_null(mem_reg->type);
6430 struct bpf_reg_state saved_reg;
be77354a 6431 struct bpf_call_arg_meta meta;
d583691c
KKD
6432 int err;
6433
6434 WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
6435
be77354a
KKD
6436 memset(&meta, 0, sizeof(meta));
6437
d583691c
KKD
6438 if (may_be_null) {
6439 saved_reg = *mem_reg;
6440 mark_ptr_not_null_reg(mem_reg);
6441 }
6442
be77354a
KKD
6443 err = check_mem_size_reg(env, reg, regno, true, &meta);
6444 /* Check access for BPF_WRITE */
6445 meta.raw_mode = true;
6446 err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
d583691c
KKD
6447
6448 if (may_be_null)
6449 *mem_reg = saved_reg;
6450 return err;
6451}
6452
d83525ca 6453/* Implementation details:
4e814da0
KKD
6454 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL.
6455 * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL.
d83525ca 6456 * Two bpf_map_lookups (even with the same key) will have different reg->id.
4e814da0
KKD
6457 * Two separate bpf_obj_new will also have different reg->id.
6458 * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier
6459 * clears reg->id after the value_or_null->value transition, since the verifier
6460 * only cares about the range of access to a valid map value pointer and doesn't
6461 * care about the actual address of the map element.
d83525ca
AS
6462 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
6463 * reg->id > 0 after value_or_null->value transition. By doing so
6464 * two bpf_map_lookups will be considered two different pointers that
4e814da0
KKD
6465 * point to different bpf_spin_locks. Likewise for pointers to allocated objects
6466 * returned from bpf_obj_new.
d83525ca
AS
6467 * The verifier allows taking only one bpf_spin_lock at a time to avoid
6468 * deadlocks.
6469 * Since only one bpf_spin_lock is allowed, the checks are simpler than
6470 * the reg_is_refcounted() logic. The verifier needs to remember only
6471 * one spin_lock instead of an array of acquired_refs.
d0d78c1d 6472 * cur_state->active_lock remembers which map value element or allocated
4e814da0 6473 * object got locked and clears it after bpf_spin_unlock.
d83525ca
AS
6474 */
6475static int process_spin_lock(struct bpf_verifier_env *env, int regno,
6476 bool is_lock)
6477{
6478 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6479 struct bpf_verifier_state *cur = env->cur_state;
6480 bool is_const = tnum_is_const(reg->var_off);
d83525ca 6481 u64 val = reg->var_off.value;
4e814da0
KKD
6482 struct bpf_map *map = NULL;
6483 struct btf *btf = NULL;
6484 struct btf_record *rec;
d83525ca 6485
d83525ca
AS
6486 if (!is_const) {
6487 verbose(env,
6488 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
6489 regno);
6490 return -EINVAL;
6491 }
4e814da0
KKD
6492 if (reg->type == PTR_TO_MAP_VALUE) {
6493 map = reg->map_ptr;
6494 if (!map->btf) {
6495 verbose(env,
6496 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
6497 map->name);
6498 return -EINVAL;
6499 }
6500 } else {
6501 btf = reg->btf;
d83525ca 6502 }
4e814da0
KKD
6503
6504 rec = reg_btf_record(reg);
6505 if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) {
6506 verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local",
6507 map ? map->name : "kptr");
d83525ca
AS
6508 return -EINVAL;
6509 }
4e814da0 6510 if (rec->spin_lock_off != val + reg->off) {
db559117 6511 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n",
4e814da0 6512 val + reg->off, rec->spin_lock_off);
d83525ca
AS
6513 return -EINVAL;
6514 }
6515 if (is_lock) {
d0d78c1d 6516 if (cur->active_lock.ptr) {
d83525ca
AS
6517 verbose(env,
6518 "Locking two bpf_spin_locks are not allowed\n");
6519 return -EINVAL;
6520 }
d0d78c1d
KKD
6521 if (map)
6522 cur->active_lock.ptr = map;
6523 else
6524 cur->active_lock.ptr = btf;
6525 cur->active_lock.id = reg->id;
d83525ca 6526 } else {
d0d78c1d
KKD
6527 void *ptr;
6528
6529 if (map)
6530 ptr = map;
6531 else
6532 ptr = btf;
6533
6534 if (!cur->active_lock.ptr) {
d83525ca
AS
6535 verbose(env, "bpf_spin_unlock without taking a lock\n");
6536 return -EINVAL;
6537 }
d0d78c1d
KKD
6538 if (cur->active_lock.ptr != ptr ||
6539 cur->active_lock.id != reg->id) {
d83525ca
AS
6540 verbose(env, "bpf_spin_unlock of different lock\n");
6541 return -EINVAL;
6542 }
534e86bc 6543
6a3cd331 6544 invalidate_non_owning_refs(env);
534e86bc 6545
6a3cd331
DM
6546 cur->active_lock.ptr = NULL;
6547 cur->active_lock.id = 0;
d83525ca
AS
6548 }
6549 return 0;
6550}
6551
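/* Sketch for illustration, not part of this file: the map-value layout and
 * locking pattern that process_spin_lock() above validates ("map", "key"
 * and the struct name are illustrative):
 *
 *	struct val {
 *		int cnt;
 *		struct bpf_spin_lock lock;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);	// offset checked against rec->spin_lock_off
 *		v->cnt++;
 *		bpf_spin_unlock(&v->lock);	// must be the same lock (same ptr/id)
 *	}
 */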
b00628b1
AS
6552static int process_timer_func(struct bpf_verifier_env *env, int regno,
6553 struct bpf_call_arg_meta *meta)
6554{
6555 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6556 bool is_const = tnum_is_const(reg->var_off);
6557 struct bpf_map *map = reg->map_ptr;
6558 u64 val = reg->var_off.value;
6559
6560 if (!is_const) {
6561 verbose(env,
6562 "R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
6563 regno);
6564 return -EINVAL;
6565 }
6566 if (!map->btf) {
6567 verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
6568 map->name);
6569 return -EINVAL;
6570 }
db559117
KKD
6571 if (!btf_record_has_field(map->record, BPF_TIMER)) {
6572 verbose(env, "map '%s' has no valid bpf_timer\n", map->name);
68134668
AS
6573 return -EINVAL;
6574 }
db559117 6575 if (map->record->timer_off != val + reg->off) {
68134668 6576 verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
db559117 6577 val + reg->off, map->record->timer_off);
b00628b1
AS
6578 return -EINVAL;
6579 }
6580 if (meta->map_ptr) {
6581 verbose(env, "verifier bug. Two map pointers in a timer helper\n");
6582 return -EFAULT;
6583 }
3e8ce298 6584 meta->map_uid = reg->map_uid;
b00628b1
AS
6585 meta->map_ptr = map;
6586 return 0;
6587}
6588
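/* Sketch for illustration, not part of this file: the usage that
 * process_timer_func() above validates; "timer_map" and "timer_cb" are
 * illustrative names. The struct bpf_timer must sit at the offset recorded
 * in the map's BTF:
 *
 *	struct elem {
 *		struct bpf_timer t;
 *	};
 *
 *	struct elem *e = bpf_map_lookup_elem(&timer_map, &key);
 *	if (e) {
 *		bpf_timer_init(&e->t, &timer_map, CLOCK_MONOTONIC);
 *		bpf_timer_set_callback(&e->t, timer_cb);
 *		bpf_timer_start(&e->t, 0, 0);	// nsecs, flags
 *	}
 */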
c0a5a21c
KKD
6589static int process_kptr_func(struct bpf_verifier_env *env, int regno,
6590 struct bpf_call_arg_meta *meta)
6591{
6592 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
c0a5a21c 6593 struct bpf_map *map_ptr = reg->map_ptr;
aa3496ac 6594 struct btf_field *kptr_field;
c0a5a21c 6595 u32 kptr_off;
c0a5a21c
KKD
6596
6597 if (!tnum_is_const(reg->var_off)) {
6598 verbose(env,
6599 "R%d doesn't have constant offset. kptr has to be at the constant offset\n",
6600 regno);
6601 return -EINVAL;
6602 }
6603 if (!map_ptr->btf) {
6604 verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
6605 map_ptr->name);
6606 return -EINVAL;
6607 }
aa3496ac
KKD
6608 if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) {
6609 verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
c0a5a21c
KKD
6610 return -EINVAL;
6611 }
6612
6613 meta->map_ptr = map_ptr;
6614 kptr_off = reg->off + reg->var_off.value;
aa3496ac
KKD
6615 kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR);
6616 if (!kptr_field) {
c0a5a21c
KKD
6617 verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
6618 return -EACCES;
6619 }
aa3496ac 6620 if (kptr_field->type != BPF_KPTR_REF) {
c0a5a21c
KKD
6621 verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
6622 return -EACCES;
6623 }
aa3496ac 6624 meta->kptr_field = kptr_field;
c0a5a21c
KKD
6625 return 0;
6626}
6627
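/* Sketch for illustration, not part of this file: the bpf_kptr_xchg()
 * pattern that process_kptr_func() above validates. The map value carries
 * a referenced kptr field (BPF_KPTR_REF in the map's BTF record, declared
 * with the __kptr type tag in current kernels); the field name and the
 * release-kfunc pairing are illustrative:
 *
 *	struct map_value {
 *		struct task_struct __kptr *task;
 *	};
 *
 *	// with v = non-NULL map value pointer, new = acquired task ref:
 *	old = bpf_kptr_xchg(&v->task, new);	// off must land on the kptr field
 *	if (old)
 *		bpf_task_release(old);		// drop the swapped-out reference
 */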
27060531
KKD
6628 /* There are two register types representing a bpf_dynptr: one is PTR_TO_STACK,
6629 * which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR.
6630 *
6631 * In both cases we deal with the first 8 bytes, but need to mark the next 8
6632 * bytes as STACK_DYNPTR in case of PTR_TO_STACK. In case of
6633 * CONST_PTR_TO_DYNPTR, we are guaranteed to get the beginning of the object.
6634 *
6635 * Mutability of bpf_dynptr is at two levels, one is at the level of struct
6636 * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct
6637 * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can
6638 * mutate the view of the dynptr and also possibly destroy it. In the latter
6639 * case, it cannot mutate the bpf_dynptr itself but it can still mutate the
6640 * memory that dynptr points to.
6641 *
6642 * The verifier will keep track of both levels of mutation (bpf_dynptr's in
6643 * reg->type and the memory's in reg->dynptr.type), but there is no support for
6644 * readonly dynptr view yet, hence only the first case is tracked and checked.
6645 *
6646 * This is consistent with how C applies the const modifier to a struct object,
6647 * where the pointer itself inside bpf_dynptr becomes const but not what it
6648 * points to.
6649 *
6650 * Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument
6651 * type, and declare it as 'const struct bpf_dynptr *' in their prototype.
6652 */
1d18feb2
JK
6653static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx,
6654 enum bpf_arg_type arg_type)
6b75bd3d
KKD
6655{
6656 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
1d18feb2 6657 int err;
6b75bd3d 6658
27060531
KKD
6659	/* MEM_UNINIT and MEM_RDONLY are exclusive when applied to an
6660 * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*):
6661 */
6662 if ((arg_type & (MEM_UNINIT | MEM_RDONLY)) == (MEM_UNINIT | MEM_RDONLY)) {
6663 verbose(env, "verifier internal error: misconfigured dynptr helper type flags\n");
6664 return -EFAULT;
6665 }
79168a66 6666
27060531
KKD
6667 /* MEM_UNINIT - Points to memory that is an appropriate candidate for
6668 * constructing a mutable bpf_dynptr object.
6669 *
6670 * Currently, this is only possible with PTR_TO_STACK
6671 * pointing to a region of at least 16 bytes which doesn't
6672 * contain an existing bpf_dynptr.
6673 *
6674	 *  MEM_RDONLY - Points to an initialized bpf_dynptr that will not be
6675 * mutated or destroyed. However, the memory it points to
6676 * may be mutated.
6677 *
6678	 *  None       - Points to an initialized dynptr that can be mutated and
6679 * destroyed, including mutation of the memory it points
6680 * to.
6b75bd3d 6681 */
6b75bd3d 6682 if (arg_type & MEM_UNINIT) {
1d18feb2
JK
6683 int i;
6684
7e0dac28 6685 if (!is_dynptr_reg_valid_uninit(env, reg)) {
6b75bd3d
KKD
6686 verbose(env, "Dynptr has to be an uninitialized dynptr\n");
6687 return -EINVAL;
6688 }
6689
1d18feb2
JK
6690 /* we write BPF_DW bits (8 bytes) at a time */
6691 for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
6692 err = check_mem_access(env, insn_idx, regno,
6693 i, BPF_DW, BPF_WRITE, -1, false);
6694 if (err)
6695 return err;
6b75bd3d
KKD
6696 }
6697
1d18feb2 6698 err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx);
27060531
KKD
6699 } else /* MEM_RDONLY and None case from above */ {
6700 /* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */
6701 if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) {
6702 verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n");
6703 return -EINVAL;
6704 }
6705
7e0dac28 6706 if (!is_dynptr_reg_valid_init(env, reg)) {
6b75bd3d
KKD
6707 verbose(env,
6708 "Expected an initialized dynptr as arg #%d\n",
6709 regno);
6710 return -EINVAL;
6711 }
6712
27060531
KKD
6713 /* Fold modifiers (in this case, MEM_RDONLY) when checking expected type */
6714 if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) {
6b75bd3d
KKD
6715 verbose(env,
6716 "Expected a dynptr of type %s as arg #%d\n",
d54e0f6c 6717 dynptr_type_str(arg_to_dynptr_type(arg_type)), regno);
6b75bd3d
KKD
6718 return -EINVAL;
6719 }
d6fefa11
KKD
6720
6721 err = mark_dynptr_read(env, reg);
6b75bd3d 6722 }
1d18feb2 6723 return err;
6b75bd3d
KKD
6724}
6725
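/* Sketch for illustration, not part of this file: the three dynptr
 * argument modes described above, seen from a program using the ringbuf
 * dynptr helpers ("rb" and "buf" are illustrative):
 *
 *	struct bpf_dynptr ptr;				// uninit stack slots
 *
 *	bpf_ringbuf_reserve_dynptr(&rb, 64, 0, &ptr);	// MEM_UNINIT: constructs it
 *	bpf_dynptr_write(&ptr, 0, buf, 64, 0);		// const dynptr (MEM_RDONLY),
 *							// but pointed-to memory mutates
 *	bpf_ringbuf_submit_dynptr(&ptr, 0);		// OBJ_RELEASE: destroys it
 */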
06accc87
AN
6726static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi)
6727{
6728 struct bpf_func_state *state = func(env, reg);
6729
6730 return state->stack[spi].spilled_ptr.ref_obj_id;
6731}
6732
6733static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta)
6734{
6735 return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
6736}
6737
6738static bool is_iter_new_kfunc(struct bpf_kfunc_call_arg_meta *meta)
6739{
6740 return meta->kfunc_flags & KF_ITER_NEW;
6741}
6742
6743static bool is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta)
6744{
6745 return meta->kfunc_flags & KF_ITER_NEXT;
6746}
6747
6748static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta)
6749{
6750 return meta->kfunc_flags & KF_ITER_DESTROY;
6751}
6752
6753static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg)
6754{
6755	/* btf_check_iter_kfuncs() guarantees that the first argument of any
6756	 * iter kfunc is the iter state pointer
6757 */
6758 return arg == 0 && is_iter_kfunc(meta);
6759}
6760
6761static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx,
6762 struct bpf_kfunc_call_arg_meta *meta)
6763{
6764 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6765 const struct btf_type *t;
6766 const struct btf_param *arg;
6767 int spi, err, i, nr_slots;
6768 u32 btf_id;
6769
6770 /* btf_check_iter_kfuncs() ensures we don't need to validate anything here */
6771 arg = &btf_params(meta->func_proto)[0];
6772 t = btf_type_skip_modifiers(meta->btf, arg->type, NULL); /* PTR */
6773 t = btf_type_skip_modifiers(meta->btf, t->type, &btf_id); /* STRUCT */
6774 nr_slots = t->size / BPF_REG_SIZE;
6775
6776 spi = iter_get_spi(env, reg, nr_slots);
6777 if (spi < 0 && spi != -ERANGE)
6778 return spi;
6779
6780 meta->iter.spi = spi;
6781 meta->iter.frameno = reg->frameno;
6782
6783 if (is_iter_new_kfunc(meta)) {
6784 /* bpf_iter_<type>_new() expects pointer to uninit iter state */
6785 if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) {
6786 verbose(env, "expected uninitialized iter_%s as arg #%d\n",
6787 iter_type_str(meta->btf, btf_id), regno);
6788 return -EINVAL;
6789 }
6790
6791 for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) {
6792 err = check_mem_access(env, insn_idx, regno,
6793 i, BPF_DW, BPF_WRITE, -1, false);
6794 if (err)
6795 return err;
6796 }
6797
6798 err = mark_stack_slots_iter(env, reg, insn_idx, meta->btf, btf_id, nr_slots);
6799 if (err)
6800 return err;
6801 } else {
6802		/* iter_next() or iter_destroy() expect an initialized iter state */
6803 if (!is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots)) {
6804 verbose(env, "expected an initialized iter_%s as arg #%d\n",
6805 iter_type_str(meta->btf, btf_id), regno);
6806 return -EINVAL;
6807 }
6808
6809 err = mark_iter_read(env, reg, spi, nr_slots);
6810 if (err)
6811 return err;
6812
6813 meta->ref_obj_id = iter_ref_obj_id(env, reg, spi);
6814
6815 if (is_iter_destroy_kfunc(meta)) {
6816 err = unmark_stack_slots_iter(env, reg, nr_slots);
6817 if (err)
6818 return err;
6819 }
6820 }
6821
6822 return 0;
6823}
6824
6825/* process_iter_next_call() is called when verifier gets to iterator's next
6826 * "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer
6827 * to it as just "iter_next()" in comments below.
6828 *
6829 * BPF verifier relies on a crucial contract for any iter_next()
6830 * implementation: it should *eventually* return NULL, and once that happens
6831 * it should keep returning NULL. That is, once iterator exhausts elements to
6832 * iterate, it should never reset or spuriously return new elements.
6833 *
6834 * With the assumption of such contract, process_iter_next_call() simulates
6835 * a fork in the verifier state to validate loop logic correctness and safety
6836 * without having to simulate infinite amount of iterations.
6837 *
6838 * In current state, we first assume that iter_next() returned NULL and
6839 * iterator state is set to DRAINED (BPF_ITER_STATE_DRAINED). In such
6840 * conditions we should not form an infinite loop and should eventually reach
6841 * exit.
6842 *
6843 * Besides that, we also fork current state and enqueue it for later
6844 * verification. In a forked state we keep iterator state as ACTIVE
6845 * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next(). We
6846 * also bump iteration depth to prevent erroneous infinite loop detection
6847 * later on (see iter_active_depths_differ() comment for details). In this
6848 * state we assume that we'll eventually loop back to another iter_next()
6849 * call (it could be at exactly the same location or at some other
6850 * instruction; it doesn't matter, we don't make any unnecessary assumptions
6851 * about this, everything revolves around iterator state in a stack slot, not
6852 * which instruction is calling iter_next()). When that happens, we will
6853 * either come to iter_next() with an equivalent state, in which case we can
6854 * conclude that the next iteration will proceed in exactly the same way as
6855 * we just verified, so it's safe to assume the loop converges; or we'll go
6856 * on with another iteration simulation using a different input state, until
6857 * all possible starting states are validated or we reach the instruction limit.
6858 *
6859 * This way, we will either exhaustively discover all possible input states
6860 * that iterator loop can start with and eventually will converge, or we'll
6861 * effectively regress into bounded loop simulation logic and either reach
6862 * maximum number of instructions if loop is not provably convergent, or there
6863 * is some statically known limit on number of iterations (e.g., if there is
6864 * an explicit `if n > 100 then break;` statement somewhere in the loop).
6865 *
6866 * One very subtle but very important aspect is that we *always* simulate NULL
6867 * condition first (as the current state) before we simulate non-NULL case.
6868 * This has to do with intricacies of scalar precision tracking. By simulating
6869 * "exit condition" of iter_next() returning NULL first, we make sure all the
6870 * relevant precision marks *that will be set **after** we exit iterator loop*
6871 * are propagated backwards to common parent state of NULL and non-NULL
6872 * branches. Thanks to that, state equivalence checks done later in forked
6873 * state, when reaching iter_next() for ACTIVE iterator, can assume that
6874 * precision marks are finalized and won't change. Because simulating another
6875 * ACTIVE iterator iteration won't change them (because given same input
6876 * states we'll end up with exactly same output states which we are currently
6877 * comparing; and verification after the loop already propagated back what
6878 * needs to be **additionally** tracked as precise). It's subtle; grok
6879 * precision tracking for a more intuitive understanding.
6880 */
6881static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx,
6882 struct bpf_kfunc_call_arg_meta *meta)
6883{
6884 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st;
6885 struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr;
6886 struct bpf_reg_state *cur_iter, *queued_iter;
6887 int iter_frameno = meta->iter.frameno;
6888 int iter_spi = meta->iter.spi;
6889
6890 BTF_TYPE_EMIT(struct bpf_iter);
6891
6892 cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
6893
6894 if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE &&
6895 cur_iter->iter.state != BPF_ITER_STATE_DRAINED) {
6896 verbose(env, "verifier internal error: unexpected iterator state %d (%s)\n",
6897 cur_iter->iter.state, iter_state_str(cur_iter->iter.state));
6898 return -EFAULT;
6899 }
6900
6901 if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) {
6902 /* branch out active iter state */
6903 queued_st = push_stack(env, insn_idx + 1, insn_idx, false);
6904 if (!queued_st)
6905 return -ENOMEM;
6906
6907 queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr;
6908 queued_iter->iter.state = BPF_ITER_STATE_ACTIVE;
6909 queued_iter->iter.depth++;
6910
6911 queued_fr = queued_st->frame[queued_st->curframe];
6912 mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]);
6913 }
6914
6915	/* switch to DRAINED state, but keep the depth unchanged: mark the
6916	 * current iter state as drained and assume iter_next() returned NULL */
6917 cur_iter->iter.state = BPF_ITER_STATE_DRAINED;
6918 __mark_reg_const_zero(&cur_fr->regs[BPF_REG_0]);
6919
6920 return 0;
6921}
6922
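/* Sketch for illustration, not part of this file: the open-coded iterator
 * loop whose next() calls process_iter_next_call() above simulates, using
 * the numbers iterator kfuncs:
 *
 *	struct bpf_iter_num it;
 *	int *v;
 *
 *	bpf_iter_num_new(&it, 0, 100);		// KF_ITER_NEW: uninit -> ACTIVE
 *	while ((v = bpf_iter_num_next(&it))) {	// NULL (DRAINED) branch is
 *		// ... use *v ...		// verified first, then ACTIVE
 *	}
 *	bpf_iter_num_destroy(&it);		// KF_ITER_DESTROY
 */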
90133415
DB
6923static bool arg_type_is_mem_size(enum bpf_arg_type type)
6924{
6925 return type == ARG_CONST_SIZE ||
6926 type == ARG_CONST_SIZE_OR_ZERO;
6927}
6928
8f14852e
KKD
6929static bool arg_type_is_release(enum bpf_arg_type type)
6930{
6931 return type & OBJ_RELEASE;
6932}
6933
97e03f52
JK
6934static bool arg_type_is_dynptr(enum bpf_arg_type type)
6935{
6936 return base_type(type) == ARG_PTR_TO_DYNPTR;
6937}
6938
57c3bb72
AI
6939static int int_ptr_type_to_size(enum bpf_arg_type type)
6940{
6941 if (type == ARG_PTR_TO_INT)
6942 return sizeof(u32);
6943 else if (type == ARG_PTR_TO_LONG)
6944 return sizeof(u64);
6945
6946 return -EINVAL;
6947}
6948
912f442c
LB
6949static int resolve_map_arg_type(struct bpf_verifier_env *env,
6950 const struct bpf_call_arg_meta *meta,
6951 enum bpf_arg_type *arg_type)
6952{
6953 if (!meta->map_ptr) {
6954 /* kernel subsystem misconfigured verifier */
6955 verbose(env, "invalid map_ptr to access map->type\n");
6956 return -EACCES;
6957 }
6958
6959 switch (meta->map_ptr->map_type) {
6960 case BPF_MAP_TYPE_SOCKMAP:
6961 case BPF_MAP_TYPE_SOCKHASH:
6962 if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
6550f2dd 6963 *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
912f442c
LB
6964 } else {
6965 verbose(env, "invalid arg_type for sockmap/sockhash\n");
6966 return -EINVAL;
6967 }
6968 break;
9330986c
JK
6969 case BPF_MAP_TYPE_BLOOM_FILTER:
6970 if (meta->func_id == BPF_FUNC_map_peek_elem)
6971 *arg_type = ARG_PTR_TO_MAP_VALUE;
6972 break;
912f442c
LB
6973 default:
6974 break;
6975 }
6976 return 0;
6977}
6978
f79e7ea5
LB
6979struct bpf_reg_types {
6980 const enum bpf_reg_type types[10];
1df8f55a 6981 u32 *btf_id;
f79e7ea5
LB
6982};
6983
f79e7ea5
LB
6984static const struct bpf_reg_types sock_types = {
6985 .types = {
6986 PTR_TO_SOCK_COMMON,
6987 PTR_TO_SOCKET,
6988 PTR_TO_TCP_SOCK,
6989 PTR_TO_XDP_SOCK,
6990 },
6991};
6992
49a2a4d4 6993#ifdef CONFIG_NET
1df8f55a
MKL
6994static const struct bpf_reg_types btf_id_sock_common_types = {
6995 .types = {
6996 PTR_TO_SOCK_COMMON,
6997 PTR_TO_SOCKET,
6998 PTR_TO_TCP_SOCK,
6999 PTR_TO_XDP_SOCK,
7000 PTR_TO_BTF_ID,
3f00c523 7001 PTR_TO_BTF_ID | PTR_TRUSTED,
1df8f55a
MKL
7002 },
7003 .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
7004};
49a2a4d4 7005#endif
1df8f55a 7006
f79e7ea5
LB
7007static const struct bpf_reg_types mem_types = {
7008 .types = {
7009 PTR_TO_STACK,
7010 PTR_TO_PACKET,
7011 PTR_TO_PACKET_META,
69c087ba 7012 PTR_TO_MAP_KEY,
f79e7ea5
LB
7013 PTR_TO_MAP_VALUE,
7014 PTR_TO_MEM,
894f2a8b 7015 PTR_TO_MEM | MEM_RINGBUF,
20b2aff4 7016 PTR_TO_BUF,
f79e7ea5
LB
7017 },
7018};
7019
7020static const struct bpf_reg_types int_ptr_types = {
7021 .types = {
7022 PTR_TO_STACK,
7023 PTR_TO_PACKET,
7024 PTR_TO_PACKET_META,
69c087ba 7025 PTR_TO_MAP_KEY,
f79e7ea5
LB
7026 PTR_TO_MAP_VALUE,
7027 },
7028};
7029
4e814da0
KKD
7030static const struct bpf_reg_types spin_lock_types = {
7031 .types = {
7032 PTR_TO_MAP_VALUE,
7033 PTR_TO_BTF_ID | MEM_ALLOC,
7034 }
7035};
7036
f79e7ea5
LB
7037static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
7038static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
7039static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
894f2a8b 7040static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } };
f79e7ea5 7041static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
3f00c523
DV
7042static const struct bpf_reg_types btf_ptr_types = {
7043 .types = {
7044 PTR_TO_BTF_ID,
7045 PTR_TO_BTF_ID | PTR_TRUSTED,
fca1aa75 7046 PTR_TO_BTF_ID | MEM_RCU,
3f00c523
DV
7047 },
7048};
7049static const struct bpf_reg_types percpu_btf_ptr_types = {
7050 .types = {
7051 PTR_TO_BTF_ID | MEM_PERCPU,
7052 PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED,
7053 }
7054};
69c087ba
YS
7055static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
7056static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
fff13c4b 7057static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
b00628b1 7058static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
c0a5a21c 7059static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
20571567
DV
7060static const struct bpf_reg_types dynptr_types = {
7061 .types = {
7062 PTR_TO_STACK,
27060531 7063 CONST_PTR_TO_DYNPTR,
20571567
DV
7064 }
7065};
f79e7ea5 7066
0789e13b 7067static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
d1673304
DM
7068 [ARG_PTR_TO_MAP_KEY] = &mem_types,
7069 [ARG_PTR_TO_MAP_VALUE] = &mem_types,
f79e7ea5
LB
7070 [ARG_CONST_SIZE] = &scalar_types,
7071 [ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
7072 [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
7073 [ARG_CONST_MAP_PTR] = &const_map_ptr_types,
7074 [ARG_PTR_TO_CTX] = &context_types,
f79e7ea5 7075 [ARG_PTR_TO_SOCK_COMMON] = &sock_types,
49a2a4d4 7076#ifdef CONFIG_NET
1df8f55a 7077 [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types,
49a2a4d4 7078#endif
f79e7ea5 7079 [ARG_PTR_TO_SOCKET] = &fullsock_types,
f79e7ea5
LB
7080 [ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
7081 [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
7082 [ARG_PTR_TO_MEM] = &mem_types,
894f2a8b 7083 [ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types,
f79e7ea5
LB
7084 [ARG_PTR_TO_INT] = &int_ptr_types,
7085 [ARG_PTR_TO_LONG] = &int_ptr_types,
eaa6bcb7 7086 [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types,
69c087ba 7087 [ARG_PTR_TO_FUNC] = &func_ptr_types,
48946bd6 7088 [ARG_PTR_TO_STACK] = &stack_ptr_types,
fff13c4b 7089 [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types,
b00628b1 7090 [ARG_PTR_TO_TIMER] = &timer_types,
c0a5a21c 7091 [ARG_PTR_TO_KPTR] = &kptr_types,
20571567 7092 [ARG_PTR_TO_DYNPTR] = &dynptr_types,
f79e7ea5
LB
7093};
7094
7095static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
a968d5e2 7096 enum bpf_arg_type arg_type,
c0a5a21c
KKD
7097 const u32 *arg_btf_id,
7098 struct bpf_call_arg_meta *meta)
f79e7ea5
LB
7099{
7100 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
7101 enum bpf_reg_type expected, type = reg->type;
a968d5e2 7102 const struct bpf_reg_types *compatible;
f79e7ea5
LB
7103 int i, j;
7104
48946bd6 7105 compatible = compatible_reg_types[base_type(arg_type)];
a968d5e2
MKL
7106 if (!compatible) {
7107 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
7108 return -EFAULT;
7109 }
7110
216e3cd2
HL
7111 /* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
7112 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
7113 *
7114 * Same for MAYBE_NULL:
7115 *
7116 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
7117 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
7118 *
7119 * Therefore we fold these flags depending on the arg_type before comparison.
7120 */
7121 if (arg_type & MEM_RDONLY)
7122 type &= ~MEM_RDONLY;
7123 if (arg_type & PTR_MAYBE_NULL)
7124 type &= ~PTR_MAYBE_NULL;
7125
738c96d5
DM
7126 if (meta->func_id == BPF_FUNC_kptr_xchg && type & MEM_ALLOC)
7127 type &= ~MEM_ALLOC;
7128
f79e7ea5
LB
7129 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
7130 expected = compatible->types[i];
7131 if (expected == NOT_INIT)
7132 break;
7133
7134 if (type == expected)
a968d5e2 7135 goto found;
f79e7ea5
LB
7136 }
7137
216e3cd2 7138 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
f79e7ea5 7139 for (j = 0; j + 1 < i; j++)
c25b2ae1
HL
7140 verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
7141 verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
f79e7ea5 7142 return -EACCES;
a968d5e2
MKL
7143
7144found:
da03e43a
KKD
7145 if (base_type(reg->type) != PTR_TO_BTF_ID)
7146 return 0;
7147
7148 switch ((int)reg->type) {
7149 case PTR_TO_BTF_ID:
7150 case PTR_TO_BTF_ID | PTR_TRUSTED:
7151 case PTR_TO_BTF_ID | MEM_RCU:
7152 {
2ab3b380
KKD
7153 /* For bpf_sk_release, it needs to match against first member
7154 * 'struct sock_common', hence make an exception for it. This
7155 * allows bpf_sk_release to work for multiple socket types.
7156 */
7157 bool strict_type_match = arg_type_is_release(arg_type) &&
7158 meta->func_id != BPF_FUNC_sk_release;
7159
1df8f55a
MKL
7160 if (!arg_btf_id) {
7161 if (!compatible->btf_id) {
7162 verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
7163 return -EFAULT;
7164 }
7165 arg_btf_id = compatible->btf_id;
7166 }
7167
c0a5a21c 7168 if (meta->func_id == BPF_FUNC_kptr_xchg) {
aa3496ac 7169 if (map_kptr_match_type(env, meta->kptr_field, reg, regno))
c0a5a21c 7170 return -EACCES;
47e34cb7
DM
7171 } else {
7172 if (arg_btf_id == BPF_PTR_POISON) {
7173 verbose(env, "verifier internal error:");
7174 verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n",
7175 regno);
7176 return -EACCES;
7177 }
7178
7179 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
7180 btf_vmlinux, *arg_btf_id,
7181 strict_type_match)) {
7182 verbose(env, "R%d is of type %s but %s is expected\n",
b32a5dae
DM
7183 regno, btf_type_name(reg->btf, reg->btf_id),
7184 btf_type_name(btf_vmlinux, *arg_btf_id));
47e34cb7
DM
7185 return -EACCES;
7186 }
a968d5e2 7187 }
da03e43a
KKD
7188 break;
7189 }
7190 case PTR_TO_BTF_ID | MEM_ALLOC:
738c96d5
DM
7191 if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock &&
7192 meta->func_id != BPF_FUNC_kptr_xchg) {
4e814da0
KKD
7193 verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
7194 return -EFAULT;
7195 }
da03e43a
KKD
7196 /* Handled by helper specific checks */
7197 break;
7198 case PTR_TO_BTF_ID | MEM_PERCPU:
7199 case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED:
7200 /* Handled by helper specific checks */
7201 break;
7202 default:
7203 verbose(env, "verifier internal error: invalid PTR_TO_BTF_ID register for type match\n");
7204 return -EFAULT;
a968d5e2 7205 }
a968d5e2 7206 return 0;
f79e7ea5
LB
7207}
7208
6a3cd331
DM
7209static struct btf_field *
7210reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields)
7211{
7212 struct btf_field *field;
7213 struct btf_record *rec;
7214
7215 rec = reg_btf_record(reg);
7216 if (!rec)
7217 return NULL;
7218
7219 field = btf_record_find(rec, off, fields);
7220 if (!field)
7221 return NULL;
7222
7223 return field;
7224}
7225
25b35dd2
KKD
7226int check_func_arg_reg_off(struct bpf_verifier_env *env,
7227 const struct bpf_reg_state *reg, int regno,
8f14852e 7228 enum bpf_arg_type arg_type)
25b35dd2 7229{
184c9bdb 7230 u32 type = reg->type;
25b35dd2 7231
184c9bdb
KKD
7232	/* When a referenced register is passed to a release function, its fixed
7233	 * offset must be 0.
7234	 *
7235	 * We check that an arg_type_is_release() reg has a ref_obj_id when
7236	 * storing meta->release_regno.
7237 */
7238 if (arg_type_is_release(arg_type)) {
7239		/* ARG_PTR_TO_DYNPTR with OBJ_RELEASE is a bit special, as it
7240		 * may not directly point to the object being released, but to a
7241		 * dynptr pointing to such an object, which might be at some offset
7242		 * on the stack. In that case, we simply fall back to the
7243		 * default handling.
7244		 */
7245 if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK)
7246 return 0;
6a3cd331
DM
7247
7248 if ((type_is_ptr_alloc_obj(type) || type_is_non_owning_ref(type)) && reg->off) {
7249 if (reg_find_field_offset(reg, reg->off, BPF_GRAPH_NODE_OR_ROOT))
7250 return __check_ptr_off_reg(env, reg, regno, true);
7251
7252 verbose(env, "R%d must have zero offset when passed to release func\n",
7253 regno);
7254 verbose(env, "No graph node or root found at R%d type:%s off:%d\n", regno,
b32a5dae 7255 btf_type_name(reg->btf, reg->btf_id), reg->off);
6a3cd331
DM
7256 return -EINVAL;
7257 }
7258
184c9bdb
KKD
7259 /* Doing check_ptr_off_reg check for the offset will catch this
7260 * because fixed_off_ok is false, but checking here allows us
7261 * to give the user a better error message.
7262 */
7263 if (reg->off) {
7264 verbose(env, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n",
7265 regno);
7266 return -EINVAL;
7267 }
7268 return __check_ptr_off_reg(env, reg, regno, false);
7269 }
7270
7271 switch (type) {
7272 /* Pointer types where both fixed and variable offset is explicitly allowed: */
97e03f52 7273 case PTR_TO_STACK:
25b35dd2
KKD
7274 case PTR_TO_PACKET:
7275 case PTR_TO_PACKET_META:
7276 case PTR_TO_MAP_KEY:
7277 case PTR_TO_MAP_VALUE:
7278 case PTR_TO_MEM:
7279 case PTR_TO_MEM | MEM_RDONLY:
894f2a8b 7280 case PTR_TO_MEM | MEM_RINGBUF:
25b35dd2
KKD
7281 case PTR_TO_BUF:
7282 case PTR_TO_BUF | MEM_RDONLY:
97e03f52 7283 case SCALAR_VALUE:
184c9bdb 7284 return 0;
25b35dd2
KKD
7285 /* All the rest must be rejected, except PTR_TO_BTF_ID which allows
7286 * fixed offset.
7287 */
7288 case PTR_TO_BTF_ID:
282de143 7289 case PTR_TO_BTF_ID | MEM_ALLOC:
3f00c523 7290 case PTR_TO_BTF_ID | PTR_TRUSTED:
fca1aa75 7291 case PTR_TO_BTF_ID | MEM_RCU:
6a3cd331 7292 case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF:
24d5bb80 7293 /* When referenced PTR_TO_BTF_ID is passed to release function,
184c9bdb
KKD
7294 * its fixed offset must be 0. In the other cases, fixed offset
7295 * can be non-zero. This was already checked above. So pass
7296 * fixed_off_ok as true to allow fixed offset for all other
7297 * cases. var_off always must be 0 for PTR_TO_BTF_ID, hence we
7298 * still need to do checks instead of returning.
24d5bb80 7299 */
184c9bdb 7300 return __check_ptr_off_reg(env, reg, regno, true);
25b35dd2 7301 default:
184c9bdb 7302 return __check_ptr_off_reg(env, reg, regno, false);
25b35dd2 7303 }
25b35dd2
KKD
7304}
7305
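/* Sketch for illustration, not part of this file: what the
 * release-argument rule above rejects. A referenced pointer passed to a
 * release helper must carry a zero fixed offset ("ctx" and "tuple" are
 * illustrative):
 *
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), -1, 0);
 *	if (sk)
 *		bpf_sk_release((void *)sk + 4);	// rejected: nonzero reg->off
 */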
485ec51e
JK
7306static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env,
7307 const struct bpf_func_proto *fn,
7308 struct bpf_reg_state *regs)
7309{
7310 struct bpf_reg_state *state = NULL;
7311 int i;
7312
7313 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++)
7314 if (arg_type_is_dynptr(fn->arg_type[i])) {
7315 if (state) {
7316 verbose(env, "verifier internal error: multiple dynptr args\n");
7317 return NULL;
7318 }
7319 state = &regs[BPF_REG_1 + i];
7320 }
7321
7322 if (!state)
7323 verbose(env, "verifier internal error: no dynptr arg found\n");
7324
7325 return state;
7326}
7327
f8064ab9 7328static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
34d4ef57
JK
7329{
7330 struct bpf_func_state *state = func(env, reg);
27060531 7331 int spi;
34d4ef57 7332
27060531 7333 if (reg->type == CONST_PTR_TO_DYNPTR)
f8064ab9
KKD
7334 return reg->id;
7335 spi = dynptr_get_spi(env, reg);
7336 if (spi < 0)
7337 return spi;
7338 return state->stack[spi].spilled_ptr.id;
7339}
7340
79168a66 7341static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
34d4ef57
JK
7342{
7343 struct bpf_func_state *state = func(env, reg);
27060531 7344 int spi;
27060531 7345
27060531
KKD
7346 if (reg->type == CONST_PTR_TO_DYNPTR)
7347 return reg->ref_obj_id;
79168a66
KKD
7348 spi = dynptr_get_spi(env, reg);
7349 if (spi < 0)
7350 return spi;
27060531 7351 return state->stack[spi].spilled_ptr.ref_obj_id;
34d4ef57
JK
7352}
7353
b5964b96
JK
7354static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env,
7355 struct bpf_reg_state *reg)
7356{
7357 struct bpf_func_state *state = func(env, reg);
7358 int spi;
7359
7360 if (reg->type == CONST_PTR_TO_DYNPTR)
7361 return reg->dynptr.type;
7362
7363 spi = __get_spi(reg->off);
7364 if (spi < 0) {
7365 verbose(env, "verifier internal error: invalid spi when querying dynptr type\n");
7366 return BPF_DYNPTR_TYPE_INVALID;
7367 }
7368
7369 return state->stack[spi].spilled_ptr.dynptr.type;
7370}
7371
af7ec138
YS
7372static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
7373 struct bpf_call_arg_meta *meta,
1d18feb2
JK
7374 const struct bpf_func_proto *fn,
7375 int insn_idx)
17a52670 7376{
af7ec138 7377 u32 regno = BPF_REG_1 + arg;
638f5b90 7378 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
af7ec138 7379 enum bpf_arg_type arg_type = fn->arg_type[arg];
f79e7ea5 7380 enum bpf_reg_type type = reg->type;
508362ac 7381 u32 *arg_btf_id = NULL;
17a52670
AS
7382 int err = 0;
7383
80f1d68c 7384 if (arg_type == ARG_DONTCARE)
17a52670
AS
7385 return 0;
7386
dc503a8a
EC
7387 err = check_reg_arg(env, regno, SRC_OP);
7388 if (err)
7389 return err;
17a52670 7390
1be7f75d
AS
7391 if (arg_type == ARG_ANYTHING) {
7392 if (is_pointer_value(env, regno)) {
61bd5218
JK
7393 verbose(env, "R%d leaks addr into helper function\n",
7394 regno);
1be7f75d
AS
7395 return -EACCES;
7396 }
80f1d68c 7397 return 0;
1be7f75d 7398 }
80f1d68c 7399
de8f3a83 7400 if (type_is_pkt_pointer(type) &&
3a0af8fd 7401 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
61bd5218 7402 verbose(env, "helper access to the packet is not allowed\n");
6841de8b
AS
7403 return -EACCES;
7404 }
7405
16d1e00c 7406 if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
912f442c
LB
7407 err = resolve_map_arg_type(env, meta, &arg_type);
7408 if (err)
7409 return err;
7410 }
7411
48946bd6 7412 if (register_is_null(reg) && type_may_be_null(arg_type))
fd1b0d60
LB
7413 /* A NULL register has a SCALAR_VALUE type, so skip
7414 * type checking.
7415 */
7416 goto skip_type_check;
7417
508362ac 7418 /* arg_btf_id and arg_size are in a union. */
4e814da0
KKD
7419 if (base_type(arg_type) == ARG_PTR_TO_BTF_ID ||
7420 base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
508362ac
MM
7421 arg_btf_id = fn->arg_btf_id[arg];
7422
7423 err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
f79e7ea5
LB
7424 if (err)
7425 return err;
7426
8f14852e 7427 err = check_func_arg_reg_off(env, reg, regno, arg_type);
25b35dd2
KKD
7428 if (err)
7429 return err;
d7b9454a 7430
fd1b0d60 7431skip_type_check:
8f14852e 7432 if (arg_type_is_release(arg_type)) {
bc34dee6
JK
7433 if (arg_type_is_dynptr(arg_type)) {
7434 struct bpf_func_state *state = func(env, reg);
27060531 7435 int spi;
bc34dee6 7436
27060531
KKD
7437			/* Only a dynptr created on the stack can be released, thus
7438 * the get_spi and stack state checks for spilled_ptr
7439 * should only be done before process_dynptr_func for
7440 * PTR_TO_STACK.
7441 */
7442 if (reg->type == PTR_TO_STACK) {
79168a66 7443 spi = dynptr_get_spi(env, reg);
f5b625e5 7444 if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) {
27060531
KKD
7445 verbose(env, "arg %d is an unacquired reference\n", regno);
7446 return -EINVAL;
7447 }
7448 } else {
7449 verbose(env, "cannot release unowned const bpf_dynptr\n");
bc34dee6
JK
7450 return -EINVAL;
7451 }
7452 } else if (!reg->ref_obj_id && !register_is_null(reg)) {
8f14852e
KKD
7453 verbose(env, "R%d must be referenced when passed to release function\n",
7454 regno);
7455 return -EINVAL;
7456 }
7457 if (meta->release_regno) {
7458 verbose(env, "verifier internal error: more than one release argument\n");
7459 return -EFAULT;
7460 }
7461 meta->release_regno = regno;
7462 }
7463
02f7c958 7464 if (reg->ref_obj_id) {
457f4436
AN
7465 if (meta->ref_obj_id) {
7466 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
7467 regno, reg->ref_obj_id,
7468 meta->ref_obj_id);
7469 return -EFAULT;
7470 }
7471 meta->ref_obj_id = reg->ref_obj_id;
17a52670
AS
7472 }
7473
8ab4cdcf
JK
7474 switch (base_type(arg_type)) {
7475 case ARG_CONST_MAP_PTR:
17a52670 7476 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
3e8ce298
AS
7477 if (meta->map_ptr) {
7478 /* Use map_uid (which is unique id of inner map) to reject:
7479 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
7480 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
7481 * if (inner_map1 && inner_map2) {
7482 * timer = bpf_map_lookup_elem(inner_map1);
7483 * if (timer)
7484 * // mismatch would have been allowed
7485 * bpf_timer_init(timer, inner_map2);
7486 * }
7487 *
7488 * Comparing map_ptr is enough to distinguish normal and outer maps.
7489 */
7490 if (meta->map_ptr != reg->map_ptr ||
7491 meta->map_uid != reg->map_uid) {
7492 verbose(env,
7493 "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
7494 meta->map_uid, reg->map_uid);
7495 return -EINVAL;
7496 }
b00628b1 7497 }
33ff9823 7498 meta->map_ptr = reg->map_ptr;
3e8ce298 7499 meta->map_uid = reg->map_uid;
8ab4cdcf
JK
7500 break;
7501 case ARG_PTR_TO_MAP_KEY:
17a52670
AS
7502 /* bpf_map_xxx(..., map_ptr, ..., key) call:
7503 * check that [key, key + map->key_size) are within
7504 * stack limits and initialized
7505 */
33ff9823 7506 if (!meta->map_ptr) {
17a52670
AS
7507			/* in the function declaration, map_ptr must come before
7508			 * map_key, so that it's verified and known before
7509			 * we have to check map_key here. Otherwise it means
7510			 * that the kernel subsystem misconfigured the verifier
7511 */
61bd5218 7512 verbose(env, "invalid map_ptr to access map->key\n");
17a52670
AS
7513 return -EACCES;
7514 }
d71962f3
PC
7515 err = check_helper_mem_access(env, regno,
7516 meta->map_ptr->key_size, false,
7517 NULL);
8ab4cdcf
JK
7518 break;
7519 case ARG_PTR_TO_MAP_VALUE:
48946bd6
HL
7520 if (type_may_be_null(arg_type) && register_is_null(reg))
7521 return 0;
7522
17a52670
AS
7523 /* bpf_map_xxx(..., map_ptr, ..., value) call:
7524 * check [value, value + map->value_size) validity
7525 */
33ff9823 7526 if (!meta->map_ptr) {
17a52670 7527 /* kernel subsystem misconfigured verifier */
61bd5218 7528 verbose(env, "invalid map_ptr to access map->value\n");
17a52670
AS
7529 return -EACCES;
7530 }
16d1e00c 7531 meta->raw_mode = arg_type & MEM_UNINIT;
d71962f3
PC
7532 err = check_helper_mem_access(env, regno,
7533 meta->map_ptr->value_size, false,
2ea864c5 7534 meta);
8ab4cdcf
JK
7535 break;
7536 case ARG_PTR_TO_PERCPU_BTF_ID:
eaa6bcb7
HL
7537 if (!reg->btf_id) {
7538 verbose(env, "Helper has invalid btf_id in R%d\n", regno);
7539 return -EACCES;
7540 }
22dc4a0f 7541 meta->ret_btf = reg->btf;
eaa6bcb7 7542 meta->ret_btf_id = reg->btf_id;
8ab4cdcf
JK
7543 break;
7544 case ARG_PTR_TO_SPIN_LOCK:
5d92ddc3
DM
7545 if (in_rbtree_lock_required_cb(env)) {
7546 verbose(env, "can't spin_{lock,unlock} in rbtree cb\n");
7547 return -EACCES;
7548 }
c18f0b6a 7549 if (meta->func_id == BPF_FUNC_spin_lock) {
ac50fe51
KKD
7550 err = process_spin_lock(env, regno, true);
7551 if (err)
7552 return err;
c18f0b6a 7553 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
ac50fe51
KKD
7554 err = process_spin_lock(env, regno, false);
7555 if (err)
7556 return err;
c18f0b6a
LB
7557 } else {
7558 verbose(env, "verifier internal error\n");
7559 return -EFAULT;
7560 }
8ab4cdcf
JK
7561 break;
7562 case ARG_PTR_TO_TIMER:
ac50fe51
KKD
7563 err = process_timer_func(env, regno, meta);
7564 if (err)
7565 return err;
8ab4cdcf
JK
7566 break;
7567 case ARG_PTR_TO_FUNC:
69c087ba 7568 meta->subprogno = reg->subprogno;
8ab4cdcf
JK
7569 break;
7570 case ARG_PTR_TO_MEM:
a2bbe7cc
LB
7571 /* The access to this pointer is only checked when we hit the
7572 * next is_mem_size argument below.
7573 */
16d1e00c 7574 meta->raw_mode = arg_type & MEM_UNINIT;
508362ac
MM
7575 if (arg_type & MEM_FIXED_SIZE) {
7576 err = check_helper_mem_access(env, regno,
7577 fn->arg_size[arg], false,
7578 meta);
7579 }
8ab4cdcf
JK
7580 break;
7581 case ARG_CONST_SIZE:
7582 err = check_mem_size_reg(env, reg, regno, false, meta);
7583 break;
7584 case ARG_CONST_SIZE_OR_ZERO:
7585 err = check_mem_size_reg(env, reg, regno, true, meta);
7586 break;
7587 case ARG_PTR_TO_DYNPTR:
1d18feb2 7588 err = process_dynptr_func(env, regno, insn_idx, arg_type);
ac50fe51
KKD
7589 if (err)
7590 return err;
8ab4cdcf
JK
7591 break;
7592 case ARG_CONST_ALLOC_SIZE_OR_ZERO:
457f4436 7593 if (!tnum_is_const(reg->var_off)) {
28a8add6 7594			verbose(env, "R%d is not a known constant\n",
457f4436
AN
7595 regno);
7596 return -EACCES;
7597 }
7598 meta->mem_size = reg->var_off.value;
2fc31465
KKD
7599 err = mark_chain_precision(env, regno);
7600 if (err)
7601 return err;
8ab4cdcf
JK
7602 break;
7603 case ARG_PTR_TO_INT:
7604 case ARG_PTR_TO_LONG:
7605 {
57c3bb72
AI
7606 int size = int_ptr_type_to_size(arg_type);
7607
7608 err = check_helper_mem_access(env, regno, size, false, meta);
7609 if (err)
7610 return err;
7611 err = check_ptr_alignment(env, reg, 0, size, true);
8ab4cdcf
JK
7612 break;
7613 }
7614 case ARG_PTR_TO_CONST_STR:
7615 {
fff13c4b
FR
7616 struct bpf_map *map = reg->map_ptr;
7617 int map_off;
7618 u64 map_addr;
7619 char *str_ptr;
7620
a8fad73e 7621 if (!bpf_map_is_rdonly(map)) {
fff13c4b
FR
7622 verbose(env, "R%d does not point to a readonly map'\n", regno);
7623 return -EACCES;
7624 }
7625
7626 if (!tnum_is_const(reg->var_off)) {
7627 verbose(env, "R%d is not a constant address'\n", regno);
7628 return -EACCES;
7629 }
7630
7631 if (!map->ops->map_direct_value_addr) {
7632 verbose(env, "no direct value access support for this map type\n");
7633 return -EACCES;
7634 }
7635
7636 err = check_map_access(env, regno, reg->off,
61df10c7
KKD
7637 map->value_size - reg->off, false,
7638 ACCESS_HELPER);
fff13c4b
FR
7639 if (err)
7640 return err;
7641
7642 map_off = reg->off + reg->var_off.value;
7643 err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
7644 if (err) {
7645 verbose(env, "direct value access on string failed\n");
7646 return err;
7647 }
7648
7649 str_ptr = (char *)(long)(map_addr);
7650 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
7651 verbose(env, "string is not zero-terminated\n");
7652 return -EINVAL;
7653 }
8ab4cdcf
JK
7654 break;
7655 }
7656 case ARG_PTR_TO_KPTR:
ac50fe51
KKD
7657 err = process_kptr_func(env, regno, meta);
7658 if (err)
7659 return err;
8ab4cdcf 7660 break;
17a52670
AS
7661 }
7662
7663 return err;
7664}
7665
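/* Sketch for illustration, not part of this file: the ARG_PTR_TO_CONST_STR
 * case handled above, as exercised by bpf_snprintf(). The format string
 * must live at a constant offset in a read-only map (e.g. .rodata) and be
 * NUL-terminated ("pid" is an illustrative value):
 *
 *	static const char fmt[] = "pid=%d";	// placed in .rodata
 *	u64 args[] = { pid };
 *	char out[32];
 *
 *	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 */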
0126240f
LB
7666static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
7667{
7668 enum bpf_attach_type eatype = env->prog->expected_attach_type;
7e40781c 7669 enum bpf_prog_type type = resolve_prog_type(env->prog);
0126240f
LB
7670
7671 if (func_id != BPF_FUNC_map_update_elem)
7672 return false;
7673
7674 /* It's not possible to get access to a locked struct sock in these
7675 * contexts, so updating is safe.
7676 */
7677 switch (type) {
7678 case BPF_PROG_TYPE_TRACING:
7679 if (eatype == BPF_TRACE_ITER)
7680 return true;
7681 break;
7682 case BPF_PROG_TYPE_SOCKET_FILTER:
7683 case BPF_PROG_TYPE_SCHED_CLS:
7684 case BPF_PROG_TYPE_SCHED_ACT:
7685 case BPF_PROG_TYPE_XDP:
7686 case BPF_PROG_TYPE_SK_REUSEPORT:
7687 case BPF_PROG_TYPE_FLOW_DISSECTOR:
7688 case BPF_PROG_TYPE_SK_LOOKUP:
7689 return true;
7690 default:
7691 break;
7692 }
7693
7694 verbose(env, "cannot update sockmap in this context\n");
7695 return false;
7696}
7697
e411901c
MF
7698static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
7699{
95acd881
TA
7700 return env->prog->jit_requested &&
7701 bpf_jit_supports_subprog_tailcalls();
e411901c
MF
7702}
7703
61bd5218
JK
7704static int check_map_func_compatibility(struct bpf_verifier_env *env,
7705 struct bpf_map *map, int func_id)
35578d79 7706{
35578d79
KX
7707 if (!map)
7708 return 0;
7709
6aff67c8
AS
7710	/* We need a two-way check: first from the map's perspective ... */
7711 switch (map->map_type) {
7712 case BPF_MAP_TYPE_PROG_ARRAY:
7713 if (func_id != BPF_FUNC_tail_call)
7714 goto error;
7715 break;
7716 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
7717 if (func_id != BPF_FUNC_perf_event_read &&
908432ca 7718 func_id != BPF_FUNC_perf_event_output &&
a7658e1a 7719 func_id != BPF_FUNC_skb_output &&
d831ee84
EC
7720 func_id != BPF_FUNC_perf_event_read_value &&
7721 func_id != BPF_FUNC_xdp_output)
6aff67c8
AS
7722 goto error;
7723 break;
457f4436
AN
7724 case BPF_MAP_TYPE_RINGBUF:
7725 if (func_id != BPF_FUNC_ringbuf_output &&
7726 func_id != BPF_FUNC_ringbuf_reserve &&
bc34dee6
JK
7727 func_id != BPF_FUNC_ringbuf_query &&
7728 func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
7729 func_id != BPF_FUNC_ringbuf_submit_dynptr &&
7730 func_id != BPF_FUNC_ringbuf_discard_dynptr)
457f4436
AN
7731 goto error;
7732 break;
583c1f42 7733 case BPF_MAP_TYPE_USER_RINGBUF:
20571567
DV
7734 if (func_id != BPF_FUNC_user_ringbuf_drain)
7735 goto error;
7736 break;
6aff67c8
AS
7737 case BPF_MAP_TYPE_STACK_TRACE:
7738 if (func_id != BPF_FUNC_get_stackid)
7739 goto error;
7740 break;
4ed8ec52 7741 case BPF_MAP_TYPE_CGROUP_ARRAY:
60747ef4 7742 if (func_id != BPF_FUNC_skb_under_cgroup &&
60d20f91 7743 func_id != BPF_FUNC_current_task_under_cgroup)
4a482f34
MKL
7744 goto error;
7745 break;
cd339431 7746 case BPF_MAP_TYPE_CGROUP_STORAGE:
b741f163 7747 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
cd339431
RG
7748 if (func_id != BPF_FUNC_get_local_storage)
7749 goto error;
7750 break;
546ac1ff 7751 case BPF_MAP_TYPE_DEVMAP:
6f9d451a 7752 case BPF_MAP_TYPE_DEVMAP_HASH:
0cdbb4b0
THJ
7753 if (func_id != BPF_FUNC_redirect_map &&
7754 func_id != BPF_FUNC_map_lookup_elem)
546ac1ff
JF
7755 goto error;
7756 break;
fbfc504a
BT
7757 /* Restrict bpf side of cpumap and xskmap, open when use-cases
7758 * appear.
7759 */
6710e112
JDB
7760 case BPF_MAP_TYPE_CPUMAP:
7761 if (func_id != BPF_FUNC_redirect_map)
7762 goto error;
7763 break;
fada7fdc
JL
7764 case BPF_MAP_TYPE_XSKMAP:
7765 if (func_id != BPF_FUNC_redirect_map &&
7766 func_id != BPF_FUNC_map_lookup_elem)
7767 goto error;
7768 break;
56f668df 7769 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
bcc6b1b7 7770 case BPF_MAP_TYPE_HASH_OF_MAPS:
56f668df
MKL
7771 if (func_id != BPF_FUNC_map_lookup_elem)
7772 goto error;
16a43625 7773 break;
174a79ff
JF
7774 case BPF_MAP_TYPE_SOCKMAP:
7775 if (func_id != BPF_FUNC_sk_redirect_map &&
7776 func_id != BPF_FUNC_sock_map_update &&
4f738adb 7777 func_id != BPF_FUNC_map_delete_elem &&
9fed9000 7778 func_id != BPF_FUNC_msg_redirect_map &&
64d85290 7779 func_id != BPF_FUNC_sk_select_reuseport &&
0126240f
LB
7780 func_id != BPF_FUNC_map_lookup_elem &&
7781 !may_update_sockmap(env, func_id))
174a79ff
JF
7782 goto error;
7783 break;
81110384
JF
7784 case BPF_MAP_TYPE_SOCKHASH:
7785 if (func_id != BPF_FUNC_sk_redirect_hash &&
7786 func_id != BPF_FUNC_sock_hash_update &&
7787 func_id != BPF_FUNC_map_delete_elem &&
9fed9000 7788 func_id != BPF_FUNC_msg_redirect_hash &&
64d85290 7789 func_id != BPF_FUNC_sk_select_reuseport &&
0126240f
LB
7790 func_id != BPF_FUNC_map_lookup_elem &&
7791 !may_update_sockmap(env, func_id))
81110384
JF
7792 goto error;
7793 break;
2dbb9b9e
MKL
7794 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
7795 if (func_id != BPF_FUNC_sk_select_reuseport)
7796 goto error;
7797 break;
f1a2e44a
MV
7798 case BPF_MAP_TYPE_QUEUE:
7799 case BPF_MAP_TYPE_STACK:
7800 if (func_id != BPF_FUNC_map_peek_elem &&
7801 func_id != BPF_FUNC_map_pop_elem &&
7802 func_id != BPF_FUNC_map_push_elem)
7803 goto error;
7804 break;
6ac99e8f
MKL
7805 case BPF_MAP_TYPE_SK_STORAGE:
7806 if (func_id != BPF_FUNC_sk_storage_get &&
9db44fdd
KKD
7807 func_id != BPF_FUNC_sk_storage_delete &&
7808 func_id != BPF_FUNC_kptr_xchg)
6ac99e8f
MKL
7809 goto error;
7810 break;
8ea63684
KS
7811 case BPF_MAP_TYPE_INODE_STORAGE:
7812 if (func_id != BPF_FUNC_inode_storage_get &&
9db44fdd
KKD
7813 func_id != BPF_FUNC_inode_storage_delete &&
7814 func_id != BPF_FUNC_kptr_xchg)
8ea63684
KS
7815 goto error;
7816 break;
4cf1bc1f
KS
7817 case BPF_MAP_TYPE_TASK_STORAGE:
7818 if (func_id != BPF_FUNC_task_storage_get &&
9db44fdd
KKD
7819 func_id != BPF_FUNC_task_storage_delete &&
7820 func_id != BPF_FUNC_kptr_xchg)
4cf1bc1f
KS
7821 goto error;
7822 break;
c4bcfb38
YS
	case BPF_MAP_TYPE_CGRP_STORAGE:
		if (func_id != BPF_FUNC_cgrp_storage_get &&
		    func_id != BPF_FUNC_cgrp_storage_delete &&
		    func_id != BPF_FUNC_kptr_xchg)
			goto error;
		break;
	case BPF_MAP_TYPE_BLOOM_FILTER:
		if (func_id != BPF_FUNC_map_peek_elem &&
		    func_id != BPF_FUNC_map_push_elem)
			goto error;
		break;
	default:
		break;
	}

	/* ... and second from the function itself. */
	switch (func_id) {
	case BPF_FUNC_tail_call:
		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
			goto error;
		if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
			verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
			return -EINVAL;
		}
		break;
	case BPF_FUNC_perf_event_read:
	case BPF_FUNC_perf_event_output:
	case BPF_FUNC_perf_event_read_value:
	case BPF_FUNC_skb_output:
	case BPF_FUNC_xdp_output:
		if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
			goto error;
		break;
	case BPF_FUNC_ringbuf_output:
	case BPF_FUNC_ringbuf_reserve:
	case BPF_FUNC_ringbuf_query:
	case BPF_FUNC_ringbuf_reserve_dynptr:
	case BPF_FUNC_ringbuf_submit_dynptr:
	case BPF_FUNC_ringbuf_discard_dynptr:
		if (map->map_type != BPF_MAP_TYPE_RINGBUF)
			goto error;
		break;
	case BPF_FUNC_user_ringbuf_drain:
		if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF)
			goto error;
		break;
	case BPF_FUNC_get_stackid:
		if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
			goto error;
		break;
	case BPF_FUNC_current_task_under_cgroup:
	case BPF_FUNC_skb_under_cgroup:
		if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
			goto error;
		break;
	case BPF_FUNC_redirect_map:
		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
		    map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
		    map->map_type != BPF_MAP_TYPE_XSKMAP)
			goto error;
		break;
	case BPF_FUNC_sk_redirect_map:
	case BPF_FUNC_msg_redirect_map:
	case BPF_FUNC_sock_map_update:
		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
			goto error;
		break;
	case BPF_FUNC_sk_redirect_hash:
	case BPF_FUNC_msg_redirect_hash:
	case BPF_FUNC_sock_hash_update:
		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
			goto error;
		break;
	case BPF_FUNC_get_local_storage:
		if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
		    map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
			goto error;
		break;
	case BPF_FUNC_sk_select_reuseport:
		if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
		    map->map_type != BPF_MAP_TYPE_SOCKMAP &&
		    map->map_type != BPF_MAP_TYPE_SOCKHASH)
			goto error;
		break;
	case BPF_FUNC_map_pop_elem:
		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
		    map->map_type != BPF_MAP_TYPE_STACK)
			goto error;
		break;
	case BPF_FUNC_map_peek_elem:
	case BPF_FUNC_map_push_elem:
		if (map->map_type != BPF_MAP_TYPE_QUEUE &&
		    map->map_type != BPF_MAP_TYPE_STACK &&
		    map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
			goto error;
		break;
	case BPF_FUNC_map_lookup_percpu_elem:
		if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
		    map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
		    map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
			goto error;
		break;
	case BPF_FUNC_sk_storage_get:
	case BPF_FUNC_sk_storage_delete:
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
			goto error;
		break;
	case BPF_FUNC_inode_storage_get:
	case BPF_FUNC_inode_storage_delete:
		if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
			goto error;
		break;
	case BPF_FUNC_task_storage_get:
	case BPF_FUNC_task_storage_delete:
		if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
			goto error;
		break;
	case BPF_FUNC_cgrp_storage_get:
	case BPF_FUNC_cgrp_storage_delete:
		if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE)
			goto error;
		break;
	default:
		break;
	}

	return 0;
error:
	verbose(env, "cannot pass map_type %d into func %s#%d\n",
		map->map_type, func_id_name(func_id), func_id);
	return -EINVAL;
}

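/* The checks below sanity-check helper *prototypes* as declared by the
 * kernel, not by the BPF program: "raw mode" (ARG_PTR_TO_UNINIT_MEM) lets a
 * helper write into an uninitialized buffer, and only one argument per
 * helper may use it.
 */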
static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
{
	int count = 0;

	if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
		count++;
	if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
		count++;

	/* We only support one arg being in raw mode at the moment,
	 * which is sufficient for the helper functions we have
	 * right now.
	 */
	return count <= 1;
}

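/* A pointer-to-memory argument in a helper proto must be paired with a
 * following size argument. For example, bpf_probe_read_user()'s proto uses
 *
 *	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
 *	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
 *
 * which is a valid pairing; a mem argument with neither a size argument nor
 * the MEM_FIXED_SIZE flag is rejected below.
 */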
static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
{
	bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
	bool has_size = fn->arg_size[arg] != 0;
	bool is_next_size = false;

	if (arg + 1 < ARRAY_SIZE(fn->arg_type))
		is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);

	if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
		return is_next_size;

	return has_size == is_next_size || is_next_size == is_fixed;
}

static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
{
	/* bpf_xxx(..., buf, len) call will access 'len'
	 * bytes from memory 'buf'. Both arg types need
	 * to be paired, so make sure there's no buggy
	 * helper function specification.
	 */
	if (arg_type_is_mem_size(fn->arg1_type) ||
	    check_args_pair_invalid(fn, 0) ||
	    check_args_pair_invalid(fn, 1) ||
	    check_args_pair_invalid(fn, 2) ||
	    check_args_pair_invalid(fn, 3) ||
	    check_args_pair_invalid(fn, 4))
		return false;

	return true;
}

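/* ARG_PTR_TO_BTF_ID arguments must name their expected struct via
 * fn->arg_btf_id; ARG_PTR_TO_SPIN_LOCK is special-cased with BPF_PTR_POISON
 * because the lock's type is validated separately.
 */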
static bool check_btf_id_ok(const struct bpf_func_proto *fn)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID)
			return !!fn->arg_btf_id[i];
		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK)
			return fn->arg_btf_id[i] == BPF_PTR_POISON;
		if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
		    /* arg_btf_id and arg_size are in a union. */
		    (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
		     !(fn->arg_type[i] & MEM_FIXED_SIZE)))
			return false;
	}

	return true;
}

static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
{
	return check_raw_mode_ok(fn) &&
	       check_arg_pair_ok(fn) &&
	       check_btf_id_ok(fn) ? 0 : -EINVAL;
}

/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
 * are now invalid, so turn them into unknown SCALAR_VALUE.
 *
 * This also applies to dynptr slices belonging to skb and xdp dynptrs,
 * since these slices point to packet data.
 */
static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
	struct bpf_func_state *state;
	struct bpf_reg_state *reg;

	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
		if (reg_is_pkt_pointer_any(reg) || reg_is_dynptr_slice_pkt(reg))
			mark_reg_invalid(env, reg);
	}));
}

enum {
	AT_PKT_END = -1,
	BEYOND_PKT_END = -2,
};

static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
{
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg = &state->regs[regn];

	if (reg->type != PTR_TO_PACKET)
		/* PTR_TO_PACKET_META is not supported yet */
		return;

	/* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
	 * How far beyond pkt_end it goes is unknown.
	 * if (!range_open) it's the case of pkt >= pkt_end
	 * if (range_open) it's the case of pkt > pkt_end
	 * hence this pointer is at least 1 byte bigger than pkt_end
	 */
	if (range_open)
		reg->range = BEYOND_PKT_END;
	else
		reg->range = AT_PKT_END;
}

/* The pointer with the specified id has released its reference to kernel
 * resources. Identify all copies of the same pointer and clear the reference.
 */
static int release_reference(struct bpf_verifier_env *env,
			     int ref_obj_id)
{
	struct bpf_func_state *state;
	struct bpf_reg_state *reg;
	int err;

	err = release_reference_state(cur_func(env), ref_obj_id);
	if (err)
		return err;

	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
		if (reg->ref_obj_id == ref_obj_id)
			mark_reg_invalid(env, reg);
	}));

	return 0;
}

static void invalidate_non_owning_refs(struct bpf_verifier_env *env)
{
	struct bpf_func_state *unused;
	struct bpf_reg_state *reg;

	bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
		if (type_is_non_owning_ref(reg->type))
			mark_reg_invalid(env, reg);
	}));
}

static void clear_caller_saved_regs(struct bpf_verifier_env *env,
				    struct bpf_reg_state *regs)
{
	int i;

	/* after the call registers r0 - r5 were scratched */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(env, regs, caller_saved[i]);
		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
	}
}

typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee,
				   int insn_idx);

static int set_callee_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *caller,
			    struct bpf_func_state *callee, int insn_idx);

static bool is_callback_calling_kfunc(u32 btf_id);

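/* Common entry point for verifying a call into a subprogram, used both for
 * direct bpf-to-bpf calls and for helpers/kfuncs that run a subprogram as a
 * callback; set_callee_state_cb decides how the callee's argument registers
 * are seeded.
 */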
static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			     int *insn_idx, int subprog,
			     set_callee_state_fn set_callee_state_cb)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_func_info_aux *func_info_aux;
	struct bpf_func_state *caller, *callee;
	int err;
	bool is_global = false;

	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
		verbose(env, "the call stack of %d frames is too deep\n",
			state->curframe + 2);
		return -E2BIG;
	}

	caller = state->frame[state->curframe];
	if (state->frame[state->curframe + 1]) {
		verbose(env, "verifier bug. Frame %d already allocated\n",
			state->curframe + 1);
		return -EFAULT;
	}

	func_info_aux = env->prog->aux->func_info_aux;
	if (func_info_aux)
		is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
	err = btf_check_subprog_call(env, subprog, caller->regs);
	if (err == -EFAULT)
		return err;
	if (is_global) {
		if (err) {
			verbose(env, "Caller passes invalid args into func#%d\n",
				subprog);
			return err;
		} else {
			if (env->log.level & BPF_LOG_LEVEL)
				verbose(env,
					"Func#%d is global and valid. Skipping.\n",
					subprog);
			clear_caller_saved_regs(env, caller->regs);

			/* All global functions return a 64-bit SCALAR_VALUE */
			mark_reg_unknown(env, caller->regs, BPF_REG_0);
			caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;

			/* continue with next insn after call */
			return 0;
		}
	}

	/* set_callee_state is used for direct subprog calls, but we are
	 * interested in validating only BPF helpers that can call subprogs as
	 * callbacks
	 */
	if (set_callee_state_cb != set_callee_state) {
		if (bpf_pseudo_kfunc_call(insn) &&
		    !is_callback_calling_kfunc(insn->imm)) {
			verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n",
				func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		} else if (!bpf_pseudo_kfunc_call(insn) &&
			   !is_callback_calling_function(insn->imm)) { /* helper */
			verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n",
				func_id_name(insn->imm), insn->imm);
			return -EFAULT;
		}
	}

	if (insn->code == (BPF_JMP | BPF_CALL) &&
	    insn->src_reg == 0 &&
	    insn->imm == BPF_FUNC_timer_set_callback) {
		struct bpf_verifier_state *async_cb;

		/* there is no real recursion here. timer callbacks are async */
		env->subprog_info[subprog].is_async_cb = true;
		async_cb = push_async_cb(env, env->subprog_info[subprog].start,
					 *insn_idx, subprog);
		if (!async_cb)
			return -EFAULT;
		callee = async_cb->frame[0];
		callee->async_entry_cnt = caller->async_entry_cnt + 1;

		/* Convert bpf_timer_set_callback() args into timer callback args */
		err = set_callee_state_cb(env, caller, callee, *insn_idx);
		if (err)
			return err;

		clear_caller_saved_regs(env, caller->regs);
		mark_reg_unknown(env, caller->regs, BPF_REG_0);
		caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
		/* continue with next insn after call */
		return 0;
	}

	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
	if (!callee)
		return -ENOMEM;
	state->frame[state->curframe + 1] = callee;

	/* callee cannot access r0, r6 - r9 for reading and has to write
	 * into its own stack before reading from it.
	 * callee can read/write into caller's stack
	 */
	init_func_state(env, callee,
			/* remember the callsite, it will be used by bpf_exit */
			*insn_idx /* callsite */,
			state->curframe + 1 /* frameno within this callchain */,
			subprog /* subprog number within this prog */);

	/* Transfer references to the callee */
	err = copy_reference_state(callee, caller);
	if (err)
		goto err_out;

	err = set_callee_state_cb(env, caller, callee, *insn_idx);
	if (err)
		goto err_out;

	clear_caller_saved_regs(env, caller->regs);

	/* only increment it after check_reg_arg() finished */
	state->curframe++;

	/* and go analyze first insn of the callee */
	*insn_idx = env->subprog_info[subprog].start - 1;

	if (env->log.level & BPF_LOG_LEVEL) {
		verbose(env, "caller:\n");
		print_verifier_state(env, caller, true);
		verbose(env, "callee:\n");
		print_verifier_state(env, callee, true);
	}
	return 0;

err_out:
	free_func_state(callee);
	state->frame[state->curframe + 1] = NULL;
	return err;
}

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee)
{
	/* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
	 *			  void *callback_ctx, u64 flags);
	 * callback_fn(struct bpf_map *map, void *key, void *value,
	 *	       void *callback_ctx);
	 */
	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];

	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
	callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;

	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
	callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;

	/* pointer to stack or null */
	callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];

	/* unused */
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
	return 0;
}

static int set_callee_state(struct bpf_verifier_env *env,
			    struct bpf_func_state *caller,
			    struct bpf_func_state *callee, int insn_idx)
{
	int i;

	/* copy r1 - r5 args that callee can access. The copy includes parent
	 * pointers, which connects us up to the liveness chain
	 */
	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
		callee->regs[i] = caller->regs[i];
	return 0;
}

static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			   int *insn_idx)
{
	int subprog, target_insn;

	target_insn = *insn_idx + insn->imm + 1;
	subprog = find_subprog(env, target_insn);
	if (subprog < 0) {
		verbose(env, "verifier bug. No program starts at insn %d\n",
			target_insn);
		return -EFAULT;
	}

	return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
}

static int set_map_elem_callback_state(struct bpf_verifier_env *env,
				       struct bpf_func_state *caller,
				       struct bpf_func_state *callee,
				       int insn_idx)
{
	struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
	struct bpf_map *map;
	int err;

	if (bpf_map_ptr_poisoned(insn_aux)) {
		verbose(env, "tail_call abusing map_ptr\n");
		return -EINVAL;
	}

	map = BPF_MAP_PTR(insn_aux->map_ptr_state);
	if (!map->ops->map_set_for_each_callback_args ||
	    !map->ops->map_for_each_callback) {
		verbose(env, "callback function not allowed for map\n");
		return -ENOTSUPP;
	}

	err = map->ops->map_set_for_each_callback_args(env, caller, callee);
	if (err)
		return err;

	callee->in_callback_fn = true;
	callee->callback_ret_range = tnum_range(0, 1);
	return 0;
}

static int set_loop_callback_state(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee,
				   int insn_idx)
{
	/* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
	 *	    u64 flags);
	 * callback_fn(u32 index, void *callback_ctx);
	 */
	callee->regs[BPF_REG_1].type = SCALAR_VALUE;
	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];

	/* unused */
	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);

	callee->in_callback_fn = true;
	callee->callback_ret_range = tnum_range(0, 1);
	return 0;
}

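/* On the BPF program side, the state set up above corresponds to roughly
 * (cb and my_ctx are illustrative names, not kernel symbols):
 *
 *	static long cb(u32 index, void *ctx) { return 0; }
 *	...
 *	bpf_loop(16, cb, &my_ctx, 0);
 *
 * R1 of the callback is the scalar iteration index, R2 the caller-supplied
 * context pointer.
 */
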
static int set_timer_callback_state(struct bpf_verifier_env *env,
				    struct bpf_func_state *caller,
				    struct bpf_func_state *callee,
				    int insn_idx)
{
	struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;

	/* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
	 * callback_fn(struct bpf_map *map, void *key, void *value);
	 */
	callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
	__mark_reg_known_zero(&callee->regs[BPF_REG_1]);
	callee->regs[BPF_REG_1].map_ptr = map_ptr;

	callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
	callee->regs[BPF_REG_2].map_ptr = map_ptr;

	callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
	__mark_reg_known_zero(&callee->regs[BPF_REG_3]);
	callee->regs[BPF_REG_3].map_ptr = map_ptr;

	/* unused */
	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
	callee->in_async_callback_fn = true;
	callee->callback_ret_range = tnum_range(0, 1);
	return 0;
}

static int set_find_vma_callback_state(struct bpf_verifier_env *env,
				       struct bpf_func_state *caller,
				       struct bpf_func_state *callee,
				       int insn_idx)
{
	/* bpf_find_vma(struct task_struct *task, u64 addr,
	 *		void *callback_fn, void *callback_ctx, u64 flags)
	 * (callback_fn)(struct task_struct *task,
	 *		 struct vm_area_struct *vma, void *callback_ctx);
	 */
	callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];

	callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
	__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
	callee->regs[BPF_REG_2].btf = btf_vmlinux;
	callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];

	/* pointer to stack or null */
	callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];

	/* unused */
	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
	callee->in_callback_fn = true;
	callee->callback_ret_range = tnum_range(0, 1);
	return 0;
}

static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env,
					   struct bpf_func_state *caller,
					   struct bpf_func_state *callee,
					   int insn_idx)
{
	/* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn,
	 *			  void *callback_ctx, u64 flags);
	 * callback_fn(const struct bpf_dynptr_t *dynptr, void *callback_ctx);
	 */
	__mark_reg_not_init(env, &callee->regs[BPF_REG_0]);
	mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL);
	callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];

	/* unused */
	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);

	callee->in_callback_fn = true;
	callee->callback_ret_range = tnum_range(0, 1);
	return 0;
}

static int set_rbtree_add_callback_state(struct bpf_verifier_env *env,
					 struct bpf_func_state *caller,
					 struct bpf_func_state *callee,
					 int insn_idx)
{
	/* void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node,
	 *		       bool (*less)(struct bpf_rb_node *a, const struct bpf_rb_node *b));
	 *
	 * 'struct bpf_rb_node *node' arg to bpf_rbtree_add is the same PTR_TO_BTF_ID w/ offset
	 * that 'less' callback args will be receiving. However, 'node' arg was release_reference'd
	 * by this point, so look at 'root'
	 */
	struct btf_field *field;

	field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off,
				      BPF_RB_ROOT);
	if (!field || !field->graph_root.value_btf_id)
		return -EFAULT;

	mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root);
	ref_set_non_owning(env, &callee->regs[BPF_REG_1]);
	mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root);
	ref_set_non_owning(env, &callee->regs[BPF_REG_2]);

	__mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
	__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
	callee->in_callback_fn = true;
	callee->callback_ret_range = tnum_range(0, 1);
	return 0;
}

static bool is_rbtree_lock_required_kfunc(u32 btf_id);

/* Are we currently verifying the callback for a rbtree helper that must
 * be called with lock held? If so, no need to complain about unreleased
 * lock
 */
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_insn *insn = env->prog->insnsi;
	struct bpf_func_state *callee;
	int kfunc_btf_id;

	if (!state->curframe)
		return false;

	callee = state->frame[state->curframe];

	if (!callee->in_callback_fn)
		return false;

	kfunc_btf_id = insn[callee->callsite].imm;
	return is_rbtree_lock_required_kfunc(kfunc_btf_id);
}

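/* Called when a BPF_EXIT is reached in a callee frame: validates the
 * callback's return range or copies r0 back to the caller, transfers
 * references where required, and frees the callee frame.
 */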
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_func_state *caller, *callee;
	struct bpf_reg_state *r0;
	int err;

	callee = state->frame[state->curframe];
	r0 = &callee->regs[BPF_REG_0];
	if (r0->type == PTR_TO_STACK) {
		/* technically it's ok to return caller's stack pointer
		 * (or caller's caller's pointer) back to the caller,
		 * since these pointers are valid. Only current stack
		 * pointer will be invalid as soon as function exits,
		 * but let's be conservative
		 */
		verbose(env, "cannot return stack pointer to the caller\n");
		return -EINVAL;
	}

	caller = state->frame[state->curframe - 1];
	if (callee->in_callback_fn) {
		/* enforce R0 return value range [0, 1]. */
		struct tnum range = callee->callback_ret_range;

		if (r0->type != SCALAR_VALUE) {
			verbose(env, "R0 not a scalar value\n");
			return -EACCES;
		}
		if (!tnum_in(range, r0->var_off)) {
			verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
			return -EINVAL;
		}
	} else {
		/* return to the caller whatever r0 had in the callee */
		caller->regs[BPF_REG_0] = *r0;
	}

	/* callback_fn frame should have released its own additions to parent's
	 * reference state at this point, or check_reference_leak would
	 * complain, hence it must be the same as the caller. There is no need
	 * to copy it back.
	 */
	if (!callee->in_callback_fn) {
		/* Transfer references to the caller */
		err = copy_reference_state(caller, callee);
		if (err)
			return err;
	}

	*insn_idx = callee->callsite + 1;
	if (env->log.level & BPF_LOG_LEVEL) {
		verbose(env, "returning from callee:\n");
		print_verifier_state(env, callee, true);
		verbose(env, "to caller at %d:\n", *insn_idx);
		print_verifier_state(env, caller, true);
	}
	/* clear everything in the callee */
	free_func_state(callee);
	state->frame[state->curframe--] = NULL;
	return 0;
}

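/* bpf_get_stack() and the probe_read_str() family return either a negative
 * errno or a number of bytes written, so R0 can be tightened to
 * [-MAX_ERRNO, max buffer size], with the upper bound taken from the size
 * argument recorded in meta.
 */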
static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
				   int func_id,
				   struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];

	if (ret_type != RET_INTEGER ||
	    (func_id != BPF_FUNC_get_stack &&
	     func_id != BPF_FUNC_get_task_stack &&
	     func_id != BPF_FUNC_probe_read_str &&
	     func_id != BPF_FUNC_probe_read_kernel_str &&
	     func_id != BPF_FUNC_probe_read_user_str))
		return;

	ret_reg->smax_value = meta->msize_max_value;
	ret_reg->s32_max_value = meta->msize_max_value;
	ret_reg->smin_value = -MAX_ERRNO;
	ret_reg->s32_min_value = -MAX_ERRNO;
	reg_bounds_sync(ret_reg);
}

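/* Remember which map a map-manipulating helper operated on at this
 * instruction so later passes can specialize the call (e.g. inline map
 * lookups); if the same instruction is reached with different maps, the
 * recorded pointer is poisoned.
 */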
static int
record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
		int func_id, int insn_idx)
{
	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
	struct bpf_map *map = meta->map_ptr;

	if (func_id != BPF_FUNC_tail_call &&
	    func_id != BPF_FUNC_map_lookup_elem &&
	    func_id != BPF_FUNC_map_update_elem &&
	    func_id != BPF_FUNC_map_delete_elem &&
	    func_id != BPF_FUNC_map_push_elem &&
	    func_id != BPF_FUNC_map_pop_elem &&
	    func_id != BPF_FUNC_map_peek_elem &&
	    func_id != BPF_FUNC_for_each_map_elem &&
	    func_id != BPF_FUNC_redirect_map &&
	    func_id != BPF_FUNC_map_lookup_percpu_elem)
		return 0;

	if (map == NULL) {
		verbose(env, "kernel subsystem misconfigured verifier\n");
		return -EINVAL;
	}

	/* In case of read-only, some additional restrictions
	 * need to be applied in order to prevent altering the
	 * state of the map from program side.
	 */
	if ((map->map_flags & BPF_F_RDONLY_PROG) &&
	    (func_id == BPF_FUNC_map_delete_elem ||
	     func_id == BPF_FUNC_map_update_elem ||
	     func_id == BPF_FUNC_map_push_elem ||
	     func_id == BPF_FUNC_map_pop_elem)) {
		verbose(env, "write into map forbidden\n");
		return -EACCES;
	}

	if (!BPF_MAP_PTR(aux->map_ptr_state))
		bpf_map_ptr_store(aux, meta->map_ptr,
				  !meta->map_ptr->bypass_spec_v1);
	else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
				  !meta->map_ptr->bypass_spec_v1);
	return 0;
}

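/* For tail calls, track whether the prog array index in R3 is a known
 * constant; a single stable key allows later patching into a direct jump,
 * anything else poisons the recorded key.
 */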
static int
record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
		int func_id, int insn_idx)
{
	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
	struct bpf_reg_state *regs = cur_regs(env), *reg;
	struct bpf_map *map = meta->map_ptr;
	u64 val, max;
	int err;

	if (func_id != BPF_FUNC_tail_call)
		return 0;
	if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
		verbose(env, "kernel subsystem misconfigured verifier\n");
		return -EINVAL;
	}

	reg = &regs[BPF_REG_3];
	val = reg->var_off.value;
	max = map->max_entries;

	if (!(register_is_const(reg) && val < max)) {
		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
		return 0;
	}

	err = mark_chain_precision(env, BPF_REG_3);
	if (err)
		return err;
	if (bpf_map_key_unseen(aux))
		bpf_map_key_store(aux, val);
	else if (!bpf_map_key_poisoned(aux) &&
		 bpf_map_key_immediate(aux) != val)
		bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
	return 0;
}

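/* Ensure no acquired references are still live; a callback frame only has
 * to release the references it acquired itself.
 */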
static int check_reference_leak(struct bpf_verifier_env *env)
{
	struct bpf_func_state *state = cur_func(env);
	bool refs_lingering = false;
	int i;

	if (state->frameno && !state->in_callback_fn)
		return 0;

	for (i = 0; i < state->acquired_refs; i++) {
		if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
			continue;
		verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
			state->refs[i].id, state->refs[i].insn_idx);
		refs_lingering = true;
	}
	return refs_lingering ? -EINVAL : 0;
}

static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
				   struct bpf_reg_state *regs)
{
	struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
	struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
	struct bpf_map *fmt_map = fmt_reg->map_ptr;
	struct bpf_bprintf_data data = {};
	int err, fmt_map_off, num_args;
	u64 fmt_addr;
	char *fmt;

	/* data must be an array of u64 */
	if (data_len_reg->var_off.value % 8)
		return -EINVAL;
	num_args = data_len_reg->var_off.value / 8;

	/* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
	 * and map_direct_value_addr is set.
	 */
	fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
	err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
						  fmt_map_off);
	if (err) {
		verbose(env, "verifier bug\n");
		return -EFAULT;
	}
	fmt = (char *)(long)fmt_addr + fmt_map_off;

	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we
	 * can focus on validating the format specifiers.
	 */
	err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data);
	if (err < 0)
		verbose(env, "Invalid format string\n");

	return err;
}

static int check_get_func_ip(struct bpf_verifier_env *env)
{
	enum bpf_prog_type type = resolve_prog_type(env->prog);
	int func_id = BPF_FUNC_get_func_ip;

	if (type == BPF_PROG_TYPE_TRACING) {
		if (!bpf_prog_has_trampoline(env->prog)) {
			verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
				func_id_name(func_id), func_id);
			return -ENOTSUPP;
		}
		return 0;
	} else if (type == BPF_PROG_TYPE_KPROBE) {
		return 0;
	}

	verbose(env, "func %s#%d not supported for program type %d\n",
		func_id_name(func_id), func_id, type);
	return -ENOTSUPP;
}

static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
{
	return &env->insn_aux_data[env->insn_idx];
}

static bool loop_flag_is_zero(struct bpf_verifier_env *env)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[BPF_REG_4];
	bool reg_is_null = register_is_null(reg);

	if (reg_is_null)
		mark_chain_precision(env, BPF_REG_4);

	return reg_is_null;
}

static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
{
	struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;

	if (!state->initialized) {
		state->initialized = 1;
		state->fit_for_inline = loop_flag_is_zero(env);
		state->callback_subprogno = subprogno;
		return;
	}

	if (!state->fit_for_inline)
		return;

	state->fit_for_inline = (loop_flag_is_zero(env) &&
				 state->callback_subprogno == subprogno);
}

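/* Verify one call to a BPF helper: resolve the proto, type-check each
 * argument, apply helper-specific handling (callbacks, dynptrs, reference
 * releases) and model the return value in R0.
 */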
static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			     int *insn_idx_p)
{
	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
	const struct bpf_func_proto *fn = NULL;
	enum bpf_return_type ret_type;
	enum bpf_type_flag ret_flag;
	struct bpf_reg_state *regs;
	struct bpf_call_arg_meta meta;
	int insn_idx = *insn_idx_p;
	bool changes_data;
	int i, err, func_id;

	/* find function prototype */
	func_id = insn->imm;
	if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
		verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
			func_id);
		return -EINVAL;
	}

	if (env->ops->get_func_proto)
		fn = env->ops->get_func_proto(func_id, env->prog);
	if (!fn) {
		verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
			func_id);
		return -EINVAL;
	}

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	if (!env->prog->gpl_compatible && fn->gpl_only) {
		verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
		return -EINVAL;
	}

	if (fn->allowed && !fn->allowed(env->prog)) {
		verbose(env, "helper call is not allowed in probe\n");
		return -EINVAL;
	}

	if (!env->prog->aux->sleepable && fn->might_sleep) {
		verbose(env, "helper call might sleep in a non-sleepable prog\n");
		return -EINVAL;
	}

	/* With LD_ABS/IND some JITs save/restore skb from r1. */
	changes_data = bpf_helper_changes_pkt_data(fn->func);
	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
			func_id_name(func_id), func_id);
		return -EINVAL;
	}

	memset(&meta, 0, sizeof(meta));
	meta.pkt_access = fn->pkt_access;

	err = check_func_proto(fn, func_id);
	if (err) {
		verbose(env, "kernel subsystem misconfigured func %s#%d\n",
			func_id_name(func_id), func_id);
		return err;
	}

	if (env->cur_state->active_rcu_lock) {
		if (fn->might_sleep) {
			verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n",
				func_id_name(func_id), func_id);
			return -EINVAL;
		}

		if (env->prog->aux->sleepable && is_storage_get_function(func_id))
			env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
	}

	meta.func_id = func_id;
	/* check args */
	for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
		err = check_func_arg(env, i, &meta, fn, insn_idx);
		if (err)
			return err;
	}

	err = record_func_map(env, &meta, func_id, insn_idx);
	if (err)
		return err;

	err = record_func_key(env, &meta, func_id, insn_idx);
	if (err)
		return err;

	/* Mark slots with STACK_MISC in case of raw mode, stack offset
	 * is inferred from register state.
	 */
	for (i = 0; i < meta.access_size; i++) {
		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
				       BPF_WRITE, -1, false);
		if (err)
			return err;
	}

	regs = cur_regs(env);

	if (meta.release_regno) {
		err = -EINVAL;
		/* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot
		 * be released by any dynptr helper. Hence, unmark_stack_slots_dynptr
		 * is safe to do directly.
		 */
		if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) {
			if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) {
				verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be released\n");
				return -EFAULT;
			}
			err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
		} else if (meta.ref_obj_id) {
			err = release_reference(env, meta.ref_obj_id);
		} else if (register_is_null(&regs[meta.release_regno])) {
			/* meta.ref_obj_id can only be 0 if register that is meant to be
			 * released is NULL, which must be > R0.
			 */
			err = 0;
		}
		if (err) {
			verbose(env, "func %s#%d reference has not been acquired before\n",
				func_id_name(func_id), func_id);
			return err;
		}
	}

	switch (func_id) {
	case BPF_FUNC_tail_call:
		err = check_reference_leak(env);
		if (err) {
			verbose(env, "tail_call would lead to reference leak\n");
			return err;
		}
		break;
	case BPF_FUNC_get_local_storage:
		/* check that flags argument in get_local_storage(map, flags) is 0,
		 * this is required because get_local_storage() can't return an error.
		 */
		if (!register_is_null(&regs[BPF_REG_2])) {
			verbose(env, "get_local_storage() doesn't support non-zero flags\n");
			return -EINVAL;
		}
		break;
	case BPF_FUNC_for_each_map_elem:
		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
					set_map_elem_callback_state);
		break;
	case BPF_FUNC_timer_set_callback:
		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
					set_timer_callback_state);
		break;
	case BPF_FUNC_find_vma:
		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
					set_find_vma_callback_state);
		break;
	case BPF_FUNC_snprintf:
		err = check_bpf_snprintf_call(env, regs);
		break;
	case BPF_FUNC_loop:
		update_loop_inline_state(env, meta.subprogno);
		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
					set_loop_callback_state);
		break;
	case BPF_FUNC_dynptr_from_mem:
		if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
			verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
				reg_type_str(env, regs[BPF_REG_1].type));
			return -EACCES;
		}
		break;
	case BPF_FUNC_set_retval:
		if (prog_type == BPF_PROG_TYPE_LSM &&
		    env->prog->expected_attach_type == BPF_LSM_CGROUP) {
			if (!env->prog->aux->attach_func_proto->type) {
				/* Make sure programs that attach to void
				 * hooks don't try to modify return value.
				 */
				verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
				return -EINVAL;
			}
		}
		break;
	case BPF_FUNC_dynptr_data:
	{
		struct bpf_reg_state *reg;
		int id, ref_obj_id;

		reg = get_dynptr_arg_reg(env, fn, regs);
		if (!reg)
			return -EFAULT;

		if (meta.dynptr_id) {
			verbose(env, "verifier internal error: meta.dynptr_id already set\n");
			return -EFAULT;
		}
		if (meta.ref_obj_id) {
			verbose(env, "verifier internal error: meta.ref_obj_id already set\n");
			return -EFAULT;
		}

		id = dynptr_id(env, reg);
		if (id < 0) {
			verbose(env, "verifier internal error: failed to obtain dynptr id\n");
			return id;
		}

		ref_obj_id = dynptr_ref_obj_id(env, reg);
		if (ref_obj_id < 0) {
			verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n");
			return ref_obj_id;
		}

		meta.dynptr_id = id;
		meta.ref_obj_id = ref_obj_id;

		break;
	}
	case BPF_FUNC_dynptr_write:
	{
		enum bpf_dynptr_type dynptr_type;
		struct bpf_reg_state *reg;

		reg = get_dynptr_arg_reg(env, fn, regs);
		if (!reg)
			return -EFAULT;

		dynptr_type = dynptr_get_type(env, reg);
		if (dynptr_type == BPF_DYNPTR_TYPE_INVALID)
			return -EFAULT;

		if (dynptr_type == BPF_DYNPTR_TYPE_SKB)
			/* this will trigger clear_all_pkt_pointers(), which will
			 * invalidate all dynptr slices associated with the skb
			 */
			changes_data = true;

		break;
	}
	case BPF_FUNC_user_ringbuf_drain:
		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
					set_user_ringbuf_callback_state);
		break;
	}

	if (err)
		return err;

	/* reset caller saved regs */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(env, regs, caller_saved[i]);
		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
	}

	/* helper call returns 64-bit value. */
	regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;

	/* update return register (already marked as written above) */
	ret_type = fn->ret_type;
	ret_flag = type_flag(ret_type);

	switch (base_type(ret_type)) {
	case RET_INTEGER:
		/* sets type to SCALAR_VALUE */
		mark_reg_unknown(env, regs, BPF_REG_0);
		break;
	case RET_VOID:
		regs[BPF_REG_0].type = NOT_INIT;
		break;
	case RET_PTR_TO_MAP_VALUE:
		/* There is no offset yet applied, variable or fixed */
		mark_reg_known_zero(env, regs, BPF_REG_0);
		/* remember map_ptr, so that check_map_access()
		 * can check 'value_size' boundary of memory access
		 * to map element returned from bpf_map_lookup_elem()
		 */
		if (meta.map_ptr == NULL) {
			verbose(env,
				"kernel subsystem misconfigured verifier\n");
			return -EINVAL;
		}
		regs[BPF_REG_0].map_ptr = meta.map_ptr;
		regs[BPF_REG_0].map_uid = meta.map_uid;
		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
		if (!type_may_be_null(ret_type) &&
		    btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) {
			regs[BPF_REG_0].id = ++env->id_gen;
		}
		break;
	case RET_PTR_TO_SOCKET:
		mark_reg_known_zero(env, regs, BPF_REG_0);
		regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
		break;
	case RET_PTR_TO_SOCK_COMMON:
		mark_reg_known_zero(env, regs, BPF_REG_0);
		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
		break;
	case RET_PTR_TO_TCP_SOCK:
		mark_reg_known_zero(env, regs, BPF_REG_0);
		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
		break;
	case RET_PTR_TO_MEM:
		mark_reg_known_zero(env, regs, BPF_REG_0);
		regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
		regs[BPF_REG_0].mem_size = meta.mem_size;
		break;
	case RET_PTR_TO_MEM_OR_BTF_ID:
	{
		const struct btf_type *t;

		mark_reg_known_zero(env, regs, BPF_REG_0);
		t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
		if (!btf_type_is_struct(t)) {
			u32 tsize;
			const struct btf_type *ret;
			const char *tname;

			/* resolve the type size of ksym. */
			ret = btf_resolve_size(meta.ret_btf, t, &tsize);
			if (IS_ERR(ret)) {
				tname = btf_name_by_offset(meta.ret_btf, t->name_off);
				verbose(env, "unable to resolve the size of type '%s': %ld\n",
					tname, PTR_ERR(ret));
				return -EINVAL;
			}
			regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
			regs[BPF_REG_0].mem_size = tsize;
		} else {
			/* MEM_RDONLY may be carried from ret_flag, but it
			 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise
			 * it will confuse the check of PTR_TO_BTF_ID in
			 * check_mem_access().
			 */
			ret_flag &= ~MEM_RDONLY;

			regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
			regs[BPF_REG_0].btf = meta.ret_btf;
			regs[BPF_REG_0].btf_id = meta.ret_btf_id;
		}
		break;
	}
	case RET_PTR_TO_BTF_ID:
	{
		struct btf *ret_btf;
		int ret_btf_id;

		mark_reg_known_zero(env, regs, BPF_REG_0);
		regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
		if (func_id == BPF_FUNC_kptr_xchg) {
			ret_btf = meta.kptr_field->kptr.btf;
			ret_btf_id = meta.kptr_field->kptr.btf_id;
			if (!btf_is_kernel(ret_btf))
				regs[BPF_REG_0].type |= MEM_ALLOC;
		} else {
			if (fn->ret_btf_id == BPF_PTR_POISON) {
				verbose(env, "verifier internal error:");
				verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n",
					func_id_name(func_id));
				return -EINVAL;
			}
			ret_btf = btf_vmlinux;
			ret_btf_id = *fn->ret_btf_id;
		}
		if (ret_btf_id == 0) {
			verbose(env, "invalid return type %u of func %s#%d\n",
				base_type(ret_type), func_id_name(func_id),
				func_id);
			return -EINVAL;
		}
		regs[BPF_REG_0].btf = ret_btf;
		regs[BPF_REG_0].btf_id = ret_btf_id;
		break;
	}
	default:
		verbose(env, "unknown return type %u of func %s#%d\n",
			base_type(ret_type), func_id_name(func_id), func_id);
		return -EINVAL;
	}

	if (type_may_be_null(regs[BPF_REG_0].type))
		regs[BPF_REG_0].id = ++env->id_gen;

	if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) {
		verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n",
			func_id_name(func_id), func_id);
		return -EFAULT;
	}

	if (is_dynptr_ref_function(func_id))
		regs[BPF_REG_0].dynptr_id = meta.dynptr_id;

	if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) {
		/* For release_reference() */
		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
	} else if (is_acquire_function(func_id, meta.map_ptr)) {
		int id = acquire_reference_state(env, insn_idx);

		if (id < 0)
			return id;
		/* For mark_ptr_or_null_reg() */
		regs[BPF_REG_0].id = id;
		/* For release_reference() */
		regs[BPF_REG_0].ref_obj_id = id;
	}

	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);

	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
	if (err)
		return err;

	if ((func_id == BPF_FUNC_get_stack ||
	     func_id == BPF_FUNC_get_task_stack) &&
	    !env->prog->has_callchain_buf) {
		const char *err_str;

#ifdef CONFIG_PERF_EVENTS
		err = get_callchain_buffers(sysctl_perf_event_max_stack);
		err_str = "cannot get callchain buffer for func %s#%d\n";
#else
		err = -ENOTSUPP;
		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
#endif
		if (err) {
			verbose(env, err_str, func_id_name(func_id), func_id);
			return err;
		}

		env->prog->has_callchain_buf = true;
	}

	if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
		env->prog->call_get_stack = true;

	if (func_id == BPF_FUNC_get_func_ip) {
		if (check_get_func_ip(env))
			return -ENOTSUPP;
		env->prog->call_get_func_ip = true;
	}

	if (changes_data)
		clear_all_pkt_pointers(env);
	return 0;
}

/* mark_btf_func_reg_size() is used when the reg size is determined by
 * the BTF func_proto's return value size and argument.
 */
static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
				   size_t reg_size)
{
	struct bpf_reg_state *reg = &cur_regs(env)[regno];

	if (regno == BPF_REG_0) {
		/* Function return value */
		reg->live |= REG_LIVE_WRITTEN;
		reg->subreg_def = reg_size == sizeof(u64) ?
			DEF_NOT_SUBREG : env->insn_idx + 1;
	} else {
		/* Function argument */
		if (reg_size == sizeof(u64)) {
			mark_insn_zext(env, reg);
			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
		} else {
			mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
		}
	}
}

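/* Thin accessors for the KF_* flags a kfunc was registered with; the flag
 * definitions live in include/linux/btf.h.
 */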
static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_ACQUIRE;
}

static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_RET_NULL;
}

static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_RELEASE;
}

static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_TRUSTED_ARGS;
}

static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_SLEEPABLE;
}

static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_DESTRUCTIVE;
}

static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->kfunc_flags & KF_RCU;
}

static bool is_kfunc_arg_kptr_get(struct bpf_kfunc_call_arg_meta *meta, int arg)
{
	return arg == 0 && (meta->kfunc_flags & KF_KPTR_GET);
}

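/* Kfunc arguments carry semantic annotations as suffixes of their parameter
 * names. For example, a kfunc declared as
 *
 *	void *bpf_foo(void *data, u32 data__sz);
 *
 * (bpf_foo being an illustrative name, not a real kfunc) tells the verifier
 * via the "__sz" suffix that the second argument is the size of the memory
 * the first one points to.
 */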
static bool __kfunc_param_match_suffix(const struct btf *btf,
				       const struct btf_param *arg,
				       const char *suffix)
{
	int suffix_len = strlen(suffix), len;
	const char *param_name;

	/* In the future, this can be ported to use BTF tagging */
	param_name = btf_name_by_offset(btf, arg->name_off);
	if (str_is_empty(param_name))
		return false;
	len = strlen(param_name);
	if (len < suffix_len)
		return false;
	param_name += len - suffix_len;
	return !strncmp(param_name, suffix, suffix_len);
}

static bool is_kfunc_arg_mem_size(const struct btf *btf,
				  const struct btf_param *arg,
				  const struct bpf_reg_state *reg)
{
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, arg->type, NULL);
	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
		return false;

	return __kfunc_param_match_suffix(btf, arg, "__sz");
}

static bool is_kfunc_arg_const_mem_size(const struct btf *btf,
					const struct btf_param *arg,
					const struct bpf_reg_state *reg)
{
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, arg->type, NULL);
	if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE)
		return false;

	return __kfunc_param_match_suffix(btf, arg, "__szk");
}

static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg)
{
	return __kfunc_param_match_suffix(btf, arg, "__k");
}

static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg)
{
	return __kfunc_param_match_suffix(btf, arg, "__ign");
}

static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg)
{
	return __kfunc_param_match_suffix(btf, arg, "__alloc");
}

static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg)
{
	return __kfunc_param_match_suffix(btf, arg, "__uninit");
}

static bool is_kfunc_arg_scalar_with_name(const struct btf *btf,
					  const struct btf_param *arg,
					  const char *name)
{
	int len, target_len = strlen(name);
	const char *param_name;

	param_name = btf_name_by_offset(btf, arg->name_off);
	if (str_is_empty(param_name))
		return false;
	len = strlen(param_name);
	if (len != target_len)
		return false;
	if (strcmp(param_name, name))
		return false;

	return true;
}

enum {
	KF_ARG_DYNPTR_ID,
	KF_ARG_LIST_HEAD_ID,
	KF_ARG_LIST_NODE_ID,
	KF_ARG_RB_ROOT_ID,
	KF_ARG_RB_NODE_ID,
};

BTF_ID_LIST(kf_arg_btf_ids)
BTF_ID(struct, bpf_dynptr_kern)
BTF_ID(struct, bpf_list_head)
BTF_ID(struct, bpf_list_node)
BTF_ID(struct, bpf_rb_root)
BTF_ID(struct, bpf_rb_node)

static bool __is_kfunc_ptr_arg_type(const struct btf *btf,
				    const struct btf_param *arg, int type)
{
	const struct btf_type *t;
	u32 res_id;

	t = btf_type_skip_modifiers(btf, arg->type, NULL);
	if (!t)
		return false;
	if (!btf_type_is_ptr(t))
		return false;
	t = btf_type_skip_modifiers(btf, t->type, &res_id);
	if (!t)
		return false;
	return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]);
}

static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg)
{
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID);
}

static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg)
{
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID);
}

static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg)
{
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID);
}

static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg)
{
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID);
}

static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg)
{
	return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID);
}

static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf,
				  const struct btf_param *arg)
{
	const struct btf_type *t;

	t = btf_type_resolve_func_ptr(btf, arg->type, NULL);
	if (!t)
		return false;

	return true;
}

/* Returns true if struct is composed of scalars, 4 levels of nesting allowed */
static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env,
					const struct btf *btf,
					const struct btf_type *t, int rec)
{
	const struct btf_type *member_type;
	const struct btf_member *member;
	u32 i;

	if (!btf_type_is_struct(t))
		return false;

	for_each_member(i, t, member) {
		const struct btf_array *array;

		member_type = btf_type_skip_modifiers(btf, member->type, NULL);
		if (btf_type_is_struct(member_type)) {
			if (rec >= 3) {
				verbose(env, "max struct nesting depth exceeded\n");
				return false;
			}
			if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1))
				return false;
			continue;
		}
		if (btf_type_is_array(member_type)) {
			array = btf_array(member_type);
			if (!array->nelems)
				return false;
			member_type = btf_type_skip_modifiers(btf, array->type, NULL);
			if (!btf_type_is_scalar(member_type))
				return false;
			continue;
		}
		if (!btf_type_is_scalar(member_type))
			return false;
	}
	return true;
}

static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {
#ifdef CONFIG_NET
	[PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],
	[PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	[PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
#endif
};

enum kfunc_ptr_arg_type {
	KF_ARG_PTR_TO_CTX,
	KF_ARG_PTR_TO_ALLOC_BTF_ID,  /* Allocated object */
	KF_ARG_PTR_TO_KPTR,	     /* PTR_TO_KPTR but type specific */
	KF_ARG_PTR_TO_DYNPTR,
	KF_ARG_PTR_TO_ITER,
	KF_ARG_PTR_TO_LIST_HEAD,
	KF_ARG_PTR_TO_LIST_NODE,
	KF_ARG_PTR_TO_BTF_ID,	     /* Also covers reg2btf_ids conversions */
	KF_ARG_PTR_TO_MEM,
	KF_ARG_PTR_TO_MEM_SIZE,	     /* Size derived from next argument, skip it */
	KF_ARG_PTR_TO_CALLBACK,
	KF_ARG_PTR_TO_RB_ROOT,
	KF_ARG_PTR_TO_RB_NODE,
};

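/* The enumerators below index into special_kfunc_list, so the BTF_ID_LIST
 * entries must be kept in the same order as the enum.
 */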
ac9f0605
KKD
9533enum special_kfunc_type {
9534 KF_bpf_obj_new_impl,
9535 KF_bpf_obj_drop_impl,
8cab76ec
KKD
9536 KF_bpf_list_push_front,
9537 KF_bpf_list_push_back,
9538 KF_bpf_list_pop_front,
9539 KF_bpf_list_pop_back,
fd264ca0 9540 KF_bpf_cast_to_kern_ctx,
a35b9af4 9541 KF_bpf_rdonly_cast,
9bb00b28
YS
9542 KF_bpf_rcu_read_lock,
9543 KF_bpf_rcu_read_unlock,
bd1279ae
DM
9544 KF_bpf_rbtree_remove,
9545 KF_bpf_rbtree_add,
9546 KF_bpf_rbtree_first,
b5964b96 9547 KF_bpf_dynptr_from_skb,
05421aec 9548 KF_bpf_dynptr_from_xdp,
66e3a13e
JK
9549 KF_bpf_dynptr_slice,
9550 KF_bpf_dynptr_slice_rdwr,
ac9f0605
KKD
9551};

BTF_SET_START(special_kfunc_set)
BTF_ID(func, bpf_obj_new_impl)
BTF_ID(func, bpf_obj_drop_impl)
BTF_ID(func, bpf_list_push_front)
BTF_ID(func, bpf_list_push_back)
BTF_ID(func, bpf_list_pop_front)
BTF_ID(func, bpf_list_pop_back)
BTF_ID(func, bpf_cast_to_kern_ctx)
BTF_ID(func, bpf_rdonly_cast)
BTF_ID(func, bpf_rbtree_remove)
BTF_ID(func, bpf_rbtree_add)
BTF_ID(func, bpf_rbtree_first)
BTF_ID(func, bpf_dynptr_from_skb)
BTF_ID(func, bpf_dynptr_from_xdp)
BTF_ID(func, bpf_dynptr_slice)
BTF_ID(func, bpf_dynptr_slice_rdwr)
BTF_SET_END(special_kfunc_set)

BTF_ID_LIST(special_kfunc_list)
BTF_ID(func, bpf_obj_new_impl)
BTF_ID(func, bpf_obj_drop_impl)
BTF_ID(func, bpf_list_push_front)
BTF_ID(func, bpf_list_push_back)
BTF_ID(func, bpf_list_pop_front)
BTF_ID(func, bpf_list_pop_back)
BTF_ID(func, bpf_cast_to_kern_ctx)
BTF_ID(func, bpf_rdonly_cast)
BTF_ID(func, bpf_rcu_read_lock)
BTF_ID(func, bpf_rcu_read_unlock)
BTF_ID(func, bpf_rbtree_remove)
BTF_ID(func, bpf_rbtree_add)
BTF_ID(func, bpf_rbtree_first)
BTF_ID(func, bpf_dynptr_from_skb)
BTF_ID(func, bpf_dynptr_from_xdp)
BTF_ID(func, bpf_dynptr_slice)
BTF_ID(func, bpf_dynptr_slice_rdwr)

static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock];
}

static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta)
{
	return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock];
}
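
/* Illustrative example (assumption: BPF-program-side usage, not verifier
 * code): the pair of kfuncs recognized above delimits an RCU read-side
 * critical section in a sleepable BPF program, e.g.:
 *
 *	bpf_rcu_read_lock();
 *	p = task->real_parent;	// p is MEM_RCU inside the section
 *	... read-only use of p ...
 *	bpf_rcu_read_unlock();	// p degrades to PTR_UNTRUSTED afterwards
 *
 * The active_rcu_lock handling in check_kfunc_call() below enforces that
 * such sections do not nest and that no sleepable kfunc is called inside.
 */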

static enum kfunc_ptr_arg_type
get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
		       struct bpf_kfunc_call_arg_meta *meta,
		       const struct btf_type *t, const struct btf_type *ref_t,
		       const char *ref_tname, const struct btf_param *args,
		       int argno, int nargs)
{
	u32 regno = argno + 1;
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = &regs[regno];
	bool arg_mem_size = false;

	if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx])
		return KF_ARG_PTR_TO_CTX;

	/* In this function, we verify the kfunc's BTF as per the argument type,
	 * leaving the rest of the verification with respect to the register
	 * type to our caller. When a set of conditions hold in the BTF type of
	 * arguments, we resolve it to a known kfunc_ptr_arg_type.
	 */
	if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno))
		return KF_ARG_PTR_TO_CTX;

	if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_ALLOC_BTF_ID;

	if (is_kfunc_arg_kptr_get(meta, argno)) {
		if (!btf_type_is_ptr(ref_t)) {
			verbose(env, "arg#0 BTF type must be a double pointer for kptr_get kfunc\n");
			return -EINVAL;
		}
		ref_t = btf_type_by_id(meta->btf, ref_t->type);
		ref_tname = btf_name_by_offset(meta->btf, ref_t->name_off);
		if (!btf_type_is_struct(ref_t)) {
			verbose(env, "kernel function %s args#0 pointer type %s %s is not supported\n",
				meta->func_name, btf_type_str(ref_t), ref_tname);
			return -EINVAL;
		}
		return KF_ARG_PTR_TO_KPTR;
	}

	if (is_kfunc_arg_dynptr(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_DYNPTR;

	if (is_kfunc_arg_iter(meta, argno))
		return KF_ARG_PTR_TO_ITER;

	if (is_kfunc_arg_list_head(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_LIST_HEAD;

	if (is_kfunc_arg_list_node(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_LIST_NODE;

	if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_RB_ROOT;

	if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_RB_NODE;

	if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) {
		if (!btf_type_is_struct(ref_t)) {
			verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n",
				meta->func_name, argno, btf_type_str(ref_t), ref_tname);
			return -EINVAL;
		}
		return KF_ARG_PTR_TO_BTF_ID;
	}

	if (is_kfunc_arg_callback(env, meta->btf, &args[argno]))
		return KF_ARG_PTR_TO_CALLBACK;

	if (argno + 1 < nargs &&
	    (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) ||
	     is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1])))
		arg_mem_size = true;

	/* This is the catch-all argument type of register types supported by
	 * check_helper_mem_access. However, we only allow it when the argument
	 * type is a pointer to scalar, or a struct composed (recursively) of
	 * scalars. When arg_mem_size is true, the pointer can be void *.
	 */
	if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) &&
	    (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) {
		verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n",
			argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : "");
		return -EINVAL;
	}
	return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM;
}

static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
					struct bpf_reg_state *reg,
					const struct btf_type *ref_t,
					const char *ref_tname, u32 ref_id,
					struct bpf_kfunc_call_arg_meta *meta,
					int argno)
{
	const struct btf_type *reg_ref_t;
	bool strict_type_match = false;
	const struct btf *reg_btf;
	const char *reg_ref_tname;
	u32 reg_ref_id;

	if (base_type(reg->type) == PTR_TO_BTF_ID) {
		reg_btf = reg->btf;
		reg_ref_id = reg->btf_id;
	} else {
		reg_btf = btf_vmlinux;
		reg_ref_id = *reg2btf_ids[base_type(reg->type)];
	}

	/* Enforce strict type matching for calls to kfuncs that are acquiring
	 * or releasing a reference, or are no-cast aliases. We do _not_
	 * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default,
	 * as we want to enable BPF programs to pass types that are bitwise
	 * equivalent without forcing them to explicitly cast with something
	 * like bpf_cast_to_kern_ctx().
	 *
	 * For example, say we had a type like the following:
	 *
	 * struct bpf_cpumask {
	 *	cpumask_t cpumask;
	 *	refcount_t usage;
	 * };
	 *
	 * Note that as specified in <linux/cpumask.h>, cpumask_t is typedef'ed
	 * to a struct cpumask, so it would be safe to pass a struct
	 * bpf_cpumask * to a kfunc expecting a struct cpumask *.
	 *
	 * The philosophy here is similar to how we allow scalars of different
	 * types to be passed to kfuncs as long as the size is the same. The
	 * only difference here is that we're simply allowing
	 * btf_struct_ids_match() to walk the struct at the 0th offset, and
	 * resolve types.
	 */
	if (is_kfunc_acquire(meta) ||
	    (is_kfunc_release(meta) && reg->ref_obj_id) ||
	    btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
		strict_type_match = true;

	WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off);

	reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
	reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
	if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) {
		verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n",
			meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1,
			btf_type_str(reg_ref_t), reg_ref_tname);
		return -EINVAL;
	}
	return 0;
}

static int process_kf_arg_ptr_to_kptr(struct bpf_verifier_env *env,
				      struct bpf_reg_state *reg,
				      const struct btf_type *ref_t,
				      const char *ref_tname,
				      struct bpf_kfunc_call_arg_meta *meta,
				      int argno)
{
	struct btf_field *kptr_field;

	/* check_func_arg_reg_off allows var_off for
	 * PTR_TO_MAP_VALUE, but we need fixed offset to find
	 * off_desc.
	 */
	if (!tnum_is_const(reg->var_off)) {
		verbose(env, "arg#0 must have constant offset\n");
		return -EINVAL;
	}

	kptr_field = btf_record_find(reg->map_ptr->record, reg->off + reg->var_off.value, BPF_KPTR);
	if (!kptr_field || kptr_field->type != BPF_KPTR_REF) {
		verbose(env, "arg#0 no referenced kptr at map value offset=%llu\n",
			reg->off + reg->var_off.value);
		return -EINVAL;
	}

	if (!btf_struct_ids_match(&env->log, meta->btf, ref_t->type, 0, kptr_field->kptr.btf,
				  kptr_field->kptr.btf_id, true)) {
		verbose(env, "kernel function %s args#%d expected pointer to %s %s\n",
			meta->func_name, argno, btf_type_str(ref_t), ref_tname);
		return -EINVAL;
	}
	return 0;
}
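
/* Illustrative example (assumption: BPF-program-side declaration, not
 * verifier code): a referenced kptr field that btf_record_find() above would
 * locate is declared in a map value along the lines of:
 *
 *	struct map_value {
 *		struct some_kernel_type __kptr *ptr;
 *	};
 *
 * A kptr_get-style kfunc then takes &v->ptr, i.e. a pointer into the map
 * value at the field's constant offset, as its first argument.
 */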

static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *state = env->cur_state;

	if (!state->active_lock.ptr) {
		verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n");
		return -EFAULT;
	}

	if (type_flag(reg->type) & NON_OWN_REF) {
		verbose(env, "verifier internal error: NON_OWN_REF already set\n");
		return -EFAULT;
	}

	reg->type |= NON_OWN_REF;
	return 0;
}

static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id)
{
	struct bpf_func_state *state, *unused;
	struct bpf_reg_state *reg;
	int i;

	state = cur_func(env);

	if (!ref_obj_id) {
		verbose(env, "verifier internal error: ref_obj_id is zero for "
			     "owning -> non-owning conversion\n");
		return -EFAULT;
	}

	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id != ref_obj_id)
			continue;

		/* Clear ref_obj_id here so release_reference doesn't clobber
		 * the whole reg
		 */
		bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
			if (reg->ref_obj_id == ref_obj_id) {
				reg->ref_obj_id = 0;
				ref_set_non_owning(env, reg);
			}
		}));
		return 0;
	}

	verbose(env, "verifier internal error: ref state missing for ref_obj_id\n");
	return -EFAULT;
}

/* Implementation details:
 *
 * Each register points to some region of memory, which we define as an
 * allocation. Each allocation may embed a bpf_spin_lock which protects any
 * special BPF objects (bpf_list_head, bpf_rb_root, etc.) part of the same
 * allocation. The lock and the data it protects are colocated in the same
 * memory region.
 *
 * Hence, every time a register holds a pointer value pointing to such an
 * allocation, the verifier preserves a unique reg->id for it.
 *
 * The verifier remembers the lock 'ptr' and the lock 'id' whenever
 * bpf_spin_lock is called.
 *
 * To enable this, lock state in the verifier captures two values:
 *	active_lock.ptr = Register's type specific pointer
 *	active_lock.id  = A unique ID for each register pointer value
 *
 * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two
 * supported register types.
 *
 * The active_lock.ptr in case of map values is the reg->map_ptr, and in case
 * of allocated objects it is the reg->btf pointer.
 *
 * The active_lock.id is non-unique for maps supporting direct_value_addr, as
 * we can establish the provenance of the map value statically for each
 * distinct lookup into such maps. They always contain a single map value,
 * hence assigning a unique ID to each pseudo load would pessimize the
 * algorithm and reject valid programs.
 *
 * So, in case of global variables, they use array maps with max_entries = 1,
 * hence their active_lock.ptr becomes map_ptr and id = 0 (since they all
 * point into the same map value, as max_entries is 1, as described above).
 *
 * In case of inner map lookups, the inner map pointer has same map_ptr as the
 * outer map pointer (in verifier context), but each lookup into an inner map
 * assigns a fresh reg->id to the lookup, so while lookups into distinct inner
 * maps from the same outer map share the same map_ptr as active_lock.ptr, they
 * will get different reg->id assigned to each lookup, hence different
 * active_lock.id.
 *
 * In case of allocated objects, active_lock.ptr is the reg->btf, and the
 * reg->id is a unique ID preserved after the NULL pointer check on the pointer
 * returned from bpf_obj_new. Each allocation receives a new reg->id.
 */
static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	void *ptr;
	u32 id;

	switch ((int)reg->type) {
	case PTR_TO_MAP_VALUE:
		ptr = reg->map_ptr;
		break;
	case PTR_TO_BTF_ID | MEM_ALLOC:
		ptr = reg->btf;
		break;
	default:
		verbose(env, "verifier internal error: unknown reg type for lock check\n");
		return -EFAULT;
	}
	id = reg->id;

	if (!env->cur_state->active_lock.ptr)
		return -EINVAL;
	if (env->cur_state->active_lock.ptr != ptr ||
	    env->cur_state->active_lock.id != id) {
		verbose(env, "held lock and object are not in the same allocation\n");
		return -EINVAL;
	}
	return 0;
}
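
/* Illustrative example (assumption: BPF-program-side code using the
 * __contains annotation as in the BPF selftests, not verifier code): the
 * "same allocation" rule enforced above corresponds to a layout where the
 * lock and the graph root share a map value or allocated object:
 *
 *	struct elem {
 *		struct bpf_spin_lock lock;
 *		struct bpf_list_head head __contains(foo, node);
 *	};
 *
 *	bpf_spin_lock(&e->lock);
 *	bpf_list_push_front(&e->head, &f->node);  // same allocation: OK
 *	bpf_spin_unlock(&e->lock);
 *
 * Pushing to a list head living in a different allocation than the held
 * lock fails with "held lock and object are not in the same allocation".
 */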

static bool is_bpf_list_api_kfunc(u32 btf_id)
{
	return btf_id == special_kfunc_list[KF_bpf_list_push_front] ||
	       btf_id == special_kfunc_list[KF_bpf_list_push_back] ||
	       btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
	       btf_id == special_kfunc_list[KF_bpf_list_pop_back];
}

static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
{
	return btf_id == special_kfunc_list[KF_bpf_rbtree_add] ||
	       btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
	       btf_id == special_kfunc_list[KF_bpf_rbtree_first];
}

static bool is_bpf_graph_api_kfunc(u32 btf_id)
{
	return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id);
}

static bool is_callback_calling_kfunc(u32 btf_id)
{
	return btf_id == special_kfunc_list[KF_bpf_rbtree_add];
}

static bool is_rbtree_lock_required_kfunc(u32 btf_id)
{
	return is_bpf_rbtree_api_kfunc(btf_id);
}

static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env,
					  enum btf_field_type head_field_type,
					  u32 kfunc_btf_id)
{
	bool ret;

	switch (head_field_type) {
	case BPF_LIST_HEAD:
		ret = is_bpf_list_api_kfunc(kfunc_btf_id);
		break;
	case BPF_RB_ROOT:
		ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id);
		break;
	default:
		verbose(env, "verifier internal error: unexpected graph root argument type %s\n",
			btf_field_type_name(head_field_type));
		return false;
	}

	if (!ret)
		verbose(env, "verifier internal error: %s head arg for unknown kfunc\n",
			btf_field_type_name(head_field_type));
	return ret;
}

static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
					  enum btf_field_type node_field_type,
					  u32 kfunc_btf_id)
{
	bool ret;

	switch (node_field_type) {
	case BPF_LIST_NODE:
		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front] ||
		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back]);
		break;
	case BPF_RB_NODE:
		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
		       kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add]);
		break;
	default:
		verbose(env, "verifier internal error: unexpected graph node argument type %s\n",
			btf_field_type_name(node_field_type));
		return false;
	}

	if (!ret)
		verbose(env, "verifier internal error: %s node arg for unknown kfunc\n",
			btf_field_type_name(node_field_type));
	return ret;
}

static int
__process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg, u32 regno,
				   struct bpf_kfunc_call_arg_meta *meta,
				   enum btf_field_type head_field_type,
				   struct btf_field **head_field)
{
	const char *head_type_name;
	struct btf_field *field;
	struct btf_record *rec;
	u32 head_off;

	if (meta->btf != btf_vmlinux) {
		verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
		return -EFAULT;
	}

	if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id))
		return -EFAULT;

	head_type_name = btf_field_type_name(head_field_type);
	if (!tnum_is_const(reg->var_off)) {
		verbose(env,
			"R%d doesn't have constant offset. %s has to be at the constant offset\n",
			regno, head_type_name);
		return -EINVAL;
	}

	rec = reg_btf_record(reg);
	head_off = reg->off + reg->var_off.value;
	field = btf_record_find(rec, head_off, head_field_type);
	if (!field) {
		verbose(env, "%s not found at offset=%u\n", head_type_name, head_off);
		return -EINVAL;
	}

	/* All functions require bpf_list_head to be protected using a bpf_spin_lock */
	if (check_reg_allocation_locked(env, reg)) {
		verbose(env, "bpf_spin_lock at off=%d must be held for %s\n",
			rec->spin_lock_off, head_type_name);
		return -EINVAL;
	}

	if (*head_field) {
		verbose(env, "verifier internal error: repeating %s arg\n", head_type_name);
		return -EFAULT;
	}
	*head_field = field;
	return 0;
}

static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env,
					   struct bpf_reg_state *reg, u32 regno,
					   struct bpf_kfunc_call_arg_meta *meta)
{
	return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD,
						  &meta->arg_list_head.field);
}

static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env,
					     struct bpf_reg_state *reg, u32 regno,
					     struct bpf_kfunc_call_arg_meta *meta)
{
	return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT,
						  &meta->arg_rbtree_root.field);
}

static int
__process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg, u32 regno,
				   struct bpf_kfunc_call_arg_meta *meta,
				   enum btf_field_type head_field_type,
				   enum btf_field_type node_field_type,
				   struct btf_field **node_field)
{
	const char *node_type_name;
	const struct btf_type *et, *t;
	struct btf_field *field;
	u32 node_off;

	if (meta->btf != btf_vmlinux) {
		verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n");
		return -EFAULT;
	}

	if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id))
		return -EFAULT;

	node_type_name = btf_field_type_name(node_field_type);
	if (!tnum_is_const(reg->var_off)) {
		verbose(env,
			"R%d doesn't have constant offset. %s has to be at the constant offset\n",
			regno, node_type_name);
		return -EINVAL;
	}

	node_off = reg->off + reg->var_off.value;
	field = reg_find_field_offset(reg, node_off, node_field_type);
	if (!field || field->offset != node_off) {
		verbose(env, "%s not found at offset=%u\n", node_type_name, node_off);
		return -EINVAL;
	}

	field = *node_field;

	et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id);
	t = btf_type_by_id(reg->btf, reg->btf_id);
	if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf,
				  field->graph_root.value_btf_id, true)) {
		verbose(env, "operation on %s expects arg#1 %s at offset=%d "
			"in struct %s, but arg is at offset=%d in struct %s\n",
			btf_field_type_name(head_field_type),
			btf_field_type_name(node_field_type),
			field->graph_root.node_offset,
			btf_name_by_offset(field->graph_root.btf, et->name_off),
			node_off, btf_name_by_offset(reg->btf, t->name_off));
		return -EINVAL;
	}

	if (node_off != field->graph_root.node_offset) {
		verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n",
			node_off, btf_field_type_name(node_field_type),
			field->graph_root.node_offset,
			btf_name_by_offset(field->graph_root.btf, et->name_off));
		return -EINVAL;
	}

	return 0;
}

static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env,
					   struct bpf_reg_state *reg, u32 regno,
					   struct bpf_kfunc_call_arg_meta *meta)
{
	return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
						  BPF_LIST_HEAD, BPF_LIST_NODE,
						  &meta->arg_list_head.field);
}

static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
					     struct bpf_reg_state *reg, u32 regno,
					     struct bpf_kfunc_call_arg_meta *meta)
{
	return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta,
						  BPF_RB_ROOT, BPF_RB_NODE,
						  &meta->arg_rbtree_root.field);
}
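
/* Illustrative example (assumption: BPF-program-side types using the
 * __contains annotation as in the BPF selftests, not verifier code): the
 * node-offset check above ties a pushed node to the exact field declared in
 * the head's annotation. Given
 *
 *	struct foo {
 *		struct bpf_list_node node;	// offset 0
 *		struct bpf_list_node node2;	// offset 16 on 64-bit
 *	};
 *	struct bpf_list_head head __contains(foo, node2);
 *
 * bpf_list_push_front(&head, &f->node2) is accepted, while passing &f->node
 * fails the node_off != field->graph_root.node_offset comparison.
 */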

static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta,
			    int insn_idx)
{
	const char *func_name = meta->func_name, *ref_tname;
	const struct btf *btf = meta->btf;
	const struct btf_param *args;
	u32 i, nargs;
	int ret;

	args = (const struct btf_param *)(meta->func_proto + 1);
	nargs = btf_type_vlen(meta->func_proto);
	if (nargs > MAX_BPF_FUNC_REG_ARGS) {
		verbose(env, "Function %s has %d > %d args\n", func_name, nargs,
			MAX_BPF_FUNC_REG_ARGS);
		return -EINVAL;
	}

	/* Check that BTF function arguments match actual types that the
	 * verifier sees.
	 */
	for (i = 0; i < nargs; i++) {
		struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1];
		const struct btf_type *t, *ref_t, *resolve_ret;
		enum bpf_arg_type arg_type = ARG_DONTCARE;
		u32 regno = i + 1, ref_id, type_size;
		bool is_ret_buf_sz = false;
		int kf_arg_type;

		t = btf_type_skip_modifiers(btf, args[i].type, NULL);

		if (is_kfunc_arg_ignore(btf, &args[i]))
			continue;

		if (btf_type_is_scalar(t)) {
			if (reg->type != SCALAR_VALUE) {
				verbose(env, "R%d is not a scalar\n", regno);
				return -EINVAL;
			}

			if (is_kfunc_arg_constant(meta->btf, &args[i])) {
				if (meta->arg_constant.found) {
					verbose(env, "verifier internal error: only one constant argument permitted\n");
					return -EFAULT;
				}
				if (!tnum_is_const(reg->var_off)) {
					verbose(env, "R%d must be a known constant\n", regno);
					return -EINVAL;
				}
				ret = mark_chain_precision(env, regno);
				if (ret < 0)
					return ret;
				meta->arg_constant.found = true;
				meta->arg_constant.value = reg->var_off.value;
			} else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdonly_buf_size")) {
				meta->r0_rdonly = true;
				is_ret_buf_sz = true;
			} else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdwr_buf_size")) {
				is_ret_buf_sz = true;
			}

			if (is_ret_buf_sz) {
				if (meta->r0_size) {
					verbose(env, "2 or more rdonly/rdwr_buf_size parameters for kfunc");
					return -EINVAL;
				}

				if (!tnum_is_const(reg->var_off)) {
					verbose(env, "R%d is not a const\n", regno);
					return -EINVAL;
				}

				meta->r0_size = reg->var_off.value;
				ret = mark_chain_precision(env, regno);
				if (ret)
					return ret;
			}
			continue;
		}

		if (!btf_type_is_ptr(t)) {
			verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t));
			return -EINVAL;
		}

		if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) &&
		    (register_is_null(reg) || type_may_be_null(reg->type))) {
			verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
			return -EACCES;
		}

		if (reg->ref_obj_id) {
			if (is_kfunc_release(meta) && meta->ref_obj_id) {
				verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
					regno, reg->ref_obj_id,
					meta->ref_obj_id);
				return -EFAULT;
			}
			meta->ref_obj_id = reg->ref_obj_id;
			if (is_kfunc_release(meta))
				meta->release_regno = regno;
		}

		ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id);
		ref_tname = btf_name_by_offset(btf, ref_t->name_off);

		kf_arg_type = get_kfunc_ptr_arg_type(env, meta, t, ref_t, ref_tname, args, i, nargs);
		if (kf_arg_type < 0)
			return kf_arg_type;

		switch (kf_arg_type) {
		case KF_ARG_PTR_TO_ALLOC_BTF_ID:
		case KF_ARG_PTR_TO_BTF_ID:
			if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta))
				break;

			if (!is_trusted_reg(reg)) {
				if (!is_kfunc_rcu(meta)) {
					verbose(env, "R%d must be referenced or trusted\n", regno);
					return -EINVAL;
				}
				if (!is_rcu_reg(reg)) {
					verbose(env, "R%d must be a rcu pointer\n", regno);
					return -EINVAL;
				}
			}

			fallthrough;
		case KF_ARG_PTR_TO_CTX:
			/* Trusted arguments have the same offset checks as release arguments */
			arg_type |= OBJ_RELEASE;
			break;
		case KF_ARG_PTR_TO_KPTR:
		case KF_ARG_PTR_TO_DYNPTR:
		case KF_ARG_PTR_TO_ITER:
		case KF_ARG_PTR_TO_LIST_HEAD:
		case KF_ARG_PTR_TO_LIST_NODE:
		case KF_ARG_PTR_TO_RB_ROOT:
		case KF_ARG_PTR_TO_RB_NODE:
		case KF_ARG_PTR_TO_MEM:
		case KF_ARG_PTR_TO_MEM_SIZE:
		case KF_ARG_PTR_TO_CALLBACK:
			/* Trusted by default */
			break;
		default:
			WARN_ON_ONCE(1);
			return -EFAULT;
		}

		if (is_kfunc_release(meta) && reg->ref_obj_id)
			arg_type |= OBJ_RELEASE;
		ret = check_func_arg_reg_off(env, reg, regno, arg_type);
		if (ret < 0)
			return ret;

		switch (kf_arg_type) {
		case KF_ARG_PTR_TO_CTX:
			if (reg->type != PTR_TO_CTX) {
				verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t));
				return -EINVAL;
			}

			if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
				ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog));
				if (ret < 0)
					return -EINVAL;
				meta->ret_btf_id = ret;
			}
			break;
		case KF_ARG_PTR_TO_ALLOC_BTF_ID:
			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
				verbose(env, "arg#%d expected pointer to allocated object\n", i);
				return -EINVAL;
			}
			if (!reg->ref_obj_id) {
				verbose(env, "allocated object must be referenced\n");
				return -EINVAL;
			}
			if (meta->btf == btf_vmlinux &&
			    meta->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
				meta->arg_obj_drop.btf = reg->btf;
				meta->arg_obj_drop.btf_id = reg->btf_id;
			}
			break;
		case KF_ARG_PTR_TO_KPTR:
			if (reg->type != PTR_TO_MAP_VALUE) {
				verbose(env, "arg#0 expected pointer to map value\n");
				return -EINVAL;
			}
			ret = process_kf_arg_ptr_to_kptr(env, reg, ref_t, ref_tname, meta, i);
			if (ret < 0)
				return ret;
			break;
		case KF_ARG_PTR_TO_DYNPTR:
		{
			enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR;

			if (reg->type != PTR_TO_STACK &&
			    reg->type != CONST_PTR_TO_DYNPTR) {
				verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i);
				return -EINVAL;
			}

			if (reg->type == CONST_PTR_TO_DYNPTR)
				dynptr_arg_type |= MEM_RDONLY;

			if (is_kfunc_arg_uninit(btf, &args[i]))
				dynptr_arg_type |= MEM_UNINIT;

			if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb])
				dynptr_arg_type |= DYNPTR_TYPE_SKB;
			else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp])
				dynptr_arg_type |= DYNPTR_TYPE_XDP;

			ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type);
			if (ret < 0)
				return ret;

			if (!(dynptr_arg_type & MEM_UNINIT)) {
				int id = dynptr_id(env, reg);

				if (id < 0) {
					verbose(env, "verifier internal error: failed to obtain dynptr id\n");
					return id;
				}
				meta->initialized_dynptr.id = id;
				meta->initialized_dynptr.type = dynptr_get_type(env, reg);
			}

			break;
		}
		case KF_ARG_PTR_TO_ITER:
			ret = process_iter_arg(env, regno, insn_idx, meta);
			if (ret < 0)
				return ret;
			break;
		case KF_ARG_PTR_TO_LIST_HEAD:
			if (reg->type != PTR_TO_MAP_VALUE &&
			    reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
				verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
				return -EINVAL;
			}
			if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
				verbose(env, "allocated object must be referenced\n");
				return -EINVAL;
			}
			ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta);
			if (ret < 0)
				return ret;
			break;
		case KF_ARG_PTR_TO_RB_ROOT:
			if (reg->type != PTR_TO_MAP_VALUE &&
			    reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
				verbose(env, "arg#%d expected pointer to map value or allocated object\n", i);
				return -EINVAL;
			}
			if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) {
				verbose(env, "allocated object must be referenced\n");
				return -EINVAL;
			}
			ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta);
			if (ret < 0)
				return ret;
			break;
		case KF_ARG_PTR_TO_LIST_NODE:
			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
				verbose(env, "arg#%d expected pointer to allocated object\n", i);
				return -EINVAL;
			}
			if (!reg->ref_obj_id) {
				verbose(env, "allocated object must be referenced\n");
				return -EINVAL;
			}
			ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta);
			if (ret < 0)
				return ret;
			break;
		case KF_ARG_PTR_TO_RB_NODE:
			if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) {
				if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) {
					verbose(env, "rbtree_remove node input must be non-owning ref\n");
					return -EINVAL;
				}
				if (in_rbtree_lock_required_cb(env)) {
					verbose(env, "rbtree_remove not allowed in rbtree cb\n");
					return -EINVAL;
				}
			} else {
				if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
					verbose(env, "arg#%d expected pointer to allocated object\n", i);
					return -EINVAL;
				}
				if (!reg->ref_obj_id) {
					verbose(env, "allocated object must be referenced\n");
					return -EINVAL;
				}
			}

			ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta);
			if (ret < 0)
				return ret;
			break;
		case KF_ARG_PTR_TO_BTF_ID:
			/* Only base_type is checked, further checks are done here */
			if ((base_type(reg->type) != PTR_TO_BTF_ID ||
			     (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) &&
			    !reg2btf_ids[base_type(reg->type)]) {
				verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type));
				verbose(env, "expected %s or socket\n",
					reg_type_str(env, base_type(reg->type) |
							  (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
				return -EINVAL;
			}
			ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i);
			if (ret < 0)
				return ret;
			break;
		case KF_ARG_PTR_TO_MEM:
			resolve_ret = btf_resolve_size(btf, ref_t, &type_size);
			if (IS_ERR(resolve_ret)) {
				verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n",
					i, btf_type_str(ref_t), ref_tname, PTR_ERR(resolve_ret));
				return -EINVAL;
			}
			ret = check_mem_reg(env, reg, regno, type_size);
			if (ret < 0)
				return ret;
			break;
		case KF_ARG_PTR_TO_MEM_SIZE:
		{
			struct bpf_reg_state *size_reg = &regs[regno + 1];
			const struct btf_param *size_arg = &args[i + 1];

			ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1);
			if (ret < 0) {
				verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1);
				return ret;
			}

			if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) {
				if (meta->arg_constant.found) {
					verbose(env, "verifier internal error: only one constant argument permitted\n");
					return -EFAULT;
				}
				if (!tnum_is_const(size_reg->var_off)) {
					verbose(env, "R%d must be a known constant\n", regno + 1);
					return -EINVAL;
				}
				meta->arg_constant.found = true;
				meta->arg_constant.value = size_reg->var_off.value;
			}

			/* Skip next '__sz' or '__szk' argument */
			i++;
			break;
		}
		case KF_ARG_PTR_TO_CALLBACK:
			meta->subprogno = reg->subprogno;
			break;
		}
	}

	if (is_kfunc_release(meta) && !meta->release_regno) {
		verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n",
			func_name);
		return -EINVAL;
	}

	return 0;
}
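
/* Illustrative example (assumption: hypothetical kfunc shown only to
 * demonstrate the naming convention, not a real kernel function): the
 * KF_ARG_PTR_TO_MEM_SIZE pairing handled above corresponds to a kfunc whose
 * size parameter carries the __sz (or, for constant sizes, __szk) suffix:
 *
 *	__bpf_kfunc int bpf_copy_thing(void *data, u32 data__sz);
 *
 * The verifier checks that data points to data__sz accessible bytes and then
 * skips the size argument itself (the "i++" in the switch above).
 */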

static int fetch_kfunc_meta(struct bpf_verifier_env *env,
			    struct bpf_insn *insn,
			    struct bpf_kfunc_call_arg_meta *meta,
			    const char **kfunc_name)
{
	const struct btf_type *func, *func_proto;
	u32 func_id, *kfunc_flags;
	const char *func_name;
	struct btf *desc_btf;

	if (kfunc_name)
		*kfunc_name = NULL;

	if (!insn->imm)
		return -EINVAL;

	desc_btf = find_kfunc_desc_btf(env, insn->off);
	if (IS_ERR(desc_btf))
		return PTR_ERR(desc_btf);

	func_id = insn->imm;
	func = btf_type_by_id(desc_btf, func_id);
	func_name = btf_name_by_offset(desc_btf, func->name_off);
	if (kfunc_name)
		*kfunc_name = func_name;
	func_proto = btf_type_by_id(desc_btf, func->type);

	kfunc_flags = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), func_id);
	if (!kfunc_flags)
		return -EACCES;

	memset(meta, 0, sizeof(*meta));
	meta->btf = desc_btf;
	meta->func_id = func_id;
	meta->kfunc_flags = *kfunc_flags;
	meta->func_proto = func_proto;
	meta->func_name = func_name;

	return 0;
}

static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			    int *insn_idx_p)
{
	const struct btf_type *t, *ptr_type;
	u32 i, nargs, ptr_type_id, release_ref_obj_id;
	struct bpf_reg_state *regs = cur_regs(env);
	const char *func_name, *ptr_type_name;
	bool sleepable, rcu_lock, rcu_unlock;
	struct bpf_kfunc_call_arg_meta meta;
	struct bpf_insn_aux_data *insn_aux;
	int err, insn_idx = *insn_idx_p;
	const struct btf_param *args;
	const struct btf_type *ret_t;
	struct btf *desc_btf;

	/* skip for now, but return error when we find this in fixup_kfunc_call */
	if (!insn->imm)
		return 0;

	err = fetch_kfunc_meta(env, insn, &meta, &func_name);
	if (err == -EACCES && func_name)
		verbose(env, "calling kernel function %s is not allowed\n", func_name);
	if (err)
		return err;
	desc_btf = meta.btf;
	insn_aux = &env->insn_aux_data[insn_idx];

	insn_aux->is_iter_next = is_iter_next_kfunc(&meta);

	if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) {
		verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n");
		return -EACCES;
	}

	sleepable = is_kfunc_sleepable(&meta);
	if (sleepable && !env->prog->aux->sleepable) {
		verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name);
		return -EACCES;
	}

	rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta);
	rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta);

	if (env->cur_state->active_rcu_lock) {
		struct bpf_func_state *state;
		struct bpf_reg_state *reg;

		if (rcu_lock) {
			verbose(env, "nested rcu read lock (kernel function %s)\n", func_name);
			return -EINVAL;
		} else if (rcu_unlock) {
			bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
				if (reg->type & MEM_RCU) {
					reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL);
					reg->type |= PTR_UNTRUSTED;
				}
			}));
			env->cur_state->active_rcu_lock = false;
		} else if (sleepable) {
			verbose(env, "kernel func %s is sleepable within rcu_read_lock region\n", func_name);
			return -EACCES;
		}
	} else if (rcu_lock) {
		env->cur_state->active_rcu_lock = true;
	} else if (rcu_unlock) {
		verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name);
		return -EINVAL;
	}

	/* Check the arguments */
	err = check_kfunc_args(env, &meta, insn_idx);
	if (err < 0)
		return err;
	/* In case of release function, we get register number of refcounted
	 * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now.
	 */
	if (meta.release_regno) {
		err = release_reference(env, regs[meta.release_regno].ref_obj_id);
		if (err) {
			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
				func_name, meta.func_id);
			return err;
		}
	}

	if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front] ||
	    meta.func_id == special_kfunc_list[KF_bpf_list_push_back] ||
	    meta.func_id == special_kfunc_list[KF_bpf_rbtree_add]) {
		release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
		err = ref_convert_owning_non_owning(env, release_ref_obj_id);
		if (err) {
			verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n",
				func_name, meta.func_id);
			return err;
		}

		err = release_reference(env, release_ref_obj_id);
		if (err) {
			verbose(env, "kfunc %s#%d reference has not been acquired before\n",
				func_name, meta.func_id);
			return err;
		}
	}

	if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add]) {
		err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
					set_rbtree_add_callback_state);
		if (err) {
			verbose(env, "kfunc %s#%d failed callback verification\n",
				func_name, meta.func_id);
			return err;
		}
	}

	for (i = 0; i < CALLER_SAVED_REGS; i++)
		mark_reg_not_init(env, regs, caller_saved[i]);

	/* Check return type */
	t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL);

	if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) {
		/* Only exception is bpf_obj_new_impl */
		if (meta.btf != btf_vmlinux || meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl]) {
			verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
			return -EINVAL;
		}
	}

	if (btf_type_is_scalar(t)) {
		mark_reg_unknown(env, regs, BPF_REG_0);
		mark_btf_func_reg_size(env, BPF_REG_0, t->size);
	} else if (btf_type_is_ptr(t)) {
		ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id);

		if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
			if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
				struct btf *ret_btf;
				u32 ret_btf_id;

				if (unlikely(!bpf_global_ma_set))
					return -ENOMEM;

				if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) {
					verbose(env, "local type ID argument must be in range [0, U32_MAX]\n");
					return -EINVAL;
				}

				ret_btf = env->prog->aux->btf;
				ret_btf_id = meta.arg_constant.value;

				/* This may be NULL due to user not supplying a BTF */
				if (!ret_btf) {
					verbose(env, "bpf_obj_new requires prog BTF\n");
					return -EINVAL;
				}

				ret_t = btf_type_by_id(ret_btf, ret_btf_id);
				if (!ret_t || !__btf_type_is_struct(ret_t)) {
					verbose(env, "bpf_obj_new type ID argument must be of a struct\n");
					return -EINVAL;
				}

				mark_reg_known_zero(env, regs, BPF_REG_0);
				regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC;
				regs[BPF_REG_0].btf = ret_btf;
				regs[BPF_REG_0].btf_id = ret_btf_id;

				insn_aux->obj_new_size = ret_t->size;
				insn_aux->kptr_struct_meta =
					btf_find_struct_meta(ret_btf, ret_btf_id);
			} else if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
				insn_aux->kptr_struct_meta =
					btf_find_struct_meta(meta.arg_obj_drop.btf,
							     meta.arg_obj_drop.btf_id);
			} else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] ||
				   meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) {
				struct btf_field *field = meta.arg_list_head.field;

				mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
			} else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
				   meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
				struct btf_field *field = meta.arg_rbtree_root.field;

				mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root);
			} else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
				mark_reg_known_zero(env, regs, BPF_REG_0);
				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED;
				regs[BPF_REG_0].btf = desc_btf;
				regs[BPF_REG_0].btf_id = meta.ret_btf_id;
			} else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
				ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value);
				if (!ret_t || !btf_type_is_struct(ret_t)) {
					verbose(env,
						"kfunc bpf_rdonly_cast type ID argument must be of a struct\n");
					return -EINVAL;
				}

				mark_reg_known_zero(env, regs, BPF_REG_0);
				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
				regs[BPF_REG_0].btf = desc_btf;
				regs[BPF_REG_0].btf_id = meta.arg_constant.value;
			} else if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice] ||
				   meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) {
				enum bpf_type_flag type_flag = get_dynptr_type_flag(meta.initialized_dynptr.type);

				mark_reg_known_zero(env, regs, BPF_REG_0);

				if (!meta.arg_constant.found) {
					verbose(env, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n");
					return -EFAULT;
				}

				regs[BPF_REG_0].mem_size = meta.arg_constant.value;

				/* PTR_MAYBE_NULL will be added when is_kfunc_ret_null is checked */
				regs[BPF_REG_0].type = PTR_TO_MEM | type_flag;

				if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice]) {
					regs[BPF_REG_0].type |= MEM_RDONLY;
				} else {
					/* this will set env->seen_direct_write to true */
					if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE)) {
						verbose(env, "the prog does not allow writes to packet data\n");
						return -EINVAL;
					}
				}

				if (!meta.initialized_dynptr.id) {
					verbose(env, "verifier internal error: no dynptr id\n");
					return -EFAULT;
				}
				regs[BPF_REG_0].dynptr_id = meta.initialized_dynptr.id;

				/* we don't need to set BPF_REG_0's ref obj id
				 * because packet slices are not refcounted (see
				 * dynptr_type_refcounted)
				 */
			} else {
				verbose(env, "kernel function %s unhandled dynamic return type\n",
					meta.func_name);
				return -EFAULT;
			}
		} else if (!__btf_type_is_struct(ptr_type)) {
			if (!meta.r0_size) {
				__u32 sz;

				if (!IS_ERR(btf_resolve_size(desc_btf, ptr_type, &sz))) {
					meta.r0_size = sz;
					meta.r0_rdonly = true;
				}
			}
			if (!meta.r0_size) {
				ptr_type_name = btf_name_by_offset(desc_btf,
								   ptr_type->name_off);
				verbose(env,
					"kernel function %s returns pointer type %s %s is not supported\n",
					func_name,
					btf_type_str(ptr_type),
					ptr_type_name);
				return -EINVAL;
			}

			mark_reg_known_zero(env, regs, BPF_REG_0);
			regs[BPF_REG_0].type = PTR_TO_MEM;
			regs[BPF_REG_0].mem_size = meta.r0_size;

			if (meta.r0_rdonly)
				regs[BPF_REG_0].type |= MEM_RDONLY;

			/* Ensures we don't access the memory after a release_reference() */
			if (meta.ref_obj_id)
				regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
		} else {
			mark_reg_known_zero(env, regs, BPF_REG_0);
			regs[BPF_REG_0].btf = desc_btf;
			regs[BPF_REG_0].type = PTR_TO_BTF_ID;
			regs[BPF_REG_0].btf_id = ptr_type_id;
		}

		if (is_kfunc_ret_null(&meta)) {
			regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
			/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
			regs[BPF_REG_0].id = ++env->id_gen;
		}
		mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
		if (is_kfunc_acquire(&meta)) {
			int id = acquire_reference_state(env, insn_idx);

			if (id < 0)
				return id;
			if (is_kfunc_ret_null(&meta))
				regs[BPF_REG_0].id = id;
			regs[BPF_REG_0].ref_obj_id = id;
		} else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) {
			ref_set_non_owning(env, &regs[BPF_REG_0]);
		}

		if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove])
			invalidate_non_owning_refs(env);

		if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id)
			regs[BPF_REG_0].id = ++env->id_gen;
	} /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */

	nargs = btf_type_vlen(meta.func_proto);
	args = (const struct btf_param *)(meta.func_proto + 1);
	for (i = 0; i < nargs; i++) {
		u32 regno = i + 1;

		t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
		if (btf_type_is_ptr(t))
			mark_btf_func_reg_size(env, regno, sizeof(void *));
		else
			/* scalar. ensured by btf_check_kfunc_arg_match() */
			mark_btf_func_reg_size(env, regno, t->size);
	}

	if (is_iter_next_kfunc(&meta)) {
		err = process_iter_next_call(env, insn_idx, &meta);
		if (err)
			return err;
	}

	return 0;
}
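
/* Illustrative example (assumption: BPF-program-side usage via the
 * bpf_obj_new() convenience macro from the selftests' bpf_experimental.h,
 * not verifier code): the bpf_obj_new_impl handling above is what makes the
 * following pattern verify, returning PTR_TO_BTF_ID | MEM_ALLOC that must
 * later be released, e.g. via bpf_obj_drop() or by pushing it into a
 * list/rbtree:
 *
 *	struct foo *f = bpf_obj_new(typeof(*f));
 *	if (!f)
 *		return 0;
 *	bpf_obj_drop(f);
 */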

static bool signed_add_overflows(s64 a, s64 b)
{
	/* Do the add in u64, where overflow is well-defined */
	s64 res = (s64)((u64)a + (u64)b);

	if (b < 0)
		return res > a;
	return res < a;
}

static bool signed_add32_overflows(s32 a, s32 b)
{
	/* Do the add in u32, where overflow is well-defined */
	s32 res = (s32)((u32)a + (u32)b);

	if (b < 0)
		return res > a;
	return res < a;
}

static bool signed_sub_overflows(s64 a, s64 b)
{
	/* Do the sub in u64, where overflow is well-defined */
	s64 res = (s64)((u64)a - (u64)b);

	if (b < 0)
		return res < a;
	return res > a;
}

static bool signed_sub32_overflows(s32 a, s32 b)
{
	/* Do the sub in u32, where overflow is well-defined */
	s32 res = (s32)((u32)a - (u32)b);

	if (b < 0)
		return res < a;
	return res > a;
}
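
/* Worked example: signed_add_overflows(S64_MAX, 1) computes
 * res = (s64)((u64)S64_MAX + 1) = S64_MIN. Since b = 1 >= 0, the helper
 * checks res < a, and S64_MIN < S64_MAX holds, so overflow is reported.
 * The unsigned detour makes the wraparound well-defined C, while the sign
 * of b tells us which direction the mathematically exact result moved in.
 */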

static bool check_reg_sane_offset(struct bpf_verifier_env *env,
				  const struct bpf_reg_state *reg,
				  enum bpf_reg_type type)
{
	bool known = tnum_is_const(reg->var_off);
	s64 val = reg->var_off.value;
	s64 smin = reg->smin_value;

	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
		verbose(env, "math between %s pointer and %lld is not allowed\n",
			reg_type_str(env, type), val);
		return false;
	}

	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
		verbose(env, "%s pointer offset %d is not allowed\n",
			reg_type_str(env, type), reg->off);
		return false;
	}

	if (smin == S64_MIN) {
		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
			reg_type_str(env, type));
		return false;
	}

	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
		verbose(env, "value %lld makes %s pointer be out of bounds\n",
			smin, reg_type_str(env, type));
		return false;
	}

	return true;
}

enum {
	REASON_BOUNDS = -1,
	REASON_TYPE = -2,
	REASON_PATHS = -3,
	REASON_LIMIT = -4,
	REASON_STACK = -5,
};

static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
			      u32 *alu_limit, bool mask_to_left)
{
	u32 max = 0, ptr_limit = 0;

	switch (ptr_reg->type) {
	case PTR_TO_STACK:
		/* Offset 0 is out-of-bounds, but acceptable start for the
		 * left direction, see BPF_REG_FP. Also, unknown scalar
		 * offset where we would need to deal with min/max bounds is
		 * currently prohibited for unprivileged.
		 */
		max = MAX_BPF_STACK + mask_to_left;
		ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
		break;
	case PTR_TO_MAP_VALUE:
		max = ptr_reg->map_ptr->value_size;
		ptr_limit = (mask_to_left ?
			     ptr_reg->smin_value :
			     ptr_reg->umax_value) + ptr_reg->off;
		break;
	default:
		return REASON_TYPE;
	}

	if (ptr_limit >= max)
		return REASON_LIMIT;
	*alu_limit = ptr_limit;
	return 0;
}
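
/* Worked example of the arithmetic above (illustrative): for a PTR_TO_STACK
 * register pointing at fp-16 (var_off.value == 0, off == -16),
 * ptr_limit = -(0 + -16) = 16, while max is MAX_BPF_STACK, plus one when
 * masking to the left. Since 16 < max, *alu_limit becomes 16, and the
 * runtime masking (see sanitize_ptr_alu() below) will force any variable
 * offset outside [0, 16] to 0 before the pointer arithmetic executes.
 */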

static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
				    const struct bpf_insn *insn)
{
	return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
}

static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
				       u32 alu_state, u32 alu_limit)
{
	/* If we arrived here from different branches with different
	 * state or limits to sanitize, then this won't work.
	 */
	if (aux->alu_state &&
	    (aux->alu_state != alu_state ||
	     aux->alu_limit != alu_limit))
		return REASON_PATHS;

	/* Corresponding fixup done in do_misc_fixups(). */
	aux->alu_state = alu_state;
	aux->alu_limit = alu_limit;
	return 0;
}

static int sanitize_val_alu(struct bpf_verifier_env *env,
			    struct bpf_insn *insn)
{
	struct bpf_insn_aux_data *aux = cur_aux(env);

	if (can_skip_alu_sanitation(env, insn))
		return 0;

	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
}

static bool sanitize_needed(u8 opcode)
{
	return opcode == BPF_ADD || opcode == BPF_SUB;
}

struct bpf_sanitize_info {
	struct bpf_insn_aux_data aux;
	bool mask_to_left;
};

static struct bpf_verifier_state *
sanitize_speculative_path(struct bpf_verifier_env *env,
			  const struct bpf_insn *insn,
			  u32 next_idx, u32 curr_idx)
{
	struct bpf_verifier_state *branch;
	struct bpf_reg_state *regs;

	branch = push_stack(env, next_idx, curr_idx, true);
	if (branch && insn) {
		regs = branch->frame[branch->curframe]->regs;
		if (BPF_SRC(insn->code) == BPF_K) {
			mark_reg_unknown(env, regs, insn->dst_reg);
		} else if (BPF_SRC(insn->code) == BPF_X) {
			mark_reg_unknown(env, regs, insn->dst_reg);
			mark_reg_unknown(env, regs, insn->src_reg);
		}
	}
	return branch;
}
11058
979d63d5
DB
11059static int sanitize_ptr_alu(struct bpf_verifier_env *env,
11060 struct bpf_insn *insn,
11061 const struct bpf_reg_state *ptr_reg,
6f55b2f2 11062 const struct bpf_reg_state *off_reg,
979d63d5 11063 struct bpf_reg_state *dst_reg,
3d0220f6 11064 struct bpf_sanitize_info *info,
7fedb63a 11065 const bool commit_window)
979d63d5 11066{
3d0220f6 11067 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
979d63d5 11068 struct bpf_verifier_state *vstate = env->cur_state;
801c6058 11069 bool off_is_imm = tnum_is_const(off_reg->var_off);
6f55b2f2 11070 bool off_is_neg = off_reg->smin_value < 0;
979d63d5
DB
11071 bool ptr_is_dst_reg = ptr_reg == dst_reg;
11072 u8 opcode = BPF_OP(insn->code);
11073 u32 alu_state, alu_limit;
11074 struct bpf_reg_state tmp;
11075 bool ret;
f232326f 11076 int err;
979d63d5 11077
d3bd7413 11078 if (can_skip_alu_sanitation(env, insn))
979d63d5
DB
11079 return 0;
11080
11081 /* We already marked aux for masking from non-speculative
11082 * paths, thus we got here in the first place. We only care
11083 * to explore bad access from here.
11084 */
11085 if (vstate->speculative)
11086 goto do_sim;
11087
bb01a1bb
DB
11088 if (!commit_window) {
11089 if (!tnum_is_const(off_reg->var_off) &&
11090 (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
11091 return REASON_BOUNDS;
11092
11093 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
11094 (opcode == BPF_SUB && !off_is_neg);
11095 }
11096
11097 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
f232326f
PK
11098 if (err < 0)
11099 return err;
11100
7fedb63a
DB
11101 if (commit_window) {
11102 /* In commit phase we narrow the masking window based on
11103 * the observed pointer move after the simulated operation.
11104 */
3d0220f6
DB
11105 alu_state = info->aux.alu_state;
11106 alu_limit = abs(info->aux.alu_limit - alu_limit);
7fedb63a
DB
11107 } else {
11108 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
801c6058 11109 alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
7fedb63a
DB
11110 alu_state |= ptr_is_dst_reg ?
11111 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
e042aa53
DB
11112
11113 /* Limit pruning on unknown scalars to enable deep search for
11114 * potential masking differences from other program paths.
11115 */
11116 if (!off_is_imm)
11117 env->explore_alu_limits = true;
7fedb63a
DB
11118 }
11119
f232326f
PK
11120 err = update_alu_sanitation_state(aux, alu_state, alu_limit);
11121 if (err < 0)
11122 return err;
979d63d5 11123do_sim:
7fedb63a
DB
11124 /* If we're in commit phase, we're done here given we already
11125 * pushed the truncated dst_reg into the speculative verification
11126 * stack.
a7036191
DB
11127 *
 11128 * Also, when the register is a known constant, we rewrite the register-based
 11129 * operation to an immediate-based one, and thus do not need masking (and as
11130 * a consequence, do not need to simulate the zero-truncation either).
7fedb63a 11131 */
a7036191 11132 if (commit_window || off_is_imm)
7fedb63a
DB
11133 return 0;
11134
979d63d5
DB
11135 /* Simulate and find potential out-of-bounds access under
11136 * speculative execution from truncation as a result of
11137 * masking when off was not within expected range. If off
11138 * sits in dst, then we temporarily need to move ptr there
11139 * to simulate dst (== 0) +/-= ptr. Needed, for example,
11140 * for cases where we use K-based arithmetic in one direction
11141 * and truncated reg-based in the other in order to explore
11142 * bad access.
11143 */
11144 if (!ptr_is_dst_reg) {
11145 tmp = *dst_reg;
71f656a5 11146 copy_register_state(dst_reg, ptr_reg);
979d63d5 11147 }
9183671a
DB
11148 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
11149 env->insn_idx);
0803278b 11150 if (!ptr_is_dst_reg && ret)
979d63d5 11151 *dst_reg = tmp;
a6aaece0
DB
11152 return !ret ? REASON_STACK : 0;
11153}
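/* Illustrative example (hypothetical numbers): suppose the first
 * (collection) phase computes alu_limit == 64 from ptr_reg, and the commit
 * phase recomputes 48 from the already-moved dst_reg. The commit window
 * above then narrows to abs(64 - 48) == 16, so the masking patched in by
 * do_misc_fixups() permits only the pointer move that was actually
 * verified, not the full original headroom.
 */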
11154
fe9a5ca7
DB
11155static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
11156{
11157 struct bpf_verifier_state *vstate = env->cur_state;
11158
11159 /* If we simulate paths under speculation, we don't update the
11160 * insn as 'seen' such that when we verify unreachable paths in
11161 * the non-speculative domain, sanitize_dead_code() can still
11162 * rewrite/sanitize them.
11163 */
11164 if (!vstate->speculative)
11165 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
11166}
11167
a6aaece0
DB
11168static int sanitize_err(struct bpf_verifier_env *env,
11169 const struct bpf_insn *insn, int reason,
11170 const struct bpf_reg_state *off_reg,
11171 const struct bpf_reg_state *dst_reg)
11172{
11173 static const char *err = "pointer arithmetic with it prohibited for !root";
11174 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
11175 u32 dst = insn->dst_reg, src = insn->src_reg;
11176
11177 switch (reason) {
11178 case REASON_BOUNDS:
11179 verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
11180 off_reg == dst_reg ? dst : src, err);
11181 break;
11182 case REASON_TYPE:
11183 verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
11184 off_reg == dst_reg ? src : dst, err);
11185 break;
11186 case REASON_PATHS:
11187 verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
11188 dst, op, err);
11189 break;
11190 case REASON_LIMIT:
11191 verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
11192 dst, op, err);
11193 break;
11194 case REASON_STACK:
11195 verbose(env, "R%d could not be pushed for speculative verification, %s\n",
11196 dst, err);
11197 break;
11198 default:
11199 verbose(env, "verifier internal error: unknown reason (%d)\n",
11200 reason);
11201 break;
11202 }
11203
11204 return -EACCES;
979d63d5
DB
11205}
11206
01f810ac
AM
11207/* check that stack access falls within stack limits and that 'reg' doesn't
11208 * have a variable offset.
11209 *
11210 * Variable offset is prohibited for unprivileged mode for simplicity since it
11211 * requires corresponding support in Spectre masking for stack ALU. See also
11212 * retrieve_ptr_limit().
11213 *
11215 * 'off' includes 'reg->off'.
11216 */
11217static int check_stack_access_for_ptr_arithmetic(
11218 struct bpf_verifier_env *env,
11219 int regno,
11220 const struct bpf_reg_state *reg,
11221 int off)
11222{
11223 if (!tnum_is_const(reg->var_off)) {
11224 char tn_buf[48];
11225
11226 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
11227 verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
11228 regno, tn_buf, off);
11229 return -EACCES;
11230 }
11231
11232 if (off >= 0 || off < -MAX_BPF_STACK) {
11233 verbose(env, "R%d stack pointer arithmetic goes out of range, "
11234 "prohibited for !root; off=%d\n", regno, off);
11235 return -EACCES;
11236 }
11237
11238 return 0;
11239}
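/* Example: with MAX_BPF_STACK == 512 the resulting constant offset must
 * land in [-512, -1]. So r10 - 8 (off == -8) passes, while r10 + 0 and
 * r10 - 520 are rejected for unprivileged programs.
 */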
11240
073815b7
DB
11241static int sanitize_check_bounds(struct bpf_verifier_env *env,
11242 const struct bpf_insn *insn,
11243 const struct bpf_reg_state *dst_reg)
11244{
11245 u32 dst = insn->dst_reg;
11246
11247 /* For unprivileged we require that resulting offset must be in bounds
11248 * in order to be able to sanitize access later on.
11249 */
11250 if (env->bypass_spec_v1)
11251 return 0;
11252
11253 switch (dst_reg->type) {
11254 case PTR_TO_STACK:
11255 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
11256 dst_reg->off + dst_reg->var_off.value))
11257 return -EACCES;
11258 break;
11259 case PTR_TO_MAP_VALUE:
61df10c7 11260 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
073815b7
DB
11261 verbose(env, "R%d pointer arithmetic of map value goes out of range, "
11262 "prohibited for !root\n", dst);
11263 return -EACCES;
11264 }
11265 break;
11266 default:
11267 break;
11268 }
11269
11270 return 0;
11271}
01f810ac 11272
f1174f77 11273/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
f1174f77
EC
11274 * Caller should also handle BPF_MOV case separately.
11275 * If we return -EACCES, caller may want to try again treating pointer as a
11276 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
11277 */
11278static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
11279 struct bpf_insn *insn,
11280 const struct bpf_reg_state *ptr_reg,
11281 const struct bpf_reg_state *off_reg)
969bf05e 11282{
f4d7e40a
AS
11283 struct bpf_verifier_state *vstate = env->cur_state;
11284 struct bpf_func_state *state = vstate->frame[vstate->curframe];
11285 struct bpf_reg_state *regs = state->regs, *dst_reg;
f1174f77 11286 bool known = tnum_is_const(off_reg->var_off);
b03c9f9f
EC
11287 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
11288 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
11289 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
11290 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
3d0220f6 11291 struct bpf_sanitize_info info = {};
969bf05e 11292 u8 opcode = BPF_OP(insn->code);
24c109bb 11293 u32 dst = insn->dst_reg;
979d63d5 11294 int ret;
969bf05e 11295
f1174f77 11296 dst_reg = &regs[dst];
969bf05e 11297
6f16101e
DB
11298 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
11299 smin_val > smax_val || umin_val > umax_val) {
11300 /* Taint dst register if offset had invalid bounds derived from
11301 * e.g. dead branches.
11302 */
f54c7898 11303 __mark_reg_unknown(env, dst_reg);
6f16101e 11304 return 0;
f1174f77
EC
11305 }
11306
11307 if (BPF_CLASS(insn->code) != BPF_ALU64) {
11308 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
6c693541
YS
11309 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
11310 __mark_reg_unknown(env, dst_reg);
11311 return 0;
11312 }
11313
82abbf8d
AS
11314 verbose(env,
11315 "R%d 32-bit pointer arithmetic prohibited\n",
11316 dst);
f1174f77 11317 return -EACCES;
969bf05e
AS
11318 }
11319
c25b2ae1 11320 if (ptr_reg->type & PTR_MAYBE_NULL) {
aad2eeaf 11321 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
c25b2ae1 11322 dst, reg_type_str(env, ptr_reg->type));
f1174f77 11323 return -EACCES;
c25b2ae1
HL
11324 }
11325
11326 switch (base_type(ptr_reg->type)) {
aad2eeaf 11327 case CONST_PTR_TO_MAP:
7c696732
YS
11328 /* smin_val represents the known value */
11329 if (known && smin_val == 0 && opcode == BPF_ADD)
11330 break;
8731745e 11331 fallthrough;
aad2eeaf 11332 case PTR_TO_PACKET_END:
c64b7983 11333 case PTR_TO_SOCKET:
46f8bc92 11334 case PTR_TO_SOCK_COMMON:
655a51e5 11335 case PTR_TO_TCP_SOCK:
fada7fdc 11336 case PTR_TO_XDP_SOCK:
aad2eeaf 11337 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
c25b2ae1 11338 dst, reg_type_str(env, ptr_reg->type));
f1174f77 11339 return -EACCES;
aad2eeaf
JS
11340 default:
11341 break;
f1174f77
EC
11342 }
11343
11344 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
11345 * The id may be overwritten later if we create a new variable offset.
969bf05e 11346 */
f1174f77
EC
11347 dst_reg->type = ptr_reg->type;
11348 dst_reg->id = ptr_reg->id;
969bf05e 11349
bb7f0f98
AS
11350 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
11351 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
11352 return -EINVAL;
11353
3f50f132
JF
11354 /* pointer types do not carry 32-bit bounds at the moment. */
11355 __mark_reg32_unbounded(dst_reg);
11356
7fedb63a
DB
11357 if (sanitize_needed(opcode)) {
11358 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
3d0220f6 11359 &info, false);
a6aaece0
DB
11360 if (ret < 0)
11361 return sanitize_err(env, insn, ret, off_reg, dst_reg);
7fedb63a 11362 }
a6aaece0 11363
f1174f77
EC
11364 switch (opcode) {
11365 case BPF_ADD:
11366 /* We can take a fixed offset as long as it doesn't overflow
11367 * the s32 'off' field
969bf05e 11368 */
b03c9f9f
EC
11369 if (known && (ptr_reg->off + smin_val ==
11370 (s64)(s32)(ptr_reg->off + smin_val))) {
f1174f77 11371 /* pointer += K. Accumulate it into fixed offset */
b03c9f9f
EC
11372 dst_reg->smin_value = smin_ptr;
11373 dst_reg->smax_value = smax_ptr;
11374 dst_reg->umin_value = umin_ptr;
11375 dst_reg->umax_value = umax_ptr;
f1174f77 11376 dst_reg->var_off = ptr_reg->var_off;
b03c9f9f 11377 dst_reg->off = ptr_reg->off + smin_val;
0962590e 11378 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
11379 break;
11380 }
f1174f77
EC
11381 /* A new variable offset is created. Note that off_reg->off
11382 * == 0, since it's a scalar.
11383 * dst_reg gets the pointer type and since some positive
11384 * integer value was added to the pointer, give it a new 'id'
11385 * if it's a PTR_TO_PACKET.
11386 * this creates a new 'base' pointer, off_reg (variable) gets
11387 * added into the variable offset, and we copy the fixed offset
11388 * from ptr_reg.
969bf05e 11389 */
b03c9f9f
EC
11390 if (signed_add_overflows(smin_ptr, smin_val) ||
11391 signed_add_overflows(smax_ptr, smax_val)) {
11392 dst_reg->smin_value = S64_MIN;
11393 dst_reg->smax_value = S64_MAX;
11394 } else {
11395 dst_reg->smin_value = smin_ptr + smin_val;
11396 dst_reg->smax_value = smax_ptr + smax_val;
11397 }
11398 if (umin_ptr + umin_val < umin_ptr ||
11399 umax_ptr + umax_val < umax_ptr) {
11400 dst_reg->umin_value = 0;
11401 dst_reg->umax_value = U64_MAX;
11402 } else {
11403 dst_reg->umin_value = umin_ptr + umin_val;
11404 dst_reg->umax_value = umax_ptr + umax_val;
11405 }
f1174f77
EC
11406 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
11407 dst_reg->off = ptr_reg->off;
0962590e 11408 dst_reg->raw = ptr_reg->raw;
de8f3a83 11409 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
11410 dst_reg->id = ++env->id_gen;
11411 /* something was added to pkt_ptr, set range to zero */
22dc4a0f 11412 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
f1174f77
EC
11413 }
11414 break;
11415 case BPF_SUB:
11416 if (dst_reg == off_reg) {
11417 /* scalar -= pointer. Creates an unknown scalar */
82abbf8d
AS
11418 verbose(env, "R%d tried to subtract pointer from scalar\n",
11419 dst);
f1174f77
EC
11420 return -EACCES;
11421 }
 11422 /* We don't allow subtraction from FP, because (according to
 11423 * the test_verifier.c test "invalid fp arithmetic") JITs might not
 11424 * be able to deal with it.
969bf05e 11425 */
f1174f77 11426 if (ptr_reg->type == PTR_TO_STACK) {
82abbf8d
AS
11427 verbose(env, "R%d subtraction from stack pointer prohibited\n",
11428 dst);
f1174f77
EC
11429 return -EACCES;
11430 }
b03c9f9f
EC
11431 if (known && (ptr_reg->off - smin_val ==
11432 (s64)(s32)(ptr_reg->off - smin_val))) {
f1174f77 11433 /* pointer -= K. Subtract it from fixed offset */
b03c9f9f
EC
11434 dst_reg->smin_value = smin_ptr;
11435 dst_reg->smax_value = smax_ptr;
11436 dst_reg->umin_value = umin_ptr;
11437 dst_reg->umax_value = umax_ptr;
f1174f77
EC
11438 dst_reg->var_off = ptr_reg->var_off;
11439 dst_reg->id = ptr_reg->id;
b03c9f9f 11440 dst_reg->off = ptr_reg->off - smin_val;
0962590e 11441 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
11442 break;
11443 }
f1174f77
EC
11444 /* A new variable offset is created. If the subtrahend is known
11445 * nonnegative, then any reg->range we had before is still good.
969bf05e 11446 */
b03c9f9f
EC
11447 if (signed_sub_overflows(smin_ptr, smax_val) ||
11448 signed_sub_overflows(smax_ptr, smin_val)) {
11449 /* Overflow possible, we know nothing */
11450 dst_reg->smin_value = S64_MIN;
11451 dst_reg->smax_value = S64_MAX;
11452 } else {
11453 dst_reg->smin_value = smin_ptr - smax_val;
11454 dst_reg->smax_value = smax_ptr - smin_val;
11455 }
11456 if (umin_ptr < umax_val) {
11457 /* Overflow possible, we know nothing */
11458 dst_reg->umin_value = 0;
11459 dst_reg->umax_value = U64_MAX;
11460 } else {
11461 /* Cannot overflow (as long as bounds are consistent) */
11462 dst_reg->umin_value = umin_ptr - umax_val;
11463 dst_reg->umax_value = umax_ptr - umin_val;
11464 }
f1174f77
EC
11465 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
11466 dst_reg->off = ptr_reg->off;
0962590e 11467 dst_reg->raw = ptr_reg->raw;
de8f3a83 11468 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
11469 dst_reg->id = ++env->id_gen;
 11470 /* a negative subtrahend may have advanced pkt_ptr, set range to zero */
b03c9f9f 11471 if (smin_val < 0)
22dc4a0f 11472 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
43188702 11473 }
f1174f77
EC
11474 break;
11475 case BPF_AND:
11476 case BPF_OR:
11477 case BPF_XOR:
82abbf8d
AS
11478 /* bitwise ops on pointers are troublesome, prohibit. */
11479 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
11480 dst, bpf_alu_string[opcode >> 4]);
f1174f77
EC
11481 return -EACCES;
11482 default:
 11483 /* other operators (e.g. MUL, LSH) produce non-pointer results */
82abbf8d
AS
11484 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
11485 dst, bpf_alu_string[opcode >> 4]);
f1174f77 11486 return -EACCES;
43188702
JF
11487 }
11488
bb7f0f98
AS
11489 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
11490 return -EINVAL;
3844d153 11491 reg_bounds_sync(dst_reg);
073815b7
DB
11492 if (sanitize_check_bounds(env, insn, dst_reg) < 0)
11493 return -EACCES;
7fedb63a
DB
11494 if (sanitize_needed(opcode)) {
11495 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
3d0220f6 11496 &info, true);
7fedb63a
DB
11497 if (ret < 0)
11498 return sanitize_err(env, insn, ret, off_reg, dst_reg);
0d6303db
DB
11499 }
11500
43188702
JF
11501 return 0;
11502}
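/* Illustrative walk-through (hypothetical values): for PTR_TO_MAP_VALUE
 * with off == 0, "r1 += 8" with a known constant folds into the fixed
 * offset (off becomes 8, var_off unchanged). Adding an unknown scalar in
 * [0, 64] instead keeps off == 0 but widens var_off and the umin/umax
 * bounds to cover the range, and packet pointers get a fresh id so stale
 * range knowledge is dropped.
 */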
11503
3f50f132
JF
11504static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
11505 struct bpf_reg_state *src_reg)
11506{
11507 s32 smin_val = src_reg->s32_min_value;
11508 s32 smax_val = src_reg->s32_max_value;
11509 u32 umin_val = src_reg->u32_min_value;
11510 u32 umax_val = src_reg->u32_max_value;
11511
11512 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
11513 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
11514 dst_reg->s32_min_value = S32_MIN;
11515 dst_reg->s32_max_value = S32_MAX;
11516 } else {
11517 dst_reg->s32_min_value += smin_val;
11518 dst_reg->s32_max_value += smax_val;
11519 }
11520 if (dst_reg->u32_min_value + umin_val < umin_val ||
11521 dst_reg->u32_max_value + umax_val < umax_val) {
11522 dst_reg->u32_min_value = 0;
11523 dst_reg->u32_max_value = U32_MAX;
11524 } else {
11525 dst_reg->u32_min_value += umin_val;
11526 dst_reg->u32_max_value += umax_val;
11527 }
11528}
11529
07cd2631
JF
11530static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
11531 struct bpf_reg_state *src_reg)
11532{
11533 s64 smin_val = src_reg->smin_value;
11534 s64 smax_val = src_reg->smax_value;
11535 u64 umin_val = src_reg->umin_value;
11536 u64 umax_val = src_reg->umax_value;
11537
11538 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
11539 signed_add_overflows(dst_reg->smax_value, smax_val)) {
11540 dst_reg->smin_value = S64_MIN;
11541 dst_reg->smax_value = S64_MAX;
11542 } else {
11543 dst_reg->smin_value += smin_val;
11544 dst_reg->smax_value += smax_val;
11545 }
11546 if (dst_reg->umin_value + umin_val < umin_val ||
11547 dst_reg->umax_value + umax_val < umax_val) {
11548 dst_reg->umin_value = 0;
11549 dst_reg->umax_value = U64_MAX;
11550 } else {
11551 dst_reg->umin_value += umin_val;
11552 dst_reg->umax_value += umax_val;
11553 }
3f50f132
JF
11554}
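/* Example: dst in [10, 20] plus src in [3, 5] yields [13, 25] in both the
 * signed and unsigned domains. If dst->smax_value == S64_MAX and
 * src->smax_value == 1, signed_add_overflows() trips and the signed bounds
 * collapse to [S64_MIN, S64_MAX]; the caller's tnum_add() may still
 * recover some known bits.
 */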
11555
11556static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
11557 struct bpf_reg_state *src_reg)
11558{
11559 s32 smin_val = src_reg->s32_min_value;
11560 s32 smax_val = src_reg->s32_max_value;
11561 u32 umin_val = src_reg->u32_min_value;
11562 u32 umax_val = src_reg->u32_max_value;
11563
11564 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
11565 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
11566 /* Overflow possible, we know nothing */
11567 dst_reg->s32_min_value = S32_MIN;
11568 dst_reg->s32_max_value = S32_MAX;
11569 } else {
11570 dst_reg->s32_min_value -= smax_val;
11571 dst_reg->s32_max_value -= smin_val;
11572 }
11573 if (dst_reg->u32_min_value < umax_val) {
11574 /* Overflow possible, we know nothing */
11575 dst_reg->u32_min_value = 0;
11576 dst_reg->u32_max_value = U32_MAX;
11577 } else {
11578 /* Cannot overflow (as long as bounds are consistent) */
11579 dst_reg->u32_min_value -= umax_val;
11580 dst_reg->u32_max_value -= umin_val;
11581 }
07cd2631
JF
11582}
11583
11584static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
11585 struct bpf_reg_state *src_reg)
11586{
11587 s64 smin_val = src_reg->smin_value;
11588 s64 smax_val = src_reg->smax_value;
11589 u64 umin_val = src_reg->umin_value;
11590 u64 umax_val = src_reg->umax_value;
11591
11592 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
11593 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
11594 /* Overflow possible, we know nothing */
11595 dst_reg->smin_value = S64_MIN;
11596 dst_reg->smax_value = S64_MAX;
11597 } else {
11598 dst_reg->smin_value -= smax_val;
11599 dst_reg->smax_value -= smin_val;
11600 }
11601 if (dst_reg->umin_value < umax_val) {
11602 /* Overflow possible, we know nothing */
11603 dst_reg->umin_value = 0;
11604 dst_reg->umax_value = U64_MAX;
11605 } else {
11606 /* Cannot overflow (as long as bounds are consistent) */
11607 dst_reg->umin_value -= umax_val;
11608 dst_reg->umax_value -= umin_val;
11609 }
3f50f132
JF
11610}
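/* Example: dst in [100, 200] minus src in [10, 30] gives [70, 190]. With
 * dst->umin_value == 10 and src->umax_value == 20 instead, the
 * dst->umin_value < umax_val check above detects possible wraparound below
 * zero and widens the unsigned bounds to [0, U64_MAX].
 */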
11611
11612static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
11613 struct bpf_reg_state *src_reg)
11614{
11615 s32 smin_val = src_reg->s32_min_value;
11616 u32 umin_val = src_reg->u32_min_value;
11617 u32 umax_val = src_reg->u32_max_value;
11618
11619 if (smin_val < 0 || dst_reg->s32_min_value < 0) {
11620 /* Ain't nobody got time to multiply that sign */
11621 __mark_reg32_unbounded(dst_reg);
11622 return;
11623 }
11624 /* Both values are positive, so we can work with unsigned and
11625 * copy the result to signed (unless it exceeds S32_MAX).
11626 */
11627 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
11628 /* Potential overflow, we know nothing */
11629 __mark_reg32_unbounded(dst_reg);
11630 return;
11631 }
11632 dst_reg->u32_min_value *= umin_val;
11633 dst_reg->u32_max_value *= umax_val;
11634 if (dst_reg->u32_max_value > S32_MAX) {
11635 /* Overflow possible, we know nothing */
11636 dst_reg->s32_min_value = S32_MIN;
11637 dst_reg->s32_max_value = S32_MAX;
11638 } else {
11639 dst_reg->s32_min_value = dst_reg->u32_min_value;
11640 dst_reg->s32_max_value = dst_reg->u32_max_value;
11641 }
07cd2631
JF
11642}
11643
11644static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
11645 struct bpf_reg_state *src_reg)
11646{
11647 s64 smin_val = src_reg->smin_value;
11648 u64 umin_val = src_reg->umin_value;
11649 u64 umax_val = src_reg->umax_value;
11650
07cd2631
JF
11651 if (smin_val < 0 || dst_reg->smin_value < 0) {
11652 /* Ain't nobody got time to multiply that sign */
3f50f132 11653 __mark_reg64_unbounded(dst_reg);
07cd2631
JF
11654 return;
11655 }
11656 /* Both values are positive, so we can work with unsigned and
11657 * copy the result to signed (unless it exceeds S64_MAX).
11658 */
11659 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
11660 /* Potential overflow, we know nothing */
3f50f132 11661 __mark_reg64_unbounded(dst_reg);
07cd2631
JF
11662 return;
11663 }
11664 dst_reg->umin_value *= umin_val;
11665 dst_reg->umax_value *= umax_val;
11666 if (dst_reg->umax_value > S64_MAX) {
11667 /* Overflow possible, we know nothing */
11668 dst_reg->smin_value = S64_MIN;
11669 dst_reg->smax_value = S64_MAX;
11670 } else {
11671 dst_reg->smin_value = dst_reg->umin_value;
11672 dst_reg->smax_value = dst_reg->umax_value;
11673 }
11674}
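/* Example: operands confined to [0, 1000] and [2, 4] stay far below the
 * U32_MAX guard, so the product range [0, 4000] is exact and is copied
 * into the signed bounds. If either umax could exceed U32_MAX, a 64x64
 * multiply may wrap, hence the early __mark_reg64_unbounded() above.
 */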
11675
3f50f132
JF
11676static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
11677 struct bpf_reg_state *src_reg)
11678{
11679 bool src_known = tnum_subreg_is_const(src_reg->var_off);
11680 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
11681 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
11682 s32 smin_val = src_reg->s32_min_value;
11683 u32 umax_val = src_reg->u32_max_value;
11684
049c4e13
DB
11685 if (src_known && dst_known) {
11686 __mark_reg32_known(dst_reg, var32_off.value);
3f50f132 11687 return;
049c4e13 11688 }
3f50f132
JF
11689
11690 /* We get our minimum from the var_off, since that's inherently
11691 * bitwise. Our maximum is the minimum of the operands' maxima.
11692 */
11693 dst_reg->u32_min_value = var32_off.value;
11694 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
11695 if (dst_reg->s32_min_value < 0 || smin_val < 0) {
11696 /* Lose signed bounds when ANDing negative numbers,
11697 * ain't nobody got time for that.
11698 */
11699 dst_reg->s32_min_value = S32_MIN;
11700 dst_reg->s32_max_value = S32_MAX;
11701 } else {
11702 /* ANDing two positives gives a positive, so safe to
11703 * cast result into s64.
11704 */
11705 dst_reg->s32_min_value = dst_reg->u32_min_value;
11706 dst_reg->s32_max_value = dst_reg->u32_max_value;
11707 }
3f50f132
JF
11708}
11709
07cd2631
JF
11710static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
11711 struct bpf_reg_state *src_reg)
11712{
3f50f132
JF
11713 bool src_known = tnum_is_const(src_reg->var_off);
11714 bool dst_known = tnum_is_const(dst_reg->var_off);
07cd2631
JF
11715 s64 smin_val = src_reg->smin_value;
11716 u64 umax_val = src_reg->umax_value;
11717
3f50f132 11718 if (src_known && dst_known) {
4fbb38a3 11719 __mark_reg_known(dst_reg, dst_reg->var_off.value);
3f50f132
JF
11720 return;
11721 }
11722
07cd2631
JF
11723 /* We get our minimum from the var_off, since that's inherently
11724 * bitwise. Our maximum is the minimum of the operands' maxima.
11725 */
07cd2631
JF
11726 dst_reg->umin_value = dst_reg->var_off.value;
11727 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
11728 if (dst_reg->smin_value < 0 || smin_val < 0) {
11729 /* Lose signed bounds when ANDing negative numbers,
11730 * ain't nobody got time for that.
11731 */
11732 dst_reg->smin_value = S64_MIN;
11733 dst_reg->smax_value = S64_MAX;
11734 } else {
11735 /* ANDing two positives gives a positive, so safe to
11736 * cast result into s64.
11737 */
11738 dst_reg->smin_value = dst_reg->umin_value;
11739 dst_reg->smax_value = dst_reg->umax_value;
11740 }
11741 /* We may learn something more from the var_off */
11742 __update_reg_bounds(dst_reg);
11743}
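/* Example (hypothetical tnum): dst var_off {value 0x0, mask 0xff} (low
 * byte unknown) ANDed with the constant 0x0f gives, via the caller's
 * tnum_and(), var_off {value 0x0, mask 0x0f}. The code above then sets
 * umin_value = 0 and umax_value = min(old umax, 0x0f): AND can clear bits
 * but never set them.
 */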
11744
3f50f132
JF
11745static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
11746 struct bpf_reg_state *src_reg)
11747{
11748 bool src_known = tnum_subreg_is_const(src_reg->var_off);
11749 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
11750 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
5b9fbeb7
DB
11751 s32 smin_val = src_reg->s32_min_value;
11752 u32 umin_val = src_reg->u32_min_value;
3f50f132 11753
049c4e13
DB
11754 if (src_known && dst_known) {
11755 __mark_reg32_known(dst_reg, var32_off.value);
3f50f132 11756 return;
049c4e13 11757 }
3f50f132
JF
11758
11759 /* We get our maximum from the var_off, and our minimum is the
11760 * maximum of the operands' minima
11761 */
11762 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
11763 dst_reg->u32_max_value = var32_off.value | var32_off.mask;
11764 if (dst_reg->s32_min_value < 0 || smin_val < 0) {
11765 /* Lose signed bounds when ORing negative numbers,
11766 * ain't nobody got time for that.
11767 */
11768 dst_reg->s32_min_value = S32_MIN;
11769 dst_reg->s32_max_value = S32_MAX;
11770 } else {
11771 /* ORing two positives gives a positive, so safe to
11772 * cast result into s64.
11773 */
5b9fbeb7
DB
11774 dst_reg->s32_min_value = dst_reg->u32_min_value;
11775 dst_reg->s32_max_value = dst_reg->u32_max_value;
3f50f132
JF
11776 }
11777}
11778
07cd2631
JF
11779static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
11780 struct bpf_reg_state *src_reg)
11781{
3f50f132
JF
11782 bool src_known = tnum_is_const(src_reg->var_off);
11783 bool dst_known = tnum_is_const(dst_reg->var_off);
07cd2631
JF
11784 s64 smin_val = src_reg->smin_value;
11785 u64 umin_val = src_reg->umin_value;
11786
3f50f132 11787 if (src_known && dst_known) {
4fbb38a3 11788 __mark_reg_known(dst_reg, dst_reg->var_off.value);
3f50f132
JF
11789 return;
11790 }
11791
07cd2631
JF
11792 /* We get our maximum from the var_off, and our minimum is the
11793 * maximum of the operands' minima
11794 */
07cd2631
JF
11795 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
11796 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
11797 if (dst_reg->smin_value < 0 || smin_val < 0) {
11798 /* Lose signed bounds when ORing negative numbers,
11799 * ain't nobody got time for that.
11800 */
11801 dst_reg->smin_value = S64_MIN;
11802 dst_reg->smax_value = S64_MAX;
11803 } else {
11804 /* ORing two positives gives a positive, so safe to
11805 * cast result into s64.
11806 */
11807 dst_reg->smin_value = dst_reg->umin_value;
11808 dst_reg->smax_value = dst_reg->umax_value;
11809 }
11810 /* We may learn something more from the var_off */
11811 __update_reg_bounds(dst_reg);
11812}
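/* Example (hypothetical tnum): with dst var_off {value 0x10, mask 0x0f}
 * after the caller's tnum_or(), umax_value becomes 0x10 | 0x0f == 0x1f,
 * since OR can only set bits; umin_value is max(dst->umin_value,
 * src->umin_value) because the result is at least as large as either
 * operand.
 */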
11813
2921c90d
YS
11814static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
11815 struct bpf_reg_state *src_reg)
11816{
11817 bool src_known = tnum_subreg_is_const(src_reg->var_off);
11818 bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
11819 struct tnum var32_off = tnum_subreg(dst_reg->var_off);
11820 s32 smin_val = src_reg->s32_min_value;
11821
049c4e13
DB
11822 if (src_known && dst_known) {
11823 __mark_reg32_known(dst_reg, var32_off.value);
2921c90d 11824 return;
049c4e13 11825 }
2921c90d
YS
11826
11827 /* We get both minimum and maximum from the var32_off. */
11828 dst_reg->u32_min_value = var32_off.value;
11829 dst_reg->u32_max_value = var32_off.value | var32_off.mask;
11830
11831 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
11832 /* XORing two positive sign numbers gives a positive,
11833 * so safe to cast u32 result into s32.
11834 */
11835 dst_reg->s32_min_value = dst_reg->u32_min_value;
11836 dst_reg->s32_max_value = dst_reg->u32_max_value;
11837 } else {
11838 dst_reg->s32_min_value = S32_MIN;
11839 dst_reg->s32_max_value = S32_MAX;
11840 }
11841}
11842
11843static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
11844 struct bpf_reg_state *src_reg)
11845{
11846 bool src_known = tnum_is_const(src_reg->var_off);
11847 bool dst_known = tnum_is_const(dst_reg->var_off);
11848 s64 smin_val = src_reg->smin_value;
11849
11850 if (src_known && dst_known) {
11851 /* dst_reg->var_off.value has been updated earlier */
11852 __mark_reg_known(dst_reg, dst_reg->var_off.value);
11853 return;
11854 }
11855
11856 /* We get both minimum and maximum from the var_off. */
11857 dst_reg->umin_value = dst_reg->var_off.value;
11858 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
11859
11860 if (dst_reg->smin_value >= 0 && smin_val >= 0) {
11861 /* XORing two positive sign numbers gives a positive,
11862 * so safe to cast u64 result into s64.
11863 */
11864 dst_reg->smin_value = dst_reg->umin_value;
11865 dst_reg->smax_value = dst_reg->umax_value;
11866 } else {
11867 dst_reg->smin_value = S64_MIN;
11868 dst_reg->smax_value = S64_MAX;
11869 }
11870
11871 __update_reg_bounds(dst_reg);
11872}
11873
3f50f132
JF
11874static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
11875 u64 umin_val, u64 umax_val)
07cd2631 11876{
07cd2631
JF
11877 /* We lose all sign bit information (except what we can pick
11878 * up from var_off)
11879 */
3f50f132
JF
11880 dst_reg->s32_min_value = S32_MIN;
11881 dst_reg->s32_max_value = S32_MAX;
11882 /* If we might shift our top bit out, then we know nothing */
11883 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
11884 dst_reg->u32_min_value = 0;
11885 dst_reg->u32_max_value = U32_MAX;
11886 } else {
11887 dst_reg->u32_min_value <<= umin_val;
11888 dst_reg->u32_max_value <<= umax_val;
11889 }
11890}
11891
11892static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
11893 struct bpf_reg_state *src_reg)
11894{
11895 u32 umax_val = src_reg->u32_max_value;
11896 u32 umin_val = src_reg->u32_min_value;
 11897 /* u32 alu operation zero-extends, clearing the upper 32 bits */
11898 struct tnum subreg = tnum_subreg(dst_reg->var_off);
11899
11900 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
11901 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
11902 /* Not required but being careful mark reg64 bounds as unknown so
11903 * that we are forced to pick them up from tnum and zext later and
11904 * if some path skips this step we are still safe.
11905 */
11906 __mark_reg64_unbounded(dst_reg);
11907 __update_reg32_bounds(dst_reg);
11908}
11909
11910static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
11911 u64 umin_val, u64 umax_val)
11912{
11913 /* Special case <<32 because it is a common compiler pattern to sign
11914 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
11915 * positive we know this shift will also be positive so we can track
11916 * bounds correctly. Otherwise we lose all sign bit information except
11917 * what we can pick up from var_off. Perhaps we can generalize this
11918 * later to shifts of any length.
11919 */
11920 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
11921 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
11922 else
11923 dst_reg->smax_value = S64_MAX;
11924
11925 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
11926 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
11927 else
11928 dst_reg->smin_value = S64_MIN;
11929
07cd2631
JF
11930 /* If we might shift our top bit out, then we know nothing */
11931 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
11932 dst_reg->umin_value = 0;
11933 dst_reg->umax_value = U64_MAX;
11934 } else {
11935 dst_reg->umin_value <<= umin_val;
11936 dst_reg->umax_value <<= umax_val;
11937 }
3f50f132
JF
11938}
11939
11940static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
11941 struct bpf_reg_state *src_reg)
11942{
11943 u64 umax_val = src_reg->umax_value;
11944 u64 umin_val = src_reg->umin_value;
11945
11946 /* scalar64 calc uses 32bit unshifted bounds so must be called first */
11947 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
11948 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
11949
07cd2631
JF
11950 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
11951 /* We may learn something more from the var_off */
11952 __update_reg_bounds(dst_reg);
11953}
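/* Example: the <<32 special case covers the common compiler idiom
 * "r1 <<= 32; r1 s>>= 32" used to sign-extend a subreg. If the 32-bit
 * signed bounds are known nonnegative, say [0, 100], the 64-bit signed
 * bounds after the shift stay exact: [0, 100LL << 32].
 */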
11954
3f50f132
JF
11955static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
11956 struct bpf_reg_state *src_reg)
11957{
11958 struct tnum subreg = tnum_subreg(dst_reg->var_off);
11959 u32 umax_val = src_reg->u32_max_value;
11960 u32 umin_val = src_reg->u32_min_value;
11961
11962 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
11963 * be negative, then either:
11964 * 1) src_reg might be zero, so the sign bit of the result is
11965 * unknown, so we lose our signed bounds
11966 * 2) it's known negative, thus the unsigned bounds capture the
11967 * signed bounds
11968 * 3) the signed bounds cross zero, so they tell us nothing
11969 * about the result
11970 * If the value in dst_reg is known nonnegative, then again the
18b24d78 11971 * unsigned bounds capture the signed bounds.
3f50f132
JF
11972 * Thus, in all cases it suffices to blow away our signed bounds
11973 * and rely on inferring new ones from the unsigned bounds and
11974 * var_off of the result.
11975 */
11976 dst_reg->s32_min_value = S32_MIN;
11977 dst_reg->s32_max_value = S32_MAX;
11978
11979 dst_reg->var_off = tnum_rshift(subreg, umin_val);
11980 dst_reg->u32_min_value >>= umax_val;
11981 dst_reg->u32_max_value >>= umin_val;
11982
11983 __mark_reg64_unbounded(dst_reg);
11984 __update_reg32_bounds(dst_reg);
11985}
11986
07cd2631
JF
11987static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
11988 struct bpf_reg_state *src_reg)
11989{
11990 u64 umax_val = src_reg->umax_value;
11991 u64 umin_val = src_reg->umin_value;
11992
11993 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
11994 * be negative, then either:
11995 * 1) src_reg might be zero, so the sign bit of the result is
11996 * unknown, so we lose our signed bounds
11997 * 2) it's known negative, thus the unsigned bounds capture the
11998 * signed bounds
11999 * 3) the signed bounds cross zero, so they tell us nothing
12000 * about the result
12001 * If the value in dst_reg is known nonnegative, then again the
18b24d78 12002 * unsigned bounds capture the signed bounds.
07cd2631
JF
12003 * Thus, in all cases it suffices to blow away our signed bounds
12004 * and rely on inferring new ones from the unsigned bounds and
12005 * var_off of the result.
12006 */
12007 dst_reg->smin_value = S64_MIN;
12008 dst_reg->smax_value = S64_MAX;
12009 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
12010 dst_reg->umin_value >>= umax_val;
12011 dst_reg->umax_value >>= umin_val;
3f50f132
JF
12012
 12013 /* It's not easy to operate on alu32 bounds here because it depends
 12014 * on bits being shifted in. Take the easy way out and mark unbounded
12015 * so we can recalculate later from tnum.
12016 */
12017 __mark_reg32_unbounded(dst_reg);
07cd2631
JF
12018 __update_reg_bounds(dst_reg);
12019}
12020
3f50f132
JF
12021static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
12022 struct bpf_reg_state *src_reg)
07cd2631 12023{
3f50f132 12024 u64 umin_val = src_reg->u32_min_value;
07cd2631
JF
12025
12026 /* Upon reaching here, src_known is true and
12027 * umax_val is equal to umin_val.
12028 */
3f50f132
JF
12029 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
12030 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);
07cd2631 12031
3f50f132
JF
12032 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);
12033
12034 /* blow away the dst_reg umin_value/umax_value and rely on
12035 * dst_reg var_off to refine the result.
12036 */
12037 dst_reg->u32_min_value = 0;
12038 dst_reg->u32_max_value = U32_MAX;
12039
12040 __mark_reg64_unbounded(dst_reg);
12041 __update_reg32_bounds(dst_reg);
12042}
12043
12044static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
12045 struct bpf_reg_state *src_reg)
12046{
12047 u64 umin_val = src_reg->umin_value;
12048
12049 /* Upon reaching here, src_known is true and umax_val is equal
12050 * to umin_val.
12051 */
12052 dst_reg->smin_value >>= umin_val;
12053 dst_reg->smax_value >>= umin_val;
12054
12055 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);
07cd2631
JF
12056
12057 /* blow away the dst_reg umin_value/umax_value and rely on
12058 * dst_reg var_off to refine the result.
12059 */
12060 dst_reg->umin_value = 0;
12061 dst_reg->umax_value = U64_MAX;
3f50f132
JF
12062
 12063 /* It's not easy to operate on alu32 bounds here because it depends
 12064 * on bits being shifted in from the upper 32 bits. Take the easy way out
12065 * and mark unbounded so we can recalculate later from tnum.
12066 */
12067 __mark_reg32_unbounded(dst_reg);
07cd2631
JF
12068 __update_reg_bounds(dst_reg);
12069}
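/* Example: arithmetic shift preserves sign, so a signed range [-128, 512]
 * s>> 4 becomes [-8, 32] directly. The unsigned bounds are blown away
 * because a negative value corresponds to a huge u64, and are recomputed
 * from var_off by __update_reg_bounds() above.
 */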
12070
468f6eaf
JH
12071/* WARNING: This function does calculations on 64-bit values, but the actual
12072 * execution may occur on 32-bit values. Therefore, things like bitshifts
12073 * need extra checks in the 32-bit case.
12074 */
f1174f77
EC
12075static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
12076 struct bpf_insn *insn,
12077 struct bpf_reg_state *dst_reg,
12078 struct bpf_reg_state src_reg)
969bf05e 12079{
638f5b90 12080 struct bpf_reg_state *regs = cur_regs(env);
48461135 12081 u8 opcode = BPF_OP(insn->code);
b0b3fb67 12082 bool src_known;
b03c9f9f
EC
12083 s64 smin_val, smax_val;
12084 u64 umin_val, umax_val;
3f50f132
JF
12085 s32 s32_min_val, s32_max_val;
12086 u32 u32_min_val, u32_max_val;
468f6eaf 12087 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
3f50f132 12088 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
a6aaece0 12089 int ret;
b799207e 12090
b03c9f9f
EC
12091 smin_val = src_reg.smin_value;
12092 smax_val = src_reg.smax_value;
12093 umin_val = src_reg.umin_value;
12094 umax_val = src_reg.umax_value;
f23cc643 12095
3f50f132
JF
12096 s32_min_val = src_reg.s32_min_value;
12097 s32_max_val = src_reg.s32_max_value;
12098 u32_min_val = src_reg.u32_min_value;
12099 u32_max_val = src_reg.u32_max_value;
12100
12101 if (alu32) {
12102 src_known = tnum_subreg_is_const(src_reg.var_off);
3f50f132
JF
12103 if ((src_known &&
12104 (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
12105 s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
12106 /* Taint dst register if offset had invalid bounds
12107 * derived from e.g. dead branches.
12108 */
12109 __mark_reg_unknown(env, dst_reg);
12110 return 0;
12111 }
12112 } else {
12113 src_known = tnum_is_const(src_reg.var_off);
3f50f132
JF
12114 if ((src_known &&
12115 (smin_val != smax_val || umin_val != umax_val)) ||
12116 smin_val > smax_val || umin_val > umax_val) {
12117 /* Taint dst register if offset had invalid bounds
12118 * derived from e.g. dead branches.
12119 */
12120 __mark_reg_unknown(env, dst_reg);
12121 return 0;
12122 }
6f16101e
DB
12123 }
12124
bb7f0f98
AS
12125 if (!src_known &&
12126 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
f54c7898 12127 __mark_reg_unknown(env, dst_reg);
bb7f0f98
AS
12128 return 0;
12129 }
12130
f5288193
DB
12131 if (sanitize_needed(opcode)) {
12132 ret = sanitize_val_alu(env, insn);
12133 if (ret < 0)
12134 return sanitize_err(env, insn, ret, NULL, NULL);
12135 }
12136
3f50f132
JF
12137 /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
 12138 * There are two classes of instructions: for the first class we track
 12139 * both alu32 and alu64 sign/unsigned bounds independently; this provides
 12140 * the greatest amount of precision when alu operations are mixed with
 12141 * jmp32 operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL,
 12142 * BPF_AND, and BPF_OR. This is possible because these ops have fairly easy to
12143 * understand and calculate behavior in both 32-bit and 64-bit alu ops.
12144 * See alu32 verifier tests for examples. The second class of
12145 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy
12146 * with regards to tracking sign/unsigned bounds because the bits may
12147 * cross subreg boundaries in the alu64 case. When this happens we mark
12148 * the reg unbounded in the subreg bound space and use the resulting
12149 * tnum to calculate an approximation of the sign/unsigned bounds.
12150 */
48461135
JB
12151 switch (opcode) {
12152 case BPF_ADD:
3f50f132 12153 scalar32_min_max_add(dst_reg, &src_reg);
07cd2631 12154 scalar_min_max_add(dst_reg, &src_reg);
3f50f132 12155 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
48461135
JB
12156 break;
12157 case BPF_SUB:
3f50f132 12158 scalar32_min_max_sub(dst_reg, &src_reg);
07cd2631 12159 scalar_min_max_sub(dst_reg, &src_reg);
3f50f132 12160 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
48461135
JB
12161 break;
12162 case BPF_MUL:
3f50f132
JF
12163 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
12164 scalar32_min_max_mul(dst_reg, &src_reg);
07cd2631 12165 scalar_min_max_mul(dst_reg, &src_reg);
48461135
JB
12166 break;
12167 case BPF_AND:
3f50f132
JF
12168 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
12169 scalar32_min_max_and(dst_reg, &src_reg);
07cd2631 12170 scalar_min_max_and(dst_reg, &src_reg);
f1174f77
EC
12171 break;
12172 case BPF_OR:
3f50f132
JF
12173 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
12174 scalar32_min_max_or(dst_reg, &src_reg);
07cd2631 12175 scalar_min_max_or(dst_reg, &src_reg);
48461135 12176 break;
2921c90d
YS
12177 case BPF_XOR:
12178 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
12179 scalar32_min_max_xor(dst_reg, &src_reg);
12180 scalar_min_max_xor(dst_reg, &src_reg);
12181 break;
48461135 12182 case BPF_LSH:
468f6eaf
JH
12183 if (umax_val >= insn_bitness) {
12184 /* Shifts greater than 31 or 63 are undefined.
12185 * This includes shifts by a negative number.
b03c9f9f 12186 */
61bd5218 12187 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
12188 break;
12189 }
3f50f132
JF
12190 if (alu32)
12191 scalar32_min_max_lsh(dst_reg, &src_reg);
12192 else
12193 scalar_min_max_lsh(dst_reg, &src_reg);
48461135
JB
12194 break;
12195 case BPF_RSH:
468f6eaf
JH
12196 if (umax_val >= insn_bitness) {
12197 /* Shifts greater than 31 or 63 are undefined.
12198 * This includes shifts by a negative number.
b03c9f9f 12199 */
61bd5218 12200 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
12201 break;
12202 }
3f50f132
JF
12203 if (alu32)
12204 scalar32_min_max_rsh(dst_reg, &src_reg);
12205 else
12206 scalar_min_max_rsh(dst_reg, &src_reg);
48461135 12207 break;
9cbe1f5a
YS
12208 case BPF_ARSH:
12209 if (umax_val >= insn_bitness) {
12210 /* Shifts greater than 31 or 63 are undefined.
12211 * This includes shifts by a negative number.
12212 */
12213 mark_reg_unknown(env, regs, insn->dst_reg);
12214 break;
12215 }
3f50f132
JF
12216 if (alu32)
12217 scalar32_min_max_arsh(dst_reg, &src_reg);
12218 else
12219 scalar_min_max_arsh(dst_reg, &src_reg);
9cbe1f5a 12220 break;
48461135 12221 default:
61bd5218 12222 mark_reg_unknown(env, regs, insn->dst_reg);
48461135
JB
12223 break;
12224 }
12225
3f50f132
JF
12226 /* ALU32 ops are zero extended into 64bit register */
12227 if (alu32)
12228 zext_32_to_64(dst_reg);
3844d153 12229 reg_bounds_sync(dst_reg);
f1174f77
EC
12230 return 0;
12231}
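/* Note: for alu32 the result is zero-extended by zext_32_to_64() above,
 * matching BPF semantics where e.g. "w0 += w1" always clears the upper
 * 32 bits, so a u32 result range of [0, 7] is also the u64 range [0, 7].
 */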
12232
12233/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
12234 * and var_off.
12235 */
12236static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
12237 struct bpf_insn *insn)
12238{
f4d7e40a
AS
12239 struct bpf_verifier_state *vstate = env->cur_state;
12240 struct bpf_func_state *state = vstate->frame[vstate->curframe];
12241 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
f1174f77
EC
12242 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
12243 u8 opcode = BPF_OP(insn->code);
b5dc0163 12244 int err;
f1174f77
EC
12245
12246 dst_reg = &regs[insn->dst_reg];
f1174f77
EC
12247 src_reg = NULL;
12248 if (dst_reg->type != SCALAR_VALUE)
12249 ptr_reg = dst_reg;
75748837
AS
12250 else
12251 /* Make sure ID is cleared otherwise dst_reg min/max could be
12252 * incorrectly propagated into other registers by find_equal_scalars()
12253 */
12254 dst_reg->id = 0;
f1174f77
EC
12255 if (BPF_SRC(insn->code) == BPF_X) {
12256 src_reg = &regs[insn->src_reg];
f1174f77
EC
12257 if (src_reg->type != SCALAR_VALUE) {
12258 if (dst_reg->type != SCALAR_VALUE) {
12259 /* Combining two pointers by any ALU op yields
82abbf8d
AS
12260 * an arbitrary scalar. Disallow all math except
12261 * pointer subtraction
f1174f77 12262 */
dd066823 12263 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
82abbf8d
AS
12264 mark_reg_unknown(env, regs, insn->dst_reg);
12265 return 0;
f1174f77 12266 }
82abbf8d
AS
12267 verbose(env, "R%d pointer %s pointer prohibited\n",
12268 insn->dst_reg,
12269 bpf_alu_string[opcode >> 4]);
12270 return -EACCES;
f1174f77
EC
12271 } else {
12272 /* scalar += pointer
12273 * This is legal, but we have to reverse our
12274 * src/dest handling in computing the range
12275 */
b5dc0163
AS
12276 err = mark_chain_precision(env, insn->dst_reg);
12277 if (err)
12278 return err;
82abbf8d
AS
12279 return adjust_ptr_min_max_vals(env, insn,
12280 src_reg, dst_reg);
f1174f77
EC
12281 }
12282 } else if (ptr_reg) {
12283 /* pointer += scalar */
b5dc0163
AS
12284 err = mark_chain_precision(env, insn->src_reg);
12285 if (err)
12286 return err;
82abbf8d
AS
12287 return adjust_ptr_min_max_vals(env, insn,
12288 dst_reg, src_reg);
a3b666bf
AN
12289 } else if (dst_reg->precise) {
12290 /* if dst_reg is precise, src_reg should be precise as well */
12291 err = mark_chain_precision(env, insn->src_reg);
12292 if (err)
12293 return err;
f1174f77
EC
12294 }
12295 } else {
12296 /* Pretend the src is a reg with a known value, since we only
12297 * need to be able to read from this state.
12298 */
12299 off_reg.type = SCALAR_VALUE;
b03c9f9f 12300 __mark_reg_known(&off_reg, insn->imm);
f1174f77 12301 src_reg = &off_reg;
82abbf8d
AS
12302 if (ptr_reg) /* pointer += K */
12303 return adjust_ptr_min_max_vals(env, insn,
12304 ptr_reg, src_reg);
f1174f77
EC
12305 }
12306
12307 /* Got here implies adding two SCALAR_VALUEs */
12308 if (WARN_ON_ONCE(ptr_reg)) {
0f55f9ed 12309 print_verifier_state(env, state, true);
61bd5218 12310 verbose(env, "verifier internal error: unexpected ptr_reg\n");
f1174f77
EC
12311 return -EINVAL;
12312 }
12313 if (WARN_ON(!src_reg)) {
0f55f9ed 12314 print_verifier_state(env, state, true);
61bd5218 12315 verbose(env, "verifier internal error: no src_reg\n");
f1174f77
EC
12316 return -EINVAL;
12317 }
12318 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
48461135
JB
12319}
12320
17a52670 12321/* check validity of 32-bit and 64-bit arithmetic operations */
58e2af8b 12322static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 12323{
638f5b90 12324 struct bpf_reg_state *regs = cur_regs(env);
17a52670
AS
12325 u8 opcode = BPF_OP(insn->code);
12326 int err;
12327
12328 if (opcode == BPF_END || opcode == BPF_NEG) {
12329 if (opcode == BPF_NEG) {
395e942d 12330 if (BPF_SRC(insn->code) != BPF_K ||
17a52670
AS
12331 insn->src_reg != BPF_REG_0 ||
12332 insn->off != 0 || insn->imm != 0) {
61bd5218 12333 verbose(env, "BPF_NEG uses reserved fields\n");
17a52670
AS
12334 return -EINVAL;
12335 }
12336 } else {
12337 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
e67b8a68
EC
12338 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
12339 BPF_CLASS(insn->code) == BPF_ALU64) {
61bd5218 12340 verbose(env, "BPF_END uses reserved fields\n");
17a52670
AS
12341 return -EINVAL;
12342 }
12343 }
12344
12345 /* check src operand */
dc503a8a 12346 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
12347 if (err)
12348 return err;
12349
1be7f75d 12350 if (is_pointer_value(env, insn->dst_reg)) {
61bd5218 12351 verbose(env, "R%d pointer arithmetic prohibited\n",
1be7f75d
AS
12352 insn->dst_reg);
12353 return -EACCES;
12354 }
12355
17a52670 12356 /* check dest operand */
dc503a8a 12357 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
12358 if (err)
12359 return err;
12360
12361 } else if (opcode == BPF_MOV) {
12362
12363 if (BPF_SRC(insn->code) == BPF_X) {
12364 if (insn->imm != 0 || insn->off != 0) {
61bd5218 12365 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
12366 return -EINVAL;
12367 }
12368
12369 /* check src operand */
dc503a8a 12370 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
12371 if (err)
12372 return err;
12373 } else {
12374 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 12375 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
12376 return -EINVAL;
12377 }
12378 }
12379
fbeb1603
AF
12380 /* check dest operand, mark as required later */
12381 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
12382 if (err)
12383 return err;
12384
12385 if (BPF_SRC(insn->code) == BPF_X) {
e434b8cd
JW
12386 struct bpf_reg_state *src_reg = regs + insn->src_reg;
12387 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
12388
17a52670
AS
12389 if (BPF_CLASS(insn->code) == BPF_ALU64) {
12390 /* case: R1 = R2
12391 * copy register state to dest reg
12392 */
75748837
AS
12393 if (src_reg->type == SCALAR_VALUE && !src_reg->id)
12394 /* Assign src and dst registers the same ID
12395 * that will be used by find_equal_scalars()
12396 * to propagate min/max range.
12397 */
12398 src_reg->id = ++env->id_gen;
71f656a5 12399 copy_register_state(dst_reg, src_reg);
e434b8cd 12400 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 12401 dst_reg->subreg_def = DEF_NOT_SUBREG;
17a52670 12402 } else {
f1174f77 12403 /* R1 = (u32) R2 */
1be7f75d 12404 if (is_pointer_value(env, insn->src_reg)) {
61bd5218
JK
12405 verbose(env,
12406 "R%d partial copy of pointer\n",
1be7f75d
AS
12407 insn->src_reg);
12408 return -EACCES;
e434b8cd 12409 } else if (src_reg->type == SCALAR_VALUE) {
71f656a5 12410 copy_register_state(dst_reg, src_reg);
75748837
AS
12411 /* Make sure ID is cleared otherwise
12412 * dst_reg min/max could be incorrectly
12413 * propagated into src_reg by find_equal_scalars()
12414 */
12415 dst_reg->id = 0;
e434b8cd 12416 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 12417 dst_reg->subreg_def = env->insn_idx + 1;
e434b8cd
JW
12418 } else {
12419 mark_reg_unknown(env, regs,
12420 insn->dst_reg);
1be7f75d 12421 }
3f50f132 12422 zext_32_to_64(dst_reg);
3844d153 12423 reg_bounds_sync(dst_reg);
17a52670
AS
12424 }
12425 } else {
12426 /* case: R = imm
12427 * remember the value we stored into this reg
12428 */
fbeb1603
AF
12429 /* clear any state __mark_reg_known doesn't set */
12430 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77 12431 regs[insn->dst_reg].type = SCALAR_VALUE;
95a762e2
JH
12432 if (BPF_CLASS(insn->code) == BPF_ALU64) {
12433 __mark_reg_known(regs + insn->dst_reg,
12434 insn->imm);
12435 } else {
12436 __mark_reg_known(regs + insn->dst_reg,
12437 (u32)insn->imm);
12438 }
17a52670
AS
12439 }
12440
12441 } else if (opcode > BPF_END) {
61bd5218 12442 verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
17a52670
AS
12443 return -EINVAL;
12444
12445 } else { /* all other ALU ops: and, sub, xor, add, ... */
12446
17a52670
AS
12447 if (BPF_SRC(insn->code) == BPF_X) {
12448 if (insn->imm != 0 || insn->off != 0) {
61bd5218 12449 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
12450 return -EINVAL;
12451 }
12452 /* check src1 operand */
dc503a8a 12453 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
12454 if (err)
12455 return err;
12456 } else {
12457 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 12458 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
12459 return -EINVAL;
12460 }
12461 }
12462
12463 /* check src2 operand */
dc503a8a 12464 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
12465 if (err)
12466 return err;
12467
12468 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
12469 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
61bd5218 12470 verbose(env, "div by zero\n");
17a52670
AS
12471 return -EINVAL;
12472 }
12473
229394e8
RV
12474 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
12475 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
12476 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
12477
12478 if (insn->imm < 0 || insn->imm >= size) {
61bd5218 12479 verbose(env, "invalid shift %d\n", insn->imm);
229394e8
RV
12480 return -EINVAL;
12481 }
12482 }
12483
1a0dc1ac 12484 /* check dest operand */
dc503a8a 12485 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
1a0dc1ac
AS
12486 if (err)
12487 return err;
12488
f1174f77 12489 return adjust_reg_min_max_vals(env, insn);
17a52670
AS
12490 }
12491
12492 return 0;
12493}
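/* Illustrative case: "w1 = w2" (BPF_MOV | BPF_X, ALU32) with a scalar src
 * takes the SCALAR_VALUE branch above: the state is copied, the id is
 * cleared so find_equal_scalars() cannot link the 32-bit copy to the
 * 64-bit original, and zext_32_to_64() zeroes the upper half.
 */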
12494
f4d7e40a 12495static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
de8f3a83 12496 struct bpf_reg_state *dst_reg,
f8ddadc4 12497 enum bpf_reg_type type,
fb2a311a 12498 bool range_right_open)
969bf05e 12499{
b239da34
KKD
12500 struct bpf_func_state *state;
12501 struct bpf_reg_state *reg;
12502 int new_range;
2d2be8ca 12503
fb2a311a
DB
12504 if (dst_reg->off < 0 ||
12505 (dst_reg->off == 0 && range_right_open))
f1174f77
EC
12506 /* This doesn't give us any range */
12507 return;
12508
b03c9f9f
EC
12509 if (dst_reg->umax_value > MAX_PACKET_OFF ||
12510 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
f1174f77
EC
12511 /* Risk of overflow. For instance, ptr + (1<<63) may be less
12512 * than pkt_end, but that's because it's also less than pkt.
12513 */
12514 return;
12515
fb2a311a
DB
12516 new_range = dst_reg->off;
12517 if (range_right_open)
2fa7d94a 12518 new_range++;
fb2a311a
DB
12519
12520 /* Examples for register markings:
2d2be8ca 12521 *
fb2a311a 12522 * pkt_data in dst register:
2d2be8ca
DB
12523 *
12524 * r2 = r3;
12525 * r2 += 8;
12526 * if (r2 > pkt_end) goto <handle exception>
12527 * <access okay>
12528 *
b4e432f1
DB
12529 * r2 = r3;
12530 * r2 += 8;
12531 * if (r2 < pkt_end) goto <access okay>
12532 * <handle exception>
12533 *
2d2be8ca
DB
12534 * Where:
12535 * r2 == dst_reg, pkt_end == src_reg
12536 * r2=pkt(id=n,off=8,r=0)
12537 * r3=pkt(id=n,off=0,r=0)
12538 *
fb2a311a 12539 * pkt_data in src register:
2d2be8ca
DB
12540 *
12541 * r2 = r3;
12542 * r2 += 8;
12543 * if (pkt_end >= r2) goto <access okay>
12544 * <handle exception>
12545 *
b4e432f1
DB
12546 * r2 = r3;
12547 * r2 += 8;
12548 * if (pkt_end <= r2) goto <handle exception>
12549 * <access okay>
12550 *
2d2be8ca
DB
12551 * Where:
12552 * pkt_end == dst_reg, r2 == src_reg
12553 * r2=pkt(id=n,off=8,r=0)
12554 * r3=pkt(id=n,off=0,r=0)
12555 *
12556 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
fb2a311a
DB
12557 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
12558 * and [r3, r3 + 8-1) respectively is safe to access depending on
12559 * the check.
969bf05e 12560 */
2d2be8ca 12561
f1174f77
EC
12562 /* If our ids match, then we must have the same max_value. And we
12563 * don't care about the other reg's fixed offset, since if it's too big
12564 * the range won't allow anything.
12565 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
12566 */
b239da34
KKD
12567 bpf_for_each_reg_in_vstate(vstate, state, reg, ({
12568 if (reg->type == type && reg->id == dst_reg->id)
12569 /* keep the maximum range already checked */
12570 reg->range = max(reg->range, new_range);
12571 }));
969bf05e
AS
12572}

static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
{
	struct tnum subreg = tnum_subreg(reg->var_off);
	s32 sval = (s32)val;

	switch (opcode) {
	case BPF_JEQ:
		if (tnum_is_const(subreg))
			return !!tnum_equals_const(subreg, val);
		break;
	case BPF_JNE:
		if (tnum_is_const(subreg))
			return !tnum_equals_const(subreg, val);
		break;
	case BPF_JSET:
		if ((~subreg.mask & subreg.value) & val)
			return 1;
		if (!((subreg.mask | subreg.value) & val))
			return 0;
		break;
	case BPF_JGT:
		if (reg->u32_min_value > val)
			return 1;
		else if (reg->u32_max_value <= val)
			return 0;
		break;
	case BPF_JSGT:
		if (reg->s32_min_value > sval)
			return 1;
		else if (reg->s32_max_value <= sval)
			return 0;
		break;
	case BPF_JLT:
		if (reg->u32_max_value < val)
			return 1;
		else if (reg->u32_min_value >= val)
			return 0;
		break;
	case BPF_JSLT:
		if (reg->s32_max_value < sval)
			return 1;
		else if (reg->s32_min_value >= sval)
			return 0;
		break;
	case BPF_JGE:
		if (reg->u32_min_value >= val)
			return 1;
		else if (reg->u32_max_value < val)
			return 0;
		break;
	case BPF_JSGE:
		if (reg->s32_min_value >= sval)
			return 1;
		else if (reg->s32_max_value < sval)
			return 0;
		break;
	case BPF_JLE:
		if (reg->u32_max_value <= val)
			return 1;
		else if (reg->u32_min_value > val)
			return 0;
		break;
	case BPF_JSLE:
		if (reg->s32_max_value <= sval)
			return 1;
		else if (reg->s32_min_value > sval)
			return 0;
		break;
	}

	return -1;
}

static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
{
	s64 sval = (s64)val;

	switch (opcode) {
	case BPF_JEQ:
		if (tnum_is_const(reg->var_off))
			return !!tnum_equals_const(reg->var_off, val);
		break;
	case BPF_JNE:
		if (tnum_is_const(reg->var_off))
			return !tnum_equals_const(reg->var_off, val);
		break;
	case BPF_JSET:
		if ((~reg->var_off.mask & reg->var_off.value) & val)
			return 1;
		if (!((reg->var_off.mask | reg->var_off.value) & val))
			return 0;
		break;
	case BPF_JGT:
		if (reg->umin_value > val)
			return 1;
		else if (reg->umax_value <= val)
			return 0;
		break;
	case BPF_JSGT:
		if (reg->smin_value > sval)
			return 1;
		else if (reg->smax_value <= sval)
			return 0;
		break;
	case BPF_JLT:
		if (reg->umax_value < val)
			return 1;
		else if (reg->umin_value >= val)
			return 0;
		break;
	case BPF_JSLT:
		if (reg->smax_value < sval)
			return 1;
		else if (reg->smin_value >= sval)
			return 0;
		break;
	case BPF_JGE:
		if (reg->umin_value >= val)
			return 1;
		else if (reg->umax_value < val)
			return 0;
		break;
	case BPF_JSGE:
		if (reg->smin_value >= sval)
			return 1;
		else if (reg->smax_value < sval)
			return 0;
		break;
	case BPF_JLE:
		if (reg->umax_value <= val)
			return 1;
		else if (reg->umin_value > val)
			return 0;
		break;
	case BPF_JSLE:
		if (reg->smax_value <= sval)
			return 1;
		else if (reg->smin_value > sval)
			return 0;
		break;
	}

	return -1;
}
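
/* Worked example (illustrative, not from the original source): with
 * var_off = {.value = 0x4, .mask = 0x1} the register has bit 2 known
 * set and bit 0 unknown.  For BPF_JSET, testing reg & 0x4 hits a
 * known-one bit and returns 1; reg & 0x8 touches only known-zero bits
 * and returns 0; reg & 0x1 depends on the unknown bit, so the result
 * is -1 and both branches must be explored.
 */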

/* compute branch direction of the expression "if (reg opcode val) goto target;"
 * and return:
 *  1 - branch will be taken and "goto target" will be executed
 *  0 - branch will not be taken and fall-through to next insn
 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value
 *      range [0,10]
 */
static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
			   bool is_jmp32)
{
	if (__is_pointer_value(false, reg)) {
		if (!reg_type_not_null(reg->type))
			return -1;

		/* If pointer is valid tests against zero will fail so we can
		 * use this to direct branch taken.
		 */
		if (val != 0)
			return -1;

		switch (opcode) {
		case BPF_JEQ:
			return 0;
		case BPF_JNE:
			return 1;
		default:
			return -1;
		}
	}

	if (is_jmp32)
		return is_branch32_taken(reg, val, opcode);
	return is_branch64_taken(reg, val, opcode);
}
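
/* Example (illustrative): after a successful NULL check, a socket
 * pointer returned by bpf_sk_lookup_tcp() carries the not-null
 * PTR_TO_SOCKET type.  A redundant "if (r0 == 0)" then takes the
 * pointer fast path above: val == 0 and BPF_JEQ yield 0, so the dead
 * branch is never explored.
 */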

static int flip_opcode(u32 opcode)
{
	/* How can we transform "a <op> b" into "b <op> a"? */
	static const u8 opcode_flip[16] = {
		/* these stay the same */
		[BPF_JEQ  >> 4] = BPF_JEQ,
		[BPF_JNE  >> 4] = BPF_JNE,
		[BPF_JSET >> 4] = BPF_JSET,
		/* these swap "lesser" and "greater" (L and G in the opcodes) */
		[BPF_JGE  >> 4] = BPF_JLE,
		[BPF_JGT  >> 4] = BPF_JLT,
		[BPF_JLE  >> 4] = BPF_JGE,
		[BPF_JLT  >> 4] = BPF_JGT,
		[BPF_JSGE >> 4] = BPF_JSLE,
		[BPF_JSGT >> 4] = BPF_JSLT,
		[BPF_JSLE >> 4] = BPF_JSGE,
		[BPF_JSLT >> 4] = BPF_JSGT
	};
	return opcode_flip[opcode >> 4];
}
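
/* E.g. flip_opcode(BPF_JGT) == BPF_JLT, since "a > b" iff "b < a";
 * BPF_JEQ, BPF_JNE and BPF_JSET are symmetric and map to themselves.
 */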

static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
				   struct bpf_reg_state *src_reg,
				   u8 opcode)
{
	struct bpf_reg_state *pkt;

	if (src_reg->type == PTR_TO_PACKET_END) {
		pkt = dst_reg;
	} else if (dst_reg->type == PTR_TO_PACKET_END) {
		pkt = src_reg;
		opcode = flip_opcode(opcode);
	} else {
		return -1;
	}

	if (pkt->range >= 0)
		return -1;

	switch (opcode) {
	case BPF_JLE:
		/* pkt <= pkt_end */
		fallthrough;
	case BPF_JGT:
		/* pkt > pkt_end */
		if (pkt->range == BEYOND_PKT_END)
			/* pkt has at least one extra byte beyond pkt_end */
			return opcode == BPF_JGT;
		break;
	case BPF_JLT:
		/* pkt < pkt_end */
		fallthrough;
	case BPF_JGE:
		/* pkt >= pkt_end */
		if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
			return opcode == BPF_JGE;
		break;
	}
	return -1;
}

/* Adjusts the register min/max values in the case that the dst_reg is the
 * variable register that we are working on, and src_reg is a constant or we're
 * simply doing a BPF_K check.
 * In JEQ/JNE cases we also adjust the var_off values.
 */
static void reg_set_min_max(struct bpf_reg_state *true_reg,
			    struct bpf_reg_state *false_reg,
			    u64 val, u32 val32,
			    u8 opcode, bool is_jmp32)
{
	struct tnum false_32off = tnum_subreg(false_reg->var_off);
	struct tnum false_64off = false_reg->var_off;
	struct tnum true_32off = tnum_subreg(true_reg->var_off);
	struct tnum true_64off = true_reg->var_off;
	s64 sval = (s64)val;
	s32 sval32 = (s32)val32;

	/* If the dst_reg is a pointer, we can't learn anything about its
	 * variable offset from the compare (unless src_reg were a pointer into
	 * the same object, but we don't bother with that).
	 * Since false_reg and true_reg have the same type by construction, we
	 * only need to check one of them for pointerness.
	 */
	if (__is_pointer_value(false, false_reg))
		return;

	switch (opcode) {
	/* JEQ/JNE comparison doesn't change the register equivalence.
	 *
	 * r1 = r2;
	 * if (r1 == 42) goto label;
	 * ...
	 * label: // here both r1 and r2 are known to be 42.
	 *
	 * Hence when marking a register as known, preserve its ID.
	 */
	case BPF_JEQ:
		if (is_jmp32) {
			__mark_reg32_known(true_reg, val32);
			true_32off = tnum_subreg(true_reg->var_off);
		} else {
			___mark_reg_known(true_reg, val);
			true_64off = true_reg->var_off;
		}
		break;
	case BPF_JNE:
		if (is_jmp32) {
			__mark_reg32_known(false_reg, val32);
			false_32off = tnum_subreg(false_reg->var_off);
		} else {
			___mark_reg_known(false_reg, val);
			false_64off = false_reg->var_off;
		}
		break;
	case BPF_JSET:
		if (is_jmp32) {
			false_32off = tnum_and(false_32off, tnum_const(~val32));
			if (is_power_of_2(val32))
				true_32off = tnum_or(true_32off,
						     tnum_const(val32));
		} else {
			false_64off = tnum_and(false_64off, tnum_const(~val));
			if (is_power_of_2(val))
				true_64off = tnum_or(true_64off,
						     tnum_const(val));
		}
		break;
	case BPF_JGE:
	case BPF_JGT:
	{
		if (is_jmp32) {
			u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1;
			u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;

			false_reg->u32_max_value = min(false_reg->u32_max_value,
						       false_umax);
			true_reg->u32_min_value = max(true_reg->u32_min_value,
						      true_umin);
		} else {
			u64 false_umax = opcode == BPF_JGT ? val : val - 1;
			u64 true_umin = opcode == BPF_JGT ? val + 1 : val;

			false_reg->umax_value = min(false_reg->umax_value, false_umax);
			true_reg->umin_value = max(true_reg->umin_value, true_umin);
		}
		break;
	}
	case BPF_JSGE:
	case BPF_JSGT:
	{
		if (is_jmp32) {
			s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1;
			s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;

			false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
			true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
		} else {
			s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
			s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;

			false_reg->smax_value = min(false_reg->smax_value, false_smax);
			true_reg->smin_value = max(true_reg->smin_value, true_smin);
		}
		break;
	}
	case BPF_JLE:
	case BPF_JLT:
	{
		if (is_jmp32) {
			u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1;
			u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;

			false_reg->u32_min_value = max(false_reg->u32_min_value,
						       false_umin);
			true_reg->u32_max_value = min(true_reg->u32_max_value,
						      true_umax);
		} else {
			u64 false_umin = opcode == BPF_JLT ? val : val + 1;
			u64 true_umax = opcode == BPF_JLT ? val - 1 : val;

			false_reg->umin_value = max(false_reg->umin_value, false_umin);
			true_reg->umax_value = min(true_reg->umax_value, true_umax);
		}
		break;
	}
	case BPF_JSLE:
	case BPF_JSLT:
	{
		if (is_jmp32) {
			s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1;
			s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;

			false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
			true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
		} else {
			s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
			s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;

			false_reg->smin_value = max(false_reg->smin_value, false_smin);
			true_reg->smax_value = min(true_reg->smax_value, true_smax);
		}
		break;
	}
	default:
		return;
	}

	if (is_jmp32) {
		false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
					     tnum_subreg(false_32off));
		true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
					    tnum_subreg(true_32off));
		__reg_combine_32_into_64(false_reg);
		__reg_combine_32_into_64(true_reg);
	} else {
		false_reg->var_off = false_64off;
		true_reg->var_off = true_64off;
		__reg_combine_64_into_32(false_reg);
		__reg_combine_64_into_32(true_reg);
	}
}
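
/* Worked example (illustrative, not from the original source): for a
 * 64-bit "if r1 > 10" (BPF_JGT, val = 10) on a scalar with unknown
 * bounds, the BPF_JGE/BPF_JGT arm computes false_umax = 10 and
 * true_umin = 11, so the fall-through state learns umax_value <= 10
 * and the taken state umin_value >= 11.  The trailing
 * __reg_combine_64_into_32() then derives consistent u32/s32 bounds.
 */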

/* Same as above, but for the case that dst_reg holds a constant and src_reg is
 * the variable reg.
 */
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
				struct bpf_reg_state *false_reg,
				u64 val, u32 val32,
				u8 opcode, bool is_jmp32)
{
	opcode = flip_opcode(opcode);
	/* This uses zero as "not present in table"; luckily the zero opcode,
	 * BPF_JA, can't get here.
	 */
	if (opcode)
		reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
}

/* Regs are known to be equal, so intersect their min/max/var_off */
static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
				  struct bpf_reg_state *dst_reg)
{
	src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
							dst_reg->umin_value);
	src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
							dst_reg->umax_value);
	src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
							dst_reg->smin_value);
	src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
							dst_reg->smax_value);
	src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
							     dst_reg->var_off);
	reg_bounds_sync(src_reg);
	reg_bounds_sync(dst_reg);
}

static void reg_combine_min_max(struct bpf_reg_state *true_src,
				struct bpf_reg_state *true_dst,
				struct bpf_reg_state *false_src,
				struct bpf_reg_state *false_dst,
				u8 opcode)
{
	switch (opcode) {
	case BPF_JEQ:
		__reg_combine_min_max(true_src, true_dst);
		break;
	case BPF_JNE:
		__reg_combine_min_max(false_src, false_dst);
		break;
	}
}

static void mark_ptr_or_null_reg(struct bpf_func_state *state,
				 struct bpf_reg_state *reg, u32 id,
				 bool is_null)
{
	if (type_may_be_null(reg->type) && reg->id == id &&
	    (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) {
		/* Old offset (both fixed and variable parts) should have been
		 * known-zero, because we don't allow pointer arithmetic on
		 * pointers that might be NULL. If we see this happening, don't
		 * convert the register.
		 *
		 * But in some cases, some helpers that return local kptrs
		 * advance offset for the returned pointer. In those cases, it
		 * is fine to expect to see reg->off.
		 */
		if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0)))
			return;
		if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) &&
		    WARN_ON_ONCE(reg->off))
			return;

		if (is_null) {
			reg->type = SCALAR_VALUE;
			/* We don't need id and ref_obj_id from this point
			 * onwards anymore, thus we should better reset it,
			 * so that state pruning has chances to take effect.
			 */
			reg->id = 0;
			reg->ref_obj_id = 0;

			return;
		}

		mark_ptr_not_null_reg(reg);

		if (!reg_may_point_to_spin_lock(reg)) {
			/* For not-NULL ptr, reg->ref_obj_id will be reset
			 * in release_reference().
			 *
			 * reg->id is still used by spin_lock ptr. Other
			 * than spin_lock ptr type, reg->id can be reset.
			 */
			reg->id = 0;
		}
	}
}

/* The logic is similar to find_good_pkt_pointers(), both could eventually
 * be folded together at some point.
 */
static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
				  bool is_null)
{
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs, *reg;
	u32 ref_obj_id = regs[regno].ref_obj_id;
	u32 id = regs[regno].id;

	if (ref_obj_id && ref_obj_id == id && is_null)
		/* regs[regno] is in the " == NULL" branch.
		 * No one could have freed the reference state before
		 * doing the NULL check.
		 */
		WARN_ON_ONCE(release_reference_state(state, id));

	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
		mark_ptr_or_null_reg(state, reg, id, is_null);
	}));
}
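
/* Typical pattern this handles (illustrative sketch):
 *
 *	r0 = bpf_map_lookup_elem(...);	// PTR_TO_MAP_VALUE_OR_NULL, id=N
 *	r6 = r0;			// r6 inherits id=N
 *	if (r0 == 0) goto out;
 *	*(u32 *)(r6 + 0) = 1;		// ok: r6 was upgraded too
 *
 * Because r0 and r6 share one id, a single NULL check converts both
 * copies: to SCALAR_VALUE in the "== NULL" branch and to a usable
 * PTR_TO_MAP_VALUE in the other.
 */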

static bool try_match_pkt_pointers(const struct bpf_insn *insn,
				   struct bpf_reg_state *dst_reg,
				   struct bpf_reg_state *src_reg,
				   struct bpf_verifier_state *this_branch,
				   struct bpf_verifier_state *other_branch)
{
	if (BPF_SRC(insn->code) != BPF_X)
		return false;

	/* Pointers are always 64-bit. */
	if (BPF_CLASS(insn->code) == BPF_JMP32)
		return false;

	switch (BPF_OP(insn->code)) {
	case BPF_JGT:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' > pkt_end, pkt_meta' > pkt_data */
			find_good_pkt_pointers(this_branch, dst_reg,
					       dst_reg->type, false);
			mark_pkt_end(other_branch, insn->dst_reg, true);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end > pkt_data', pkt_data > pkt_meta' */
			find_good_pkt_pointers(other_branch, src_reg,
					       src_reg->type, true);
			mark_pkt_end(this_branch, insn->src_reg, false);
		} else {
			return false;
		}
		break;
	case BPF_JLT:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' < pkt_end, pkt_meta' < pkt_data */
			find_good_pkt_pointers(other_branch, dst_reg,
					       dst_reg->type, true);
			mark_pkt_end(this_branch, insn->dst_reg, false);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end < pkt_data', pkt_data < pkt_meta' */
			find_good_pkt_pointers(this_branch, src_reg,
					       src_reg->type, false);
			mark_pkt_end(other_branch, insn->src_reg, true);
		} else {
			return false;
		}
		break;
	case BPF_JGE:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
			find_good_pkt_pointers(this_branch, dst_reg,
					       dst_reg->type, true);
			mark_pkt_end(other_branch, insn->dst_reg, false);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
			find_good_pkt_pointers(other_branch, src_reg,
					       src_reg->type, false);
			mark_pkt_end(this_branch, insn->src_reg, true);
		} else {
			return false;
		}
		break;
	case BPF_JLE:
		if ((dst_reg->type == PTR_TO_PACKET &&
		     src_reg->type == PTR_TO_PACKET_END) ||
		    (dst_reg->type == PTR_TO_PACKET_META &&
		     reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
			/* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
			find_good_pkt_pointers(other_branch, dst_reg,
					       dst_reg->type, false);
			mark_pkt_end(this_branch, insn->dst_reg, true);
		} else if ((dst_reg->type == PTR_TO_PACKET_END &&
			    src_reg->type == PTR_TO_PACKET) ||
			   (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
			    src_reg->type == PTR_TO_PACKET_META)) {
			/* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
			find_good_pkt_pointers(this_branch, src_reg,
					       src_reg->type, true);
			mark_pkt_end(other_branch, insn->src_reg, false);
		} else {
			return false;
		}
		break;
	default:
		return false;
	}

	return true;
}

static void find_equal_scalars(struct bpf_verifier_state *vstate,
			       struct bpf_reg_state *known_reg)
{
	struct bpf_func_state *state;
	struct bpf_reg_state *reg;

	bpf_for_each_reg_in_vstate(vstate, state, reg, ({
		if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
			copy_register_state(reg, known_reg);
	}));
}

static int check_cond_jmp_op(struct bpf_verifier_env *env,
			     struct bpf_insn *insn, int *insn_idx)
{
	struct bpf_verifier_state *this_branch = env->cur_state;
	struct bpf_verifier_state *other_branch;
	struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
	struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
	struct bpf_reg_state *eq_branch_regs;
	u8 opcode = BPF_OP(insn->code);
	bool is_jmp32;
	int pred = -1;
	int err;

	/* Only conditional jumps are expected to reach here. */
	if (opcode == BPF_JA || opcode > BPF_JSLE) {
		verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
		return -EINVAL;
	}

	if (BPF_SRC(insn->code) == BPF_X) {
		if (insn->imm != 0) {
			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
			return -EINVAL;
		}

		/* check src1 operand */
		err = check_reg_arg(env, insn->src_reg, SRC_OP);
		if (err)
			return err;

		if (is_pointer_value(env, insn->src_reg)) {
			verbose(env, "R%d pointer comparison prohibited\n",
				insn->src_reg);
			return -EACCES;
		}
		src_reg = &regs[insn->src_reg];
	} else {
		if (insn->src_reg != BPF_REG_0) {
			verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
			return -EINVAL;
		}
	}

	/* check src2 operand */
	err = check_reg_arg(env, insn->dst_reg, SRC_OP);
	if (err)
		return err;

	dst_reg = &regs[insn->dst_reg];
	is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;

	if (BPF_SRC(insn->code) == BPF_K) {
		pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
	} else if (src_reg->type == SCALAR_VALUE &&
		   is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
		pred = is_branch_taken(dst_reg,
				       tnum_subreg(src_reg->var_off).value,
				       opcode,
				       is_jmp32);
	} else if (src_reg->type == SCALAR_VALUE &&
		   !is_jmp32 && tnum_is_const(src_reg->var_off)) {
		pred = is_branch_taken(dst_reg,
				       src_reg->var_off.value,
				       opcode,
				       is_jmp32);
	} else if (reg_is_pkt_pointer_any(dst_reg) &&
		   reg_is_pkt_pointer_any(src_reg) &&
		   !is_jmp32) {
		pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
	}

	if (pred >= 0) {
		/* If we get here with a dst_reg pointer type it is because
		 * above is_branch_taken() special cased the 0 comparison.
		 */
		if (!__is_pointer_value(false, dst_reg))
			err = mark_chain_precision(env, insn->dst_reg);
		if (BPF_SRC(insn->code) == BPF_X && !err &&
		    !__is_pointer_value(false, src_reg))
			err = mark_chain_precision(env, insn->src_reg);
		if (err)
			return err;
	}

	if (pred == 1) {
		/* Only follow the goto, ignore fall-through. If needed, push
		 * the fall-through branch for simulation under speculative
		 * execution.
		 */
		if (!env->bypass_spec_v1 &&
		    !sanitize_speculative_path(env, insn, *insn_idx + 1,
					       *insn_idx))
			return -EFAULT;
		*insn_idx += insn->off;
		return 0;
	} else if (pred == 0) {
		/* Only follow the fall-through branch, since that's where the
		 * program will go. If needed, push the goto branch for
		 * simulation under speculative execution.
		 */
		if (!env->bypass_spec_v1 &&
		    !sanitize_speculative_path(env, insn,
					       *insn_idx + insn->off + 1,
					       *insn_idx))
			return -EFAULT;
		return 0;
	}

	other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
				  false);
	if (!other_branch)
		return -EFAULT;
	other_branch_regs = other_branch->frame[other_branch->curframe]->regs;

	/* detect if we are comparing against a constant value so we can adjust
	 * our min/max values for our dst register.
	 * this is only legit if both are scalars (or pointers to the same
	 * object, I suppose, see the PTR_MAYBE_NULL related if block below),
	 * because otherwise the different base pointers mean the offsets aren't
	 * comparable.
	 */
	if (BPF_SRC(insn->code) == BPF_X) {
		struct bpf_reg_state *src_reg = &regs[insn->src_reg];

		if (dst_reg->type == SCALAR_VALUE &&
		    src_reg->type == SCALAR_VALUE) {
			if (tnum_is_const(src_reg->var_off) ||
			    (is_jmp32 &&
			     tnum_is_const(tnum_subreg(src_reg->var_off))))
				reg_set_min_max(&other_branch_regs[insn->dst_reg],
						dst_reg,
						src_reg->var_off.value,
						tnum_subreg(src_reg->var_off).value,
						opcode, is_jmp32);
			else if (tnum_is_const(dst_reg->var_off) ||
				 (is_jmp32 &&
				  tnum_is_const(tnum_subreg(dst_reg->var_off))))
				reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
						    src_reg,
						    dst_reg->var_off.value,
						    tnum_subreg(dst_reg->var_off).value,
						    opcode, is_jmp32);
			else if (!is_jmp32 &&
				 (opcode == BPF_JEQ || opcode == BPF_JNE))
				/* Comparing for equality, we can combine knowledge */
				reg_combine_min_max(&other_branch_regs[insn->src_reg],
						    &other_branch_regs[insn->dst_reg],
						    src_reg, dst_reg, opcode);
			if (src_reg->id &&
			    !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
				find_equal_scalars(this_branch, src_reg);
				find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
			}
		}
	} else if (dst_reg->type == SCALAR_VALUE) {
		reg_set_min_max(&other_branch_regs[insn->dst_reg],
				dst_reg, insn->imm, (u32)insn->imm,
				opcode, is_jmp32);
	}

	if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
	    !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
		find_equal_scalars(this_branch, dst_reg);
		find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
	}

	/* if one pointer register is compared to another pointer
	 * register check if PTR_MAYBE_NULL could be lifted.
	 * E.g. register A - maybe null
	 *      register B - not null
	 * for JNE A, B, ... - A is not null in the false branch;
	 * for JEQ A, B, ... - A is not null in the true branch.
	 *
	 * Since PTR_TO_BTF_ID points to a kernel struct that does
	 * not need to be null checked by the BPF program, i.e.,
	 * could be null even without PTR_MAYBE_NULL marking, so
	 * only propagate nullness when neither reg is that type.
	 */
	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X &&
	    __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) &&
	    type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) &&
	    base_type(src_reg->type) != PTR_TO_BTF_ID &&
	    base_type(dst_reg->type) != PTR_TO_BTF_ID) {
		eq_branch_regs = NULL;
		switch (opcode) {
		case BPF_JEQ:
			eq_branch_regs = other_branch_regs;
			break;
		case BPF_JNE:
			eq_branch_regs = regs;
			break;
		default:
			/* do nothing */
			break;
		}
		if (eq_branch_regs) {
			if (type_may_be_null(src_reg->type))
				mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]);
			else
				mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]);
		}
	}

	/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
	 * NOTE: these optimizations below are related to pointer comparison
	 *       which will never be JMP32.
	 */
	if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
	    insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
	    type_may_be_null(dst_reg->type)) {
		/* Mark all identical registers in each branch as either
		 * safe or unknown depending on the R == 0 or R != 0 conditional.
		 */
		mark_ptr_or_null_regs(this_branch, insn->dst_reg,
				      opcode == BPF_JNE);
		mark_ptr_or_null_regs(other_branch, insn->dst_reg,
				      opcode == BPF_JEQ);
	} else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
					   this_branch, other_branch) &&
		   is_pointer_value(env, insn->dst_reg)) {
		verbose(env, "R%d pointer comparison prohibited\n",
			insn->dst_reg);
		return -EACCES;
	}
	if (env->log.level & BPF_LOG_LEVEL)
		print_insn_state(env, this_branch->frame[this_branch->curframe]);
	return 0;
}
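
/* Branch-folding example (illustrative): if earlier analysis proved
 * r2 is a scalar in [0, 4], then for "if r2 > 7 goto l" the
 * is_branch_taken() call above returns 0, pred == 0, and only the
 * fall-through path is verified (plus, unless bypass_spec_v1 is set, a
 * sanitized speculative path to stay safe under Spectre v1).
 */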

/* verify BPF_LD_IMM64 instruction */
static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_insn_aux_data *aux = cur_aux(env);
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *dst_reg;
	struct bpf_map *map;
	int err;

	if (BPF_SIZE(insn->code) != BPF_DW) {
		verbose(env, "invalid BPF_LD_IMM insn\n");
		return -EINVAL;
	}
	if (insn->off != 0) {
		verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
		return -EINVAL;
	}

	err = check_reg_arg(env, insn->dst_reg, DST_OP);
	if (err)
		return err;

	dst_reg = &regs[insn->dst_reg];
	if (insn->src_reg == 0) {
		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;

		dst_reg->type = SCALAR_VALUE;
		__mark_reg_known(&regs[insn->dst_reg], imm);
		return 0;
	}

	/* All special src_reg cases are listed below. From this point onwards
	 * we either succeed and assign a corresponding dst_reg->type after
	 * zeroing the offset, or fail and reject the program.
	 */
	mark_reg_known_zero(env, regs, insn->dst_reg);

	if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
		dst_reg->type = aux->btf_var.reg_type;
		switch (base_type(dst_reg->type)) {
		case PTR_TO_MEM:
			dst_reg->mem_size = aux->btf_var.mem_size;
			break;
		case PTR_TO_BTF_ID:
			dst_reg->btf = aux->btf_var.btf;
			dst_reg->btf_id = aux->btf_var.btf_id;
			break;
		default:
			verbose(env, "bpf verifier is misconfigured\n");
			return -EFAULT;
		}
		return 0;
	}

	if (insn->src_reg == BPF_PSEUDO_FUNC) {
		struct bpf_prog_aux *aux = env->prog->aux;
		u32 subprogno = find_subprog(env,
					     env->insn_idx + insn->imm + 1);

		if (!aux->func_info) {
			verbose(env, "missing btf func_info\n");
			return -EINVAL;
		}
		if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
			verbose(env, "callback function not static\n");
			return -EINVAL;
		}

		dst_reg->type = PTR_TO_FUNC;
		dst_reg->subprogno = subprogno;
		return 0;
	}

	map = env->used_maps[aux->map_index];
	dst_reg->map_ptr = map;

	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
	    insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
		dst_reg->type = PTR_TO_MAP_VALUE;
		dst_reg->off = aux->map_off;
		WARN_ON_ONCE(map->max_entries != 1);
		/* We want reg->id to be same (0) as map_value is not distinct */
	} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
		   insn->src_reg == BPF_PSEUDO_MAP_IDX) {
		dst_reg->type = CONST_PTR_TO_MAP;
	} else {
		verbose(env, "bpf verifier is misconfigured\n");
		return -EINVAL;
	}

	return 0;
}
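
/* Encoding note (illustrative): BPF_LD_IMM64 spans two instructions;
 * the low 32 bits of the constant sit in insn->imm and the high 32
 * bits in (insn + 1)->imm, which is why the scalar case assembles
 * imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm.  A nonzero
 * src_reg such as BPF_PSEUDO_MAP_FD means the immediate was a map fd,
 * already resolved into env->used_maps[] before this point.
 */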

static bool may_access_skb(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		return true;
	default:
		return false;
	}
}

/* verify safety of LD_ABS|LD_IND instructions:
 * - they can only appear in the programs where ctx == skb
 * - since they are wrappers of function calls, they scratch R1-R5 registers,
 *   preserve R6-R9, and store return value into R0
 *
 * Implicit input:
 *   ctx == skb == R6 == CTX
 *
 * Explicit input:
 *   SRC == any register
 *   IMM == 32-bit immediate
 *
 * Output:
 *   R0 - 8/16/32-bit skb data converted to cpu endianness
 */
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = cur_regs(env);
	static const int ctx_reg = BPF_REG_6;
	u8 mode = BPF_MODE(insn->code);
	int i, err;

	if (!may_access_skb(resolve_prog_type(env->prog))) {
		verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
		return -EINVAL;
	}

	if (!env->ops->gen_ld_abs) {
		verbose(env, "bpf verifier is misconfigured\n");
		return -EINVAL;
	}

	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
	    BPF_SIZE(insn->code) == BPF_DW ||
	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
		verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
		return -EINVAL;
	}

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(env, ctx_reg, SRC_OP);
	if (err)
		return err;

	/* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
	 * gen_ld_abs() may terminate the program at runtime, leading to
	 * reference leak.
	 */
	err = check_reference_leak(env);
	if (err) {
		verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
		return err;
	}

	if (env->cur_state->active_lock.ptr) {
		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
		return -EINVAL;
	}

	if (env->cur_state->active_rcu_lock) {
		verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n");
		return -EINVAL;
	}

	if (regs[ctx_reg].type != PTR_TO_CTX) {
		verbose(env,
			"at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
		return -EINVAL;
	}

	if (mode == BPF_IND) {
		/* check explicit source operand */
		err = check_reg_arg(env, insn->src_reg, SRC_OP);
		if (err)
			return err;
	}

	err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
	if (err < 0)
		return err;

	/* reset caller saved regs to unreadable */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(env, regs, caller_saved[i]);
		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
	}

	/* mark destination R0 register as readable, since it contains
	 * the value fetched from the packet.
	 * Already marked as written above.
	 */
	mark_reg_unknown(env, regs, BPF_REG_0);
	/* ld_abs load up to 32-bit skb data. */
	regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
	return 0;
}
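
/* Usage sketch (illustrative; the register choice is the fixed ABI
 * described above): a classic socket filter loads the EtherType with
 *
 *	r6 = r1				// skb must live in R6
 *	r0 = *(u16 *)skb[12]		// BPF_LD | BPF_ABS | BPF_H, imm=12
 *
 * and per the contract above the result lands in R0 in cpu endianness
 * while R1-R5 are clobbered and R6-R9 preserved.
 */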

static int check_return_code(struct bpf_verifier_env *env)
{
	struct tnum enforce_attach_type_range = tnum_unknown;
	const struct bpf_prog *prog = env->prog;
	struct bpf_reg_state *reg;
	struct tnum range = tnum_range(0, 1);
	enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
	int err;
	struct bpf_func_state *frame = env->cur_state->frame[0];
	const bool is_subprog = frame->subprogno;

	/* LSM and struct_ops func-ptr's return type could be "void" */
	if (!is_subprog) {
		switch (prog_type) {
		case BPF_PROG_TYPE_LSM:
			if (prog->expected_attach_type == BPF_LSM_CGROUP)
				/* See below, can be 0 or 0-1 depending on hook. */
				break;
			fallthrough;
		case BPF_PROG_TYPE_STRUCT_OPS:
			if (!prog->aux->attach_func_proto->type)
				return 0;
			break;
		default:
			break;
		}
	}

	/* eBPF calling convention is such that R0 is used
	 * to return the value from eBPF program.
	 * Make sure that it's readable at this time
	 * of bpf_exit, which means that program wrote
	 * something into it earlier
	 */
	err = check_reg_arg(env, BPF_REG_0, SRC_OP);
	if (err)
		return err;

	if (is_pointer_value(env, BPF_REG_0)) {
		verbose(env, "R0 leaks addr as return value\n");
		return -EACCES;
	}

	reg = cur_regs(env) + BPF_REG_0;

	if (frame->in_async_callback_fn) {
		/* enforce return zero from async callbacks like timer */
		if (reg->type != SCALAR_VALUE) {
			verbose(env, "In async callback the register R0 is not a known value (%s)\n",
				reg_type_str(env, reg->type));
			return -EINVAL;
		}

		if (!tnum_in(tnum_const(0), reg->var_off)) {
			verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
			return -EINVAL;
		}
		return 0;
	}

	if (is_subprog) {
		if (reg->type != SCALAR_VALUE) {
			verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
				reg_type_str(env, reg->type));
			return -EINVAL;
		}
		return 0;
	}

	switch (prog_type) {
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
		if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
		    env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
		    env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
		    env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
			range = tnum_range(1, 1);
		if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
		    env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
			range = tnum_range(0, 3);
		break;
	case BPF_PROG_TYPE_CGROUP_SKB:
		if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
			range = tnum_range(0, 3);
			enforce_attach_type_range = tnum_range(2, 3);
		}
		break;
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_CGROUP_SYSCTL:
	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
		break;
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
		if (!env->prog->aux->attach_btf_id)
			return 0;
		range = tnum_const(0);
		break;
	case BPF_PROG_TYPE_TRACING:
		switch (env->prog->expected_attach_type) {
		case BPF_TRACE_FENTRY:
		case BPF_TRACE_FEXIT:
			range = tnum_const(0);
			break;
		case BPF_TRACE_RAW_TP:
		case BPF_MODIFY_RETURN:
			return 0;
		case BPF_TRACE_ITER:
			break;
		default:
			return -ENOTSUPP;
		}
		break;
	case BPF_PROG_TYPE_SK_LOOKUP:
		range = tnum_range(SK_DROP, SK_PASS);
		break;

	case BPF_PROG_TYPE_LSM:
		if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
			/* Regular BPF_PROG_TYPE_LSM programs can return
			 * any value.
			 */
			return 0;
		}
		if (!env->prog->aux->attach_func_proto->type) {
			/* Make sure programs that attach to void
			 * hooks don't try to modify return value.
			 */
			range = tnum_range(1, 1);
		}
		break;

	case BPF_PROG_TYPE_EXT:
		/* freplace program can return anything as its return value
		 * depends on the to-be-replaced kernel func or bpf program.
		 */
	default:
		return 0;
	}

	if (reg->type != SCALAR_VALUE) {
		verbose(env, "At program exit the register R0 is not a known value (%s)\n",
			reg_type_str(env, reg->type));
		return -EINVAL;
	}

	if (!tnum_in(range, reg->var_off)) {
		verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
		if (prog->expected_attach_type == BPF_LSM_CGROUP &&
		    prog_type == BPF_PROG_TYPE_LSM &&
		    !prog->aux->attach_func_proto->type)
			verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
		return -EINVAL;
	}

	if (!tnum_is_unknown(enforce_attach_type_range) &&
	    tnum_in(enforce_attach_type_range, reg->var_off))
		env->prog->enforce_expected_attach_type = 1;
	return 0;
}
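
/* Example (illustrative): a BPF_CGROUP_INET_EGRESS cgroup/skb program
 * ending in "r0 = 2; exit" leaves R0 as tnum_const(2), which is inside
 * the allowed tnum_range(0, 3); since 2 also falls in the enforce
 * range (2, 3), enforce_expected_attach_type is set and the program
 * may only be attached with the attach type it was verified against.
 */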

/* non-recursive DFS pseudo code
 * 1  procedure DFS-iterative(G,v):
 * 2      label v as discovered
 * 3      let S be a stack
 * 4      S.push(v)
 * 5      while S is not empty
 * 6            t <- S.peek()
 * 7            if t is what we're looking for:
 * 8                return t
 * 9            for all edges e in G.adjacentEdges(t) do
 * 10               if edge e is already labelled
 * 11                   continue with the next edge
 * 12               w <- G.adjacentVertex(t,e)
 * 13               if vertex w is not discovered and not explored
 * 14                   label e as tree-edge
 * 15                   label w as discovered
 * 16                   S.push(w)
 * 17                   continue at 5
 * 18               else if vertex w is discovered
 * 19                   label e as back-edge
 * 20               else
 * 21                   // vertex w is explored
 * 22                   label e as forward- or cross-edge
 * 23           label t as explored
 * 24           S.pop()
 *
 * convention:
 * 0x10 - discovered
 * 0x11 - discovered and fall-through edge labelled
 * 0x12 - discovered and fall-through and branch edges labelled
 * 0x20 - explored
 */

enum {
	DISCOVERED = 0x10,
	EXPLORED = 0x20,
	FALLTHROUGH = 1,
	BRANCH = 2,
};
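
/* Example (illustrative): insn_state[t] == 0x11 means insn t is
 * DISCOVERED with its fall-through edge labelled (0x10 | FALLTHROUGH);
 * labelling the branch edge turns it into 0x12 (both edges done), and
 * popping t off the DFS stack finally sets 0x20 (EXPLORED).
 */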

static u32 state_htab_size(struct bpf_verifier_env *env)
{
	return env->prog->len;
}

static struct bpf_verifier_state_list **explored_state(
					struct bpf_verifier_env *env,
					int idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_func_state *state = cur->frame[cur->curframe];

	return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
}

static void mark_prune_point(struct bpf_verifier_env *env, int idx)
{
	env->insn_aux_data[idx].prune_point = true;
}

static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
{
	return env->insn_aux_data[insn_idx].prune_point;
}

static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx)
{
	env->insn_aux_data[idx].force_checkpoint = true;
}

static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx)
{
	return env->insn_aux_data[insn_idx].force_checkpoint;
}

enum {
	DONE_EXPLORING = 0,
	KEEP_EXPLORING = 1,
};

/* t, w, e - match pseudo-code above:
 * t - index of current instruction
 * w - next instruction
 * e - edge
 */
static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
		     bool loop_ok)
{
	int *insn_stack = env->cfg.insn_stack;
	int *insn_state = env->cfg.insn_state;

	if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
		return DONE_EXPLORING;

	if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
		return DONE_EXPLORING;

	if (w < 0 || w >= env->prog->len) {
		verbose_linfo(env, t, "%d: ", t);
		verbose(env, "jump out of range from insn %d to %d\n", t, w);
		return -EINVAL;
	}

	if (e == BRANCH) {
		/* mark branch target for state pruning */
		mark_prune_point(env, w);
		mark_jmp_point(env, w);
	}

	if (insn_state[w] == 0) {
		/* tree-edge */
		insn_state[t] = DISCOVERED | e;
		insn_state[w] = DISCOVERED;
		if (env->cfg.cur_stack >= env->prog->len)
			return -E2BIG;
		insn_stack[env->cfg.cur_stack++] = w;
		return KEEP_EXPLORING;
	} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
		if (loop_ok && env->bpf_capable)
			return DONE_EXPLORING;
		verbose_linfo(env, t, "%d: ", t);
		verbose_linfo(env, w, "%d: ", w);
		verbose(env, "back-edge from insn %d to %d\n", t, w);
		return -EINVAL;
	} else if (insn_state[w] == EXPLORED) {
		/* forward- or cross-edge */
		insn_state[t] = DISCOVERED | e;
	} else {
		verbose(env, "insn state internal bug\n");
		return -EFAULT;
	}
	return DONE_EXPLORING;
}

static int visit_func_call_insn(int t, struct bpf_insn *insns,
				struct bpf_verifier_env *env,
				bool visit_callee)
{
	int ret;

	ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
	if (ret)
		return ret;

	mark_prune_point(env, t + 1);
	/* when we exit from subprog, we need to record non-linear history */
	mark_jmp_point(env, t + 1);

	if (visit_callee) {
		mark_prune_point(env, t);
		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
				/* It's ok to allow recursion from CFG point of
				 * view. __check_func_call() will do the actual
				 * check.
				 */
				bpf_pseudo_func(insns + t));
	}
	return ret;
}

/* Visits the instruction at index t and returns one of the following:
 *  < 0 - an error occurred
 *  DONE_EXPLORING - the instruction was fully explored
 *  KEEP_EXPLORING - there is still work to be done before it is fully explored
 */
static int visit_insn(int t, struct bpf_verifier_env *env)
{
	struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
	int ret;

	if (bpf_pseudo_func(insn))
		return visit_func_call_insn(t, insns, env, true);

	/* All non-branch instructions have a single fall-through edge. */
	if (BPF_CLASS(insn->code) != BPF_JMP &&
	    BPF_CLASS(insn->code) != BPF_JMP32)
		return push_insn(t, t + 1, FALLTHROUGH, env, false);

	switch (BPF_OP(insn->code)) {
	case BPF_EXIT:
		return DONE_EXPLORING;

	case BPF_CALL:
		if (insn->src_reg == 0 && insn->imm == BPF_FUNC_timer_set_callback)
			/* Mark this call insn as a prune point to trigger
			 * is_state_visited() check before call itself is
			 * processed by __check_func_call(). Otherwise new
			 * async state will be pushed for further exploration.
			 */
			mark_prune_point(env, t);
		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
			struct bpf_kfunc_call_arg_meta meta;

			ret = fetch_kfunc_meta(env, insn, &meta, NULL);
			if (ret == 0 && is_iter_next_kfunc(&meta)) {
				mark_prune_point(env, t);
				/* Checking and saving state checkpoints at iter_next() call
				 * is crucial for fast convergence of open-coded iterator loop
				 * logic, so we need to force it. If we don't do that,
				 * is_state_visited() might skip saving a checkpoint, causing
				 * unnecessarily long sequence of not checkpointed
				 * instructions and jumps, leading to exhaustion of jump
				 * history buffer, and potentially other undesired outcomes.
				 * It is expected that with correct open-coded iterators
				 * convergence will happen quickly, so we don't run a risk of
				 * exhausting memory.
				 */
				mark_force_checkpoint(env, t);
			}
		}
		return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL);

	case BPF_JA:
		if (BPF_SRC(insn->code) != BPF_K)
			return -EINVAL;

		/* unconditional jump with single edge */
		ret = push_insn(t, t + insn->off + 1, FALLTHROUGH, env,
				true);
		if (ret)
			return ret;

		mark_prune_point(env, t + insn->off + 1);
		mark_jmp_point(env, t + insn->off + 1);

		return ret;

	default:
		/* conditional jump with two edges */
		mark_prune_point(env, t);

		ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
		if (ret)
			return ret;

		return push_insn(t, t + insn->off + 1, BRANCH, env, true);
	}
}

/* non-recursive depth-first-search to detect loops in BPF program
 * loop == back-edge in directed graph
 */
static int check_cfg(struct bpf_verifier_env *env)
{
	int insn_cnt = env->prog->len;
	int *insn_stack, *insn_state;
	int ret = 0;
	int i;

	insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_state)
		return -ENOMEM;

	insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
	if (!insn_stack) {
		kvfree(insn_state);
		return -ENOMEM;
	}

	insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
	insn_stack[0] = 0; /* 0 is the first instruction */
	env->cfg.cur_stack = 1;

	while (env->cfg.cur_stack > 0) {
		int t = insn_stack[env->cfg.cur_stack - 1];

		ret = visit_insn(t, env);
		switch (ret) {
		case DONE_EXPLORING:
			insn_state[t] = EXPLORED;
			env->cfg.cur_stack--;
			break;
		case KEEP_EXPLORING:
			break;
		default:
			if (ret > 0) {
				verbose(env, "visit_insn internal bug\n");
				ret = -EFAULT;
			}
			goto err_free;
		}
	}

	if (env->cfg.cur_stack < 0) {
		verbose(env, "pop stack internal bug\n");
		ret = -EFAULT;
		goto err_free;
	}

	for (i = 0; i < insn_cnt; i++) {
		if (insn_state[i] != EXPLORED) {
			verbose(env, "unreachable insn %d\n", i);
			ret = -EINVAL;
			goto err_free;
		}
	}
	ret = 0; /* cfg looks good */

err_free:
	kvfree(insn_state);
	kvfree(insn_stack);
	env->cfg.insn_state = env->cfg.insn_stack = NULL;
	return ret;
}
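
/* Example (illustrative): the two-insn program
 *
 *	0: goto -1;	// BPF_JA, off = -1: jumps to itself
 *	1: exit;
 *
 * makes visit_insn(0) push the edge 0 -> 0 while insn 0 is still
 * DISCOVERED; for a loader without bpf_capable, push_insn() then
 * reports "back-edge from insn 0 to 0" and check_cfg() fails with
 * -EINVAL (and insn 1 is unreachable besides).
 */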

static int check_abnormal_return(struct bpf_verifier_env *env)
{
	int i;

	for (i = 1; i < env->subprog_cnt; i++) {
		if (env->subprog_info[i].has_ld_abs) {
			verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
			return -EINVAL;
		}
		if (env->subprog_info[i].has_tail_call) {
			verbose(env, "tail_call is not allowed in subprogs without BTF\n");
			return -EINVAL;
		}
	}
	return 0;
}
14131
838e9690
YS
14132/* The minimum supported BTF func info size */
14133#define MIN_BPF_FUNCINFO_SIZE 8
14134#define MAX_FUNCINFO_REC_SIZE 252
14135
c454a46b
MKL
14136static int check_btf_func(struct bpf_verifier_env *env,
14137 const union bpf_attr *attr,
af2ac3e1 14138 bpfptr_t uattr)
838e9690 14139{
09b28d76 14140 const struct btf_type *type, *func_proto, *ret_type;
d0b2818e 14141 u32 i, nfuncs, urec_size, min_size;
838e9690 14142 u32 krec_size = sizeof(struct bpf_func_info);
c454a46b 14143 struct bpf_func_info *krecord;
8c1b6e69 14144 struct bpf_func_info_aux *info_aux = NULL;
c454a46b
MKL
14145 struct bpf_prog *prog;
14146 const struct btf *btf;
af2ac3e1 14147 bpfptr_t urecord;
d0b2818e 14148 u32 prev_offset = 0;
09b28d76 14149 bool scalar_return;
e7ed83d6 14150 int ret = -ENOMEM;
838e9690
YS
14151
14152 nfuncs = attr->func_info_cnt;
09b28d76
AS
14153 if (!nfuncs) {
14154 if (check_abnormal_return(env))
14155 return -EINVAL;
838e9690 14156 return 0;
09b28d76 14157 }
838e9690
YS
14158
14159 if (nfuncs != env->subprog_cnt) {
14160 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
14161 return -EINVAL;
14162 }
14163
14164 urec_size = attr->func_info_rec_size;
14165 if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
14166 urec_size > MAX_FUNCINFO_REC_SIZE ||
14167 urec_size % sizeof(u32)) {
14168 verbose(env, "invalid func info rec size %u\n", urec_size);
14169 return -EINVAL;
14170 }
14171
c454a46b
MKL
14172 prog = env->prog;
14173 btf = prog->aux->btf;
838e9690 14174
af2ac3e1 14175 urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
838e9690
YS
14176 min_size = min_t(u32, krec_size, urec_size);
14177
ba64e7d8 14178 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
c454a46b
MKL
14179 if (!krecord)
14180 return -ENOMEM;
8c1b6e69
AS
14181 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
14182 if (!info_aux)
14183 goto err_free;
ba64e7d8 14184
838e9690
YS
14185 for (i = 0; i < nfuncs; i++) {
14186 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
14187 if (ret) {
14188 if (ret == -E2BIG) {
14189 verbose(env, "nonzero tailing record in func info");
 14190 /* set the size the kernel expects so the loader can zero
14191 * out the rest of the record.
14192 */
af2ac3e1
AS
14193 if (copy_to_bpfptr_offset(uattr,
14194 offsetof(union bpf_attr, func_info_rec_size),
14195 &min_size, sizeof(min_size)))
838e9690
YS
14196 ret = -EFAULT;
14197 }
c454a46b 14198 goto err_free;
838e9690
YS
14199 }
14200
af2ac3e1 14201 if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
838e9690 14202 ret = -EFAULT;
c454a46b 14203 goto err_free;
838e9690
YS
14204 }
14205
d30d42e0 14206 /* check insn_off */
09b28d76 14207 ret = -EINVAL;
838e9690 14208 if (i == 0) {
d30d42e0 14209 if (krecord[i].insn_off) {
838e9690 14210 verbose(env,
d30d42e0
MKL
14211 "nonzero insn_off %u for the first func info record",
14212 krecord[i].insn_off);
c454a46b 14213 goto err_free;
838e9690 14214 }
d30d42e0 14215 } else if (krecord[i].insn_off <= prev_offset) {
838e9690
YS
14216 verbose(env,
14217 "same or smaller insn offset (%u) than previous func info record (%u)",
d30d42e0 14218 krecord[i].insn_off, prev_offset);
c454a46b 14219 goto err_free;
838e9690
YS
14220 }
14221
d30d42e0 14222 if (env->subprog_info[i].start != krecord[i].insn_off) {
838e9690 14223 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
c454a46b 14224 goto err_free;
838e9690
YS
14225 }
14226
14227 /* check type_id */
ba64e7d8 14228 type = btf_type_by_id(btf, krecord[i].type_id);
51c39bb1 14229 if (!type || !btf_type_is_func(type)) {
838e9690 14230 verbose(env, "invalid type id %d in func info",
ba64e7d8 14231 krecord[i].type_id);
c454a46b 14232 goto err_free;
838e9690 14233 }
51c39bb1 14234 info_aux[i].linkage = BTF_INFO_VLEN(type->info);
09b28d76
AS
14235
14236 func_proto = btf_type_by_id(btf, type->type);
14237 if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
14238 /* btf_func_check() already verified it during BTF load */
14239 goto err_free;
14240 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
14241 scalar_return =
6089fb32 14242 btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
09b28d76
AS
14243 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
14244 verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
14245 goto err_free;
14246 }
14247 if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
14248 verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
14249 goto err_free;
14250 }
14251
d30d42e0 14252 prev_offset = krecord[i].insn_off;
af2ac3e1 14253 bpfptr_add(&urecord, urec_size);
838e9690
YS
14254 }
14255
ba64e7d8
YS
14256 prog->aux->func_info = krecord;
14257 prog->aux->func_info_cnt = nfuncs;
8c1b6e69 14258 prog->aux->func_info_aux = info_aux;
838e9690
YS
14259 return 0;
14260
c454a46b 14261err_free:
ba64e7d8 14262 kvfree(krecord);
8c1b6e69 14263 kfree(info_aux);
838e9690
YS
14264 return ret;
14265}
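/* Editorial sketch (not part of the kernel source) of the user-space side
 * that check_btf_func() validates: one bpf_func_info record per subprog,
 * insn_off strictly increasing and matching subprog starts, the first one
 * being 0. Assuming a main function at insn 0 and one subprog at insn 12:
 *
 *	struct bpf_func_info finfo[] = {
 *		{ .insn_off = 0,  .type_id = main_func_btf_id },  // hypothetical BTF ids
 *		{ .insn_off = 12, .type_id = sub_func_btf_id },
 *	};
 *	union bpf_attr attr = {
 *		.prog_btf_fd        = btf_fd,
 *		.func_info          = (__u64)(unsigned long)finfo,
 *		.func_info_cnt      = 2,
 *		.func_info_rec_size = sizeof(struct bpf_func_info),
 *		// ... plus the usual BPF_PROG_LOAD fields
 *	};
 *
 * Both type_ids must name BTF_KIND_FUNC entries in the BTF behind btf_fd,
 * or check_btf_func() fails with "invalid type id %d in func info".
 */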
14266
ba64e7d8
YS
14267static void adjust_btf_func(struct bpf_verifier_env *env)
14268{
8c1b6e69 14269 struct bpf_prog_aux *aux = env->prog->aux;
ba64e7d8
YS
14270 int i;
14271
8c1b6e69 14272 if (!aux->func_info)
ba64e7d8
YS
14273 return;
14274
14275 for (i = 0; i < env->subprog_cnt; i++)
8c1b6e69 14276 aux->func_info[i].insn_off = env->subprog_info[i].start;
ba64e7d8
YS
14277}
14278
1b773d00 14279#define MIN_BPF_LINEINFO_SIZE offsetofend(struct bpf_line_info, line_col)
c454a46b
MKL
14280#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
14281
14282static int check_btf_line(struct bpf_verifier_env *env,
14283 const union bpf_attr *attr,
af2ac3e1 14284 bpfptr_t uattr)
c454a46b
MKL
14285{
14286 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
14287 struct bpf_subprog_info *sub;
14288 struct bpf_line_info *linfo;
14289 struct bpf_prog *prog;
14290 const struct btf *btf;
af2ac3e1 14291 bpfptr_t ulinfo;
c454a46b
MKL
14292 int err;
14293
14294 nr_linfo = attr->line_info_cnt;
14295 if (!nr_linfo)
14296 return 0;
0e6491b5
BC
14297 if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
14298 return -EINVAL;
c454a46b
MKL
14299
14300 rec_size = attr->line_info_rec_size;
14301 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
14302 rec_size > MAX_LINEINFO_REC_SIZE ||
14303 rec_size & (sizeof(u32) - 1))
14304 return -EINVAL;
14305
 14306 /* Need to zero it in case userspace passes in
 14307 * a smaller bpf_line_info object.
14308 */
14309 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
14310 GFP_KERNEL | __GFP_NOWARN);
14311 if (!linfo)
14312 return -ENOMEM;
14313
14314 prog = env->prog;
14315 btf = prog->aux->btf;
14316
14317 s = 0;
14318 sub = env->subprog_info;
af2ac3e1 14319 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
c454a46b
MKL
14320 expected_size = sizeof(struct bpf_line_info);
14321 ncopy = min_t(u32, expected_size, rec_size);
14322 for (i = 0; i < nr_linfo; i++) {
14323 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
14324 if (err) {
14325 if (err == -E2BIG) {
14326 verbose(env, "nonzero tailing record in line_info");
af2ac3e1
AS
14327 if (copy_to_bpfptr_offset(uattr,
14328 offsetof(union bpf_attr, line_info_rec_size),
14329 &expected_size, sizeof(expected_size)))
c454a46b
MKL
14330 err = -EFAULT;
14331 }
14332 goto err_free;
14333 }
14334
af2ac3e1 14335 if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
c454a46b
MKL
14336 err = -EFAULT;
14337 goto err_free;
14338 }
14339
14340 /*
14341 * Check insn_off to ensure
14342 * 1) strictly increasing AND
14343 * 2) bounded by prog->len
14344 *
14345 * The linfo[0].insn_off == 0 check logically falls into
14346 * the later "missing bpf_line_info for func..." case
 14347 * because the first linfo[0].insn_off must match the
 14348 * first sub's start, and the first sub must have
14349 * subprog_info[0].start == 0.
14350 */
14351 if ((i && linfo[i].insn_off <= prev_offset) ||
14352 linfo[i].insn_off >= prog->len) {
14353 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
14354 i, linfo[i].insn_off, prev_offset,
14355 prog->len);
14356 err = -EINVAL;
14357 goto err_free;
14358 }
14359
fdbaa0be
MKL
14360 if (!prog->insnsi[linfo[i].insn_off].code) {
14361 verbose(env,
14362 "Invalid insn code at line_info[%u].insn_off\n",
14363 i);
14364 err = -EINVAL;
14365 goto err_free;
14366 }
14367
23127b33
MKL
14368 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
14369 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
c454a46b
MKL
14370 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
14371 err = -EINVAL;
14372 goto err_free;
14373 }
14374
14375 if (s != env->subprog_cnt) {
14376 if (linfo[i].insn_off == sub[s].start) {
14377 sub[s].linfo_idx = i;
14378 s++;
14379 } else if (sub[s].start < linfo[i].insn_off) {
14380 verbose(env, "missing bpf_line_info for func#%u\n", s);
14381 err = -EINVAL;
14382 goto err_free;
14383 }
14384 }
14385
14386 prev_offset = linfo[i].insn_off;
af2ac3e1 14387 bpfptr_add(&ulinfo, rec_size);
c454a46b
MKL
14388 }
14389
14390 if (s != env->subprog_cnt) {
14391 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
14392 env->subprog_cnt - s, s);
14393 err = -EINVAL;
14394 goto err_free;
14395 }
14396
14397 prog->aux->linfo = linfo;
14398 prog->aux->nr_linfo = nr_linfo;
14399
14400 return 0;
14401
14402err_free:
14403 kvfree(linfo);
14404 return err;
14405}
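/* Editorial example (not part of the kernel source): a line_info layout
 * accepted by check_btf_line() for the same two-subprog program (starts at
 * insns 0 and 12) must have strictly increasing insn_off below prog->len,
 * and a record exactly at each subprog start:
 *
 *	line_info[0].insn_off = 0;	// covers subprog_info[0].start == 0
 *	line_info[1].insn_off = 4;
 *	line_info[2].insn_off = 12;	// covers subprog_info[1].start == 12
 *
 * Dropping line_info[2] triggers "missing bpf_line_info for func#1".
 */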
14406
fbd94c7a
AS
14407#define MIN_CORE_RELO_SIZE sizeof(struct bpf_core_relo)
14408#define MAX_CORE_RELO_SIZE MAX_FUNCINFO_REC_SIZE
14409
14410static int check_core_relo(struct bpf_verifier_env *env,
14411 const union bpf_attr *attr,
14412 bpfptr_t uattr)
14413{
14414 u32 i, nr_core_relo, ncopy, expected_size, rec_size;
14415 struct bpf_core_relo core_relo = {};
14416 struct bpf_prog *prog = env->prog;
14417 const struct btf *btf = prog->aux->btf;
14418 struct bpf_core_ctx ctx = {
14419 .log = &env->log,
14420 .btf = btf,
14421 };
14422 bpfptr_t u_core_relo;
14423 int err;
14424
14425 nr_core_relo = attr->core_relo_cnt;
14426 if (!nr_core_relo)
14427 return 0;
14428 if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
14429 return -EINVAL;
14430
14431 rec_size = attr->core_relo_rec_size;
14432 if (rec_size < MIN_CORE_RELO_SIZE ||
14433 rec_size > MAX_CORE_RELO_SIZE ||
14434 rec_size % sizeof(u32))
14435 return -EINVAL;
14436
14437 u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
14438 expected_size = sizeof(struct bpf_core_relo);
14439 ncopy = min_t(u32, expected_size, rec_size);
14440
14441 /* Unlike func_info and line_info, copy and apply each CO-RE
14442 * relocation record one at a time.
14443 */
14444 for (i = 0; i < nr_core_relo; i++) {
14445 /* future proofing when sizeof(bpf_core_relo) changes */
14446 err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
14447 if (err) {
14448 if (err == -E2BIG) {
14449 verbose(env, "nonzero tailing record in core_relo");
14450 if (copy_to_bpfptr_offset(uattr,
14451 offsetof(union bpf_attr, core_relo_rec_size),
14452 &expected_size, sizeof(expected_size)))
14453 err = -EFAULT;
14454 }
14455 break;
14456 }
14457
14458 if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
14459 err = -EFAULT;
14460 break;
14461 }
14462
14463 if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
14464 verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
14465 i, core_relo.insn_off, prog->len);
14466 err = -EINVAL;
14467 break;
14468 }
14469
14470 err = bpf_core_apply(&ctx, &core_relo, i,
14471 &prog->insnsi[core_relo.insn_off / 8]);
14472 if (err)
14473 break;
14474 bpfptr_add(&u_core_relo, rec_size);
14475 }
14476 return err;
14477}
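/* Editorial note (not part of the kernel source): unlike func_info and
 * line_info, bpf_core_relo.insn_off is a byte offset, which is why the code
 * above checks "insn_off % 8" and indexes insnsi[insn_off / 8]. A sketch of
 * a record targeting instruction 3 (field names real, values hypothetical):
 *
 *	struct bpf_core_relo relo = {
 *		.insn_off       = 3 * 8,		// bytes, not insn index
 *		.type_id        = target_type_id,
 *		.access_str_off = access_spec_off,
 *		.kind           = BPF_CORE_FIELD_BYTE_OFFSET,
 *	};
 */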
14478
c454a46b
MKL
14479static int check_btf_info(struct bpf_verifier_env *env,
14480 const union bpf_attr *attr,
af2ac3e1 14481 bpfptr_t uattr)
c454a46b
MKL
14482{
14483 struct btf *btf;
14484 int err;
14485
09b28d76
AS
14486 if (!attr->func_info_cnt && !attr->line_info_cnt) {
14487 if (check_abnormal_return(env))
14488 return -EINVAL;
c454a46b 14489 return 0;
09b28d76 14490 }
c454a46b
MKL
14491
14492 btf = btf_get_by_fd(attr->prog_btf_fd);
14493 if (IS_ERR(btf))
14494 return PTR_ERR(btf);
350a5c4d
AS
14495 if (btf_is_kernel(btf)) {
14496 btf_put(btf);
14497 return -EACCES;
14498 }
c454a46b
MKL
14499 env->prog->aux->btf = btf;
14500
14501 err = check_btf_func(env, attr, uattr);
14502 if (err)
14503 return err;
14504
14505 err = check_btf_line(env, attr, uattr);
14506 if (err)
14507 return err;
14508
fbd94c7a
AS
14509 err = check_core_relo(env, attr, uattr);
14510 if (err)
14511 return err;
14512
c454a46b 14513 return 0;
ba64e7d8
YS
14514}
14515
f1174f77
EC
14516/* check %cur's range satisfies %old's */
14517static bool range_within(struct bpf_reg_state *old,
14518 struct bpf_reg_state *cur)
14519{
b03c9f9f
EC
14520 return old->umin_value <= cur->umin_value &&
14521 old->umax_value >= cur->umax_value &&
14522 old->smin_value <= cur->smin_value &&
fd675184
DB
14523 old->smax_value >= cur->smax_value &&
14524 old->u32_min_value <= cur->u32_min_value &&
14525 old->u32_max_value >= cur->u32_max_value &&
14526 old->s32_min_value <= cur->s32_min_value &&
14527 old->s32_max_value >= cur->s32_max_value;
f1174f77
EC
14528}
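/* Editorial example (not part of the kernel source): range_within() is
 * interval containment over all four bound domains. Given
 *
 *	old: smin=0,  smax=100, umin=0,  umax=100  (same for the 32-bit bounds)
 *	cur: smin=10, smax=50,  umin=10, umax=50
 *
 * every cur bound lies within the corresponding old bound, so the already
 * verified old state covers the current one. Widening any cur bound past
 * old, e.g. cur->umax_value = 200, makes range_within() return false.
 */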
14529
f1174f77
EC
14530/* If in the old state two registers had the same id, then they need to have
14531 * the same id in the new state as well. But that id could be different from
14532 * the old state, so we need to track the mapping from old to new ids.
14533 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
14534 * regs with old id 5 must also have new id 9 for the new state to be safe. But
14535 * regs with a different old id could still have new id 9, we don't care about
14536 * that.
14537 * So we look through our idmap to see if this old id has been seen before. If
14538 * so, we require the new id to match; otherwise, we add the id pair to the map.
969bf05e 14539 */
c9e73e3d 14540static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
969bf05e 14541{
f1174f77 14542 unsigned int i;
969bf05e 14543
4633a006
AN
14544 /* either both IDs should be set or both should be zero */
14545 if (!!old_id != !!cur_id)
14546 return false;
14547
14548 if (old_id == 0) /* cur_id == 0 as well */
14549 return true;
14550
c9e73e3d 14551 for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
f1174f77
EC
14552 if (!idmap[i].old) {
14553 /* Reached an empty slot; haven't seen this id before */
14554 idmap[i].old = old_id;
14555 idmap[i].cur = cur_id;
14556 return true;
14557 }
14558 if (idmap[i].old == old_id)
14559 return idmap[i].cur == cur_id;
14560 }
14561 /* We ran out of idmap slots, which should be impossible */
14562 WARN_ON_ONCE(1);
14563 return false;
14564}
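/* Editorial example (not part of the kernel source): with old ids
 * {r1=5, r2=5} and cur ids {r1=9, r2=9}, the first check_ids() call records
 * the 5->9 mapping and the second call matches it, so the states agree.
 * With cur ids {r1=9, r2=8}, the second call finds 5 already mapped to 9
 * and returns false: the old state relied on r1 and r2 sharing an id
 * (e.g. both were NULL-checked via one branch), which cur can't honor.
 */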
14565
9242b5f5
AS
14566static void clean_func_state(struct bpf_verifier_env *env,
14567 struct bpf_func_state *st)
14568{
14569 enum bpf_reg_liveness live;
14570 int i, j;
14571
14572 for (i = 0; i < BPF_REG_FP; i++) {
14573 live = st->regs[i].live;
14574 /* liveness must not touch this register anymore */
14575 st->regs[i].live |= REG_LIVE_DONE;
14576 if (!(live & REG_LIVE_READ))
14577 /* since the register is unused, clear its state
14578 * to make further comparison simpler
14579 */
f54c7898 14580 __mark_reg_not_init(env, &st->regs[i]);
9242b5f5
AS
14581 }
14582
14583 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
14584 live = st->stack[i].spilled_ptr.live;
14585 /* liveness must not touch this stack slot anymore */
14586 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
14587 if (!(live & REG_LIVE_READ)) {
f54c7898 14588 __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
9242b5f5
AS
14589 for (j = 0; j < BPF_REG_SIZE; j++)
14590 st->stack[i].slot_type[j] = STACK_INVALID;
14591 }
14592 }
14593}
14594
14595static void clean_verifier_state(struct bpf_verifier_env *env,
14596 struct bpf_verifier_state *st)
14597{
14598 int i;
14599
14600 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
14601 /* all regs in this state in all frames were already marked */
14602 return;
14603
14604 for (i = 0; i <= st->curframe; i++)
14605 clean_func_state(env, st->frame[i]);
14606}
14607
14608/* the parentage chains form a tree.
14609 * the verifier states are added to state lists at given insn and
14610 * pushed into state stack for future exploration.
 14611 * when the verifier reaches bpf_exit insn some of the verifier states
14612 * stored in the state lists have their final liveness state already,
14613 * but a lot of states will get revised from liveness point of view when
14614 * the verifier explores other branches.
14615 * Example:
14616 * 1: r0 = 1
14617 * 2: if r1 == 100 goto pc+1
14618 * 3: r0 = 2
14619 * 4: exit
14620 * when the verifier reaches exit insn the register r0 in the state list of
14621 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
14622 * of insn 2 and goes exploring further. At the insn 4 it will walk the
14623 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
14624 *
14625 * Since the verifier pushes the branch states as it sees them while exploring
 14626 * the program, the condition of walking the branch instruction for the second
14627 * time means that all states below this branch were already explored and
8fb33b60 14628 * their final liveness marks are already propagated.
9242b5f5
AS
14629 * Hence when the verifier completes the search of state list in is_state_visited()
14630 * we can call this clean_live_states() function to mark all liveness states
14631 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
14632 * will not be used.
 14633 * This function also clears the registers and stack slots that were
 14634 * not marked REG_LIVE_READ, to simplify state merging.
14635 *
 14636 * An important note here: walking the same branch instruction in the callee
 14637 * doesn't mean that the states are DONE. The verifier has to compare
 14638 * the callsites as well.
14639 */
14640static void clean_live_states(struct bpf_verifier_env *env, int insn,
14641 struct bpf_verifier_state *cur)
14642{
14643 struct bpf_verifier_state_list *sl;
14644 int i;
14645
5d839021 14646 sl = *explored_state(env, insn);
a8f500af 14647 while (sl) {
2589726d
AS
14648 if (sl->state.branches)
14649 goto next;
dc2a4ebc
AS
14650 if (sl->state.insn_idx != insn ||
14651 sl->state.curframe != cur->curframe)
9242b5f5
AS
14652 goto next;
14653 for (i = 0; i <= cur->curframe; i++)
14654 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
14655 goto next;
14656 clean_verifier_state(env, &sl->state);
14657next:
14658 sl = sl->next;
14659 }
14660}
14661
4a95c85c 14662static bool regs_exact(const struct bpf_reg_state *rold,
4633a006
AN
14663 const struct bpf_reg_state *rcur,
14664 struct bpf_id_pair *idmap)
4a95c85c 14665{
4633a006
AN
14666 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
14667 check_ids(rold->id, rcur->id, idmap) &&
14668 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
4a95c85c
AN
14669}
14670
f1174f77 14671/* Returns true if (rold safe implies rcur safe) */
e042aa53
DB
14672static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
14673 struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
f1174f77 14674{
dc503a8a
EC
14675 if (!(rold->live & REG_LIVE_READ))
14676 /* explored state didn't use this */
14677 return true;
f1174f77
EC
14678 if (rold->type == NOT_INIT)
14679 /* explored state can't have used this */
969bf05e 14680 return true;
f1174f77
EC
14681 if (rcur->type == NOT_INIT)
14682 return false;
7f4ce97c 14683
910f6999
AN
14684 /* Enforce that register types have to match exactly, including their
14685 * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general
14686 * rule.
14687 *
14688 * One can make a point that using a pointer register as unbounded
14689 * SCALAR would be technically acceptable, but this could lead to
14690 * pointer leaks because scalars are allowed to leak while pointers
14691 * are not. We could make this safe in special cases if root is
14692 * calling us, but it's probably not worth the hassle.
14693 *
14694 * Also, register types that are *not* MAYBE_NULL could technically be
14695 * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE
14696 * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point
14697 * to the same map).
7f4ce97c
AN
14698 * However, if the old MAYBE_NULL register then got NULL checked,
14699 * doing so could have affected others with the same id, and we can't
14700 * check for that because we lost the id when we converted to
14701 * a non-MAYBE_NULL variant.
14702 * So, as a general rule we don't allow mixing MAYBE_NULL and
910f6999 14703 * non-MAYBE_NULL registers as well.
7f4ce97c 14704 */
910f6999 14705 if (rold->type != rcur->type)
7f4ce97c
AN
14706 return false;
14707
c25b2ae1 14708 switch (base_type(rold->type)) {
f1174f77 14709 case SCALAR_VALUE:
4633a006 14710 if (regs_exact(rold, rcur, idmap))
7c884339 14711 return true;
e042aa53
DB
14712 if (env->explore_alu_limits)
14713 return false;
910f6999
AN
14714 if (!rold->precise)
14715 return true;
14716 /* new val must satisfy old val knowledge */
14717 return range_within(rold, rcur) &&
14718 tnum_in(rold->var_off, rcur->var_off);
69c087ba 14719 case PTR_TO_MAP_KEY:
f1174f77 14720 case PTR_TO_MAP_VALUE:
567da5d2
AN
14721 case PTR_TO_MEM:
14722 case PTR_TO_BUF:
14723 case PTR_TO_TP_BUFFER:
1b688a19
EC
14724 /* If the new min/max/var_off satisfy the old ones and
14725 * everything else matches, we are OK.
1b688a19 14726 */
a73bf9f2 14727 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 &&
1b688a19 14728 range_within(rold, rcur) &&
4ea2bb15 14729 tnum_in(rold->var_off, rcur->var_off) &&
567da5d2
AN
14730 check_ids(rold->id, rcur->id, idmap) &&
14731 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap);
de8f3a83 14732 case PTR_TO_PACKET_META:
f1174f77 14733 case PTR_TO_PACKET:
f1174f77
EC
14734 /* We must have at least as much range as the old ptr
14735 * did, so that any accesses which were safe before are
14736 * still safe. This is true even if old range < old off,
14737 * since someone could have accessed through (ptr - k), or
14738 * even done ptr -= k in a register, to get a safe access.
14739 */
14740 if (rold->range > rcur->range)
14741 return false;
14742 /* If the offsets don't match, we can't trust our alignment;
14743 * nor can we be sure that we won't fall out of range.
14744 */
14745 if (rold->off != rcur->off)
14746 return false;
14747 /* id relations must be preserved */
4633a006 14748 if (!check_ids(rold->id, rcur->id, idmap))
f1174f77
EC
14749 return false;
14750 /* new val must satisfy old val knowledge */
14751 return range_within(rold, rcur) &&
14752 tnum_in(rold->var_off, rcur->var_off);
7c884339
EZ
14753 case PTR_TO_STACK:
14754 /* two stack pointers are equal only if they're pointing to
14755 * the same stack frame, since fp-8 in foo != fp-8 in bar
f1174f77 14756 */
4633a006 14757 return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno;
f1174f77 14758 default:
4633a006 14759 return regs_exact(rold, rcur, idmap);
f1174f77 14760 }
969bf05e
AS
14761}
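/* Editorial example (not part of the kernel source): for SCALAR_VALUE the
 * ordering above matters. If the old scalar never needed precision
 * (!rold->precise), any current scalar is accepted. Once precise,
 * containment is required: an old range [0,10] covers a cur range [2,5]
 * but not [2,20], and tnum_in() additionally requires cur's known bits to
 * agree with old's, e.g. an old var_off proving the low bit is 0 only
 * admits cur states that also prove it.
 */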
14762
e042aa53
DB
14763static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
14764 struct bpf_func_state *cur, struct bpf_id_pair *idmap)
638f5b90
AS
14765{
14766 int i, spi;
14767
638f5b90
AS
14768 /* walk slots of the explored stack and ignore any additional
14769 * slots in the current stack, since explored(safe) state
14770 * didn't use them
14771 */
14772 for (i = 0; i < old->allocated_stack; i++) {
06accc87
AN
14773 struct bpf_reg_state *old_reg, *cur_reg;
14774
638f5b90
AS
14775 spi = i / BPF_REG_SIZE;
14776
b233920c
AS
14777 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
14778 i += BPF_REG_SIZE - 1;
cc2b14d5 14779 /* explored state didn't use this */
fd05e57b 14780 continue;
b233920c 14781 }
cc2b14d5 14782
638f5b90
AS
14783 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
14784 continue;
19e2dbb7 14785
6715df8d
EZ
14786 if (env->allow_uninit_stack &&
14787 old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
14788 continue;
14789
19e2dbb7
AS
14790 /* explored stack has more populated slots than current stack
14791 * and these slots were used
14792 */
14793 if (i >= cur->allocated_stack)
14794 return false;
14795
cc2b14d5
AS
14796 /* if old state was safe with misc data in the stack
14797 * it will be safe with zero-initialized stack.
14798 * The opposite is not true
14799 */
14800 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
14801 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
14802 continue;
638f5b90
AS
14803 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
14804 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
14805 /* Ex: old explored (safe) state has STACK_SPILL in
b8c1a309 14806 * this stack slot, but current has STACK_MISC ->
638f5b90
AS
 14807 * these verifier states are not equivalent,
14808 * return false to continue verification of this path
14809 */
14810 return false;
27113c59 14811 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
638f5b90 14812 continue;
d6fefa11
KKD
14813 /* Both old and cur are having same slot_type */
14814 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) {
14815 case STACK_SPILL:
638f5b90
AS
14816 /* when explored and current stack slot are both storing
14817 * spilled registers, check that stored pointers types
14818 * are the same as well.
14819 * Ex: explored safe path could have stored
14820 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
14821 * but current path has stored:
14822 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
14823 * such verifier states are not equivalent.
14824 * return false to continue verification of this path
14825 */
d6fefa11
KKD
14826 if (!regsafe(env, &old->stack[spi].spilled_ptr,
14827 &cur->stack[spi].spilled_ptr, idmap))
14828 return false;
14829 break;
14830 case STACK_DYNPTR:
d6fefa11
KKD
14831 old_reg = &old->stack[spi].spilled_ptr;
14832 cur_reg = &cur->stack[spi].spilled_ptr;
14833 if (old_reg->dynptr.type != cur_reg->dynptr.type ||
14834 old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot ||
14835 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
14836 return false;
14837 break;
06accc87
AN
14838 case STACK_ITER:
14839 old_reg = &old->stack[spi].spilled_ptr;
14840 cur_reg = &cur->stack[spi].spilled_ptr;
14841 /* iter.depth is not compared between states as it
14842 * doesn't matter for correctness and would otherwise
14843 * prevent convergence; we maintain it only to prevent
14844 * infinite loop check triggering, see
14845 * iter_active_depths_differ()
14846 */
14847 if (old_reg->iter.btf != cur_reg->iter.btf ||
14848 old_reg->iter.btf_id != cur_reg->iter.btf_id ||
14849 old_reg->iter.state != cur_reg->iter.state ||
14850 /* ignore {old_reg,cur_reg}->iter.depth, see above */
14851 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap))
14852 return false;
14853 break;
d6fefa11
KKD
14854 case STACK_MISC:
14855 case STACK_ZERO:
14856 case STACK_INVALID:
14857 continue;
14858 /* Ensure that new unhandled slot types return false by default */
14859 default:
638f5b90 14860 return false;
d6fefa11 14861 }
638f5b90
AS
14862 }
14863 return true;
14864}
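/* Editorial example (not part of the kernel source): the STACK_MISC vs
 * STACK_ZERO asymmetry above means an old state verified with fp-8 being
 * STACK_MISC (arbitrary bytes) also covers a current state where fp-8 is
 * STACK_ZERO, since zeroed bytes are one instance of "any data". The
 * reverse is rejected: a path proven safe only for zeroed stack proves
 * nothing about arbitrary bytes.
 */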
14865
e8f55fcf
AN
14866static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur,
14867 struct bpf_id_pair *idmap)
fd978bf7 14868{
e8f55fcf
AN
14869 int i;
14870
fd978bf7
JS
14871 if (old->acquired_refs != cur->acquired_refs)
14872 return false;
e8f55fcf
AN
14873
14874 for (i = 0; i < old->acquired_refs; i++) {
14875 if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap))
14876 return false;
14877 }
14878
14879 return true;
fd978bf7
JS
14880}
14881
f1bca824
AS
14882/* compare two verifier states
14883 *
14884 * all states stored in state_list are known to be valid, since
14885 * verifier reached 'bpf_exit' instruction through them
14886 *
14887 * this function is called when verifier exploring different branches of
14888 * execution popped from the state stack. If it sees an old state that has
14889 * more strict register state and more strict stack state then this execution
14890 * branch doesn't need to be explored further, since verifier already
14891 * concluded that more strict state leads to valid finish.
14892 *
14893 * Therefore two states are equivalent if register state is more conservative
14894 * and explored stack state is more conservative than the current one.
14895 * Example:
14896 * explored current
14897 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
14898 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
14899 *
 14900 * In other words, if the current stack state (the one being explored) has
 14901 * more valid slots than the old one that already passed validation, it means
14902 * the verifier can stop exploring and conclude that current state is valid too
14903 *
14904 * Similarly with registers. If explored state has register type as invalid
14905 * whereas register type in current state is meaningful, it means that
14906 * the current state will reach 'bpf_exit' instruction safely
14907 */
c9e73e3d 14908static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
f4d7e40a 14909 struct bpf_func_state *cur)
f1bca824
AS
14910{
14911 int i;
14912
c9e73e3d 14913 for (i = 0; i < MAX_BPF_REG; i++)
e042aa53
DB
14914 if (!regsafe(env, &old->regs[i], &cur->regs[i],
14915 env->idmap_scratch))
c9e73e3d 14916 return false;
f1bca824 14917
e042aa53 14918 if (!stacksafe(env, old, cur, env->idmap_scratch))
c9e73e3d 14919 return false;
fd978bf7 14920
e8f55fcf 14921 if (!refsafe(old, cur, env->idmap_scratch))
c9e73e3d
LB
14922 return false;
14923
14924 return true;
f1bca824
AS
14925}
14926
f4d7e40a
AS
14927static bool states_equal(struct bpf_verifier_env *env,
14928 struct bpf_verifier_state *old,
14929 struct bpf_verifier_state *cur)
14930{
14931 int i;
14932
14933 if (old->curframe != cur->curframe)
14934 return false;
14935
5dd9cdbc
EZ
14936 memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
14937
979d63d5
DB
14938 /* Verification state from speculative execution simulation
14939 * must never prune a non-speculative execution one.
14940 */
14941 if (old->speculative && !cur->speculative)
14942 return false;
14943
4ea2bb15
EZ
14944 if (old->active_lock.ptr != cur->active_lock.ptr)
14945 return false;
14946
14947 /* Old and cur active_lock's have to be either both present
14948 * or both absent.
14949 */
14950 if (!!old->active_lock.id != !!cur->active_lock.id)
14951 return false;
14952
14953 if (old->active_lock.id &&
14954 !check_ids(old->active_lock.id, cur->active_lock.id, env->idmap_scratch))
d83525ca
AS
14955 return false;
14956
9bb00b28 14957 if (old->active_rcu_lock != cur->active_rcu_lock)
d83525ca
AS
14958 return false;
14959
f4d7e40a
AS
14960 /* for states to be equal callsites have to be the same
14961 * and all frame states need to be equivalent
14962 */
14963 for (i = 0; i <= old->curframe; i++) {
14964 if (old->frame[i]->callsite != cur->frame[i]->callsite)
14965 return false;
c9e73e3d 14966 if (!func_states_equal(env, old->frame[i], cur->frame[i]))
f4d7e40a
AS
14967 return false;
14968 }
14969 return true;
14970}
14971
5327ed3d
JW
14972/* Return 0 if no propagation happened. Return negative error code if error
14973 * happened. Otherwise, return the propagated bit.
14974 */
55e7f3b5
JW
14975static int propagate_liveness_reg(struct bpf_verifier_env *env,
14976 struct bpf_reg_state *reg,
14977 struct bpf_reg_state *parent_reg)
14978{
5327ed3d
JW
14979 u8 parent_flag = parent_reg->live & REG_LIVE_READ;
14980 u8 flag = reg->live & REG_LIVE_READ;
55e7f3b5
JW
14981 int err;
14982
5327ed3d
JW
14983 /* When comes here, read flags of PARENT_REG or REG could be any of
14984 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
14985 * of propagation if PARENT_REG has strongest REG_LIVE_READ64.
14986 */
14987 if (parent_flag == REG_LIVE_READ64 ||
14988 /* Or if there is no read flag from REG. */
14989 !flag ||
14990 /* Or if the read flag from REG is the same as PARENT_REG. */
14991 parent_flag == flag)
55e7f3b5
JW
14992 return 0;
14993
5327ed3d 14994 err = mark_reg_read(env, reg, parent_reg, flag);
55e7f3b5
JW
14995 if (err)
14996 return err;
14997
5327ed3d 14998 return flag;
55e7f3b5
JW
14999}
15000
8e9cd9ce 15001/* A write screens off any subsequent reads; but write marks come from the
f4d7e40a
AS
15002 * straight-line code between a state and its parent. When we arrive at an
15003 * equivalent state (jump target or such) we didn't arrive by the straight-line
15004 * code, so read marks in the state must propagate to the parent regardless
15005 * of the state's write marks. That's what 'parent == state->parent' comparison
679c782d 15006 * in mark_reg_read() is for.
8e9cd9ce 15007 */
f4d7e40a
AS
15008static int propagate_liveness(struct bpf_verifier_env *env,
15009 const struct bpf_verifier_state *vstate,
15010 struct bpf_verifier_state *vparent)
dc503a8a 15011{
3f8cafa4 15012 struct bpf_reg_state *state_reg, *parent_reg;
f4d7e40a 15013 struct bpf_func_state *state, *parent;
3f8cafa4 15014 int i, frame, err = 0;
dc503a8a 15015
f4d7e40a
AS
15016 if (vparent->curframe != vstate->curframe) {
15017 WARN(1, "propagate_live: parent frame %d current frame %d\n",
15018 vparent->curframe, vstate->curframe);
15019 return -EFAULT;
15020 }
dc503a8a
EC
15021 /* Propagate read liveness of registers... */
15022 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
83d16312 15023 for (frame = 0; frame <= vstate->curframe; frame++) {
3f8cafa4
JW
15024 parent = vparent->frame[frame];
15025 state = vstate->frame[frame];
15026 parent_reg = parent->regs;
15027 state_reg = state->regs;
83d16312
JK
15028 /* We don't need to worry about FP liveness, it's read-only */
15029 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
55e7f3b5
JW
15030 err = propagate_liveness_reg(env, &state_reg[i],
15031 &parent_reg[i]);
5327ed3d 15032 if (err < 0)
3f8cafa4 15033 return err;
5327ed3d
JW
15034 if (err == REG_LIVE_READ64)
15035 mark_insn_zext(env, &parent_reg[i]);
dc503a8a 15036 }
f4d7e40a 15037
1b04aee7 15038 /* Propagate stack slots. */
f4d7e40a
AS
15039 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
15040 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
3f8cafa4
JW
15041 parent_reg = &parent->stack[i].spilled_ptr;
15042 state_reg = &state->stack[i].spilled_ptr;
55e7f3b5
JW
15043 err = propagate_liveness_reg(env, state_reg,
15044 parent_reg);
5327ed3d 15045 if (err < 0)
3f8cafa4 15046 return err;
dc503a8a
EC
15047 }
15048 }
5327ed3d 15049 return 0;
dc503a8a
EC
15050}
15051
a3ce685d
AS
15052/* find precise scalars in the previous equivalent state and
15053 * propagate them into the current state
15054 */
15055static int propagate_precision(struct bpf_verifier_env *env,
15056 const struct bpf_verifier_state *old)
15057{
15058 struct bpf_reg_state *state_reg;
15059 struct bpf_func_state *state;
529409ea 15060 int i, err = 0, fr;
a3ce685d 15061
529409ea
AN
15062 for (fr = old->curframe; fr >= 0; fr--) {
15063 state = old->frame[fr];
15064 state_reg = state->regs;
15065 for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
15066 if (state_reg->type != SCALAR_VALUE ||
52c2b005
AN
15067 !state_reg->precise ||
15068 !(state_reg->live & REG_LIVE_READ))
529409ea
AN
15069 continue;
15070 if (env->log.level & BPF_LOG_LEVEL2)
 15071 verbose(env, "frame %d: propagating r%d\n", fr, i);
15072 err = mark_chain_precision_frame(env, fr, i);
15073 if (err < 0)
15074 return err;
15075 }
a3ce685d 15076
529409ea
AN
15077 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
15078 if (!is_spilled_reg(&state->stack[i]))
15079 continue;
15080 state_reg = &state->stack[i].spilled_ptr;
15081 if (state_reg->type != SCALAR_VALUE ||
52c2b005
AN
15082 !state_reg->precise ||
15083 !(state_reg->live & REG_LIVE_READ))
529409ea
AN
15084 continue;
15085 if (env->log.level & BPF_LOG_LEVEL2)
15086 verbose(env, "frame %d: propagating fp%d\n",
15087 (-i - 1) * BPF_REG_SIZE, fr);
15088 err = mark_chain_precision_stack_frame(env, fr, i);
15089 if (err < 0)
15090 return err;
15091 }
a3ce685d
AS
15092 }
15093 return 0;
15094}
15095
2589726d
AS
15096static bool states_maybe_looping(struct bpf_verifier_state *old,
15097 struct bpf_verifier_state *cur)
15098{
15099 struct bpf_func_state *fold, *fcur;
15100 int i, fr = cur->curframe;
15101
15102 if (old->curframe != fr)
15103 return false;
15104
15105 fold = old->frame[fr];
15106 fcur = cur->frame[fr];
15107 for (i = 0; i < MAX_BPF_REG; i++)
15108 if (memcmp(&fold->regs[i], &fcur->regs[i],
15109 offsetof(struct bpf_reg_state, parent)))
15110 return false;
15111 return true;
15112}
15113
06accc87
AN
15114static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx)
15115{
15116 return env->insn_aux_data[insn_idx].is_iter_next;
15117}
15118
15119/* is_state_visited() handles iter_next() (see process_iter_next_call() for
15120 * terminology) calls specially: as opposed to bounded BPF loops, it *expects*
15121 * states to match, which otherwise would look like an infinite loop. So while
15122 * iter_next() calls are taken care of, we still need to be careful and
 15124 * prevent erroneous and too eager declaration of "infinite loop" when
15124 * iterators are involved.
15125 *
15126 * Here's a situation in pseudo-BPF assembly form:
15127 *
15128 * 0: again: ; set up iter_next() call args
15129 * 1: r1 = &it ; <CHECKPOINT HERE>
15130 * 2: call bpf_iter_num_next ; this is iter_next() call
15131 * 3: if r0 == 0 goto done
15132 * 4: ... something useful here ...
15133 * 5: goto again ; another iteration
15134 * 6: done:
15135 * 7: r1 = &it
15136 * 8: call bpf_iter_num_destroy ; clean up iter state
15137 * 9: exit
15138 *
15139 * This is a typical loop. Let's assume that we have a prune point at 1:,
15140 * before we get to `call bpf_iter_num_next` (e.g., because of that `goto
15141 * again`, assuming other heuristics don't get in a way).
15142 *
15143 * When we first time come to 1:, let's say we have some state X. We proceed
15144 * to 2:, fork states, enqueue ACTIVE, validate NULL case successfully, exit.
15145 * Now we come back to validate that forked ACTIVE state. We proceed through
15146 * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we
15147 * are converging. But the problem is that we don't know that yet, as this
15148 * convergence has to happen at iter_next() call site only. So if nothing is
15149 * done, at 1: verifier will use bounded loop logic and declare infinite
15150 * looping (and would be *technically* correct, if not for iterator's
15151 * "eventual sticky NULL" contract, see process_iter_next_call()). But we
15152 * don't want that. So what we do in process_iter_next_call() when we go on
15153 * another ACTIVE iteration, we bump slot->iter.depth, to mark that it's
15154 * a different iteration. So when we suspect an infinite loop, we additionally
15155 * check if any of the *ACTIVE* iterator states depths differ. If yes, we
15156 * pretend we are not looping and wait for next iter_next() call.
15157 *
15158 * This only applies to ACTIVE state. In DRAINED state we don't expect to
15159 * loop, because that would actually mean infinite loop, as DRAINED state is
15160 * "sticky", and so we'll keep returning into the same instruction with the
15161 * same state (at least in one of possible code paths).
15162 *
 15163 * This approach allows keeping the infinite loop heuristic even in the face
 15164 * of an active iterator. E.g., the C snippet below is and will be detected as
 15165 * infinitely looping:
15166 *
15167 * struct bpf_iter_num it;
15168 * int *p, x;
15169 *
15170 * bpf_iter_num_new(&it, 0, 10);
 15171 * while ((p = bpf_iter_num_next(&it))) {
 15172 * x = *p;
15173 * while (x--) {} // <<-- infinite loop here
15174 * }
15175 *
15176 */
15177static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur)
15178{
15179 struct bpf_reg_state *slot, *cur_slot;
15180 struct bpf_func_state *state;
15181 int i, fr;
15182
15183 for (fr = old->curframe; fr >= 0; fr--) {
15184 state = old->frame[fr];
15185 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
15186 if (state->stack[i].slot_type[0] != STACK_ITER)
15187 continue;
15188
15189 slot = &state->stack[i].spilled_ptr;
15190 if (slot->iter.state != BPF_ITER_STATE_ACTIVE)
15191 continue;
15192
15193 cur_slot = &cur->frame[fr]->stack[i].spilled_ptr;
15194 if (cur_slot->iter.depth != slot->iter.depth)
15195 return true;
15196 }
15197 }
15198 return false;
15199}
2589726d 15200
58e2af8b 15201static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
f1bca824 15202{
58e2af8b 15203 struct bpf_verifier_state_list *new_sl;
9f4686c4 15204 struct bpf_verifier_state_list *sl, **pprev;
679c782d 15205 struct bpf_verifier_state *cur = env->cur_state, *new;
ceefbc96 15206 int i, j, err, states_cnt = 0;
4b5ce570
AN
15207 bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx);
15208 bool add_new_state = force_new_state;
f1bca824 15209
2589726d
AS
15210 /* bpf progs typically have pruning point every 4 instructions
15211 * http://vger.kernel.org/bpfconf2019.html#session-1
15212 * Do not add new state for future pruning if the verifier hasn't seen
15213 * at least 2 jumps and at least 8 instructions.
 15215 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
15215 * In tests that amounts to up to 50% reduction into total verifier
15216 * memory consumption and 20% verifier time speedup.
15217 */
15218 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
15219 env->insn_processed - env->prev_insn_processed >= 8)
15220 add_new_state = true;
15221
a8f500af
AS
15222 pprev = explored_state(env, insn_idx);
15223 sl = *pprev;
15224
9242b5f5
AS
15225 clean_live_states(env, insn_idx, cur);
15226
a8f500af 15227 while (sl) {
dc2a4ebc
AS
15228 states_cnt++;
15229 if (sl->state.insn_idx != insn_idx)
15230 goto next;
bfc6bb74 15231
2589726d 15232 if (sl->state.branches) {
bfc6bb74
AS
15233 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
15234
15235 if (frame->in_async_callback_fn &&
15236 frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
15237 /* Different async_entry_cnt means that the verifier is
15238 * processing another entry into async callback.
15239 * Seeing the same state is not an indication of infinite
15240 * loop or infinite recursion.
15241 * But finding the same state doesn't mean that it's safe
15242 * to stop processing the current state. The previous state
15243 * hasn't yet reached bpf_exit, since state.branches > 0.
 15244 * Checking in_async_callback_fn alone is not enough either,
 15245 * since the verifier still needs to catch infinite loops
 15246 * inside async callbacks.
15247 */
06accc87
AN
15248 goto skip_inf_loop_check;
15249 }
15250 /* BPF open-coded iterators loop detection is special.
15251 * states_maybe_looping() logic is too simplistic in detecting
15252 * states that *might* be equivalent, because it doesn't know
15253 * about ID remapping, so don't even perform it.
15254 * See process_iter_next_call() and iter_active_depths_differ()
15255 * for overview of the logic. When current and one of parent
15256 * states are detected as equivalent, it's a good thing: we prove
15257 * convergence and can stop simulating further iterations.
15258 * It's safe to assume that iterator loop will finish, taking into
15259 * account iter_next() contract of eventually returning
15260 * sticky NULL result.
15261 */
15262 if (is_iter_next_insn(env, insn_idx)) {
15263 if (states_equal(env, &sl->state, cur)) {
15264 struct bpf_func_state *cur_frame;
15265 struct bpf_reg_state *iter_state, *iter_reg;
15266 int spi;
15267
15268 cur_frame = cur->frame[cur->curframe];
15269 /* btf_check_iter_kfuncs() enforces that
15270 * iter state pointer is always the first arg
15271 */
15272 iter_reg = &cur_frame->regs[BPF_REG_1];
15273 /* current state is valid due to states_equal(),
15274 * so we can assume valid iter and reg state,
15275 * no need for extra (re-)validations
15276 */
15277 spi = __get_spi(iter_reg->off + iter_reg->var_off.value);
15278 iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr;
15279 if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE)
15280 goto hit;
15281 }
15282 goto skip_inf_loop_check;
15283 }
15284 /* attempt to detect infinite loop to avoid unnecessary doomed work */
15285 if (states_maybe_looping(&sl->state, cur) &&
15286 states_equal(env, &sl->state, cur) &&
15287 !iter_active_depths_differ(&sl->state, cur)) {
2589726d
AS
15288 verbose_linfo(env, insn_idx, "; ");
15289 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
15290 return -EINVAL;
15291 }
15292 /* if the verifier is processing a loop, avoid adding new state
15293 * too often, since different loop iterations have distinct
15294 * states and may not help future pruning.
15295 * This threshold shouldn't be too low to make sure that
15296 * a loop with large bound will be rejected quickly.
15297 * The most abusive loop will be:
15298 * r1 += 1
15299 * if r1 < 1000000 goto pc-2
 15300 * 1M insn_processed limit / 100 == 10k peak states.
15301 * This threshold shouldn't be too high either, since states
15302 * at the end of the loop are likely to be useful in pruning.
15303 */
06accc87 15304skip_inf_loop_check:
4b5ce570 15305 if (!force_new_state &&
98ddcf38 15306 env->jmps_processed - env->prev_jmps_processed < 20 &&
2589726d
AS
15307 env->insn_processed - env->prev_insn_processed < 100)
15308 add_new_state = false;
15309 goto miss;
15310 }
638f5b90 15311 if (states_equal(env, &sl->state, cur)) {
06accc87 15312hit:
9f4686c4 15313 sl->hit_cnt++;
f1bca824 15314 /* reached equivalent register/stack state,
dc503a8a
EC
15315 * prune the search.
15316 * Registers read by the continuation are read by us.
8e9cd9ce
EC
15317 * If we have any write marks in env->cur_state, they
15318 * will prevent corresponding reads in the continuation
15319 * from reaching our parent (an explored_state). Our
15320 * own state will get the read marks recorded, but
15321 * they'll be immediately forgotten as we're pruning
15322 * this state and will pop a new one.
f1bca824 15323 */
f4d7e40a 15324 err = propagate_liveness(env, &sl->state, cur);
a3ce685d
AS
15325
15326 /* if previous state reached the exit with precision and
 15327 * current state is equivalent to it (except precision marks)
15328 * the precision needs to be propagated back in
15329 * the current state.
15330 */
15331 err = err ? : push_jmp_history(env, cur);
15332 err = err ? : propagate_precision(env, &sl->state);
f4d7e40a
AS
15333 if (err)
15334 return err;
f1bca824 15335 return 1;
dc503a8a 15336 }
2589726d
AS
15337miss:
 15338 /* when a new state is not going to be added, do not increase the miss count.
15339 * Otherwise several loop iterations will remove the state
15340 * recorded earlier. The goal of these heuristics is to have
15341 * states from some iterations of the loop (some in the beginning
15342 * and some at the end) to help pruning.
15343 */
15344 if (add_new_state)
15345 sl->miss_cnt++;
9f4686c4
AS
 15346 /* heuristic to determine whether this state is worth keeping
 15347 * for future state-equivalence checks.
15348 * Higher numbers increase max_states_per_insn and verification time,
15349 * but do not meaningfully decrease insn_processed.
15350 */
15351 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
15352 /* the state is unlikely to be useful. Remove it to
15353 * speed up verification
15354 */
15355 *pprev = sl->next;
15356 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
2589726d
AS
15357 u32 br = sl->state.branches;
15358
15359 WARN_ONCE(br,
15360 "BUG live_done but branches_to_explore %d\n",
15361 br);
9f4686c4
AS
15362 free_verifier_state(&sl->state, false);
15363 kfree(sl);
15364 env->peak_states--;
15365 } else {
15366 /* cannot free this state, since parentage chain may
 15367 * walk it later. Add it to the free_list instead, to
15368 * be freed at the end of verification
15369 */
15370 sl->next = env->free_list;
15371 env->free_list = sl;
15372 }
15373 sl = *pprev;
15374 continue;
15375 }
dc2a4ebc 15376next:
9f4686c4
AS
15377 pprev = &sl->next;
15378 sl = *pprev;
f1bca824
AS
15379 }
15380
06ee7115
AS
15381 if (env->max_states_per_insn < states_cnt)
15382 env->max_states_per_insn = states_cnt;
15383
2c78ee89 15384 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
a095f421 15385 return 0;
ceefbc96 15386
2589726d 15387 if (!add_new_state)
a095f421 15388 return 0;
ceefbc96 15389
2589726d
AS
15390 /* There were no equivalent states, remember the current one.
15391 * Technically the current state is not proven to be safe yet,
f4d7e40a 15392 * but it will either reach outermost bpf_exit (which means it's safe)
2589726d 15393 * or it will be rejected. When there are no loops the verifier won't be
f4d7e40a 15394 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
2589726d
AS
15395 * again on the way to bpf_exit.
15396 * When looping the sl->state.branches will be > 0 and this state
15397 * will not be considered for equivalence until branches == 0.
f1bca824 15398 */
638f5b90 15399 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
f1bca824
AS
15400 if (!new_sl)
15401 return -ENOMEM;
06ee7115
AS
15402 env->total_states++;
15403 env->peak_states++;
2589726d
AS
15404 env->prev_jmps_processed = env->jmps_processed;
15405 env->prev_insn_processed = env->insn_processed;
f1bca824 15406
7a830b53
AN
15407 /* forget precise markings we inherited, see __mark_chain_precision */
15408 if (env->bpf_capable)
15409 mark_all_scalars_imprecise(env, cur);
15410
f1bca824 15411 /* add new state to the head of linked list */
679c782d
EC
15412 new = &new_sl->state;
15413 err = copy_verifier_state(new, cur);
1969db47 15414 if (err) {
679c782d 15415 free_verifier_state(new, false);
1969db47
AS
15416 kfree(new_sl);
15417 return err;
15418 }
dc2a4ebc 15419 new->insn_idx = insn_idx;
2589726d
AS
15420 WARN_ONCE(new->branches != 1,
15421 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
b5dc0163 15422
2589726d 15423 cur->parent = new;
b5dc0163
AS
15424 cur->first_insn_idx = insn_idx;
15425 clear_jmp_history(cur);
5d839021
AS
15426 new_sl->next = *explored_state(env, insn_idx);
15427 *explored_state(env, insn_idx) = new_sl;
7640ead9
JK
15428 /* connect new state to parentage chain. Current frame needs all
15429 * registers connected. Only r6 - r9 of the callers are alive (pushed
15430 * to the stack implicitly by JITs) so in callers' frames connect just
15431 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
15432 * the state of the call instruction (with WRITTEN set), and r0 comes
15433 * from callee with its full parentage chain, anyway.
15434 */
8e9cd9ce
EC
15435 /* clear write marks in current state: the writes we did are not writes
15436 * our child did, so they don't screen off its reads from us.
15437 * (There are no read marks in current state, because reads always mark
15438 * their parent and current state never has children yet. Only
15439 * explored_states can get read marks.)
15440 */
eea1c227
AS
15441 for (j = 0; j <= cur->curframe; j++) {
15442 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
15443 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
15444 for (i = 0; i < BPF_REG_FP; i++)
15445 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
15446 }
f4d7e40a
AS
15447
15448 /* all stack frames are accessible from callee, clear them all */
15449 for (j = 0; j <= cur->curframe; j++) {
15450 struct bpf_func_state *frame = cur->frame[j];
679c782d 15451 struct bpf_func_state *newframe = new->frame[j];
f4d7e40a 15452
679c782d 15453 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
cc2b14d5 15454 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
679c782d
EC
15455 frame->stack[i].spilled_ptr.parent =
15456 &newframe->stack[i].spilled_ptr;
15457 }
f4d7e40a 15458 }
f1bca824
AS
15459 return 0;
15460}
15461
c64b7983
JS
15462/* Return true if it's OK to have the same insn return a different type. */
15463static bool reg_type_mismatch_ok(enum bpf_reg_type type)
15464{
c25b2ae1 15465 switch (base_type(type)) {
c64b7983
JS
15466 case PTR_TO_CTX:
15467 case PTR_TO_SOCKET:
46f8bc92 15468 case PTR_TO_SOCK_COMMON:
655a51e5 15469 case PTR_TO_TCP_SOCK:
fada7fdc 15470 case PTR_TO_XDP_SOCK:
2a02759e 15471 case PTR_TO_BTF_ID:
c64b7983
JS
15472 return false;
15473 default:
15474 return true;
15475 }
15476}
15477
15478/* If an instruction was previously used with particular pointer types, then we
15479 * need to be careful to avoid cases such as the below, where it may be ok
15480 * for one branch accessing the pointer, but not ok for the other branch:
15481 *
15482 * R1 = sock_ptr
15483 * goto X;
15484 * ...
15485 * R1 = some_other_valid_ptr;
15486 * goto X;
15487 * ...
15488 * R2 = *(u32 *)(R1 + 0);
15489 */
15490static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
15491{
15492 return src != prev && (!reg_type_mismatch_ok(src) ||
15493 !reg_type_mismatch_ok(prev));
15494}
15495
0d80a619
EZ
15496static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type,
15497 bool allow_trust_missmatch)
15498{
15499 enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type;
15500
15501 if (*prev_type == NOT_INIT) {
15502 /* Saw a valid insn
15503 * dst_reg = *(u32 *)(src_reg + off)
15504 * save type to validate intersecting paths
15505 */
15506 *prev_type = type;
15507 } else if (reg_type_mismatch(type, *prev_type)) {
15508 /* Abuser program is trying to use the same insn
15509 * dst_reg = *(u32*) (src_reg + off)
15510 * with different pointer types:
15511 * src_reg == ctx in one branch and
15512 * src_reg == stack|map in some other branch.
15513 * Reject it.
15514 */
15515 if (allow_trust_missmatch &&
15516 base_type(type) == PTR_TO_BTF_ID &&
15517 base_type(*prev_type) == PTR_TO_BTF_ID) {
15518 /*
 15519 * Have to support a use case where one path through
 15520 * the program yields a TRUSTED pointer while another
 15521 * is UNTRUSTED. Fall back to UNTRUSTED to generate
 15522 * BPF_PROBE_MEM.
15523 */
15524 *prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
15525 } else {
15526 verbose(env, "same insn cannot be used with different pointers\n");
15527 return -EINVAL;
15528 }
15529 }
15530
15531 return 0;
15532}
15533
58e2af8b 15534static int do_check(struct bpf_verifier_env *env)
17a52670 15535{
6f8a57cc 15536 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
51c39bb1 15537 struct bpf_verifier_state *state = env->cur_state;
17a52670 15538 struct bpf_insn *insns = env->prog->insnsi;
638f5b90 15539 struct bpf_reg_state *regs;
06ee7115 15540 int insn_cnt = env->prog->len;
17a52670 15541 bool do_print_state = false;
b5dc0163 15542 int prev_insn_idx = -1;
17a52670 15543
17a52670
AS
15544 for (;;) {
15545 struct bpf_insn *insn;
15546 u8 class;
15547 int err;
15548
b5dc0163 15549 env->prev_insn_idx = prev_insn_idx;
c08435ec 15550 if (env->insn_idx >= insn_cnt) {
61bd5218 15551 verbose(env, "invalid insn idx %d insn_cnt %d\n",
c08435ec 15552 env->insn_idx, insn_cnt);
17a52670
AS
15553 return -EFAULT;
15554 }
15555
c08435ec 15556 insn = &insns[env->insn_idx];
17a52670
AS
15557 class = BPF_CLASS(insn->code);
15558
06ee7115 15559 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
61bd5218
JK
15560 verbose(env,
15561 "BPF program is too large. Processed %d insn\n",
06ee7115 15562 env->insn_processed);
17a52670
AS
15563 return -E2BIG;
15564 }
15565
a095f421
AN
15566 state->last_insn_idx = env->prev_insn_idx;
15567
15568 if (is_prune_point(env, env->insn_idx)) {
15569 err = is_state_visited(env, env->insn_idx);
15570 if (err < 0)
15571 return err;
15572 if (err == 1) {
15573 /* found equivalent state, can prune the search */
15574 if (env->log.level & BPF_LOG_LEVEL) {
15575 if (do_print_state)
15576 verbose(env, "\nfrom %d to %d%s: safe\n",
15577 env->prev_insn_idx, env->insn_idx,
15578 env->cur_state->speculative ?
15579 " (speculative execution)" : "");
15580 else
15581 verbose(env, "%d: safe\n", env->insn_idx);
15582 }
15583 goto process_bpf_exit;
f1bca824 15584 }
a095f421
AN
15585 }
15586
15587 if (is_jmp_point(env, env->insn_idx)) {
15588 err = push_jmp_history(env, state);
15589 if (err)
15590 return err;
f1bca824
AS
15591 }
15592
c3494801
AS
15593 if (signal_pending(current))
15594 return -EAGAIN;
15595
3c2ce60b
DB
15596 if (need_resched())
15597 cond_resched();
15598
2e576648
CL
15599 if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
15600 verbose(env, "\nfrom %d to %d%s:",
15601 env->prev_insn_idx, env->insn_idx,
15602 env->cur_state->speculative ?
15603 " (speculative execution)" : "");
15604 print_verifier_state(env, state->frame[state->curframe], true);
17a52670
AS
15605 do_print_state = false;
15606 }
15607
06ee7115 15608 if (env->log.level & BPF_LOG_LEVEL) {
7105e828 15609 const struct bpf_insn_cbs cbs = {
e6ac2450 15610 .cb_call = disasm_kfunc_name,
7105e828 15611 .cb_print = verbose,
abe08840 15612 .private_data = env,
7105e828
DB
15613 };
15614
2e576648
CL
15615 if (verifier_state_scratched(env))
15616 print_insn_state(env, state->frame[state->curframe]);
15617
c08435ec 15618 verbose_linfo(env, env->insn_idx, "; ");
2e576648 15619 env->prev_log_len = env->log.len_used;
c08435ec 15620 verbose(env, "%d: ", env->insn_idx);
abe08840 15621 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
2e576648
CL
15622 env->prev_insn_print_len = env->log.len_used - env->prev_log_len;
15623 env->prev_log_len = env->log.len_used;
17a52670
AS
15624 }
15625
9d03ebc7 15626 if (bpf_prog_is_offloaded(env->prog->aux)) {
c08435ec
DB
15627 err = bpf_prog_offload_verify_insn(env, env->insn_idx,
15628 env->prev_insn_idx);
cae1927c
JK
15629 if (err)
15630 return err;
15631 }
13a27dfc 15632
638f5b90 15633 regs = cur_regs(env);
fe9a5ca7 15634 sanitize_mark_insn_seen(env);
b5dc0163 15635 prev_insn_idx = env->insn_idx;
fd978bf7 15636
17a52670 15637 if (class == BPF_ALU || class == BPF_ALU64) {
1be7f75d 15638 err = check_alu_op(env, insn);
17a52670
AS
15639 if (err)
15640 return err;
15641
15642 } else if (class == BPF_LDX) {
0d80a619 15643 enum bpf_reg_type src_reg_type;
9bac3d6d
AS
15644
15645 /* check for reserved fields is already done */
15646
17a52670 15647 /* check src operand */
dc503a8a 15648 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
15649 if (err)
15650 return err;
15651
dc503a8a 15652 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
15653 if (err)
15654 return err;
15655
725f9dcd
AS
15656 src_reg_type = regs[insn->src_reg].type;
15657
17a52670
AS
15658 /* check that memory (src_reg + off) is readable,
15659 * the state of dst_reg will be updated by this func
15660 */
c08435ec
DB
15661 err = check_mem_access(env, env->insn_idx, insn->src_reg,
15662 insn->off, BPF_SIZE(insn->code),
15663 BPF_READ, insn->dst_reg, false);
15664 if (err)
15665 return err;
15666
15667 err = save_aux_ptr_type(env, src_reg_type, true);
15668 if (err)
15669 return err;
17a52670 15670 } else if (class == BPF_STX) {
0d80a619 15671 enum bpf_reg_type dst_reg_type;
d691f9e8 15672
15673 if (BPF_MODE(insn->code) == BPF_ATOMIC) {
15674 err = check_atomic(env, env->insn_idx, insn);
15675 if (err)
15676 return err;
c08435ec 15677 env->insn_idx++;
15678 continue;
15679 }
15680
15681 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
15682 verbose(env, "BPF_STX uses reserved fields\n");
15683 return -EINVAL;
15684 }
15685
17a52670 15686 /* check src1 operand */
dc503a8a 15687 err = check_reg_arg(env, insn->src_reg, SRC_OP);
15688 if (err)
15689 return err;
15690 /* check src2 operand */
dc503a8a 15691 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
15692 if (err)
15693 return err;
15694
15695 dst_reg_type = regs[insn->dst_reg].type;
15696
17a52670 15697 /* check that memory (dst_reg + off) is writeable */
15698 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
15699 insn->off, BPF_SIZE(insn->code),
15700 BPF_WRITE, insn->src_reg, false);
15701 if (err)
15702 return err;
15703
15704 err = save_aux_ptr_type(env, dst_reg_type, false);
15705 if (err)
15706 return err;
17a52670 15707 } else if (class == BPF_ST) {
15708 enum bpf_reg_type dst_reg_type;
15709
15710 if (BPF_MODE(insn->code) != BPF_MEM ||
15711 insn->src_reg != BPF_REG_0) {
61bd5218 15712 verbose(env, "BPF_ST uses reserved fields\n");
15713 return -EINVAL;
15714 }
15715 /* check src operand */
dc503a8a 15716 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
15717 if (err)
15718 return err;
15719
0d80a619 15720 dst_reg_type = regs[insn->dst_reg].type;
f37a8cb8 15721
17a52670 15722 /* check that memory (dst_reg + off) is writeable */
15723 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
15724 insn->off, BPF_SIZE(insn->code),
15725 BPF_WRITE, -1, false);
15726 if (err)
15727 return err;
15728
15729 err = save_aux_ptr_type(env, dst_reg_type, false);
15730 if (err)
15731 return err;
092ed096 15732 } else if (class == BPF_JMP || class == BPF_JMP32) {
15733 u8 opcode = BPF_OP(insn->code);
15734
2589726d 15735 env->jmps_processed++;
15736 if (opcode == BPF_CALL) {
15737 if (BPF_SRC(insn->code) != BPF_K ||
15738 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
15739 && insn->off != 0) ||
f4d7e40a 15740 (insn->src_reg != BPF_REG_0 &&
15741 insn->src_reg != BPF_PSEUDO_CALL &&
15742 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
15743 insn->dst_reg != BPF_REG_0 ||
15744 class == BPF_JMP32) {
61bd5218 15745 verbose(env, "BPF_CALL uses reserved fields\n");
15746 return -EINVAL;
15747 }
15748
15749 if (env->cur_state->active_lock.ptr) {
15750 if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
15751 (insn->src_reg == BPF_PSEUDO_CALL) ||
15752 (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
cd6791b4 15753 (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
15754 verbose(env, "function calls are not allowed while holding a lock\n");
15755 return -EINVAL;
15756 }
d83525ca 15757 }
f4d7e40a 15758 if (insn->src_reg == BPF_PSEUDO_CALL)
c08435ec 15759 err = check_func_call(env, insn, &env->insn_idx);
e6ac2450 15760 else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
5c073f26 15761 err = check_kfunc_call(env, insn, &env->insn_idx);
f4d7e40a 15762 else
69c087ba 15763 err = check_helper_call(env, insn, &env->insn_idx);
15764 if (err)
15765 return err;
15766
15767 mark_reg_scratched(env, BPF_REG_0);
15768 } else if (opcode == BPF_JA) {
15769 if (BPF_SRC(insn->code) != BPF_K ||
15770 insn->imm != 0 ||
15771 insn->src_reg != BPF_REG_0 ||
15772 insn->dst_reg != BPF_REG_0 ||
15773 class == BPF_JMP32) {
61bd5218 15774 verbose(env, "BPF_JA uses reserved fields\n");
15775 return -EINVAL;
15776 }
15777
c08435ec 15778 env->insn_idx += insn->off + 1;
15779 continue;
15780
15781 } else if (opcode == BPF_EXIT) {
15782 if (BPF_SRC(insn->code) != BPF_K ||
15783 insn->imm != 0 ||
15784 insn->src_reg != BPF_REG_0 ||
15785 insn->dst_reg != BPF_REG_0 ||
15786 class == BPF_JMP32) {
61bd5218 15787 verbose(env, "BPF_EXIT uses reserved fields\n");
15788 return -EINVAL;
15789 }
15790
15791 if (env->cur_state->active_lock.ptr &&
15792 !in_rbtree_lock_required_cb(env)) {
15793 verbose(env, "bpf_spin_unlock is missing\n");
15794 return -EINVAL;
15795 }
15796
15797 if (env->cur_state->active_rcu_lock) {
15798 verbose(env, "bpf_rcu_read_unlock is missing\n");
15799 return -EINVAL;
15800 }
15801
 15802			/* We must do check_reference_leak here before
 15803			 * prepare_func_exit to handle the case when
 15804			 * state->curframe > 0: the exiting frame may be a
 15805			 * callback function, whose reference_state must
 15806			 * match the caller's reference state when it exits.
 15807			 */
15808 err = check_reference_leak(env);
15809 if (err)
15810 return err;
15811
15812 if (state->curframe) {
15813 /* exit from nested function */
c08435ec 15814 err = prepare_func_exit(env, &env->insn_idx);
15815 if (err)
15816 return err;
15817 do_print_state = true;
15818 continue;
15819 }
15820
15821 err = check_return_code(env);
15822 if (err)
15823 return err;
f1bca824 15824process_bpf_exit:
0f55f9ed 15825 mark_verifier_state_scratched(env);
2589726d 15826 update_branch_counts(env, env->cur_state);
b5dc0163 15827 err = pop_stack(env, &prev_insn_idx,
6f8a57cc 15828 &env->insn_idx, pop_log);
15829 if (err < 0) {
15830 if (err != -ENOENT)
15831 return err;
15832 break;
15833 } else {
15834 do_print_state = true;
15835 continue;
15836 }
15837 } else {
c08435ec 15838 err = check_cond_jmp_op(env, insn, &env->insn_idx);
15839 if (err)
15840 return err;
15841 }
15842 } else if (class == BPF_LD) {
15843 u8 mode = BPF_MODE(insn->code);
15844
15845 if (mode == BPF_ABS || mode == BPF_IND) {
15846 err = check_ld_abs(env, insn);
15847 if (err)
15848 return err;
15849
15850 } else if (mode == BPF_IMM) {
15851 err = check_ld_imm(env, insn);
15852 if (err)
15853 return err;
15854
c08435ec 15855 env->insn_idx++;
fe9a5ca7 15856 sanitize_mark_insn_seen(env);
17a52670 15857 } else {
61bd5218 15858 verbose(env, "invalid BPF_LD mode\n");
15859 return -EINVAL;
15860 }
15861 } else {
61bd5218 15862 verbose(env, "unknown insn class %d\n", class);
15863 return -EINVAL;
15864 }
15865
c08435ec 15866 env->insn_idx++;
15867 }
15868
15869 return 0;
15870}
15871
15872static int find_btf_percpu_datasec(struct btf *btf)
15873{
15874 const struct btf_type *t;
15875 const char *tname;
15876 int i, n;
15877
15878 /*
15879 * Both vmlinux and module each have their own ".data..percpu"
 15880	 * DATASECs in BTF. So for a module, we need to skip the vmlinux BTF
 15881	 * types and look only at the module's own BTF types.
15882 */
15883 n = btf_nr_types(btf);
15884 if (btf_is_module(btf))
15885 i = btf_nr_types(btf_vmlinux);
15886 else
15887 i = 1;
15888
 15889	for (; i < n; i++) {
15890 t = btf_type_by_id(btf, i);
15891 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
15892 continue;
15893
15894 tname = btf_name_by_offset(btf, t->name_off);
15895 if (!strcmp(tname, ".data..percpu"))
15896 return i;
15897 }
15898
15899 return -ENOENT;
15900}
15901
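/* For a BPF_PSEUDO_BTF_ID ld_imm64, the two-insn pair encodes
 * (per the UAPI comments in include/uapi/linux/bpf.h):
 *   insn[0].imm - BTF type ID of the kernel VAR
 *   insn[1].imm - FD of the module BTF object, or 0 for vmlinux BTF
 * and is rewritten below into a plain 64-bit load of the symbol's
 * kernel address.
 */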
15902/* replace pseudo btf_id with kernel symbol address */
15903static int check_pseudo_btf_id(struct bpf_verifier_env *env,
15904 struct bpf_insn *insn,
15905 struct bpf_insn_aux_data *aux)
15906{
15907 const struct btf_var_secinfo *vsi;
15908 const struct btf_type *datasec;
541c3bad 15909 struct btf_mod_pair *btf_mod;
15910 const struct btf_type *t;
15911 const char *sym_name;
eaa6bcb7 15912 bool percpu = false;
f16e6313 15913 u32 type, id = insn->imm;
541c3bad 15914 struct btf *btf;
f16e6313 15915 s32 datasec_id;
4976b718 15916 u64 addr;
541c3bad 15917 int i, btf_fd, err;
4976b718 15918
15919 btf_fd = insn[1].imm;
15920 if (btf_fd) {
15921 btf = btf_get_by_fd(btf_fd);
15922 if (IS_ERR(btf)) {
15923 verbose(env, "invalid module BTF object FD specified.\n");
15924 return -EINVAL;
15925 }
15926 } else {
15927 if (!btf_vmlinux) {
15928 verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
15929 return -EINVAL;
15930 }
15931 btf = btf_vmlinux;
15932 btf_get(btf);
15933 }
15934
541c3bad 15935 t = btf_type_by_id(btf, id);
15936 if (!t) {
15937 verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
15938 err = -ENOENT;
15939 goto err_put;
15940 }
15941
15942 if (!btf_type_is_var(t)) {
15943 verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
15944 err = -EINVAL;
15945 goto err_put;
15946 }
15947
541c3bad 15948 sym_name = btf_name_by_offset(btf, t->name_off);
15949 addr = kallsyms_lookup_name(sym_name);
15950 if (!addr) {
15951 verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
15952 sym_name);
15953 err = -ENOENT;
15954 goto err_put;
15955 }
15956
541c3bad 15957 datasec_id = find_btf_percpu_datasec(btf);
eaa6bcb7 15958 if (datasec_id > 0) {
541c3bad 15959 datasec = btf_type_by_id(btf, datasec_id);
15960 for_each_vsi(i, datasec, vsi) {
15961 if (vsi->type == id) {
15962 percpu = true;
15963 break;
15964 }
15965 }
15966 }
15967
15968 insn[0].imm = (u32)addr;
15969 insn[1].imm = addr >> 32;
15970
15971 type = t->type;
541c3bad 15972 t = btf_type_skip_modifiers(btf, type, NULL);
eaa6bcb7 15973 if (percpu) {
5844101a 15974 aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU;
541c3bad 15975 aux->btf_var.btf = btf;
15976 aux->btf_var.btf_id = type;
15977 } else if (!btf_type_is_struct(t)) {
15978 const struct btf_type *ret;
15979 const char *tname;
15980 u32 tsize;
15981
15982 /* resolve the type size of ksym. */
541c3bad 15983 ret = btf_resolve_size(btf, t, &tsize);
4976b718 15984 if (IS_ERR(ret)) {
541c3bad 15985 tname = btf_name_by_offset(btf, t->name_off);
15986 verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
15987 tname, PTR_ERR(ret));
15988 err = -EINVAL;
15989 goto err_put;
4976b718 15990 }
34d3a78c 15991 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
15992 aux->btf_var.mem_size = tsize;
15993 } else {
15994 aux->btf_var.reg_type = PTR_TO_BTF_ID;
541c3bad 15995 aux->btf_var.btf = btf;
15996 aux->btf_var.btf_id = type;
15997 }
15998
15999 /* check whether we recorded this BTF (and maybe module) already */
16000 for (i = 0; i < env->used_btf_cnt; i++) {
16001 if (env->used_btfs[i].btf == btf) {
16002 btf_put(btf);
16003 return 0;
16004 }
16005 }
16006
16007 if (env->used_btf_cnt >= MAX_USED_BTFS) {
16008 err = -E2BIG;
16009 goto err_put;
16010 }
16011
16012 btf_mod = &env->used_btfs[env->used_btf_cnt];
16013 btf_mod->btf = btf;
16014 btf_mod->module = NULL;
16015
16016 /* if we reference variables from kernel module, bump its refcount */
16017 if (btf_is_module(btf)) {
16018 btf_mod->module = btf_try_get_module(btf);
16019 if (!btf_mod->module) {
16020 err = -ENXIO;
16021 goto err_put;
16022 }
16023 }
16024
16025 env->used_btf_cnt++;
16026
4976b718 16027 return 0;
16028err_put:
16029 btf_put(btf);
16030 return err;
16031}
16032
16033static bool is_tracing_prog_type(enum bpf_prog_type type)
16034{
16035 switch (type) {
16036 case BPF_PROG_TYPE_KPROBE:
16037 case BPF_PROG_TYPE_TRACEPOINT:
16038 case BPF_PROG_TYPE_PERF_EVENT:
16039 case BPF_PROG_TYPE_RAW_TRACEPOINT:
5002615a 16040 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
16041 return true;
16042 default:
16043 return false;
16044 }
16045}
16046
16047static int check_map_prog_compatibility(struct bpf_verifier_env *env,
16048 struct bpf_map *map,
16049 struct bpf_prog *prog)
16050
16051{
7e40781c 16052 enum bpf_prog_type prog_type = resolve_prog_type(prog);
a3884572 16053
16054 if (btf_record_has_field(map->record, BPF_LIST_HEAD) ||
16055 btf_record_has_field(map->record, BPF_RB_ROOT)) {
f0c5941f 16056 if (is_tracing_prog_type(prog_type)) {
9c395c1b 16057 verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n");
16058 return -EINVAL;
16059 }
16060 }
16061
db559117 16062 if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
16063 if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
16064 verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
16065 return -EINVAL;
16066 }
16067
16068 if (is_tracing_prog_type(prog_type)) {
16069 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
16070 return -EINVAL;
16071 }
16072
16073 if (prog->aux->sleepable) {
16074 verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
16075 return -EINVAL;
16076 }
16077 }
16078
db559117 16079 if (btf_record_has_field(map->record, BPF_TIMER)) {
16080 if (is_tracing_prog_type(prog_type)) {
16081 verbose(env, "tracing progs cannot use bpf_timer yet\n");
16082 return -EINVAL;
16083 }
16084 }
16085
9d03ebc7 16086 if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
09728266 16087 !bpf_offload_prog_map_match(prog, map)) {
16088 verbose(env, "offload device mismatch between prog and map\n");
16089 return -EINVAL;
16090 }
16091
16092 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
16093 verbose(env, "bpf_struct_ops map cannot be used in prog\n");
16094 return -EINVAL;
16095 }
16096
16097 if (prog->aux->sleepable)
16098 switch (map->map_type) {
16099 case BPF_MAP_TYPE_HASH:
16100 case BPF_MAP_TYPE_LRU_HASH:
16101 case BPF_MAP_TYPE_ARRAY:
16102 case BPF_MAP_TYPE_PERCPU_HASH:
16103 case BPF_MAP_TYPE_PERCPU_ARRAY:
16104 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
16105 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
16106 case BPF_MAP_TYPE_HASH_OF_MAPS:
ba90c2cc 16107 case BPF_MAP_TYPE_RINGBUF:
583c1f42 16108 case BPF_MAP_TYPE_USER_RINGBUF:
16109 case BPF_MAP_TYPE_INODE_STORAGE:
16110 case BPF_MAP_TYPE_SK_STORAGE:
16111 case BPF_MAP_TYPE_TASK_STORAGE:
2c40d97d 16112 case BPF_MAP_TYPE_CGRP_STORAGE:
ba90c2cc 16113 break;
16114 default:
16115 verbose(env,
2c40d97d 16116 "Sleepable programs can only use array, hash, ringbuf and local storage maps\n");
16117 return -EINVAL;
16118 }
16119
16120 return 0;
16121}
16122
16123static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
16124{
16125 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
16126 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
16127}
16128
16129/* find and rewrite pseudo imm in ld_imm64 instructions:
16130 *
16131 * 1. if it accesses map FD, replace it with actual map pointer.
16132 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
16133 *
16134 * NOTE: btf_vmlinux is required for converting pseudo btf_id.
0246e64d 16135 */
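/* Example (sketch): a map reference emitted by a loader such as
 * libbpf,
 *   BPF_LD_IMM64_RAW(BPF_REG_1, BPF_PSEUDO_MAP_FD, map_fd)
 * ends up with insn[0].imm/insn[1].imm holding the low/high 32 bits
 * of the in-kernel 'struct bpf_map *' address.
 */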
4976b718 16136static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
16137{
16138 struct bpf_insn *insn = env->prog->insnsi;
16139 int insn_cnt = env->prog->len;
fdc15d38 16140 int i, j, err;
0246e64d 16141
f1f7714e 16142 err = bpf_prog_calc_tag(env->prog);
16143 if (err)
16144 return err;
16145
0246e64d 16146 for (i = 0; i < insn_cnt; i++, insn++) {
9bac3d6d 16147 if (BPF_CLASS(insn->code) == BPF_LDX &&
d691f9e8 16148 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
61bd5218 16149 verbose(env, "BPF_LDX uses reserved fields\n");
16150 return -EINVAL;
16151 }
16152
0246e64d 16153 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
d8eca5bb 16154 struct bpf_insn_aux_data *aux;
16155 struct bpf_map *map;
16156 struct fd f;
d8eca5bb 16157 u64 addr;
387544bf 16158 u32 fd;
16159
16160 if (i == insn_cnt - 1 || insn[1].code != 0 ||
16161 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
16162 insn[1].off != 0) {
61bd5218 16163 verbose(env, "invalid bpf_ld_imm64 insn\n");
16164 return -EINVAL;
16165 }
16166
d8eca5bb 16167 if (insn[0].src_reg == 0)
16168 /* valid generic load 64-bit imm */
16169 goto next_insn;
16170
16171 if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
16172 aux = &env->insn_aux_data[i];
16173 err = check_pseudo_btf_id(env, insn, aux);
16174 if (err)
16175 return err;
16176 goto next_insn;
16177 }
16178
16179 if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
16180 aux = &env->insn_aux_data[i];
16181 aux->ptr_type = PTR_TO_FUNC;
16182 goto next_insn;
16183 }
16184
16185 /* In final convert_pseudo_ld_imm64() step, this is
16186 * converted into regular 64-bit imm load insn.
16187 */
16188 switch (insn[0].src_reg) {
16189 case BPF_PSEUDO_MAP_VALUE:
16190 case BPF_PSEUDO_MAP_IDX_VALUE:
16191 break;
16192 case BPF_PSEUDO_MAP_FD:
16193 case BPF_PSEUDO_MAP_IDX:
16194 if (insn[1].imm == 0)
16195 break;
16196 fallthrough;
16197 default:
16198 verbose(env, "unrecognized bpf_ld_imm64 insn\n");
16199 return -EINVAL;
16200 }
16201
16202 switch (insn[0].src_reg) {
16203 case BPF_PSEUDO_MAP_IDX_VALUE:
16204 case BPF_PSEUDO_MAP_IDX:
16205 if (bpfptr_is_null(env->fd_array)) {
16206 verbose(env, "fd_idx without fd_array is invalid\n");
16207 return -EPROTO;
16208 }
16209 if (copy_from_bpfptr_offset(&fd, env->fd_array,
16210 insn[0].imm * sizeof(fd),
16211 sizeof(fd)))
16212 return -EFAULT;
16213 break;
16214 default:
16215 fd = insn[0].imm;
16216 break;
16217 }
16218
16219 f = fdget(fd);
c2101297 16220 map = __bpf_map_get(f);
0246e64d 16221 if (IS_ERR(map)) {
61bd5218 16222 verbose(env, "fd %d is not pointing to valid bpf_map\n",
20182390 16223 insn[0].imm);
16224 return PTR_ERR(map);
16225 }
16226
61bd5218 16227 err = check_map_prog_compatibility(env, map, env->prog);
16228 if (err) {
16229 fdput(f);
16230 return err;
16231 }
16232
d8eca5bb 16233 aux = &env->insn_aux_data[i];
16234 if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
16235 insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
16236 addr = (unsigned long)map;
16237 } else {
16238 u32 off = insn[1].imm;
16239
16240 if (off >= BPF_MAX_VAR_OFF) {
16241 verbose(env, "direct value offset of %u is not allowed\n", off);
16242 fdput(f);
16243 return -EINVAL;
16244 }
16245
16246 if (!map->ops->map_direct_value_addr) {
16247 verbose(env, "no direct value access support for this map type\n");
16248 fdput(f);
16249 return -EINVAL;
16250 }
16251
16252 err = map->ops->map_direct_value_addr(map, &addr, off);
16253 if (err) {
16254 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
16255 map->value_size, off);
16256 fdput(f);
16257 return err;
16258 }
16259
16260 aux->map_off = off;
16261 addr += off;
16262 }
16263
16264 insn[0].imm = (u32)addr;
16265 insn[1].imm = addr >> 32;
16266
16267 /* check whether we recorded this map already */
d8eca5bb 16268 for (j = 0; j < env->used_map_cnt; j++) {
0246e64d 16269 if (env->used_maps[j] == map) {
d8eca5bb 16270 aux->map_index = j;
16271 fdput(f);
16272 goto next_insn;
16273 }
d8eca5bb 16274 }
16275
16276 if (env->used_map_cnt >= MAX_USED_MAPS) {
16277 fdput(f);
16278 return -E2BIG;
16279 }
16280
 16281			/* hold the map. If the program is rejected by the verifier,
16282 * the map will be released by release_maps() or it
16283 * will be used by the valid program until it's unloaded
ab7f5bf0 16284 * and all maps are released in free_used_maps()
0246e64d 16285 */
1e0bd5a0 16286 bpf_map_inc(map);
16287
16288 aux->map_index = env->used_map_cnt;
16289 env->used_maps[env->used_map_cnt++] = map;
16290
b741f163 16291 if (bpf_map_is_cgroup_storage(map) &&
e4730423 16292 bpf_cgroup_storage_assign(env->prog->aux, map)) {
b741f163 16293 verbose(env, "only one cgroup storage of each type is allowed\n");
16294 fdput(f);
16295 return -EBUSY;
16296 }
16297
16298 fdput(f);
16299next_insn:
16300 insn++;
16301 i++;
16302 continue;
16303 }
16304
16305 /* Basic sanity check before we invest more work here. */
16306 if (!bpf_opcode_in_insntable(insn->code)) {
16307 verbose(env, "unknown opcode %02x\n", insn->code);
16308 return -EINVAL;
16309 }
16310 }
16311
16312 /* now all pseudo BPF_LD_IMM64 instructions load valid
16313 * 'struct bpf_map *' into a register instead of user map_fd.
16314 * These pointers will be used later by verifier to validate map access.
16315 */
16316 return 0;
16317}
16318
16319/* drop refcnt of maps used by the rejected program */
58e2af8b 16320static void release_maps(struct bpf_verifier_env *env)
0246e64d 16321{
16322 __bpf_free_used_maps(env->prog->aux, env->used_maps,
16323 env->used_map_cnt);
16324}
16325
 16326/* drop refcnt of BTFs used by the rejected program */
16327static void release_btfs(struct bpf_verifier_env *env)
16328{
16329 __bpf_free_used_btfs(env->prog->aux, env->used_btfs,
16330 env->used_btf_cnt);
16331}
16332
0246e64d 16333/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
58e2af8b 16334static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
16335{
16336 struct bpf_insn *insn = env->prog->insnsi;
16337 int insn_cnt = env->prog->len;
16338 int i;
16339
16340 for (i = 0; i < insn_cnt; i++, insn++) {
16341 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
16342 continue;
16343 if (insn->src_reg == BPF_PSEUDO_FUNC)
16344 continue;
16345 insn->src_reg = 0;
16346 }
16347}
16348
 16349/* single env->prog->insnsi[off] instruction was replaced with the range
 16350 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
 16351 * [0, off) and [off, end) to new locations, so the patched range stays zeroed
16352 */
16353static void adjust_insn_aux_data(struct bpf_verifier_env *env,
16354 struct bpf_insn_aux_data *new_data,
16355 struct bpf_prog *new_prog, u32 off, u32 cnt)
8041902d 16356{
75f0fc7b 16357 struct bpf_insn_aux_data *old_data = env->insn_aux_data;
b325fbca 16358 struct bpf_insn *insn = new_prog->insnsi;
d203b0fd 16359 u32 old_seen = old_data[off].seen;
b325fbca 16360 u32 prog_len;
c131187d 16361 int i;
8041902d 16362
16363 /* aux info at OFF always needs adjustment, no matter fast path
16364 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the
16365 * original insn at old prog.
16366 */
16367 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
16368
8041902d 16369 if (cnt == 1)
75f0fc7b 16370 return;
b325fbca 16371 prog_len = new_prog->len;
75f0fc7b 16372
16373 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
16374 memcpy(new_data + off + cnt - 1, old_data + off,
16375 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
b325fbca 16376 for (i = off; i < off + cnt - 1; i++) {
 16377		/* Expand insnsi[off]'s seen count to the patched range. */
16378 new_data[i].seen = old_seen;
16379 new_data[i].zext_dst = insn_has_def32(env, insn + i);
16380 }
16381 env->insn_aux_data = new_data;
16382 vfree(old_data);
16383}
16384
16385static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
16386{
16387 int i;
16388
16389 if (len == 1)
16390 return;
16391 /* NOTE: fake 'exit' subprog should be updated as well. */
16392 for (i = 0; i <= env->subprog_cnt; i++) {
afd59424 16393 if (env->subprog_info[i].start <= off)
cc8b0b92 16394 continue;
9c8105bd 16395 env->subprog_info[i].start += len - 1;
16396 }
16397}
16398
7506d211 16399static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
16400{
16401 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
16402 int i, sz = prog->aux->size_poke_tab;
16403 struct bpf_jit_poke_descriptor *desc;
16404
16405 for (i = 0; i < sz; i++) {
16406 desc = &tab[i];
16407 if (desc->insn_idx <= off)
16408 continue;
16409 desc->insn_idx += len - 1;
16410 }
16411}
16412
16413static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
16414 const struct bpf_insn *patch, u32 len)
16415{
16416 struct bpf_prog *new_prog;
16417 struct bpf_insn_aux_data *new_data = NULL;
16418
16419 if (len > 1) {
16420 new_data = vzalloc(array_size(env->prog->len + len - 1,
16421 sizeof(struct bpf_insn_aux_data)));
16422 if (!new_data)
16423 return NULL;
16424 }
16425
16426 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
16427 if (IS_ERR(new_prog)) {
16428 if (PTR_ERR(new_prog) == -ERANGE)
16429 verbose(env,
16430 "insn %d cannot be patched due to 16-bit range\n",
16431 env->insn_aux_data[off].orig_idx);
75f0fc7b 16432 vfree(new_data);
8041902d 16433 return NULL;
4f73379e 16434 }
75f0fc7b 16435 adjust_insn_aux_data(env, new_data, new_prog, off, len);
cc8b0b92 16436 adjust_subprog_starts(env, off, len);
7506d211 16437 adjust_poke_descs(new_prog, off, len);
16438 return new_prog;
16439}
16440
16441static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
16442 u32 off, u32 cnt)
16443{
16444 int i, j;
16445
16446 /* find first prog starting at or after off (first to remove) */
16447 for (i = 0; i < env->subprog_cnt; i++)
16448 if (env->subprog_info[i].start >= off)
16449 break;
16450 /* find first prog starting at or after off + cnt (first to stay) */
16451 for (j = i; j < env->subprog_cnt; j++)
16452 if (env->subprog_info[j].start >= off + cnt)
16453 break;
16454 /* if j doesn't start exactly at off + cnt, we are just removing
16455 * the front of previous prog
16456 */
16457 if (env->subprog_info[j].start != off + cnt)
16458 j--;
16459
16460 if (j > i) {
16461 struct bpf_prog_aux *aux = env->prog->aux;
16462 int move;
16463
16464 /* move fake 'exit' subprog as well */
16465 move = env->subprog_cnt + 1 - j;
16466
16467 memmove(env->subprog_info + i,
16468 env->subprog_info + j,
16469 sizeof(*env->subprog_info) * move);
16470 env->subprog_cnt -= j - i;
16471
16472 /* remove func_info */
16473 if (aux->func_info) {
16474 move = aux->func_info_cnt - j;
16475
16476 memmove(aux->func_info + i,
16477 aux->func_info + j,
16478 sizeof(*aux->func_info) * move);
16479 aux->func_info_cnt -= j - i;
16480 /* func_info->insn_off is set after all code rewrites,
16481 * in adjust_btf_func() - no need to adjust
16482 */
16483 }
16484 } else {
16485 /* convert i from "first prog to remove" to "first to adjust" */
16486 if (env->subprog_info[i].start == off)
16487 i++;
16488 }
16489
16490 /* update fake 'exit' subprog as well */
16491 for (; i <= env->subprog_cnt; i++)
16492 env->subprog_info[i].start -= cnt;
16493
16494 return 0;
16495}
16496
16497static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
16498 u32 cnt)
16499{
16500 struct bpf_prog *prog = env->prog;
16501 u32 i, l_off, l_cnt, nr_linfo;
16502 struct bpf_line_info *linfo;
16503
16504 nr_linfo = prog->aux->nr_linfo;
16505 if (!nr_linfo)
16506 return 0;
16507
16508 linfo = prog->aux->linfo;
16509
16510 /* find first line info to remove, count lines to be removed */
16511 for (i = 0; i < nr_linfo; i++)
16512 if (linfo[i].insn_off >= off)
16513 break;
16514
16515 l_off = i;
16516 l_cnt = 0;
16517 for (; i < nr_linfo; i++)
16518 if (linfo[i].insn_off < off + cnt)
16519 l_cnt++;
16520 else
16521 break;
16522
 16523	/* If the first live insn doesn't match the first live linfo, it needs to
 16524	 * "inherit" the last removed linfo. prog is already modified, so prog->len == off
16525 * means no live instructions after (tail of the program was removed).
16526 */
16527 if (prog->len != off && l_cnt &&
16528 (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
16529 l_cnt--;
16530 linfo[--i].insn_off = off + cnt;
16531 }
16532
16533 /* remove the line info which refer to the removed instructions */
16534 if (l_cnt) {
16535 memmove(linfo + l_off, linfo + i,
16536 sizeof(*linfo) * (nr_linfo - i));
16537
16538 prog->aux->nr_linfo -= l_cnt;
16539 nr_linfo = prog->aux->nr_linfo;
16540 }
16541
16542 /* pull all linfo[i].insn_off >= off + cnt in by cnt */
16543 for (i = l_off; i < nr_linfo; i++)
16544 linfo[i].insn_off -= cnt;
16545
16546 /* fix up all subprogs (incl. 'exit') which start >= off */
16547 for (i = 0; i <= env->subprog_cnt; i++)
16548 if (env->subprog_info[i].linfo_idx > l_off) {
16549 /* program may have started in the removed region but
16550 * may not be fully removed
16551 */
16552 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
16553 env->subprog_info[i].linfo_idx -= l_cnt;
16554 else
16555 env->subprog_info[i].linfo_idx = l_off;
16556 }
16557
16558 return 0;
16559}
16560
16561static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
16562{
16563 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
16564 unsigned int orig_prog_len = env->prog->len;
16565 int err;
16566
9d03ebc7 16567 if (bpf_prog_is_offloaded(env->prog->aux))
16568 bpf_prog_offload_remove_insns(env, off, cnt);
16569
16570 err = bpf_remove_insns(env->prog, off, cnt);
16571 if (err)
16572 return err;
16573
16574 err = adjust_subprog_starts_after_remove(env, off, cnt);
16575 if (err)
16576 return err;
16577
16578 err = bpf_adj_linfo_after_remove(env, off, cnt);
16579 if (err)
16580 return err;
16581
16582 memmove(aux_data + off, aux_data + off + cnt,
16583 sizeof(*aux_data) * (orig_prog_len - off - cnt));
16584
16585 return 0;
16586}
16587
16588/* The verifier does more data flow analysis than llvm and will not
16589 * explore branches that are dead at run time. Malicious programs can
16590 * have dead code too. Therefore replace all dead at-run-time code
16591 * with 'ja -1'.
16592 *
 16593 * Plain nops are not optimal: if they sat at the end of the
 16594 * program and another bug managed to jump there, we would
 16595 * execute beyond program memory. Returning an exception
 16596 * code also wouldn't work, since we can have subprogs where
 16597 * the dead code could be located ('ja -1' simply spins in place).
16598 */
16599static void sanitize_dead_code(struct bpf_verifier_env *env)
16600{
16601 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
2a5418a1 16602 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
16603 struct bpf_insn *insn = env->prog->insnsi;
16604 const int insn_cnt = env->prog->len;
16605 int i;
16606
16607 for (i = 0; i < insn_cnt; i++) {
16608 if (aux_data[i].seen)
16609 continue;
2a5418a1 16610 memcpy(insn + i, &trap, sizeof(trap));
45c709f8 16611 aux_data[i].zext_dst = false;
16612 }
16613}
16614
16615static bool insn_is_cond_jump(u8 code)
16616{
16617 u8 op;
16618
16619 if (BPF_CLASS(code) == BPF_JMP32)
16620 return true;
16621
16622 if (BPF_CLASS(code) != BPF_JMP)
16623 return false;
16624
16625 op = BPF_OP(code);
16626 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
16627}
16628
16629static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
16630{
16631 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
16632 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
16633 struct bpf_insn *insn = env->prog->insnsi;
16634 const int insn_cnt = env->prog->len;
16635 int i;
16636
16637 for (i = 0; i < insn_cnt; i++, insn++) {
16638 if (!insn_is_cond_jump(insn->code))
16639 continue;
16640
16641 if (!aux_data[i + 1].seen)
16642 ja.off = insn->off;
16643 else if (!aux_data[i + 1 + insn->off].seen)
16644 ja.off = 0;
16645 else
16646 continue;
16647
9d03ebc7 16648 if (bpf_prog_is_offloaded(env->prog->aux))
16649 bpf_prog_offload_replace_insn(env, i, &ja);
16650
16651 memcpy(insn, &ja, sizeof(ja));
16652 }
16653}
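/* Example for the rewrite above (sketch): for "if r1 == 0 goto +3"
 * whose fall-through insn was never marked seen, the insn is replaced
 * with an unconditional "goto +3"; if instead the branch target is
 * dead, it becomes "goto +0" (a fall-through nop).
 */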
16654
16655static int opt_remove_dead_code(struct bpf_verifier_env *env)
16656{
16657 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
16658 int insn_cnt = env->prog->len;
16659 int i, err;
16660
16661 for (i = 0; i < insn_cnt; i++) {
16662 int j;
16663
16664 j = 0;
16665 while (i + j < insn_cnt && !aux_data[i + j].seen)
16666 j++;
16667 if (!j)
16668 continue;
16669
16670 err = verifier_remove_insns(env, i, j);
16671 if (err)
16672 return err;
16673 insn_cnt = env->prog->len;
16674 }
16675
16676 return 0;
16677}
16678
16679static int opt_remove_nops(struct bpf_verifier_env *env)
16680{
16681 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
16682 struct bpf_insn *insn = env->prog->insnsi;
16683 int insn_cnt = env->prog->len;
16684 int i, err;
16685
16686 for (i = 0; i < insn_cnt; i++) {
16687 if (memcmp(&insn[i], &ja, sizeof(ja)))
16688 continue;
16689
16690 err = verifier_remove_insns(env, i, 1);
16691 if (err)
16692 return err;
16693 insn_cnt--;
16694 i--;
16695 }
16696
16697 return 0;
16698}
16699
16700static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
16701 const union bpf_attr *attr)
a4b1d3c1 16702{
d6c2308c 16703 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
a4b1d3c1 16704 struct bpf_insn_aux_data *aux = env->insn_aux_data;
d6c2308c 16705 int i, patch_len, delta = 0, len = env->prog->len;
a4b1d3c1 16706 struct bpf_insn *insns = env->prog->insnsi;
a4b1d3c1 16707 struct bpf_prog *new_prog;
d6c2308c 16708 bool rnd_hi32;
a4b1d3c1 16709
d6c2308c 16710 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
a4b1d3c1 16711 zext_patch[1] = BPF_ZEXT_REG(0);
16712 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
16713 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
16714 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
16715 for (i = 0; i < len; i++) {
16716 int adj_idx = i + delta;
16717 struct bpf_insn insn;
83a28819 16718 int load_reg;
a4b1d3c1 16719
d6c2308c 16720 insn = insns[adj_idx];
83a28819 16721 load_reg = insn_def_regno(&insn);
16722 if (!aux[adj_idx].zext_dst) {
16723 u8 code, class;
16724 u32 imm_rnd;
16725
16726 if (!rnd_hi32)
16727 continue;
16728
16729 code = insn.code;
16730 class = BPF_CLASS(code);
83a28819 16731 if (load_reg == -1)
16732 continue;
16733
16734 /* NOTE: arg "reg" (the fourth one) is only used for
16735 * BPF_STX + SRC_OP, so it is safe to pass NULL
16736 * here.
d6c2308c 16737 */
83a28819 16738 if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
d6c2308c
JW
16739 if (class == BPF_LD &&
16740 BPF_MODE(code) == BPF_IMM)
16741 i++;
16742 continue;
16743 }
16744
16745 /* ctx load could be transformed into wider load. */
16746 if (class == BPF_LDX &&
16747 aux[adj_idx].ptr_type == PTR_TO_CTX)
16748 continue;
16749
a251c17a 16750 imm_rnd = get_random_u32();
16751 rnd_hi32_patch[0] = insn;
16752 rnd_hi32_patch[1].imm = imm_rnd;
83a28819 16753 rnd_hi32_patch[3].dst_reg = load_reg;
16754 patch = rnd_hi32_patch;
16755 patch_len = 4;
16756 goto apply_patch_buffer;
16757 }
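			/* The rnd_hi32 patchlet expands the insn into (sketch):
			 *   <original insn>
			 *   AX = imm_rnd
			 *   AX <<= 32
			 *   dst |= AX
			 * poisoning the upper 32 bits so any missed
			 * zero-extension shows up as a test failure rather
			 * than silent reliance on zeros.
			 */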
16758
 16759		/* Add in a zero-extend instruction if a) the JIT has requested
16760 * it or b) it's a CMPXCHG.
16761 *
16762 * The latter is because: BPF_CMPXCHG always loads a value into
16763 * R0, therefore always zero-extends. However some archs'
16764 * equivalent instruction only does this load when the
16765 * comparison is successful. This detail of CMPXCHG is
16766 * orthogonal to the general zero-extension behaviour of the
16767 * CPU, so it's treated independently of bpf_jit_needs_zext.
16768 */
16769 if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
16770 continue;
16771
16772 /* Zero-extension is done by the caller. */
16773 if (bpf_pseudo_kfunc_call(&insn))
16774 continue;
16775
16776 if (WARN_ON(load_reg == -1)) {
16777 verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
16778 return -EFAULT;
16779 }
16780
a4b1d3c1 16781 zext_patch[0] = insn;
16782 zext_patch[1].dst_reg = load_reg;
16783 zext_patch[1].src_reg = load_reg;
16784 patch = zext_patch;
16785 patch_len = 2;
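		/* The zext patchlet is just (sketch):
		 *   <original 32-bit-defining insn>
		 *   w_dst = w_dst
		 * where the mov32 (BPF_ZEXT_REG) zero-extends dst to 64 bits.
		 */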
16786apply_patch_buffer:
16787 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
16788 if (!new_prog)
16789 return -ENOMEM;
16790 env->prog = new_prog;
16791 insns = new_prog->insnsi;
16792 aux = env->insn_aux_data;
d6c2308c 16793 delta += patch_len - 1;
16794 }
16795
16796 return 0;
16797}
16798
16799/* convert load instructions that access fields of a context type into a
16800 * sequence of instructions that access fields of the underlying structure:
16801 * struct __sk_buff -> struct sk_buff
16802 * struct bpf_sock_ops -> struct sock
9bac3d6d 16803 */
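/* Example (sketch): for a socket filter, a load such as
 *   r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len))
 * is rewritten by the prog type's convert_ctx_access() callback into
 * an equivalent load of skb->len from the real 'struct sk_buff'.
 */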
58e2af8b 16804static int convert_ctx_accesses(struct bpf_verifier_env *env)
9bac3d6d 16805{
00176a34 16806 const struct bpf_verifier_ops *ops = env->ops;
f96da094 16807 int i, cnt, size, ctx_field_size, delta = 0;
3df126f3 16808 const int insn_cnt = env->prog->len;
36bbef52 16809 struct bpf_insn insn_buf[16], *insn;
46f53a65 16810 u32 target_size, size_default, off;
9bac3d6d 16811 struct bpf_prog *new_prog;
d691f9e8 16812 enum bpf_access_type type;
f96da094 16813 bool is_narrower_load;
9bac3d6d 16814
16815 if (ops->gen_prologue || env->seen_direct_write) {
16816 if (!ops->gen_prologue) {
16817 verbose(env, "bpf verifier is misconfigured\n");
16818 return -EINVAL;
16819 }
16820 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
16821 env->prog);
16822 if (cnt >= ARRAY_SIZE(insn_buf)) {
61bd5218 16823 verbose(env, "bpf verifier is misconfigured\n");
16824 return -EINVAL;
16825 } else if (cnt) {
8041902d 16826 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
16827 if (!new_prog)
16828 return -ENOMEM;
8041902d 16829
36bbef52 16830 env->prog = new_prog;
3df126f3 16831 delta += cnt - 1;
16832 }
16833 }
16834
9d03ebc7 16835 if (bpf_prog_is_offloaded(env->prog->aux))
16836 return 0;
16837
3df126f3 16838 insn = env->prog->insnsi + delta;
36bbef52 16839
9bac3d6d 16840 for (i = 0; i < insn_cnt; i++, insn++) {
16841 bpf_convert_ctx_access_t convert_ctx_access;
16842
16843 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
16844 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
16845 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
2039f26f 16846 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
d691f9e8 16847 type = BPF_READ;
16848 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
16849 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
16850 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
16851 insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
16852 insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
16853 insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
16854 insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
16855 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
d691f9e8 16856 type = BPF_WRITE;
2039f26f 16857 } else {
9bac3d6d 16858 continue;
2039f26f 16859 }
9bac3d6d 16860
af86ca4e 16861 if (type == BPF_WRITE &&
2039f26f 16862 env->insn_aux_data[i + delta].sanitize_stack_spill) {
af86ca4e 16863 struct bpf_insn patch[] = {
af86ca4e 16864 *insn,
2039f26f 16865 BPF_ST_NOSPEC(),
16866 };
16867
16868 cnt = ARRAY_SIZE(patch);
16869 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
16870 if (!new_prog)
16871 return -ENOMEM;
16872
16873 delta += cnt - 1;
16874 env->prog = new_prog;
16875 insn = new_prog->insnsi + i + delta;
16876 continue;
16877 }
16878
6efe152d 16879 switch ((int)env->insn_aux_data[i + delta].ptr_type) {
16880 case PTR_TO_CTX:
16881 if (!ops->convert_ctx_access)
16882 continue;
16883 convert_ctx_access = ops->convert_ctx_access;
16884 break;
16885 case PTR_TO_SOCKET:
46f8bc92 16886 case PTR_TO_SOCK_COMMON:
c64b7983
JS
16887 convert_ctx_access = bpf_sock_convert_ctx_access;
16888 break;
16889 case PTR_TO_TCP_SOCK:
16890 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
16891 break;
16892 case PTR_TO_XDP_SOCK:
16893 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
16894 break;
2a02759e 16895 case PTR_TO_BTF_ID:
6efe152d 16896 case PTR_TO_BTF_ID | PTR_UNTRUSTED:
16897 /* PTR_TO_BTF_ID | MEM_ALLOC always has a valid lifetime, unlike
16898 * PTR_TO_BTF_ID, and an active ref_obj_id, but the same cannot
16899 * be said once it is marked PTR_UNTRUSTED, hence we must handle
16900 * any faults for loads into such types. BPF_WRITE is disallowed
16901 * for this case.
16902 */
16903 case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
16904 if (type == BPF_READ) {
16905 insn->code = BPF_LDX | BPF_PROBE_MEM |
16906 BPF_SIZE((insn)->code);
16907 env->prog->aux->num_exentries++;
2a02759e 16908 }
2a02759e 16909 continue;
c64b7983 16910 default:
9bac3d6d 16911 continue;
c64b7983 16912 }
9bac3d6d 16913
31fd8581 16914 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
f96da094 16915 size = BPF_LDST_BYTES(insn);
16916
16917 /* If the read access is a narrower load of the field,
16918 * convert to a 4/8-byte load, to minimum program type specific
16919 * convert_ctx_access changes. If conversion is successful,
16920 * we will apply proper mask to the result.
16921 */
f96da094 16922 is_narrower_load = size < ctx_field_size;
16923 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
16924 off = insn->off;
31fd8581 16925 if (is_narrower_load) {
16926 u8 size_code;
16927
16928 if (type == BPF_WRITE) {
61bd5218 16929 verbose(env, "bpf verifier narrow ctx access misconfigured\n");
16930 return -EINVAL;
16931 }
31fd8581 16932
f96da094 16933 size_code = BPF_H;
16934 if (ctx_field_size == 4)
16935 size_code = BPF_W;
16936 else if (ctx_field_size == 8)
16937 size_code = BPF_DW;
f96da094 16938
bc23105c 16939 insn->off = off & ~(size_default - 1);
31fd8581
YS
16940 insn->code = BPF_LDX | BPF_MEM | size_code;
16941 }
16942
16943 target_size = 0;
16944 cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
16945 &target_size);
16946 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
16947 (ctx_field_size && !target_size)) {
61bd5218 16948 verbose(env, "bpf verifier is misconfigured\n");
16949 return -EINVAL;
16950 }
16951
16952 if (is_narrower_load && size < target_size) {
16953 u8 shift = bpf_ctx_narrow_access_offset(
16954 off, size, size_default) * 8;
16955 if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
16956 verbose(env, "bpf verifier narrow ctx load misconfigured\n");
16957 return -EINVAL;
16958 }
16959 if (ctx_field_size <= 4) {
16960 if (shift)
16961 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
16962 insn->dst_reg,
16963 shift);
31fd8581 16964 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
f96da094 16965 (1 << size * 8) - 1);
16966 } else {
16967 if (shift)
16968 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
16969 insn->dst_reg,
16970 shift);
31fd8581 16971 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
e2f7fc0a 16972 (1ULL << size * 8) - 1);
46f53a65 16973 }
31fd8581 16974 }
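		/* Worked example (little-endian sketch): a 1-byte read at
		 * offset 1 of a 4-byte ctx field is converted above into a
		 * 4-byte load at offset 0, followed by "w_dst >>= 8" and
		 * "w_dst &= 0xff".
		 */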
9bac3d6d 16975
8041902d 16976 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
16977 if (!new_prog)
16978 return -ENOMEM;
16979
3df126f3 16980 delta += cnt - 1;
16981
16982 /* keep walking new program and skip insns we just inserted */
16983 env->prog = new_prog;
3df126f3 16984 insn = new_prog->insnsi + i + delta;
16985 }
16986
16987 return 0;
16988}
16989
16990static int jit_subprogs(struct bpf_verifier_env *env)
16991{
16992 struct bpf_prog *prog = env->prog, **func, *tmp;
16993 int i, j, subprog_start, subprog_end = 0, len, subprog;
a748c697 16994 struct bpf_map *map_ptr;
7105e828 16995 struct bpf_insn *insn;
1c2a088a 16996 void *old_bpf_func;
c4c0bdc0 16997 int err, num_exentries;
1c2a088a 16998
f910cefa 16999 if (env->subprog_cnt <= 1)
17000 return 0;
17001
7105e828 17002 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
3990ed4c 17003 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
69c087ba 17004 continue;
69c087ba 17005
17006 /* Upon error here we cannot fall back to interpreter but
17007 * need a hard reject of the program. Thus -EFAULT is
17008 * propagated in any case.
17009 */
17010 subprog = find_subprog(env, i + insn->imm + 1);
17011 if (subprog < 0) {
17012 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
17013 i + insn->imm + 1);
17014 return -EFAULT;
17015 }
17016 /* temporarily remember subprog id inside insn instead of
17017 * aux_data, since next loop will split up all insns into funcs
17018 */
f910cefa 17019 insn->off = subprog;
17020 /* remember original imm in case JIT fails and fallback
17021 * to interpreter will be needed
17022 */
17023 env->insn_aux_data[i].call_imm = insn->imm;
17024 /* point imm to __bpf_call_base+1 from JITs point of view */
17025 insn->imm = 1;
17026 if (bpf_pseudo_func(insn))
17027 /* jit (e.g. x86_64) may emit fewer instructions
17028 * if it learns a u32 imm is the same as a u64 imm.
17029 * Force a non zero here.
17030 */
17031 insn[1].imm = 1;
17032 }
17033
17034 err = bpf_prog_alloc_jited_linfo(prog);
17035 if (err)
17036 goto out_undo_insn;
17037
17038 err = -ENOMEM;
6396bb22 17039 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
1c2a088a 17040 if (!func)
c7a89784 17041 goto out_undo_insn;
1c2a088a 17042
f910cefa 17043 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a 17044 subprog_start = subprog_end;
4cb3d99c 17045 subprog_end = env->subprog_info[i + 1].start;
17046
17047 len = subprog_end - subprog_start;
fb7dd8bc 17048 /* bpf_prog_run() doesn't call subprogs directly,
17049 * hence main prog stats include the runtime of subprogs.
17050 * subprogs don't have IDs and not reachable via prog_get_next_id
700d4796 17051 * func[i]->stats will never be accessed and stays NULL
17052 */
17053 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
17054 if (!func[i])
17055 goto out_free;
17056 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
17057 len * sizeof(struct bpf_insn));
4f74d809 17058 func[i]->type = prog->type;
1c2a088a 17059 func[i]->len = len;
17060 if (bpf_prog_calc_tag(func[i]))
17061 goto out_free;
1c2a088a 17062 func[i]->is_func = 1;
ba64e7d8 17063 func[i]->aux->func_idx = i;
f263a814 17064 /* Below members will be freed only at prog->aux */
17065 func[i]->aux->btf = prog->aux->btf;
17066 func[i]->aux->func_info = prog->aux->func_info;
9c7c48d6 17067 func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
17068 func[i]->aux->poke_tab = prog->aux->poke_tab;
17069 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
ba64e7d8 17070
a748c697 17071 for (j = 0; j < prog->aux->size_poke_tab; j++) {
f263a814 17072 struct bpf_jit_poke_descriptor *poke;
a748c697 17073
17074 poke = &prog->aux->poke_tab[j];
17075 if (poke->insn_idx < subprog_end &&
17076 poke->insn_idx >= subprog_start)
17077 poke->aux = func[i]->aux;
17078 }
17079
1c2a088a 17080 func[i]->aux->name[0] = 'F';
9c8105bd 17081 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
1c2a088a 17082 func[i]->jit_requested = 1;
d2a3b7c5 17083 func[i]->blinding_requested = prog->blinding_requested;
e6ac2450 17084 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
2357672c 17085 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
17086 func[i]->aux->linfo = prog->aux->linfo;
17087 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
17088 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
17089 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
17090 num_exentries = 0;
17091 insn = func[i]->insnsi;
17092 for (j = 0; j < func[i]->len; j++, insn++) {
17093 if (BPF_CLASS(insn->code) == BPF_LDX &&
17094 BPF_MODE(insn->code) == BPF_PROBE_MEM)
17095 num_exentries++;
17096 }
17097 func[i]->aux->num_exentries = num_exentries;
ebf7d1f5 17098 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
17099 func[i] = bpf_int_jit_compile(func[i]);
17100 if (!func[i]->jited) {
17101 err = -ENOTSUPP;
17102 goto out_free;
17103 }
17104 cond_resched();
17105 }
a748c697 17106
17107 /* at this point all bpf functions were successfully JITed
17108 * now populate all bpf_calls with correct addresses and
17109 * run last pass of JIT
17110 */
f910cefa 17111 for (i = 0; i < env->subprog_cnt; i++) {
17112 insn = func[i]->insnsi;
17113 for (j = 0; j < func[i]->len; j++, insn++) {
69c087ba 17114 if (bpf_pseudo_func(insn)) {
3990ed4c 17115 subprog = insn->off;
17116 insn[0].imm = (u32)(long)func[subprog]->bpf_func;
17117 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
17118 continue;
17119 }
23a2d70c 17120 if (!bpf_pseudo_call(insn))
17121 continue;
17122 subprog = insn->off;
3d717fad 17123 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
1c2a088a 17124 }
17125
17126 /* we use the aux data to keep a list of the start addresses
17127 * of the JITed images for each function in the program
17128 *
17129 * for some architectures, such as powerpc64, the imm field
17130 * might not be large enough to hold the offset of the start
17131 * address of the callee's JITed image from __bpf_call_base
17132 *
17133 * in such cases, we can lookup the start address of a callee
17134 * by using its subprog id, available from the off field of
17135 * the call instruction, as an index for this list
17136 */
17137 func[i]->aux->func = func;
17138 func[i]->aux->func_cnt = env->subprog_cnt;
1c2a088a 17139 }
f910cefa 17140 for (i = 0; i < env->subprog_cnt; i++) {
17141 old_bpf_func = func[i]->bpf_func;
17142 tmp = bpf_int_jit_compile(func[i]);
17143 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
17144 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
c7a89784 17145 err = -ENOTSUPP;
17146 goto out_free;
17147 }
17148 cond_resched();
17149 }
17150
17151 /* finally lock prog and jit images for all functions and
 17152	 * populate kallsyms
17153 */
f910cefa 17154 for (i = 0; i < env->subprog_cnt; i++) {
17155 bpf_prog_lock_ro(func[i]);
17156 bpf_prog_kallsyms_add(func[i]);
17157 }
17158
17159 /* Last step: make now unused interpreter insns from main
17160 * prog consistent for later dump requests, so they can
17161 * later look the same as if they were interpreted only.
17162 */
17163 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
17164 if (bpf_pseudo_func(insn)) {
17165 insn[0].imm = env->insn_aux_data[i].call_imm;
17166 insn[1].imm = insn->off;
17167 insn->off = 0;
17168 continue;
17169 }
23a2d70c 17170 if (!bpf_pseudo_call(insn))
17171 continue;
17172 insn->off = env->insn_aux_data[i].call_imm;
17173 subprog = find_subprog(env, i + insn->off + 1);
dbecd738 17174 insn->imm = subprog;
17175 }
17176
17177 prog->jited = 1;
17178 prog->bpf_func = func[0]->bpf_func;
d00c6473 17179 prog->jited_len = func[0]->jited_len;
1c2a088a 17180 prog->aux->func = func;
f910cefa 17181 prog->aux->func_cnt = env->subprog_cnt;
e16301fb 17182 bpf_prog_jit_attempt_done(prog);
17183 return 0;
17184out_free:
17185 /* We failed JIT'ing, so at this point we need to unregister poke
17186 * descriptors from subprogs, so that kernel is not attempting to
17187 * patch it anymore as we're freeing the subprog JIT memory.
17188 */
17189 for (i = 0; i < prog->aux->size_poke_tab; i++) {
17190 map_ptr = prog->aux->poke_tab[i].tail_call.map;
17191 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
17192 }
17193 /* At this point we're guaranteed that poke descriptors are not
17194 * live anymore. We can just unlink its descriptor table as it's
17195 * released with the main prog.
17196 */
17197 for (i = 0; i < env->subprog_cnt; i++) {
17198 if (!func[i])
17199 continue;
f263a814 17200 func[i]->aux->poke_tab = NULL;
17201 bpf_jit_free(func[i]);
17202 }
1c2a088a 17203 kfree(func);
c7a89784 17204out_undo_insn:
17205 /* cleanup main prog to be interpreted */
17206 prog->jit_requested = 0;
d2a3b7c5 17207 prog->blinding_requested = 0;
1c2a088a 17208 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
23a2d70c 17209 if (!bpf_pseudo_call(insn))
17210 continue;
17211 insn->off = 0;
17212 insn->imm = env->insn_aux_data[i].call_imm;
17213 }
e16301fb 17214 bpf_prog_jit_attempt_done(prog);
17215 return err;
17216}
17217
17218static int fixup_call_args(struct bpf_verifier_env *env)
17219{
19d28fbd 17220#ifndef CONFIG_BPF_JIT_ALWAYS_ON
17221 struct bpf_prog *prog = env->prog;
17222 struct bpf_insn *insn = prog->insnsi;
e6ac2450 17223 bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
1ea47e01 17224 int i, depth;
19d28fbd 17225#endif
e4052d06 17226 int err = 0;
1ea47e01 17227
e4052d06 17228 if (env->prog->jit_requested &&
9d03ebc7 17229 !bpf_prog_is_offloaded(env->prog->aux)) {
17230 err = jit_subprogs(env);
17231 if (err == 0)
1c2a088a 17232 return 0;
17233 if (err == -EFAULT)
17234 return err;
17235 }
17236#ifndef CONFIG_BPF_JIT_ALWAYS_ON
17237 if (has_kfunc_call) {
17238 verbose(env, "calling kernel functions are not allowed in non-JITed programs\n");
17239 return -EINVAL;
17240 }
17241 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
17242 /* When JIT fails the progs with bpf2bpf calls and tail_calls
17243 * have to be rejected, since interpreter doesn't support them yet.
17244 */
17245 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
17246 return -EINVAL;
17247 }
1ea47e01 17248 for (i = 0; i < prog->len; i++, insn++) {
17249 if (bpf_pseudo_func(insn)) {
17250 /* When JIT fails the progs with callback calls
17251 * have to be rejected, since interpreter doesn't support them yet.
17252 */
17253 verbose(env, "callbacks are not allowed in non-JITed programs\n");
17254 return -EINVAL;
17255 }
17256
23a2d70c 17257 if (!bpf_pseudo_call(insn))
17258 continue;
17259 depth = get_callee_stack_depth(env, insn, i);
17260 if (depth < 0)
17261 return depth;
17262 bpf_patch_call_args(insn, depth);
17263 }
17264 err = 0;
17265#endif
17266 return err;
17267}
17268
17269static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
17270 struct bpf_insn *insn_buf, int insn_idx, int *cnt)
17271{
17272 const struct bpf_kfunc_desc *desc;
3d76a4d3 17273 void *xdp_kfunc;
e6ac2450 17274
17275 if (!insn->imm) {
17276 verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
17277 return -EINVAL;
17278 }
17279
17280 *cnt = 0;
17281
17282 if (bpf_dev_bound_kfunc_id(insn->imm)) {
17283 xdp_kfunc = bpf_dev_bound_resolve_kfunc(env->prog, insn->imm);
17284 if (xdp_kfunc) {
17285 insn->imm = BPF_CALL_IMM(xdp_kfunc);
17286 return 0;
17287 }
17288
17289 /* fallback to default kfunc when not supported by netdev */
17290 }
17291
e6ac2450 17292 /* insn->imm has the btf func_id. Replace it with
c2cc0ce7 17293 * an address (relative to __bpf_call_base).
e6ac2450 17294 */
2357672c 17295 desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
17296 if (!desc) {
17297 verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
17298 insn->imm);
17299 return -EFAULT;
17300 }
17301
17302 insn->imm = desc->imm;
17303 if (insn->off)
17304 return 0;
17305 if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
17306 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
17307 struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
17308 u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size;
e6ac2450 17309
958cf2e2
KKD
17310 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size);
17311 insn_buf[1] = addr[0];
17312 insn_buf[2] = addr[1];
17313 insn_buf[3] = *insn;
17314 *cnt = 4;
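		/* i.e. the kfunc call becomes (sketch):
		 *   r1 = obj_new_size
		 *   r2 = kptr_struct_meta    (ld_imm64, two insns)
		 *   call bpf_obj_new_impl
		 */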
17315 } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) {
17316 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
17317 struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) };
17318
17319 insn_buf[0] = addr[0];
17320 insn_buf[1] = addr[1];
17321 insn_buf[2] = *insn;
17322 *cnt = 3;
a35b9af4
YS
17323 } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
17324 desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
fd264ca0
YS
17325 insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
17326 *cnt = 1;
b5964b96
JK
17327 } else if (desc->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) {
17328 bool seen_direct_write = env->seen_direct_write;
17329 bool is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE);
17330
17331 if (is_rdonly)
17332 insn->imm = BPF_CALL_IMM(bpf_dynptr_from_skb_rdonly);
17333
17334 /* restore env->seen_direct_write to its original value, since
17335 * may_access_direct_pkt_data mutates it
17336 */
17337 env->seen_direct_write = seen_direct_write;
958cf2e2 17338 }
e6ac2450
MKL
17339 return 0;
17340}
17341
e6ac5933
BJ
17342/* Do various post-verification rewrites in a single program pass.
17343 * These rewrites simplify JIT and interpreter implementations.
e245c5c6 17344 */
e6ac5933 17345static int do_misc_fixups(struct bpf_verifier_env *env)
e245c5c6 17346{
79741b3b 17347 struct bpf_prog *prog = env->prog;
f92c1e18 17348 enum bpf_attach_type eatype = prog->expected_attach_type;
9b99edca 17349 enum bpf_prog_type prog_type = resolve_prog_type(prog);
79741b3b 17350 struct bpf_insn *insn = prog->insnsi;
e245c5c6 17351 const struct bpf_func_proto *fn;
79741b3b 17352 const int insn_cnt = prog->len;
09772d92 17353 const struct bpf_map_ops *ops;
c93552c4 17354 struct bpf_insn_aux_data *aux;
81ed18ab
AS
17355 struct bpf_insn insn_buf[16];
17356 struct bpf_prog *new_prog;
17357 struct bpf_map *map_ptr;
d2e4c1e6 17358 int i, ret, cnt, delta = 0;
e245c5c6 17359
79741b3b 17360 for (i = 0; i < insn_cnt; i++, insn++) {
e6ac5933 17361 /* Make divide-by-zero exceptions impossible. */
f6b1b3bf
DB
17362 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
17363 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
17364 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
68fda450 17365 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
f6b1b3bf 17366 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
e88b2c6e
DB
17367 bool isdiv = BPF_OP(insn->code) == BPF_DIV;
17368 struct bpf_insn *patchlet;
17369 struct bpf_insn chk_and_div[] = {
9b00f1b7 17370 /* [R,W]x div 0 -> 0 */
e88b2c6e
DB
17371 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
17372 BPF_JNE | BPF_K, insn->src_reg,
17373 0, 2, 0),
f6b1b3bf
DB
17374 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
17375 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
17376 *insn,
17377 };
e88b2c6e 17378 struct bpf_insn chk_and_mod[] = {
9b00f1b7 17379 /* [R,W]x mod 0 -> [R,W]x */
e88b2c6e
DB
17380 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
17381 BPF_JEQ | BPF_K, insn->src_reg,
9b00f1b7 17382 0, 1 + (is64 ? 0 : 1), 0),
f6b1b3bf 17383 *insn,
9b00f1b7
DB
17384 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
17385 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
f6b1b3bf 17386 };
f6b1b3bf 17387
e88b2c6e
DB
17388 patchlet = isdiv ? chk_and_div : chk_and_mod;
17389 cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
9b00f1b7 17390 ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
f6b1b3bf
DB
17391
17392 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
68fda450
AS
17393 if (!new_prog)
17394 return -ENOMEM;
17395
17396 delta += cnt - 1;
17397 env->prog = prog = new_prog;
17398 insn = new_prog->insnsi + i + delta;
17399 continue;
17400 }
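/* Editor's note, a sketch rather than actual verifier output: for a
 * 64-bit division such as
 *   BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1)
 * the chk_and_div patchlet above expands the single insn into
 *   if r1 != 0 goto +2
 *   w0 = 0
 *   goto +1
 *   r0 /= r1
 * so a zero divisor yields 0 instead of a runtime exception, while
 * chk_and_mod analogously makes "x mod 0" evaluate to x (truncated
 * to 32 bits for the BPF_ALU32 variant).
 */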
17401
e6ac5933 17402 /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
e0cea7ce
DB
17403 if (BPF_CLASS(insn->code) == BPF_LD &&
17404 (BPF_MODE(insn->code) == BPF_ABS ||
17405 BPF_MODE(insn->code) == BPF_IND)) {
17406 cnt = env->ops->gen_ld_abs(insn, insn_buf);
17407 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
17408 verbose(env, "bpf verifier is misconfigured\n");
17409 return -EINVAL;
17410 }
17411
17412 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17413 if (!new_prog)
17414 return -ENOMEM;
17415
17416 delta += cnt - 1;
17417 env->prog = prog = new_prog;
17418 insn = new_prog->insnsi + i + delta;
17419 continue;
17420 }
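/* Editor's note: gen_ld_abs() comes from the program type's verifier
 * ops; for socket filters it is bpf_gen_ld_abs() in net/core/filter.c.
 * As a rough sketch (not the exact emitted sequence), the rewrite
 * behaves like
 *   if (offset + size <= skb_headlen(skb))
 *           r0 = load(skb->data + offset);
 *   else
 *           r0 = bpf_skb_load_helper_{8,16,32}(skb, offset);  // slow path
 * i.e. a fast inline load from the linear skb area with a helper
 * fallback, instead of interpreting LD_ABS/LD_IND specially.
 */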
17421
e6ac5933 17422 /* Rewrite pointer arithmetic to mitigate speculation attacks. */
979d63d5
DB
17423 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
17424 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
17425 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
17426 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
979d63d5 17427 struct bpf_insn *patch = &insn_buf[0];
801c6058 17428 bool issrc, isneg, isimm;
979d63d5
DB
17429 u32 off_reg;
17430
17431 aux = &env->insn_aux_data[i + delta];
3612af78
DB
17432 if (!aux->alu_state ||
17433 aux->alu_state == BPF_ALU_NON_POINTER)
979d63d5
DB
17434 continue;
17435
17436 isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
17437 issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
17438 BPF_ALU_SANITIZE_SRC;
801c6058 17439 isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
979d63d5
DB
17440
17441 off_reg = issrc ? insn->src_reg : insn->dst_reg;
801c6058
DB
17442 if (isimm) {
17443 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
17444 } else {
17445 if (isneg)
17446 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
17447 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
17448 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
17449 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
17450 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
17451 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
17452 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
17453 }
b9b34ddb
DB
17454 if (!issrc)
17455 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
17456 insn->src_reg = BPF_REG_AX;
979d63d5
DB
17457 if (isneg)
17458 insn->code = insn->code == code_add ?
17459 code_sub : code_add;
17460 *patch++ = *insn;
801c6058 17461 if (issrc && isneg && !isimm)
979d63d5
DB
17462 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
17463 cnt = patch - insn_buf;
17464
17465 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17466 if (!new_prog)
17467 return -ENOMEM;
17468
17469 delta += cnt - 1;
17470 env->prog = prog = new_prog;
17471 insn = new_prog->insnsi + i + delta;
17472 continue;
17473 }
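/* Editor's note, a reading aid for the masking sequence above:
 *   AX = alu_limit;
 *   AX -= off_reg;
 *   AX |= off_reg;   // sign bit set iff off_reg > alu_limit
 *                    // or off_reg is itself negative
 *   AX = -AX;
 *   AX s>>= 63;      // all-ones when safe, all-zeroes when unsafe
 *   AX &= off_reg;   // AX == off_reg when safe, 0 otherwise
 * The patched pointer ALU then uses BPF_REG_AX as its offset operand,
 * so even under misspeculation an out-of-range offset is clamped to 0.
 */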
17474
79741b3b
AS
17475 if (insn->code != (BPF_JMP | BPF_CALL))
17476 continue;
cc8b0b92
AS
17477 if (insn->src_reg == BPF_PSEUDO_CALL)
17478 continue;
e6ac2450 17479 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
958cf2e2 17480 ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt);
e6ac2450
MKL
17481 if (ret)
17482 return ret;
958cf2e2
KKD
17483 if (cnt == 0)
17484 continue;
17485
17486 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17487 if (!new_prog)
17488 return -ENOMEM;
17489
17490 delta += cnt - 1;
17491 env->prog = prog = new_prog;
17492 insn = new_prog->insnsi + i + delta;
e6ac2450
MKL
17493 continue;
17494 }
e245c5c6 17495
79741b3b
AS
17496 if (insn->imm == BPF_FUNC_get_route_realm)
17497 prog->dst_needed = 1;
17498 if (insn->imm == BPF_FUNC_get_prandom_u32)
17499 bpf_user_rnd_init_once();
9802d865
JB
17500 if (insn->imm == BPF_FUNC_override_return)
17501 prog->kprobe_override = 1;
79741b3b 17502 if (insn->imm == BPF_FUNC_tail_call) {
7b9f6da1
DM
17503 /* If we tail call into other programs, we
17504 * cannot make any assumptions since they can
17505 * be replaced dynamically during runtime in
17506 * the program array.
17507 */
17508 prog->cb_access = 1;
e411901c
MF
17509 if (!allow_tail_call_in_subprogs(env))
17510 prog->aux->stack_depth = MAX_BPF_STACK;
17511 prog->aux->max_pkt_offset = MAX_PACKET_OFF;
7b9f6da1 17512
79741b3b 17513 /* mark bpf_tail_call as a different opcode to avoid a
8fb33b60 17514 * conditional branch in the interpreter for every normal
79741b3b
AS
17515 * call and to prevent accidental JITing by a JIT compiler
17516 * that doesn't support bpf_tail_call yet
e245c5c6 17517 */
79741b3b 17518 insn->imm = 0;
71189fa9 17519 insn->code = BPF_JMP | BPF_TAIL_CALL;
b2157399 17520
c93552c4 17521 aux = &env->insn_aux_data[i + delta];
d2a3b7c5 17522 if (env->bpf_capable && !prog->blinding_requested &&
cc52d914 17523 prog->jit_requested &&
d2e4c1e6
DB
17524 !bpf_map_key_poisoned(aux) &&
17525 !bpf_map_ptr_poisoned(aux) &&
17526 !bpf_map_ptr_unpriv(aux)) {
17527 struct bpf_jit_poke_descriptor desc = {
17528 .reason = BPF_POKE_REASON_TAIL_CALL,
17529 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
17530 .tail_call.key = bpf_map_key_immediate(aux),
a748c697 17531 .insn_idx = i + delta,
d2e4c1e6
DB
17532 };
17533
17534 ret = bpf_jit_add_poke_descriptor(prog, &desc);
17535 if (ret < 0) {
17536 verbose(env, "adding tail call poke descriptor failed\n");
17537 return ret;
17538 }
17539
17540 insn->imm = ret + 1;
17541 continue;
17542 }
17543
c93552c4
DB
17544 if (!bpf_map_ptr_unpriv(aux))
17545 continue;
17546
b2157399
AS
17547 /* instead of changing every JIT dealing with tail_call
17548 * emit two extra insns:
17549 * if (index >= max_entries) goto out;
17550 * index &= array->index_mask;
17551 * to avoid out-of-bounds cpu speculation
17552 */
c93552c4 17553 if (bpf_map_ptr_poisoned(aux)) {
40950343 17554 verbose(env, "tail_call abusing map_ptr\n");
b2157399
AS
17555 return -EINVAL;
17556 }
c93552c4 17557
d2e4c1e6 17558 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
b2157399
AS
17559 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
17560 map_ptr->max_entries, 2);
17561 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
17562 container_of(map_ptr,
17563 struct bpf_array,
17564 map)->index_mask);
17565 insn_buf[2] = *insn;
17566 cnt = 3;
17567 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17568 if (!new_prog)
17569 return -ENOMEM;
17570
17571 delta += cnt - 1;
17572 env->prog = prog = new_prog;
17573 insn = new_prog->insnsi + i + delta;
79741b3b
AS
17574 continue;
17575 }
e245c5c6 17576
b00628b1
AS
17577 if (insn->imm == BPF_FUNC_timer_set_callback) {
17578 /* The verifier will process callback_fn as many times as necessary
17579 * with different maps, and the register states prepared by
17580 * set_timer_callback_state will be accurate.
17581 *
17582 * The following use case is valid:
17583 * map1 is shared by prog1, prog2, prog3.
17584 * prog1 calls bpf_timer_init for some map1 elements
17585 * prog2 calls bpf_timer_set_callback for some map1 elements.
17586 * Those that were not bpf_timer_init-ed will return -EINVAL.
17587 * prog3 calls bpf_timer_start for some map1 elements.
17588 * Those that were not both bpf_timer_init-ed and
17589 * bpf_timer_set_callback-ed will return -EINVAL.
17590 */
17591 struct bpf_insn ld_addrs[2] = {
17592 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
17593 };
17594
17595 insn_buf[0] = ld_addrs[0];
17596 insn_buf[1] = ld_addrs[1];
17597 insn_buf[2] = *insn;
17598 cnt = 3;
17599
17600 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17601 if (!new_prog)
17602 return -ENOMEM;
17603
17604 delta += cnt - 1;
17605 env->prog = prog = new_prog;
17606 insn = new_prog->insnsi + i + delta;
17607 goto patch_call_imm;
17608 }
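/* Editor's sketch: conceptually the patch above turns
 *   bpf_timer_set_callback(&timer, callback_fn);
 * into
 *   r3 = prog->aux;   // BPF_LD_IMM64, hidden third argument
 *   call bpf_timer_set_callback;
 * letting the helper resolve callback_fn against the owning program.
 */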
17609
9bb00b28
YS
17610 if (is_storage_get_function(insn->imm)) {
17611 if (!env->prog->aux->sleepable ||
17612 env->insn_aux_data[i + delta].storage_get_func_atomic)
d56c9fe6 17613 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
9bb00b28
YS
17614 else
17615 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
b00fa38a
JK
17616 insn_buf[1] = *insn;
17617 cnt = 2;
17618
17619 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17620 if (!new_prog)
17621 return -ENOMEM;
17622
17623 delta += cnt - 1;
17624 env->prog = prog = new_prog;
17625 insn = new_prog->insnsi + i + delta;
17626 goto patch_call_imm;
17627 }
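/* Editor's note: the storage_get helpers take a hidden gfp_t argument
 * in R5; the patch above materializes it so that sleepable programs
 * may allocate with GFP_KERNEL, while non-sleepable programs and call
 * sites marked storage_get_func_atomic stay on GFP_ATOMIC.
 */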
17628
89c63074 17629 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
09772d92
DB
17630 * and other inlining handlers are currently limited to 64 bit
17631 * only.
89c63074 17632 */
60b58afc 17633 if (prog->jit_requested && BITS_PER_LONG == 64 &&
09772d92
DB
17634 (insn->imm == BPF_FUNC_map_lookup_elem ||
17635 insn->imm == BPF_FUNC_map_update_elem ||
84430d42
DB
17636 insn->imm == BPF_FUNC_map_delete_elem ||
17637 insn->imm == BPF_FUNC_map_push_elem ||
17638 insn->imm == BPF_FUNC_map_pop_elem ||
e6a4750f 17639 insn->imm == BPF_FUNC_map_peek_elem ||
0640c77c 17640 insn->imm == BPF_FUNC_redirect_map ||
07343110
FZ
17641 insn->imm == BPF_FUNC_for_each_map_elem ||
17642 insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
c93552c4
DB
17643 aux = &env->insn_aux_data[i + delta];
17644 if (bpf_map_ptr_poisoned(aux))
17645 goto patch_call_imm;
17646
d2e4c1e6 17647 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
09772d92
DB
17648 ops = map_ptr->ops;
17649 if (insn->imm == BPF_FUNC_map_lookup_elem &&
17650 ops->map_gen_lookup) {
17651 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
4a8f87e6
DB
17652 if (cnt == -EOPNOTSUPP)
17653 goto patch_map_ops_generic;
17654 if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
09772d92
DB
17655 verbose(env, "bpf verifier is misconfigured\n");
17656 return -EINVAL;
17657 }
81ed18ab 17658
09772d92
DB
17659 new_prog = bpf_patch_insn_data(env, i + delta,
17660 insn_buf, cnt);
17661 if (!new_prog)
17662 return -ENOMEM;
81ed18ab 17663
09772d92
DB
17664 delta += cnt - 1;
17665 env->prog = prog = new_prog;
17666 insn = new_prog->insnsi + i + delta;
17667 continue;
17668 }
81ed18ab 17669
09772d92
DB
17670 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
17671 (void *(*)(struct bpf_map *map, void *key))NULL));
17672 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
17673 (int (*)(struct bpf_map *map, void *key))NULL));
17674 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
17675 (int (*)(struct bpf_map *map, void *key, void *value,
17676 u64 flags))NULL));
84430d42
DB
17677 BUILD_BUG_ON(!__same_type(ops->map_push_elem,
17678 (int (*)(struct bpf_map *map, void *value,
17679 u64 flags))NULL));
17680 BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
17681 (int (*)(struct bpf_map *map, void *value))NULL));
17682 BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
17683 (int (*)(struct bpf_map *map, void *value))NULL));
e6a4750f 17684 BUILD_BUG_ON(!__same_type(ops->map_redirect,
32637e33 17685 (int (*)(struct bpf_map *map, u64 index, u64 flags))NULL));
0640c77c
AI
17686 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
17687 (int (*)(struct bpf_map *map,
17688 bpf_callback_t callback_fn,
17689 void *callback_ctx,
17690 u64 flags))NULL));
07343110
FZ
17691 BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
17692 (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
e6a4750f 17693
4a8f87e6 17694patch_map_ops_generic:
09772d92
DB
17695 switch (insn->imm) {
17696 case BPF_FUNC_map_lookup_elem:
3d717fad 17697 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
09772d92
DB
17698 continue;
17699 case BPF_FUNC_map_update_elem:
3d717fad 17700 insn->imm = BPF_CALL_IMM(ops->map_update_elem);
09772d92
DB
17701 continue;
17702 case BPF_FUNC_map_delete_elem:
3d717fad 17703 insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
09772d92 17704 continue;
84430d42 17705 case BPF_FUNC_map_push_elem:
3d717fad 17706 insn->imm = BPF_CALL_IMM(ops->map_push_elem);
84430d42
DB
17707 continue;
17708 case BPF_FUNC_map_pop_elem:
3d717fad 17709 insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
84430d42
DB
17710 continue;
17711 case BPF_FUNC_map_peek_elem:
3d717fad 17712 insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
84430d42 17713 continue;
e6a4750f 17714 case BPF_FUNC_redirect_map:
3d717fad 17715 insn->imm = BPF_CALL_IMM(ops->map_redirect);
e6a4750f 17716 continue;
0640c77c
AI
17717 case BPF_FUNC_for_each_map_elem:
17718 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
e6a4750f 17719 continue;
07343110
FZ
17720 case BPF_FUNC_map_lookup_percpu_elem:
17721 insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
17722 continue;
09772d92 17723 }
81ed18ab 17724
09772d92 17725 goto patch_call_imm;
81ed18ab
AS
17726 }
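/* Editor's sketch: for an array map, ops->map_gen_lookup()
 * (array_map_gen_lookup() in kernel/bpf/arraymap.c) replaces the
 * helper call with an inline sequence roughly equivalent to
 *   if (index >= array->map.max_entries)
 *           return NULL;
 *   return array->value + array->elem_size * index;
 * while the BPF_CALL_IMM() cases above merely bypass the generic
 * helper dispatch and call the map op directly.
 */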
17727
e6ac5933 17728 /* Implement bpf_jiffies64 inline. */
5576b991
MKL
17729 if (prog->jit_requested && BITS_PER_LONG == 64 &&
17730 insn->imm == BPF_FUNC_jiffies64) {
17731 struct bpf_insn ld_jiffies_addr[2] = {
17732 BPF_LD_IMM64(BPF_REG_0,
17733 (unsigned long)&jiffies),
17734 };
17735
17736 insn_buf[0] = ld_jiffies_addr[0];
17737 insn_buf[1] = ld_jiffies_addr[1];
17738 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
17739 BPF_REG_0, 0);
17740 cnt = 3;
17741
17742 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
17743 cnt);
17744 if (!new_prog)
17745 return -ENOMEM;
17746
17747 delta += cnt - 1;
17748 env->prog = prog = new_prog;
17749 insn = new_prog->insnsi + i + delta;
17750 continue;
17751 }
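/* Editor's sketch: after this patch a bpf_jiffies64() call is just
 *   r0 = &jiffies;           // BPF_LD_IMM64
 *   r0 = *(u64 *)(r0 + 0);
 * i.e. a direct read of the kernel's jiffies counter, no call needed.
 */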
17752
f92c1e18
JO
17753 /* Implement bpf_get_func_arg inline. */
17754 if (prog_type == BPF_PROG_TYPE_TRACING &&
17755 insn->imm == BPF_FUNC_get_func_arg) {
17756 /* Load nr_args from ctx - 8 */
17757 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
17758 insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
17759 insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
17760 insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
17761 insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
17762 insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
17763 insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
17764 insn_buf[7] = BPF_JMP_A(1);
17765 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
17766 cnt = 9;
17767
17768 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17769 if (!new_prog)
17770 return -ENOMEM;
17771
17772 delta += cnt - 1;
17773 env->prog = prog = new_prog;
17774 insn = new_prog->insnsi + i + delta;
17775 continue;
17776 }
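/* Editor's sketch: the nine insns above implement, in C terms,
 *   long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
 *   {
 *           u64 nr_args = ((u64 *)ctx)[-1];
 *
 *           if (n >= nr_args)
 *                   return -EINVAL;
 *           *value = ((u64 *)ctx)[n];
 *           return 0;
 *   }
 * relying on the trampoline storing nr_args just below the ctx pointer.
 */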
17777
17778 /* Implement bpf_get_func_ret inline. */
17779 if (prog_type == BPF_PROG_TYPE_TRACING &&
17780 insn->imm == BPF_FUNC_get_func_ret) {
17781 if (eatype == BPF_TRACE_FEXIT ||
17782 eatype == BPF_MODIFY_RETURN) {
17783 /* Load nr_args from ctx - 8 */
17784 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
17785 insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
17786 insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
17787 insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
17788 insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
17789 insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
17790 cnt = 6;
17791 } else {
17792 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
17793 cnt = 1;
17794 }
17795
17796 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
17797 if (!new_prog)
17798 return -ENOMEM;
17799
17800 delta += cnt - 1;
17801 env->prog = prog = new_prog;
17802 insn = new_prog->insnsi + i + delta;
17803 continue;
17804 }
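/* Editor's sketch: for FEXIT/MODIFY_RETURN the six insns above do
 *   u64 nr_args = ((u64 *)ctx)[-1];
 *   *value = ((u64 *)ctx)[nr_args];   // return value follows the args
 *   return 0;
 * while other attach types get a bare "return -EOPNOTSUPP".
 */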
17805
17806 /* Implement get_func_arg_cnt inline. */
17807 if (prog_type == BPF_PROG_TYPE_TRACING &&
17808 insn->imm == BPF_FUNC_get_func_arg_cnt) {
17809 /* Load nr_args from ctx - 8 */
17810 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
17811
17812 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
17813 if (!new_prog)
17814 return -ENOMEM;
17815
17816 env->prog = prog = new_prog;
17817 insn = new_prog->insnsi + i + delta;
17818 continue;
17819 }
17820
f705ec76 17821 /* Implement bpf_get_func_ip inline. */
9b99edca
JO
17822 if (prog_type == BPF_PROG_TYPE_TRACING &&
17823 insn->imm == BPF_FUNC_get_func_ip) {
f92c1e18
JO
17824 /* Load IP address from ctx - 16 */
17825 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
9b99edca
JO
17826
17827 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
17828 if (!new_prog)
17829 return -ENOMEM;
17830
17831 env->prog = prog = new_prog;
17832 insn = new_prog->insnsi + i + delta;
17833 continue;
17834 }
17835
81ed18ab 17836patch_call_imm:
5e43f899 17837 fn = env->ops->get_func_proto(insn->imm, env->prog);
79741b3b
AS
17838 /* all functions that have a prototype and that the verifier
17839 * allowed programs to call must be real in-kernel functions
17840 */
17841 if (!fn->func) {
61bd5218
JK
17842 verbose(env,
17843 "kernel subsystem misconfigured func %s#%d\n",
79741b3b
AS
17844 func_id_name(insn->imm), insn->imm);
17845 return -EFAULT;
e245c5c6 17846 }
79741b3b 17847 insn->imm = fn->func - __bpf_call_base;
e245c5c6 17848 }
e245c5c6 17849
d2e4c1e6
DB
17850 /* Since poke tab is now finalized, publish aux to tracker. */
17851 for (i = 0; i < prog->aux->size_poke_tab; i++) {
17852 map_ptr = prog->aux->poke_tab[i].tail_call.map;
17853 if (!map_ptr->ops->map_poke_track ||
17854 !map_ptr->ops->map_poke_untrack ||
17855 !map_ptr->ops->map_poke_run) {
17856 verbose(env, "bpf verifier is misconfigured\n");
17857 return -EINVAL;
17858 }
17859
17860 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
17861 if (ret < 0) {
17862 verbose(env, "tracking tail call prog failed\n");
17863 return ret;
17864 }
17865 }
17866
e6ac2450
MKL
17867 sort_kfunc_descs_by_imm(env->prog);
17868
79741b3b
AS
17869 return 0;
17870}
e245c5c6 17871
1ade2371
EZ
17872static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
17873 int position,
17874 s32 stack_base,
17875 u32 callback_subprogno,
17876 u32 *cnt)
17877{
17878 s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
17879 s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
17880 s32 r8_offset = stack_base + 2 * BPF_REG_SIZE;
17881 int reg_loop_max = BPF_REG_6;
17882 int reg_loop_cnt = BPF_REG_7;
17883 int reg_loop_ctx = BPF_REG_8;
17884
17885 struct bpf_prog *new_prog;
17886 u32 callback_start;
17887 u32 call_insn_offset;
17888 s32 callback_offset;
17889
17890 /* This represents an inlined version of bpf_iter.c:bpf_loop;
17891 * be careful to keep the two in sync when modifying either.
17892 */
17893 struct bpf_insn insn_buf[] = {
17894 /* Return error and jump to the end of the patch if
17895 * expected number of iterations is too big.
17896 */
17897 BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2),
17898 BPF_MOV32_IMM(BPF_REG_0, -E2BIG),
17899 BPF_JMP_IMM(BPF_JA, 0, 0, 16),
17900 /* spill R6, R7, R8 to use these as loop vars */
17901 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset),
17902 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset),
17903 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset),
17904 /* initialize loop vars */
17905 BPF_MOV64_REG(reg_loop_max, BPF_REG_1),
17906 BPF_MOV32_IMM(reg_loop_cnt, 0),
17907 BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3),
17908 /* loop header,
17909 * if reg_loop_cnt >= reg_loop_max skip the loop body
17910 */
17911 BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5),
17912 /* callback call,
17913 * correct callback offset would be set after patching
17914 */
17915 BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt),
17916 BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx),
17917 BPF_CALL_REL(0),
17918 /* increment loop counter */
17919 BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1),
17920 /* jump to loop header if callback returned 0 */
17921 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6),
17922 /* return value of bpf_loop,
17923 * set R0 to the number of iterations
17924 */
17925 BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt),
17926 /* restore original values of R6, R7, R8 */
17927 BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset),
17928 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset),
17929 BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset),
17930 };
17931
17932 *cnt = ARRAY_SIZE(insn_buf);
17933 new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt);
17934 if (!new_prog)
17935 return new_prog;
17936
17937 /* callback start is known only after patching */
17938 callback_start = env->subprog_info[callback_subprogno].start;
17939 /* Note: insn_buf[12] is an offset of BPF_CALL_REL instruction */
17940 call_insn_offset = position + 12;
17941 callback_offset = callback_start - call_insn_offset - 1;
fb4e3b33 17942 new_prog->insnsi[call_insn_offset].imm = callback_offset;
1ade2371
EZ
17943
17944 return new_prog;
17945}
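/* Editor's sketch: the insn_buf above is the inlined form of
 *   long bpf_loop(u32 nr_loops, void *callback_fn, void *ctx, u64 flags)
 * and, assuming flags was already checked to be zero, behaves like
 *   if (nr_loops > BPF_MAX_LOOPS)
 *           return -E2BIG;
 *   i = 0;
 *   while (i < nr_loops) {
 *           ret = callback_fn(i, ctx);
 *           i++;
 *           if (ret)
 *                   break;
 *   }
 *   return i;   // number of iterations performed
 * with R6-R8 spilled to the extra stack space so they can hold the
 * loop bound, counter and context across the callback calls.
 */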
17946
17947static bool is_bpf_loop_call(struct bpf_insn *insn)
17948{
17949 return insn->code == (BPF_JMP | BPF_CALL) &&
17950 insn->src_reg == 0 &&
17951 insn->imm == BPF_FUNC_loop;
17952}
17953
17954/* For all sub-programs in the program (including main) check
17955 * insn_aux_data to see if there are bpf_loop calls that require
17956 * inlining. If such calls are found, they are replaced with a
17957 * sequence of instructions produced by the `inline_bpf_loop` function
17958 * and the subprog stack_depth is increased by the size of 3 registers.
17959 * This stack space is used to spill the values of R6, R7 and R8. These
17960 * registers are used to store the loop bound, counter and context
17961 * variables.
17962 */
17963static int optimize_bpf_loop(struct bpf_verifier_env *env)
17964{
17965 struct bpf_subprog_info *subprogs = env->subprog_info;
17966 int i, cur_subprog = 0, cnt, delta = 0;
17967 struct bpf_insn *insn = env->prog->insnsi;
17968 int insn_cnt = env->prog->len;
17969 u16 stack_depth = subprogs[cur_subprog].stack_depth;
17970 u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
17971 u16 stack_depth_extra = 0;
17972
17973 for (i = 0; i < insn_cnt; i++, insn++) {
17974 struct bpf_loop_inline_state *inline_state =
17975 &env->insn_aux_data[i + delta].loop_inline_state;
17976
17977 if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) {
17978 struct bpf_prog *new_prog;
17979
17980 stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup;
17981 new_prog = inline_bpf_loop(env,
17982 i + delta,
17983 -(stack_depth + stack_depth_extra),
17984 inline_state->callback_subprogno,
17985 &cnt);
17986 if (!new_prog)
17987 return -ENOMEM;
17988
17989 delta += cnt - 1;
17990 env->prog = new_prog;
17991 insn = new_prog->insnsi + i + delta;
17992 }
17993
17994 if (subprogs[cur_subprog + 1].start == i + delta + 1) {
17995 subprogs[cur_subprog].stack_depth += stack_depth_extra;
17996 cur_subprog++;
17997 stack_depth = subprogs[cur_subprog].stack_depth;
17998 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
17999 stack_depth_extra = 0;
18000 }
18001 }
18002
18003 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
18004
18005 return 0;
18006}
18007
58e2af8b 18008static void free_states(struct bpf_verifier_env *env)
f1bca824 18009{
58e2af8b 18010 struct bpf_verifier_state_list *sl, *sln;
f1bca824
AS
18011 int i;
18012
9f4686c4
AS
18013 sl = env->free_list;
18014 while (sl) {
18015 sln = sl->next;
18016 free_verifier_state(&sl->state, false);
18017 kfree(sl);
18018 sl = sln;
18019 }
51c39bb1 18020 env->free_list = NULL;
9f4686c4 18021
f1bca824
AS
18022 if (!env->explored_states)
18023 return;
18024
dc2a4ebc 18025 for (i = 0; i < state_htab_size(env); i++) {
f1bca824
AS
18026 sl = env->explored_states[i];
18027
a8f500af
AS
18028 while (sl) {
18029 sln = sl->next;
18030 free_verifier_state(&sl->state, false);
18031 kfree(sl);
18032 sl = sln;
18033 }
51c39bb1 18034 env->explored_states[i] = NULL;
f1bca824 18035 }
51c39bb1 18036}
f1bca824 18037
51c39bb1
AS
18038static int do_check_common(struct bpf_verifier_env *env, int subprog)
18039{
6f8a57cc 18040 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
51c39bb1
AS
18041 struct bpf_verifier_state *state;
18042 struct bpf_reg_state *regs;
18043 int ret, i;
18044
18045 env->prev_linfo = NULL;
18046 env->pass_cnt++;
18047
18048 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
18049 if (!state)
18050 return -ENOMEM;
18051 state->curframe = 0;
18052 state->speculative = false;
18053 state->branches = 1;
18054 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
18055 if (!state->frame[0]) {
18056 kfree(state);
18057 return -ENOMEM;
18058 }
18059 env->cur_state = state;
18060 init_func_state(env, state->frame[0],
18061 BPF_MAIN_FUNC /* callsite */,
18062 0 /* frameno */,
18063 subprog);
be2ef816
AN
18064 state->first_insn_idx = env->subprog_info[subprog].start;
18065 state->last_insn_idx = -1;
51c39bb1
AS
18066
18067 regs = state->frame[state->curframe]->regs;
be8704ff 18068 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
51c39bb1
AS
18069 ret = btf_prepare_func_args(env, subprog, regs);
18070 if (ret)
18071 goto out;
18072 for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
18073 if (regs[i].type == PTR_TO_CTX)
18074 mark_reg_known_zero(env, regs, i);
18075 else if (regs[i].type == SCALAR_VALUE)
18076 mark_reg_unknown(env, regs, i);
cf9f2f8d 18077 else if (base_type(regs[i].type) == PTR_TO_MEM) {
e5069b9c
DB
18078 const u32 mem_size = regs[i].mem_size;
18079
18080 mark_reg_known_zero(env, regs, i);
18081 regs[i].mem_size = mem_size;
18082 regs[i].id = ++env->id_gen;
18083 }
51c39bb1
AS
18084 }
18085 } else {
18086 /* 1st arg to a function */
18087 regs[BPF_REG_1].type = PTR_TO_CTX;
18088 mark_reg_known_zero(env, regs, BPF_REG_1);
34747c41 18089 ret = btf_check_subprog_arg_match(env, subprog, regs);
51c39bb1
AS
18090 if (ret == -EFAULT)
18091 /* unlikely verifier bug. abort.
18092 * ret == 0 and ret < 0 are sadly acceptable for
18093 * main() function due to backward compatibility.
18094 * E.g. a socket filter program may be written as:
18095 * int bpf_prog(struct pt_regs *ctx)
18096 * and never dereference that ctx in the program.
18097 * 'struct pt_regs' is a type mismatch for a socket
18098 * filter, which should be using 'struct __sk_buff'.
18099 */
18100 goto out;
18101 }
18102
18103 ret = do_check(env);
18104out:
f59bbfc2
AS
18105 /* the check for NULL is necessary, since cur_state can be freed inside
18106 * do_check() under memory pressure.
18107 */
18108 if (env->cur_state) {
18109 free_verifier_state(env->cur_state, true);
18110 env->cur_state = NULL;
18111 }
6f8a57cc
AN
18112 while (!pop_stack(env, NULL, NULL, false));
18113 if (!ret && pop_log)
18114 bpf_vlog_reset(&env->log, 0);
51c39bb1 18115 free_states(env);
51c39bb1
AS
18116 return ret;
18117}
18118
18119/* Verify all global functions in a BPF program one by one based on their BTF.
18120 * All global functions must pass verification. Otherwise the whole program is rejected.
18121 * Consider:
18122 * int bar(int);
18123 * int foo(int f)
18124 * {
18125 * return bar(f);
18126 * }
18127 * int bar(int b)
18128 * {
18129 * ...
18130 * }
18131 * foo() will be verified first for R1=any_scalar_value. During verification it
18132 * will be assumed that bar() was already verified successfully and the call to
18133 * bar() from foo() will be checked for type match only. Later bar() will be verified
18134 * independently to check that it's safe for R1=any_scalar_value.
18135 */
18136static int do_check_subprogs(struct bpf_verifier_env *env)
18137{
18138 struct bpf_prog_aux *aux = env->prog->aux;
18139 int i, ret;
18140
18141 if (!aux->func_info)
18142 return 0;
18143
18144 for (i = 1; i < env->subprog_cnt; i++) {
18145 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
18146 continue;
18147 env->insn_idx = env->subprog_info[i].start;
18148 WARN_ON_ONCE(env->insn_idx == 0);
18149 ret = do_check_common(env, i);
18150 if (ret) {
18151 return ret;
18152 } else if (env->log.level & BPF_LOG_LEVEL) {
18153 verbose(env,
18154 "Func#%d is safe for any args that match its prototype\n",
18155 i);
18156 }
18157 }
18158 return 0;
18159}
18160
18161static int do_check_main(struct bpf_verifier_env *env)
18162{
18163 int ret;
18164
18165 env->insn_idx = 0;
18166 ret = do_check_common(env, 0);
18167 if (!ret)
18168 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
18169 return ret;
18170}
18171
18172
06ee7115
AS
18173static void print_verification_stats(struct bpf_verifier_env *env)
18174{
18175 int i;
18176
18177 if (env->log.level & BPF_LOG_STATS) {
18178 verbose(env, "verification time %lld usec\n",
18179 div_u64(env->verification_time, 1000));
18180 verbose(env, "stack depth ");
18181 for (i = 0; i < env->subprog_cnt; i++) {
18182 u32 depth = env->subprog_info[i].stack_depth;
18183
18184 verbose(env, "%d", depth);
18185 if (i + 1 < env->subprog_cnt)
18186 verbose(env, "+");
18187 }
18188 verbose(env, "\n");
18189 }
18190 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
18191 "total_states %d peak_states %d mark_read %d\n",
18192 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
18193 env->max_states_per_insn, env->total_states,
18194 env->peak_states, env->longest_mark_read_walk);
f1bca824
AS
18195}
18196
27ae7997
MKL
18197static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
18198{
18199 const struct btf_type *t, *func_proto;
18200 const struct bpf_struct_ops *st_ops;
18201 const struct btf_member *member;
18202 struct bpf_prog *prog = env->prog;
18203 u32 btf_id, member_idx;
18204 const char *mname;
18205
12aa8a94
THJ
18206 if (!prog->gpl_compatible) {
18207 verbose(env, "struct ops programs must have a GPL compatible license\n");
18208 return -EINVAL;
18209 }
18210
27ae7997
MKL
18211 btf_id = prog->aux->attach_btf_id;
18212 st_ops = bpf_struct_ops_find(btf_id);
18213 if (!st_ops) {
18214 verbose(env, "attach_btf_id %u is not a supported struct\n",
18215 btf_id);
18216 return -ENOTSUPP;
18217 }
18218
18219 t = st_ops->type;
18220 member_idx = prog->expected_attach_type;
18221 if (member_idx >= btf_type_vlen(t)) {
18222 verbose(env, "attach to invalid member idx %u of struct %s\n",
18223 member_idx, st_ops->name);
18224 return -EINVAL;
18225 }
18226
18227 member = &btf_type_member(t)[member_idx];
18228 mname = btf_name_by_offset(btf_vmlinux, member->name_off);
18229 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
18230 NULL);
18231 if (!func_proto) {
18232 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
18233 mname, member_idx, st_ops->name);
18234 return -EINVAL;
18235 }
18236
18237 if (st_ops->check_member) {
51a52a29 18238 int err = st_ops->check_member(t, member, prog);
27ae7997
MKL
18239
18240 if (err) {
18241 verbose(env, "attach to unsupported member %s of struct %s\n",
18242 mname, st_ops->name);
18243 return err;
18244 }
18245 }
18246
18247 prog->aux->attach_func_proto = func_proto;
18248 prog->aux->attach_func_name = mname;
18249 env->ops = st_ops->verifier_ops;
18250
18251 return 0;
18252}
6ba43b76
KS
18253#define SECURITY_PREFIX "security_"
18254
f7b12b6f 18255static int check_attach_modify_return(unsigned long addr, const char *func_name)
6ba43b76 18256{
69191754 18257 if (within_error_injection_list(addr) ||
f7b12b6f 18258 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
6ba43b76 18259 return 0;
6ba43b76 18260
6ba43b76
KS
18261 return -EINVAL;
18262}
27ae7997 18263
1e6c62a8
AS
18264/* list of non-sleepable functions that are otherwise on
18265 * ALLOW_ERROR_INJECTION list
18266 */
18267BTF_SET_START(btf_non_sleepable_error_inject)
18268/* The three functions below can be called from both sleepable and non-sleepable context.
18269 * Assume non-sleepable from the bpf safety point of view.
18270 */
9dd3d069 18271BTF_ID(func, __filemap_add_folio)
1e6c62a8
AS
18272BTF_ID(func, should_fail_alloc_page)
18273BTF_ID(func, should_failslab)
18274BTF_SET_END(btf_non_sleepable_error_inject)
18275
18276static int check_non_sleepable_error_inject(u32 btf_id)
18277{
18278 return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
18279}
18280
f7b12b6f
THJ
18281int bpf_check_attach_target(struct bpf_verifier_log *log,
18282 const struct bpf_prog *prog,
18283 const struct bpf_prog *tgt_prog,
18284 u32 btf_id,
18285 struct bpf_attach_target_info *tgt_info)
38207291 18286{
be8704ff 18287 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
f1b9509c 18288 const char prefix[] = "btf_trace_";
5b92a28a 18289 int ret = 0, subprog = -1, i;
38207291 18290 const struct btf_type *t;
5b92a28a 18291 bool conservative = true;
38207291 18292 const char *tname;
5b92a28a 18293 struct btf *btf;
f7b12b6f 18294 long addr = 0;
38207291 18295
f1b9509c 18296 if (!btf_id) {
efc68158 18297 bpf_log(log, "Tracing programs must provide btf_id\n");
f1b9509c
AS
18298 return -EINVAL;
18299 }
22dc4a0f 18300 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
5b92a28a 18301 if (!btf) {
efc68158 18302 bpf_log(log,
5b92a28a
AS
18303 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
18304 return -EINVAL;
18305 }
18306 t = btf_type_by_id(btf, btf_id);
f1b9509c 18307 if (!t) {
efc68158 18308 bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
f1b9509c
AS
18309 return -EINVAL;
18310 }
5b92a28a 18311 tname = btf_name_by_offset(btf, t->name_off);
f1b9509c 18312 if (!tname) {
efc68158 18313 bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
f1b9509c
AS
18314 return -EINVAL;
18315 }
5b92a28a
AS
18316 if (tgt_prog) {
18317 struct bpf_prog_aux *aux = tgt_prog->aux;
18318
fd7c211d
THJ
18319 if (bpf_prog_is_dev_bound(prog->aux) &&
18320 !bpf_prog_dev_bound_match(prog, tgt_prog)) {
18321 bpf_log(log, "Target program bound device mismatch");
3d76a4d3
SF
18322 return -EINVAL;
18323 }
18324
5b92a28a
AS
18325 for (i = 0; i < aux->func_info_cnt; i++)
18326 if (aux->func_info[i].type_id == btf_id) {
18327 subprog = i;
18328 break;
18329 }
18330 if (subprog == -1) {
efc68158 18331 bpf_log(log, "Subprog %s doesn't exist\n", tname);
5b92a28a
AS
18332 return -EINVAL;
18333 }
18334 conservative = aux->func_info_aux[subprog].unreliable;
be8704ff
AS
18335 if (prog_extension) {
18336 if (conservative) {
efc68158 18337 bpf_log(log,
be8704ff
AS
18338 "Cannot replace static functions\n");
18339 return -EINVAL;
18340 }
18341 if (!prog->jit_requested) {
efc68158 18342 bpf_log(log,
be8704ff
AS
18343 "Extension programs should be JITed\n");
18344 return -EINVAL;
18345 }
be8704ff
AS
18346 }
18347 if (!tgt_prog->jited) {
efc68158 18348 bpf_log(log, "Can attach to only JITed progs\n");
be8704ff
AS
18349 return -EINVAL;
18350 }
18351 if (tgt_prog->type == prog->type) {
18352 /* Cannot fentry/fexit another fentry/fexit program.
18353 * Cannot attach program extension to another extension.
18354 * It's ok to attach fentry/fexit to extension program.
18355 */
efc68158 18356 bpf_log(log, "Cannot recursively attach\n");
be8704ff
AS
18357 return -EINVAL;
18358 }
18359 if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
18360 prog_extension &&
18361 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
18362 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
18363 /* Program extensions can extend all program types
18364 * except fentry/fexit. The reason is the following.
18365 * The fentry/fexit programs are used for performance
18366 * analysis, stats and can be attached to any program
18367 * type except themselves. When an extension program is
18368 * replacing an XDP function it is necessary to allow
18369 * performance analysis of all functions: both the original
18370 * XDP program and its program extension. Hence
18371 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is
18372 * allowed. If extending of fentry/fexit was allowed it
18373 * would be possible to create long call chain
18374 * fentry->extension->fentry->extension beyond
18375 * reasonable stack size. Hence extending fentry is not
18376 * allowed.
18377 */
efc68158 18378 bpf_log(log, "Cannot extend fentry/fexit\n");
be8704ff
AS
18379 return -EINVAL;
18380 }
5b92a28a 18381 } else {
be8704ff 18382 if (prog_extension) {
efc68158 18383 bpf_log(log, "Cannot replace kernel functions\n");
be8704ff
AS
18384 return -EINVAL;
18385 }
5b92a28a 18386 }
f1b9509c
AS
18387
18388 switch (prog->expected_attach_type) {
18389 case BPF_TRACE_RAW_TP:
5b92a28a 18390 if (tgt_prog) {
efc68158 18391 bpf_log(log,
5b92a28a
AS
18392 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
18393 return -EINVAL;
18394 }
38207291 18395 if (!btf_type_is_typedef(t)) {
efc68158 18396 bpf_log(log, "attach_btf_id %u is not a typedef\n",
38207291
MKL
18397 btf_id);
18398 return -EINVAL;
18399 }
f1b9509c 18400 if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
efc68158 18401 bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
38207291
MKL
18402 btf_id, tname);
18403 return -EINVAL;
18404 }
18405 tname += sizeof(prefix) - 1;
5b92a28a 18406 t = btf_type_by_id(btf, t->type);
38207291
MKL
18407 if (!btf_type_is_ptr(t))
18408 /* should never happen in valid vmlinux build */
18409 return -EINVAL;
5b92a28a 18410 t = btf_type_by_id(btf, t->type);
38207291
MKL
18411 if (!btf_type_is_func_proto(t))
18412 /* should never happen in valid vmlinux build */
18413 return -EINVAL;
18414
f7b12b6f 18415 break;
15d83c4d
YS
18416 case BPF_TRACE_ITER:
18417 if (!btf_type_is_func(t)) {
efc68158 18418 bpf_log(log, "attach_btf_id %u is not a function\n",
15d83c4d
YS
18419 btf_id);
18420 return -EINVAL;
18421 }
18422 t = btf_type_by_id(btf, t->type);
18423 if (!btf_type_is_func_proto(t))
18424 return -EINVAL;
f7b12b6f
THJ
18425 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
18426 if (ret)
18427 return ret;
18428 break;
be8704ff
AS
18429 default:
18430 if (!prog_extension)
18431 return -EINVAL;
df561f66 18432 fallthrough;
ae240823 18433 case BPF_MODIFY_RETURN:
9e4e01df 18434 case BPF_LSM_MAC:
69fd337a 18435 case BPF_LSM_CGROUP:
fec56f58
AS
18436 case BPF_TRACE_FENTRY:
18437 case BPF_TRACE_FEXIT:
18438 if (!btf_type_is_func(t)) {
efc68158 18439 bpf_log(log, "attach_btf_id %u is not a function\n",
fec56f58
AS
18440 btf_id);
18441 return -EINVAL;
18442 }
be8704ff 18443 if (prog_extension &&
efc68158 18444 btf_check_type_match(log, prog, btf, t))
be8704ff 18445 return -EINVAL;
5b92a28a 18446 t = btf_type_by_id(btf, t->type);
fec56f58
AS
18447 if (!btf_type_is_func_proto(t))
18448 return -EINVAL;
f7b12b6f 18449
4a1e7c0c
THJ
18450 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
18451 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
18452 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
18453 return -EINVAL;
18454
f7b12b6f 18455 if (tgt_prog && conservative)
5b92a28a 18456 t = NULL;
f7b12b6f
THJ
18457
18458 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
fec56f58 18459 if (ret < 0)
f7b12b6f
THJ
18460 return ret;
18461
5b92a28a 18462 if (tgt_prog) {
e9eeec58
YS
18463 if (subprog == 0)
18464 addr = (long) tgt_prog->bpf_func;
18465 else
18466 addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
5b92a28a
AS
18467 } else {
18468 addr = kallsyms_lookup_name(tname);
18469 if (!addr) {
efc68158 18470 bpf_log(log,
5b92a28a
AS
18471 "The address of function %s cannot be found\n",
18472 tname);
f7b12b6f 18473 return -ENOENT;
5b92a28a 18474 }
fec56f58 18475 }
18644cec 18476
1e6c62a8
AS
18477 if (prog->aux->sleepable) {
18478 ret = -EINVAL;
18479 switch (prog->type) {
18480 case BPF_PROG_TYPE_TRACING:
5b481aca
BT
18481
18482 /* fentry/fexit/fmod_ret progs can be sleepable if they are
1e6c62a8
AS
18483 * attached to ALLOW_ERROR_INJECTION and are not in denylist.
18484 */
18485 if (!check_non_sleepable_error_inject(btf_id) &&
18486 within_error_injection_list(addr))
18487 ret = 0;
5b481aca
BT
18488 /* fentry/fexit/fmod_ret progs can also be sleepable if they are
18489 * in the fmodret id set with the KF_SLEEPABLE flag.
18490 */
18491 else {
18492 u32 *flags = btf_kfunc_is_modify_return(btf, btf_id);
18493
18494 if (flags && (*flags & KF_SLEEPABLE))
18495 ret = 0;
18496 }
1e6c62a8
AS
18497 break;
18498 case BPF_PROG_TYPE_LSM:
18499 /* LSM progs check that they are attached to bpf_lsm_*() funcs.
18500 * Only some of them are sleepable.
18501 */
423f1610 18502 if (bpf_lsm_is_sleepable_hook(btf_id))
1e6c62a8
AS
18503 ret = 0;
18504 break;
18505 default:
18506 break;
18507 }
f7b12b6f
THJ
18508 if (ret) {
18509 bpf_log(log, "%s is not sleepable\n", tname);
18510 return ret;
18511 }
1e6c62a8 18512 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
1af9270e 18513 if (tgt_prog) {
efc68158 18514 bpf_log(log, "can't modify return codes of BPF programs\n");
f7b12b6f
THJ
18515 return -EINVAL;
18516 }
5b481aca
BT
18517 ret = -EINVAL;
18518 if (btf_kfunc_is_modify_return(btf, btf_id) ||
18519 !check_attach_modify_return(addr, tname))
18520 ret = 0;
f7b12b6f
THJ
18521 if (ret) {
18522 bpf_log(log, "%s() is not modifiable\n", tname);
18523 return ret;
1af9270e 18524 }
18644cec 18525 }
f7b12b6f
THJ
18526
18527 break;
18528 }
18529 tgt_info->tgt_addr = addr;
18530 tgt_info->tgt_name = tname;
18531 tgt_info->tgt_type = t;
18532 return 0;
18533}
18534
35e3815f
JO
18535BTF_SET_START(btf_id_deny)
18536BTF_ID_UNUSED
18537#ifdef CONFIG_SMP
18538BTF_ID(func, migrate_disable)
18539BTF_ID(func, migrate_enable)
18540#endif
18541#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
18542BTF_ID(func, rcu_read_unlock_strict)
18543#endif
18544BTF_SET_END(btf_id_deny)
18545
700e6f85
JO
18546static bool can_be_sleepable(struct bpf_prog *prog)
18547{
18548 if (prog->type == BPF_PROG_TYPE_TRACING) {
18549 switch (prog->expected_attach_type) {
18550 case BPF_TRACE_FENTRY:
18551 case BPF_TRACE_FEXIT:
18552 case BPF_MODIFY_RETURN:
18553 case BPF_TRACE_ITER:
18554 return true;
18555 default:
18556 return false;
18557 }
18558 }
18559 return prog->type == BPF_PROG_TYPE_LSM ||
1e12d3ef
DV
18560 prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ ||
18561 prog->type == BPF_PROG_TYPE_STRUCT_OPS;
700e6f85
JO
18562}
18563
f7b12b6f
THJ
18564static int check_attach_btf_id(struct bpf_verifier_env *env)
18565{
18566 struct bpf_prog *prog = env->prog;
3aac1ead 18567 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
f7b12b6f
THJ
18568 struct bpf_attach_target_info tgt_info = {};
18569 u32 btf_id = prog->aux->attach_btf_id;
18570 struct bpf_trampoline *tr;
18571 int ret;
18572 u64 key;
18573
79a7f8bd
AS
18574 if (prog->type == BPF_PROG_TYPE_SYSCALL) {
18575 if (prog->aux->sleepable)
18576 /* attach_btf_id checked to be zero already */
18577 return 0;
18578 verbose(env, "Syscall programs can only be sleepable\n");
18579 return -EINVAL;
18580 }
18581
700e6f85 18582 if (prog->aux->sleepable && !can_be_sleepable(prog)) {
1e12d3ef 18583 verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
f7b12b6f
THJ
18584 return -EINVAL;
18585 }
18586
18587 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
18588 return check_struct_ops_btf_id(env);
18589
18590 if (prog->type != BPF_PROG_TYPE_TRACING &&
18591 prog->type != BPF_PROG_TYPE_LSM &&
18592 prog->type != BPF_PROG_TYPE_EXT)
18593 return 0;
18594
18595 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
18596 if (ret)
fec56f58 18597 return ret;
f7b12b6f
THJ
18598
18599 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
3aac1ead
THJ
18600 /* to make freplace equivalent to their targets, they need to
18601 * inherit env->ops and expected_attach_type for the rest of the
18602 * verification
18603 */
f7b12b6f
THJ
18604 env->ops = bpf_verifier_ops[tgt_prog->type];
18605 prog->expected_attach_type = tgt_prog->expected_attach_type;
18606 }
18607
18608 /* store info about the attachment target that will be used later */
18609 prog->aux->attach_func_proto = tgt_info.tgt_type;
18610 prog->aux->attach_func_name = tgt_info.tgt_name;
18611
4a1e7c0c
THJ
18612 if (tgt_prog) {
18613 prog->aux->saved_dst_prog_type = tgt_prog->type;
18614 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
18615 }
18616
f7b12b6f
THJ
18617 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
18618 prog->aux->attach_btf_trace = true;
18619 return 0;
18620 } else if (prog->expected_attach_type == BPF_TRACE_ITER) {
18621 if (!bpf_iter_prog_supported(prog))
18622 return -EINVAL;
18623 return 0;
18624 }
18625
18626 if (prog->type == BPF_PROG_TYPE_LSM) {
18627 ret = bpf_lsm_verify_prog(&env->log, prog);
18628 if (ret < 0)
18629 return ret;
35e3815f
JO
18630 } else if (prog->type == BPF_PROG_TYPE_TRACING &&
18631 btf_id_set_contains(&btf_id_deny, btf_id)) {
18632 return -EINVAL;
38207291 18633 }
f7b12b6f 18634
22dc4a0f 18635 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
f7b12b6f
THJ
18636 tr = bpf_trampoline_get(key, &tgt_info);
18637 if (!tr)
18638 return -ENOMEM;
18639
3aac1ead 18640 prog->aux->dst_trampoline = tr;
f7b12b6f 18641 return 0;
38207291
MKL
18642}
18643
76654e67
AM
18644struct btf *bpf_get_btf_vmlinux(void)
18645{
18646 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
18647 mutex_lock(&bpf_verifier_lock);
18648 if (!btf_vmlinux)
18649 btf_vmlinux = btf_parse_vmlinux();
18650 mutex_unlock(&bpf_verifier_lock);
18651 }
18652 return btf_vmlinux;
18653}
18654
af2ac3e1 18655int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
51580e79 18656{
06ee7115 18657 u64 start_time = ktime_get_ns();
58e2af8b 18658 struct bpf_verifier_env *env;
b9193c1b 18659 struct bpf_verifier_log *log;
9e4c24e7 18660 int i, len, ret = -EINVAL;
e2ae4ca2 18661 bool is_priv;
51580e79 18662
eba0c929
AB
18663 /* no program is valid */
18664 if (ARRAY_SIZE(bpf_verifier_ops) == 0)
18665 return -EINVAL;
18666
58e2af8b 18667 /* 'struct bpf_verifier_env' can be global, but since it's not small,
cbd35700
AS
18668 * allocate/free it every time bpf_check() is called
18669 */
58e2af8b 18670 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
cbd35700
AS
18671 if (!env)
18672 return -ENOMEM;
61bd5218 18673 log = &env->log;
cbd35700 18674
9e4c24e7 18675 len = (*prog)->len;
fad953ce 18676 env->insn_aux_data =
9e4c24e7 18677 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
3df126f3
JK
18678 ret = -ENOMEM;
18679 if (!env->insn_aux_data)
18680 goto err_free_env;
9e4c24e7
JK
18681 for (i = 0; i < len; i++)
18682 env->insn_aux_data[i].orig_idx = i;
9bac3d6d 18683 env->prog = *prog;
00176a34 18684 env->ops = bpf_verifier_ops[env->prog->type];
387544bf 18685 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
2c78ee89 18686 is_priv = bpf_capable();
0246e64d 18687
76654e67 18688 bpf_get_btf_vmlinux();
8580ac94 18689
cbd35700 18690 /* grab the mutex to protect few globals used by verifier */
45a73c17
AS
18691 if (!is_priv)
18692 mutex_lock(&bpf_verifier_lock);
cbd35700
AS
18693
18694 if (attr->log_level || attr->log_buf || attr->log_size) {
18695 /* user requested verbose verifier output
18696 * and supplied buffer to store the verification trace
18697 */
e7bf8249
JK
18698 log->level = attr->log_level;
18699 log->ubuf = (char __user *) (unsigned long) attr->log_buf;
18700 log->len_total = attr->log_size;
cbd35700 18701
e7bf8249 18702 /* log attributes have to be sane */
866de407
HT
18703 if (!bpf_verifier_log_attr_valid(log)) {
18704 ret = -EINVAL;
3df126f3 18705 goto err_unlock;
866de407 18706 }
cbd35700 18707 }
1ad2f583 18708
0f55f9ed
CL
18709 mark_verifier_state_clean(env);
18710
8580ac94
AS
18711 if (IS_ERR(btf_vmlinux)) {
18712 /* Either gcc or pahole or kernel are broken. */
18713 verbose(env, "in-kernel BTF is malformed\n");
18714 ret = PTR_ERR(btf_vmlinux);
38207291 18715 goto skip_full_check;
8580ac94
AS
18716 }
18717
1ad2f583
DB
18718 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
18719 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
e07b98d9 18720 env->strict_alignment = true;
e9ee9efc
DM
18721 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
18722 env->strict_alignment = false;
cbd35700 18723
2c78ee89 18724 env->allow_ptr_leaks = bpf_allow_ptr_leaks();
01f810ac 18725 env->allow_uninit_stack = bpf_allow_uninit_stack();
2c78ee89
AS
18726 env->bypass_spec_v1 = bpf_bypass_spec_v1();
18727 env->bypass_spec_v4 = bpf_bypass_spec_v4();
18728 env->bpf_capable = bpf_capable();
e2ae4ca2 18729
10d274e8
AS
18730 if (is_priv)
18731 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
18732
dc2a4ebc 18733 env->explored_states = kvcalloc(state_htab_size(env),
58e2af8b 18734 sizeof(struct bpf_verifier_state_list *),
f1bca824
AS
18735 GFP_USER);
18736 ret = -ENOMEM;
18737 if (!env->explored_states)
18738 goto skip_full_check;
18739
e6ac2450
MKL
18740 ret = add_subprog_and_kfunc(env);
18741 if (ret < 0)
18742 goto skip_full_check;
18743
d9762e84 18744 ret = check_subprogs(env);
475fb78f
AS
18745 if (ret < 0)
18746 goto skip_full_check;
18747
c454a46b 18748 ret = check_btf_info(env, attr, uattr);
838e9690
YS
18749 if (ret < 0)
18750 goto skip_full_check;
18751
be8704ff
AS
18752 ret = check_attach_btf_id(env);
18753 if (ret)
18754 goto skip_full_check;
18755
4976b718
HL
18756 ret = resolve_pseudo_ldimm64(env);
18757 if (ret < 0)
18758 goto skip_full_check;
18759
9d03ebc7 18760 if (bpf_prog_is_offloaded(env->prog->aux)) {
ceb11679
YZ
18761 ret = bpf_prog_offload_verifier_prep(env->prog);
18762 if (ret)
18763 goto skip_full_check;
18764 }
18765
d9762e84
MKL
18766 ret = check_cfg(env);
18767 if (ret < 0)
18768 goto skip_full_check;
18769
51c39bb1
AS
18770 ret = do_check_subprogs(env);
18771 ret = ret ?: do_check_main(env);
cbd35700 18772
9d03ebc7 18773 if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux))
c941ce9c
QM
18774 ret = bpf_prog_offload_finalize(env);
18775
0246e64d 18776skip_full_check:
51c39bb1 18777 kvfree(env->explored_states);
0246e64d 18778
c131187d 18779 if (ret == 0)
9b38c405 18780 ret = check_max_stack_depth(env);
c131187d 18781
9b38c405 18782 /* instruction rewrites happen after this point */
1ade2371
EZ
18783 if (ret == 0)
18784 ret = optimize_bpf_loop(env);
18785
e2ae4ca2
JK
18786 if (is_priv) {
18787 if (ret == 0)
18788 opt_hard_wire_dead_code_branches(env);
52875a04
JK
18789 if (ret == 0)
18790 ret = opt_remove_dead_code(env);
a1b14abc
JK
18791 if (ret == 0)
18792 ret = opt_remove_nops(env);
52875a04
JK
18793 } else {
18794 if (ret == 0)
18795 sanitize_dead_code(env);
e2ae4ca2
JK
18796 }
18797
9bac3d6d
AS
18798 if (ret == 0)
18799 /* program is valid, convert *(u32*)(ctx + off) accesses */
18800 ret = convert_ctx_accesses(env);
18801
e245c5c6 18802 if (ret == 0)
e6ac5933 18803 ret = do_misc_fixups(env);
e245c5c6 18804
a4b1d3c1
JW
18805 /* do 32-bit optimization after insn patching has done so those patched
18806 * insns could be handled correctly.
18807 */
9d03ebc7 18808 if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) {
d6c2308c
JW
18809 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
18810 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
18811 : false;
a4b1d3c1
JW
18812 }
18813
1ea47e01
AS
18814 if (ret == 0)
18815 ret = fixup_call_args(env);
18816
06ee7115
AS
18817 env->verification_time = ktime_get_ns() - start_time;
18818 print_verification_stats(env);
aba64c7d 18819 env->prog->aux->verified_insns = env->insn_processed;
06ee7115 18820
a2a7d570 18821 if (log->level && bpf_verifier_log_full(log))
cbd35700 18822 ret = -ENOSPC;
a2a7d570 18823 if (log->level && !log->ubuf) {
cbd35700 18824 ret = -EFAULT;
a2a7d570 18825 goto err_release_maps;
cbd35700
AS
18826 }
18827
541c3bad
AN
18828 if (ret)
18829 goto err_release_maps;
18830
18831 if (env->used_map_cnt) {
0246e64d 18832 /* if program passed verifier, update used_maps in bpf_prog_info */
9bac3d6d
AS
18833 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
18834 sizeof(env->used_maps[0]),
18835 GFP_KERNEL);
0246e64d 18836
9bac3d6d 18837 if (!env->prog->aux->used_maps) {
0246e64d 18838 ret = -ENOMEM;
a2a7d570 18839 goto err_release_maps;
0246e64d
AS
18840 }
18841
9bac3d6d 18842 memcpy(env->prog->aux->used_maps, env->used_maps,
0246e64d 18843 sizeof(env->used_maps[0]) * env->used_map_cnt);
9bac3d6d 18844 env->prog->aux->used_map_cnt = env->used_map_cnt;
541c3bad
AN
18845 }
18846 if (env->used_btf_cnt) {
18847 /* if program passed verifier, update used_btfs in bpf_prog_aux */
18848 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
18849 sizeof(env->used_btfs[0]),
18850 GFP_KERNEL);
18851 if (!env->prog->aux->used_btfs) {
18852 ret = -ENOMEM;
18853 goto err_release_maps;
18854 }
0246e64d 18855
541c3bad
AN
18856 memcpy(env->prog->aux->used_btfs, env->used_btfs,
18857 sizeof(env->used_btfs[0]) * env->used_btf_cnt);
18858 env->prog->aux->used_btf_cnt = env->used_btf_cnt;
18859 }
18860 if (env->used_map_cnt || env->used_btf_cnt) {
0246e64d
AS
18861 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
18862 * bpf_ld_imm64 instructions
18863 */
18864 convert_pseudo_ld_imm64(env);
18865 }
cbd35700 18866
541c3bad 18867 adjust_btf_func(env);
ba64e7d8 18868
a2a7d570 18869err_release_maps:
9bac3d6d 18870 if (!env->prog->aux->used_maps)
0246e64d 18871 /* if we didn't copy map pointers into bpf_prog_info, release
ab7f5bf0 18872 * them now. Otherwise free_used_maps() will release them.
0246e64d
AS
18873 */
18874 release_maps(env);
541c3bad
AN
18875 if (!env->prog->aux->used_btfs)
18876 release_btfs(env);
03f87c0b
THJ
18877
18878 /* extension progs temporarily inherit the attach_type of their targets
18879 * for verification purposes, so set it back to zero before returning
18880 */
18881 if (env->prog->type == BPF_PROG_TYPE_EXT)
18882 env->prog->expected_attach_type = 0;
18883
9bac3d6d 18884 *prog = env->prog;
3df126f3 18885err_unlock:
45a73c17
AS
18886 if (!is_priv)
18887 mutex_unlock(&bpf_verifier_lock);
3df126f3
JK
18888 vfree(env->insn_aux_data);
18889err_free_env:
18890 kfree(env);
51580e79
AS
18891 return ret;
18892}