// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
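
/* Illustration (added for this cleaned-up copy, not part of the original
 * source): a minimal BPF-side C sketch of the acquire/release discipline
 * described above. The helpers bpf_sk_lookup_tcp() and bpf_sk_release() are
 * real; the surrounding program skeleton and tuple setup are hypothetical.
 *
 *	struct bpf_sock_tuple tuple = {};   // fill in addresses/ports
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), -1, 0);
 *	if (!sk)                  // NULL check: PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET
 *		return TC_ACT_OK;
 *	bpf_sk_release(sk);       // drops the reference id taken at lookup
 *	return TC_ACT_OK;
 *
 * Dropping the bpf_sk_release() call would leave an unreleased reference at
 * bpf_exit, and the verifier would reject the program.
 */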

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}
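
/* Illustration (added for this cleaned-up copy): map_key_state packs a
 * constant map key and two flag bits into one u64. Storing the immediate
 * key 5 records (5 | BPF_MAP_KEY_SEEN); bpf_map_key_immediate() masks off
 * bits 62 and 63 and yields 5 again. Callers (not shown in this excerpt)
 * are expected to store BPF_MAP_KEY_POISON once the key can no longer be
 * treated as a single constant, and bpf_map_key_store() keeps the poison
 * bit sticky across later stores.
 */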

static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}

static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	u8 release_regno;
	int regno;
	int access_size;
	int mem_size;
	u64 msize_max_value;
	int ref_obj_id;
	int map_uid;
	int func_id;
	struct btf *btf;
	u32 btf_id;
	struct btf *ret_btf;
	u32 ret_btf_id;
	u32 subprogno;
	struct bpf_map_value_off_desc *kptr_off_desc;
	u8 uninit_dynptr_regno;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	if (log->level == BPF_LOG_KERNEL) {
		bool newline = n > 0 && log->kbuf[n - 1] == '\n';

		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
		return;
	}

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';
	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
{
	char zero = 0;

	if (!bpf_verifier_log_needed(log))
		return;

	log->len_used = new_pos;
	if (put_user(zero, log->ubuf + new_pos))
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{
	struct bpf_verifier_env *env = private_data;
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}

__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}

__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
					 u32 insn_off,
					 const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == env->prev_linfo)
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	verbose(env, "%s\n",
		ltrim(btf_name_by_offset(env->prog->aux->btf,
					 linfo->line_off)));

	env->prev_linfo = linfo;
}

static void verbose_invalid_scalar(struct bpf_verifier_env *env,
				   struct bpf_reg_state *reg,
				   struct tnum *range, const char *ctx,
				   const char *reg_name)
{
	char tn_buf[48];

	verbose(env, "At %s the register %s ", ctx, reg_name);
	if (!tnum_is_unknown(reg->var_off)) {
		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
		verbose(env, "has value %s", tn_buf);
	} else {
		verbose(env, "has unknown scalar value");
	}
	tnum_strn(tn_buf, sizeof(tn_buf), *range);
	verbose(env, " should have been in %s\n", tn_buf);
}

static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_PACKET ||
	       type == PTR_TO_PACKET_META;
}

static bool type_is_sk_pointer(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_SOCK_COMMON ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_XDP_SOCK;
}

static bool reg_type_not_null(enum bpf_reg_type type)
{
	return type == PTR_TO_SOCKET ||
		type == PTR_TO_TCP_SOCK ||
		type == PTR_TO_MAP_VALUE ||
		type == PTR_TO_MAP_KEY ||
		type == PTR_TO_SOCK_COMMON;
}

static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
{
	return reg->type == PTR_TO_MAP_VALUE &&
		map_value_has_spin_lock(reg->map_ptr);
}

static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
{
	return base_type(type) == PTR_TO_SOCKET ||
		base_type(type) == PTR_TO_TCP_SOCK ||
		base_type(type) == PTR_TO_MEM ||
		base_type(type) == PTR_TO_BTF_ID;
}

static bool type_is_rdonly_mem(u32 type)
{
	return type & MEM_RDONLY;
}

static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
{
	return type == ARG_PTR_TO_SOCK_COMMON;
}

static bool type_may_be_null(u32 type)
{
	return type & PTR_MAYBE_NULL;
}

static bool may_be_acquire_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_sk_lookup_tcp ||
		func_id == BPF_FUNC_sk_lookup_udp ||
		func_id == BPF_FUNC_skc_lookup_tcp ||
		func_id == BPF_FUNC_map_lookup_elem ||
		func_id == BPF_FUNC_ringbuf_reserve;
}

static bool is_acquire_function(enum bpf_func_id func_id,
				const struct bpf_map *map)
{
	enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;

	if (func_id == BPF_FUNC_sk_lookup_tcp ||
	    func_id == BPF_FUNC_sk_lookup_udp ||
	    func_id == BPF_FUNC_skc_lookup_tcp ||
	    func_id == BPF_FUNC_ringbuf_reserve ||
	    func_id == BPF_FUNC_kptr_xchg)
		return true;

	if (func_id == BPF_FUNC_map_lookup_elem &&
	    (map_type == BPF_MAP_TYPE_SOCKMAP ||
	     map_type == BPF_MAP_TYPE_SOCKHASH))
		return true;

	return false;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
	return func_id == BPF_FUNC_tcp_sock ||
		func_id == BPF_FUNC_sk_fullsock ||
		func_id == BPF_FUNC_skc_to_tcp_sock ||
		func_id == BPF_FUNC_skc_to_tcp6_sock ||
		func_id == BPF_FUNC_skc_to_udp6_sock ||
		func_id == BPF_FUNC_skc_to_mptcp_sock ||
		func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
		func_id == BPF_FUNC_skc_to_tcp_request_sock;
}

static bool is_cmpxchg_insn(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_STX &&
	       BPF_MODE(insn->code) == BPF_ATOMIC &&
	       insn->imm == BPF_CMPXCHG;
}

/* string representation of 'enum bpf_reg_type'
 *
 * Note that reg_type_str() can not appear more than once in a single verbose()
 * statement.
 */
static const char *reg_type_str(struct bpf_verifier_env *env,
				enum bpf_reg_type type)
{
	char postfix[16] = {0}, prefix[32] = {0};
	static const char * const str[] = {
		[NOT_INIT]		= "?",
		[SCALAR_VALUE]		= "scalar",
		[PTR_TO_CTX]		= "ctx",
		[CONST_PTR_TO_MAP]	= "map_ptr",
		[PTR_TO_MAP_VALUE]	= "map_value",
		[PTR_TO_STACK]		= "fp",
		[PTR_TO_PACKET]		= "pkt",
		[PTR_TO_PACKET_META]	= "pkt_meta",
		[PTR_TO_PACKET_END]	= "pkt_end",
		[PTR_TO_FLOW_KEYS]	= "flow_keys",
		[PTR_TO_SOCKET]		= "sock",
		[PTR_TO_SOCK_COMMON]	= "sock_common",
		[PTR_TO_TCP_SOCK]	= "tcp_sock",
		[PTR_TO_TP_BUFFER]	= "tp_buffer",
		[PTR_TO_XDP_SOCK]	= "xdp_sock",
		[PTR_TO_BTF_ID]		= "ptr_",
		[PTR_TO_MEM]		= "mem",
		[PTR_TO_BUF]		= "buf",
		[PTR_TO_FUNC]		= "func",
		[PTR_TO_MAP_KEY]	= "map_key",
	};

	if (type & PTR_MAYBE_NULL) {
		if (base_type(type) == PTR_TO_BTF_ID)
			strncpy(postfix, "or_null_", 16);
		else
			strncpy(postfix, "_or_null", 16);
	}

	if (type & MEM_RDONLY)
		strncpy(prefix, "rdonly_", 32);
	if (type & MEM_ALLOC)
		strncpy(prefix, "alloc_", 32);
	if (type & MEM_USER)
		strncpy(prefix, "user_", 32);
	if (type & MEM_PERCPU)
		strncpy(prefix, "percpu_", 32);
	if (type & PTR_UNTRUSTED)
		strncpy(prefix, "untrusted_", 32);

	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
		 prefix, str[base_type(type)], postfix);
	return env->type_str_buf;
}
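
/* Illustration (added for this cleaned-up copy): the prefix/postfix
 * composition above renders flag-carrying types as a single token, e.g.
 *	PTR_TO_MEM | MEM_RDONLY | PTR_MAYBE_NULL  ->  "rdonly_mem_or_null"
 *	PTR_TO_BTF_ID | PTR_MAYBE_NULL            ->  "ptr_or_null_" (callers
 *	append the BTF type name)
 * Because the result lives in the single env->type_str_buf, two calls in one
 * verbose() statement would clobber each other - hence the note above.
 */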

static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
	[STACK_DYNPTR]	= 'd',
};

static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}
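
/* Illustration (added for this cleaned-up copy): in verifier log output
 * these liveness marks appear as suffixes on register and stack-slot names,
 * e.g. roughly "R0_w=..." for a just-written register or "fp-8_r=..." for a
 * stack slot that has been read. A register with no liveness bits set gets
 * no suffix at all, since the leading '_' is only printed when at least one
 * bit is set.
 */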

static int get_spi(s32 off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots)
{
	int allocated_slots = state->allocated_stack / BPF_REG_SIZE;

	/* We need to check that slots between [spi - nr_slots + 1, spi] are
	 * within [0, allocated_stack).
	 *
	 * Please note that the spi grows downwards. For example, a dynptr
	 * takes the size of two stack slots; the first slot will be at
	 * spi and the second slot will be at spi - 1.
	 */
	return spi - nr_slots + 1 >= 0 && spi < allocated_slots;
}
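
/* Illustration (added for this cleaned-up copy): with BPF_REG_SIZE == 8, a
 * stack offset of -8 maps to spi 0 (bytes fp-8..fp-1) and -16 maps to
 * spi 1. A two-slot object such as a dynptr whose register has off == -16
 * therefore occupies spi 1 and spi 0, and is_spi_bounds_valid(state, 1, 2)
 * checks that both slots fall inside the allocated stack.
 */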

static struct bpf_func_state *func(struct bpf_verifier_env *env,
				   const struct bpf_reg_state *reg)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[reg->frameno];
}

static const char *kernel_type_name(const struct btf* btf, u32 id)
{
	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}

static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
{
	env->scratched_regs |= 1U << regno;
}

static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
	env->scratched_stack_slots |= 1ULL << spi;
}

static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
{
	return (env->scratched_regs >> regno) & 1;
}

static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
{
	return (env->scratched_stack_slots >> regno) & 1;
}

static bool verifier_state_scratched(const struct bpf_verifier_env *env)
{
	return env->scratched_regs || env->scratched_stack_slots;
}

static void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
	env->scratched_regs = 0U;
	env->scratched_stack_slots = 0ULL;
}

/* Used for printing the entire verifier state. */
static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
	env->scratched_regs = ~0U;
	env->scratched_stack_slots = ~0ULL;
}

static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
{
	switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
	case DYNPTR_TYPE_LOCAL:
		return BPF_DYNPTR_TYPE_LOCAL;
	case DYNPTR_TYPE_RINGBUF:
		return BPF_DYNPTR_TYPE_RINGBUF;
	default:
		return BPF_DYNPTR_TYPE_INVALID;
	}
}

static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
{
	return type == BPF_DYNPTR_TYPE_RINGBUF;
}

static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				   enum bpf_arg_type arg_type, int insn_idx)
{
	struct bpf_func_state *state = func(env, reg);
	enum bpf_dynptr_type type;
	int spi, i, id;

	spi = get_spi(reg->off);

	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
		return -EINVAL;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_DYNPTR;
		state->stack[spi - 1].slot_type[i] = STACK_DYNPTR;
	}

	type = arg_to_dynptr_type(arg_type);
	if (type == BPF_DYNPTR_TYPE_INVALID)
		return -EINVAL;

	state->stack[spi].spilled_ptr.dynptr.first_slot = true;
	state->stack[spi].spilled_ptr.dynptr.type = type;
	state->stack[spi - 1].spilled_ptr.dynptr.type = type;

	if (dynptr_type_refcounted(type)) {
		/* The id is used to track proper releasing */
		id = acquire_reference_state(env, insn_idx);
		if (id < 0)
			return id;

		state->stack[spi].spilled_ptr.id = id;
		state->stack[spi - 1].spilled_ptr.id = id;
	}

	return 0;
}

static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct bpf_func_state *state = func(env, reg);
	int spi, i;

	spi = get_spi(reg->off);

	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
		return -EINVAL;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		state->stack[spi].slot_type[i] = STACK_INVALID;
		state->stack[spi - 1].slot_type[i] = STACK_INVALID;
	}

	/* Invalidate any slices associated with this dynptr */
	if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
		release_reference(env, state->stack[spi].spilled_ptr.id);
		state->stack[spi].spilled_ptr.id = 0;
		state->stack[spi - 1].spilled_ptr.id = 0;
	}

	state->stack[spi].spilled_ptr.dynptr.first_slot = false;
	state->stack[spi].spilled_ptr.dynptr.type = 0;
	state->stack[spi - 1].spilled_ptr.dynptr.type = 0;

	return 0;
}

static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
	struct bpf_func_state *state = func(env, reg);
	int spi = get_spi(reg->off);
	int i;

	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS))
		return true;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		if (state->stack[spi].slot_type[i] == STACK_DYNPTR ||
		    state->stack[spi - 1].slot_type[i] == STACK_DYNPTR)
			return false;
	}

	return true;
}

static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
				     enum bpf_arg_type arg_type)
{
	struct bpf_func_state *state = func(env, reg);
	int spi = get_spi(reg->off);
	int i;

	if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
	    !state->stack[spi].spilled_ptr.dynptr.first_slot)
		return false;

	for (i = 0; i < BPF_REG_SIZE; i++) {
		if (state->stack[spi].slot_type[i] != STACK_DYNPTR ||
		    state->stack[spi - 1].slot_type[i] != STACK_DYNPTR)
			return false;
	}

	/* ARG_PTR_TO_DYNPTR takes any type of dynptr */
	if (arg_type == ARG_PTR_TO_DYNPTR)
		return true;

	return state->stack[spi].spilled_ptr.dynptr.type == arg_to_dynptr_type(arg_type);
}

/* The reg state of a pointer or a bounded scalar was saved when
 * it was spilled to the stack.
 */
static bool is_spilled_reg(const struct bpf_stack_state *stack)
{
	return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}

static void scrub_spilled_slot(u8 *stype)
{
	if (*stype != STACK_INVALID)
		*stype = STACK_MISC;
}

static void print_verifier_state(struct bpf_verifier_env *env,
				 const struct bpf_func_state *state,
				 bool print_all)
{
	const struct bpf_reg_state *reg;
	enum bpf_reg_type t;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		t = reg->type;
		if (t == NOT_INIT)
			continue;
		if (!print_all && !reg_scratched(env, i))
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=");
		if (t == SCALAR_VALUE && reg->precise)
			verbose(env, "P");
		if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
		    tnum_is_const(reg->var_off)) {
			/* reg->off should be 0 for SCALAR_VALUE */
			verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
			verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			const char *sep = "";

			verbose(env, "%s", reg_type_str(env, t));
			if (base_type(t) == PTR_TO_BTF_ID)
				verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
			verbose(env, "(");
/*
 * _a stands for append, was shortened to avoid multiline statements below.
 * This macro is used to output a comma separated list of attributes.
 */
#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })

			if (reg->id)
				verbose_a("id=%d", reg->id);
			if (reg_type_may_be_refcounted_or_null(t) && reg->ref_obj_id)
				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
			if (t != SCALAR_VALUE)
				verbose_a("off=%d", reg->off);
			if (type_is_pkt_pointer(t))
				verbose_a("r=%d", reg->range);
			else if (base_type(t) == CONST_PTR_TO_MAP ||
				 base_type(t) == PTR_TO_MAP_KEY ||
				 base_type(t) == PTR_TO_MAP_VALUE)
				verbose_a("ks=%d,vs=%d",
					  reg->map_ptr->key_size,
					  reg->map_ptr->value_size);
			if (tnum_is_const(reg->var_off)) {
				/* Typically an immediate SCALAR_VALUE, but
				 * could be a pointer whose offset is too big
				 * for reg->off
				 */
				verbose_a("imm=%llx", reg->var_off.value);
			} else {
				if (reg->smin_value != reg->umin_value &&
				    reg->smin_value != S64_MIN)
					verbose_a("smin=%lld", (long long)reg->smin_value);
				if (reg->smax_value != reg->umax_value &&
				    reg->smax_value != S64_MAX)
					verbose_a("smax=%lld", (long long)reg->smax_value);
				if (reg->umin_value != 0)
					verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
				if (reg->umax_value != U64_MAX)
					verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
				if (!tnum_is_unknown(reg->var_off)) {
					char tn_buf[48];

					tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
					verbose_a("var_off=%s", tn_buf);
				}
				if (reg->s32_min_value != reg->smin_value &&
				    reg->s32_min_value != S32_MIN)
					verbose_a("s32_min=%d", (int)(reg->s32_min_value));
				if (reg->s32_max_value != reg->smax_value &&
				    reg->s32_max_value != S32_MAX)
					verbose_a("s32_max=%d", (int)(reg->s32_max_value));
				if (reg->u32_min_value != reg->umin_value &&
				    reg->u32_min_value != U32_MIN)
					verbose_a("u32_min=%d", (int)(reg->u32_min_value));
				if (reg->u32_max_value != reg->umax_value &&
				    reg->u32_max_value != U32_MAX)
					verbose_a("u32_max=%d", (int)(reg->u32_max_value));
			}
#undef verbose_a

			verbose(env, ")");
		}
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		bool valid = false;
		int j;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			if (state->stack[i].slot_type[j] != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[
					state->stack[i].slot_type[j]];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;
		if (!print_all && !stack_slot_scratched(env, i))
			continue;
		verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
		print_liveness(env, state->stack[i].spilled_ptr.live);
		if (is_spilled_reg(&state->stack[i])) {
			reg = &state->stack[i].spilled_ptr;
			t = reg->type;
			verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
			if (t == SCALAR_VALUE && reg->precise)
				verbose(env, "P");
			if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
				verbose(env, "%lld", reg->var_off.value + reg->off);
		} else {
			verbose(env, "=%s", types_buf);
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	if (state->in_callback_fn)
		verbose(env, " cb");
	if (state->in_async_callback_fn)
		verbose(env, " async_cb");
	verbose(env, "\n");
	mark_verifier_state_clean(env);
}

static inline u32 vlog_alignment(u32 pos)
{
	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
}

static void print_insn_state(struct bpf_verifier_env *env,
			     const struct bpf_func_state *state)
{
	if (env->prev_log_len && env->prev_log_len == env->log.len_used) {
		/* remove new line character */
		bpf_vlog_reset(&env->log, env->prev_log_len - 1);
		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_len), ' ');
	} else {
		verbose(env, "%d:", env->insn_idx);
	}
	print_verifier_state(env, state, false);
}

/* copy array src of length n * size bytes to dst. dst is reallocated if it's too
 * small to hold src. This is different from krealloc since we don't want to preserve
 * the contents of dst.
 *
 * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
 * not be allocated.
 */
static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (ZERO_OR_NULL_PTR(src))
		goto out;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	if (ksize(dst) < bytes) {
		kfree(dst);
		dst = kmalloc_track_caller(bytes, flags);
		if (!dst)
			return NULL;
	}

	memcpy(dst, src, bytes);
out:
	return dst ? dst : ZERO_SIZE_PTR;
}

/* resize an array from old_n items to new_n items. the array is reallocated if it's too
 * small to hold new_n items. new items are zeroed out if the array grows.
 *
 * Contrary to krealloc_array, does not free arr if new_n is zero.
 */
static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
{
	if (!new_n || old_n == new_n)
		goto out;

	arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
	if (!arr)
		return NULL;

	if (new_n > old_n)
		memset(arr + old_n * size, 0, (new_n - old_n) * size);

out:
	return arr ? arr : ZERO_SIZE_PTR;
}
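
/* Illustration (added for this cleaned-up copy): these two helpers back the
 * state-copy paths below. copy_reference_state() uses copy_array() because
 * the old contents of dst->refs are irrelevant and may be discarded, while
 * resize_reference_state() uses realloc_array() because existing entries
 * must survive and any newly grown tail must read back as zeroes.
 */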

static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
			       sizeof(struct bpf_reference_state), GFP_KERNEL);
	if (!dst->refs)
		return -ENOMEM;

	dst->acquired_refs = src->acquired_refs;
	return 0;
}

static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
	size_t n = src->allocated_stack / BPF_REG_SIZE;

	dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
				GFP_KERNEL);
	if (!dst->stack)
		return -ENOMEM;

	dst->allocated_stack = src->allocated_stack;
	return 0;
}

static int resize_reference_state(struct bpf_func_state *state, size_t n)
{
	state->refs = realloc_array(state->refs, state->acquired_refs, n,
				    sizeof(struct bpf_reference_state));
	if (!state->refs)
		return -ENOMEM;

	state->acquired_refs = n;
	return 0;
}

static int grow_stack_state(struct bpf_func_state *state, int size)
{
	size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;

	if (old_n >= n)
		return 0;

	state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
	if (!state->stack)
		return -ENOMEM;

	state->allocated_stack = size;
	return 0;
}

/* Acquire a pointer id from the env and update the state->refs to include
 * this new pointer reference.
 * On success, returns a valid pointer id to associate with the register
 * On failure, returns a negative errno.
 */
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
{
	struct bpf_func_state *state = cur_func(env);
	int new_ofs = state->acquired_refs;
	int id, err;

	err = resize_reference_state(state, state->acquired_refs + 1);
	if (err)
		return err;
	id = ++env->id_gen;
	state->refs[new_ofs].id = id;
	state->refs[new_ofs].insn_idx = insn_idx;

	return id;
}

/* release function corresponding to acquire_reference_state(). Idempotent. */
static int release_reference_state(struct bpf_func_state *state, int ptr_id)
{
	int i, last_idx;

	last_idx = state->acquired_refs - 1;
	for (i = 0; i < state->acquired_refs; i++) {
		if (state->refs[i].id == ptr_id) {
			if (last_idx && i != last_idx)
				memcpy(&state->refs[i], &state->refs[last_idx],
				       sizeof(*state->refs));
			memset(&state->refs[last_idx], 0, sizeof(*state->refs));
			state->acquired_refs--;
			return 0;
		}
	}
	return -EINVAL;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->refs);
	kfree(state->stack);
	kfree(state);
}

static void clear_jmp_history(struct bpf_verifier_state *state)
{
	kfree(state->jmp_history);
	state->jmp_history = NULL;
	state->jmp_history_cnt = 0;
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	clear_jmp_history(state);
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
	err = copy_reference_state(dst, src);
	if (err)
		return err;
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	int i, err;

	dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
					    src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
					    GFP_USER);
	if (!dst_state->jmp_history)
		return -ENOMEM;
	dst_state->jmp_history_cnt = src->jmp_history_cnt;

	/* if dst has more stack frames than src frame, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->speculative = src->speculative;
	dst_state->curframe = src->curframe;
	dst_state->active_spin_lock = src->active_spin_lock;
	dst_state->branches = src->branches;
	dst_state->parent = src->parent;
	dst_state->first_insn_idx = src->first_insn_idx;
	dst_state->last_insn_idx = src->last_insn_idx;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
{
	while (st) {
		u32 br = --st->branches;

		/* WARN_ON(br > 1) technically makes sense here,
		 * but see comment in push_stack(), hence:
		 */
		WARN_ONCE((int)br < 0,
			  "BUG update_branch_counts:branches_to_explore=%d\n",
			  br);
		if (br)
			break;
		st = st->parent;
	}
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx, bool pop_log)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (pop_log)
		bpf_vlog_reset(&env->log, head->log_pos);
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx,
					     bool speculative)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem;
	int err;

	elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
	if (!elem)
		goto err;

	elem->insn_idx = insn_idx;
	elem->prev_insn_idx = prev_insn_idx;
	elem->next = env->head;
	elem->log_pos = env->log.len_used;
	env->head = elem;
	env->stack_size++;
	err = copy_verifier_state(&elem->st, cur);
	if (err)
		goto err;
	elem->st.speculative |= speculative;
	if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
		verbose(env, "The sequence of %d jumps is too complex.\n",
			env->stack_size);
		goto err;
	}
	if (elem->st.parent) {
		++elem->st.parent->branches;
		/* WARN_ON(branches > 2) technically makes sense here,
		 * but
		 * 1. speculative states will bump 'branches' for non-branch
		 *    instructions
		 * 2. is_state_visited() heuristics may decide not to create
		 *    a new state for a sequence of branches and all such current
		 *    and cloned states will be pointing to a single parent state
		 *    which might have large 'branches' count.
		 */
	}
	return &elem->st;
err:
	free_verifier_state(env->cur_state, true);
	env->cur_state = NULL;
	/* pop all elements and return */
	while (!pop_stack(env, NULL, NULL, false));
	return NULL;
}

#define CALLER_SAVED_REGS 6
static const int caller_saved[CALLER_SAVED_REGS] = {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
				struct bpf_reg_state *reg);

/* This helper doesn't clear reg->id */
static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const(imm);
	reg->smin_value = (s64)imm;
	reg->smax_value = (s64)imm;
	reg->umin_value = imm;
	reg->umax_value = imm;

	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}

/* Mark the unknown part of a register (variable offset or scalar value) as
 * known to have the value @imm.
 */
static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
	/* Clear id, off, and union(map_ptr, range) */
	memset(((u8 *)reg) + sizeof(reg->type), 0,
	       offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
	___mark_reg_known(reg, imm);
}

static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{
	reg->var_off = tnum_const_subreg(reg->var_off, imm);
	reg->s32_min_value = (s32)imm;
	reg->s32_max_value = (s32)imm;
	reg->u32_min_value = (u32)imm;
	reg->u32_max_value = (u32)imm;
}

/* Mark the 'variable offset' part of a register as zero. This should be
 * used only on registers holding a pointer type.
 */
static void __mark_reg_known_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
}

static void __mark_reg_const_zero(struct bpf_reg_state *reg)
{
	__mark_reg_known(reg, 0);
	reg->type = SCALAR_VALUE;
}

static void mark_reg_known_zero(struct bpf_verifier_env *env,
				struct bpf_reg_state *regs, u32 regno)
{
	if (WARN_ON(regno >= MAX_BPF_REG)) {
		verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
		/* Something bad happened, let's kill all regs */
		for (regno = 0; regno < MAX_BPF_REG; regno++)
			__mark_reg_not_init(env, regs + regno);
		return;
	}
	__mark_reg_known_zero(regs + regno);
}

static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
{
	if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
		const struct bpf_map *map = reg->map_ptr;

		if (map->inner_map_meta) {
			reg->type = CONST_PTR_TO_MAP;
			reg->map_ptr = map->inner_map_meta;
			/* transfer reg's id which is unique for every map_lookup_elem
			 * as UID of the inner map.
			 */
			if (map_value_has_timer(map->inner_map_meta))
				reg->map_uid = reg->id;
		} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
			reg->type = PTR_TO_XDP_SOCK;
		} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
			   map->map_type == BPF_MAP_TYPE_SOCKHASH) {
			reg->type = PTR_TO_SOCKET;
		} else {
			reg->type = PTR_TO_MAP_VALUE;
		}
		return;
	}

	reg->type &= ~PTR_MAYBE_NULL;
}

static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
{
	return type_is_pkt_pointer(reg->type);
}

static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
{
	return reg_is_pkt_pointer(reg) ||
	       reg->type == PTR_TO_PACKET_END;
}

/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
				    enum bpf_reg_type which)
{
	/* The register can already have a range from prior markings.
	 * This is fine as long as it hasn't been advanced from its
	 * origin.
	 */
	return reg->type == which &&
	       reg->id == 0 &&
	       reg->off == 0 &&
	       tnum_equals_const(reg->var_off, 0);
}

/* Reset the min/max bounds of a register */
static void __mark_reg_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;

	reg->s32_min_value = S32_MIN;
	reg->s32_max_value = S32_MAX;
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}

static void __mark_reg64_unbounded(struct bpf_reg_state *reg)
{
	reg->smin_value = S64_MIN;
	reg->smax_value = S64_MAX;
	reg->umin_value = 0;
	reg->umax_value = U64_MAX;
}

static void __mark_reg32_unbounded(struct bpf_reg_state *reg)
{
	reg->s32_min_value = S32_MIN;
	reg->s32_max_value = S32_MAX;
	reg->u32_min_value = 0;
	reg->u32_max_value = U32_MAX;
}

static void __update_reg32_bounds(struct bpf_reg_state *reg)
{
	struct tnum var32_off = tnum_subreg(reg->var_off);

	/* min signed is max(sign bit) | min(other bits) */
	reg->s32_min_value = max_t(s32, reg->s32_min_value,
				   var32_off.value | (var32_off.mask & S32_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->s32_max_value = min_t(s32, reg->s32_max_value,
				   var32_off.value | (var32_off.mask & S32_MAX));
	reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value);
	reg->u32_max_value = min(reg->u32_max_value,
				 (u32)(var32_off.value | var32_off.mask));
}

static void __update_reg64_bounds(struct bpf_reg_state *reg)
{
	/* min signed is max(sign bit) | min(other bits) */
	reg->smin_value = max_t(s64, reg->smin_value,
				reg->var_off.value | (reg->var_off.mask & S64_MIN));
	/* max signed is min(sign bit) | max(other bits) */
	reg->smax_value = min_t(s64, reg->smax_value,
				reg->var_off.value | (reg->var_off.mask & S64_MAX));
	reg->umin_value = max(reg->umin_value, reg->var_off.value);
	reg->umax_value = min(reg->umax_value,
			      reg->var_off.value | reg->var_off.mask);
}

static void __update_reg_bounds(struct bpf_reg_state *reg)
{
	__update_reg32_bounds(reg);
	__update_reg64_bounds(reg);
}

/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg32_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) {
		reg->s32_min_value = reg->u32_min_value =
			max_t(u32, reg->s32_min_value, reg->u32_min_value);
		reg->s32_max_value = reg->u32_max_value =
			min_t(u32, reg->s32_max_value, reg->u32_max_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s32)reg->u32_max_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->s32_min_value = reg->u32_min_value;
		reg->s32_max_value = reg->u32_max_value =
			min_t(u32, reg->s32_max_value, reg->u32_max_value);
	} else if ((s32)reg->u32_min_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->s32_min_value = reg->u32_min_value =
			max_t(u32, reg->s32_min_value, reg->u32_min_value);
		reg->s32_max_value = reg->u32_max_value;
	}
}

static void __reg64_deduce_bounds(struct bpf_reg_state *reg)
{
	/* Learn sign from signed bounds.
	 * If we cannot cross the sign boundary, then signed and unsigned bounds
	 * are the same, so combine. This works even in the negative case, e.g.
	 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
	 */
	if (reg->smin_value >= 0 || reg->smax_value < 0) {
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
		return;
	}
	/* Learn sign from unsigned bounds. Signed bounds cross the sign
	 * boundary, so we must be careful.
	 */
	if ((s64)reg->umax_value >= 0) {
		/* Positive. We can't learn anything from the smin, but smax
		 * is positive, hence safe.
		 */
		reg->smin_value = reg->umin_value;
		reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
							  reg->umax_value);
	} else if ((s64)reg->umin_value < 0) {
		/* Negative. We can't learn anything from the smax, but smin
		 * is negative, hence safe.
		 */
		reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
							  reg->umin_value);
		reg->smax_value = reg->umax_value;
	}
}

static void __reg_deduce_bounds(struct bpf_reg_state *reg)
{
	__reg32_deduce_bounds(reg);
	__reg64_deduce_bounds(reg);
}
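
/* Illustration (added for this cleaned-up copy): a worked example of the
 * deduction above. If umax_value == 0x7fffffffffffffff, then
 * (s64)umax_value >= 0, so every possible value also fits the signed view
 * and smin_value can be raised to umin_value. Conversely, if
 * smin_value == -3 and smax_value == -1, the value cannot cross the sign
 * boundary, so the unsigned bounds tighten to
 * [0xfffffffffffffffd, 0xffffffffffffffff].
 */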

/* Attempts to improve var_off based on unsigned min/max information */
static void __reg_bound_offset(struct bpf_reg_state *reg)
{
	struct tnum var64_off = tnum_intersect(reg->var_off,
					       tnum_range(reg->umin_value,
							  reg->umax_value));
	struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off),
					       tnum_range(reg->u32_min_value,
							  reg->u32_max_value));

	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
}

static bool __reg32_bound_s64(s32 a)
{
	return a >= 0 && a <= S32_MAX;
}

static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
	reg->umin_value = reg->u32_min_value;
	reg->umax_value = reg->u32_max_value;

	/* Attempt to pull 32-bit signed bounds into 64-bit bounds, but they
	 * must be positive; otherwise set to worst-case bounds and refine
	 * later from tnum.
	 */
	if (__reg32_bound_s64(reg->s32_min_value) &&
	    __reg32_bound_s64(reg->s32_max_value)) {
		reg->smin_value = reg->s32_min_value;
		reg->smax_value = reg->s32_max_value;
	} else {
		reg->smin_value = 0;
		reg->smax_value = U32_MAX;
	}
}

static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
{
	/* special case when 64-bit register has upper 32-bit register
	 * zeroed. Typically happens after zext or <<32, >>32 sequence,
	 * allowing us to use 32-bit bounds directly.
	 */
	if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
		__reg_assign_32_into_64(reg);
	} else {
		/* Otherwise the best we can do is push lower 32bit known and
		 * unknown bits into register (var_off set from jmp logic)
		 * then learn as much as possible from the 64-bit tnum
		 * known and unknown bits. The previous smin/smax bounds are
		 * invalid here because of jmp32 compare so mark them unknown
		 * so they do not impact tnum bounds calculation.
		 */
		__mark_reg64_unbounded(reg);
		__update_reg_bounds(reg);
	}

	/* Intersecting with the old var_off might have improved our bounds
	 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__reg_deduce_bounds(reg);
	__reg_bound_offset(reg);
	__update_reg_bounds(reg);
}
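
/* Illustration (added for this cleaned-up copy): a 32-bit ALU op such as
 * 'w1 = w1' zero-extends, leaving the upper 32 bits of R1's var_off known
 * zero. A following 32-bit bound like 'if w1 < 100' then lets the special
 * case above copy the u32 range [0, 99] straight into the u64 range. When
 * the upper half is not known zero, the 64-bit bounds are reset instead and
 * re-derived from the tnum.
 */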
1617
1618static bool __reg64_bound_s32(s64 a)
1619{
388e2c0b 1620 return a >= S32_MIN && a <= S32_MAX;
3f50f132
JF
1621}
1622
1623static bool __reg64_bound_u32(u64 a)
1624{
b9979db8 1625 return a >= U32_MIN && a <= U32_MAX;
3f50f132
JF
1626}
1627
1628static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
1629{
1630 __mark_reg32_unbounded(reg);
1631
b0270958 1632 if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
3f50f132 1633 reg->s32_min_value = (s32)reg->smin_value;
3f50f132 1634 reg->s32_max_value = (s32)reg->smax_value;
b0270958 1635 }
10bf4e83 1636 if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
3f50f132 1637 reg->u32_min_value = (u32)reg->umin_value;
3f50f132 1638 reg->u32_max_value = (u32)reg->umax_value;
10bf4e83 1639 }
3f50f132
JF
1640
1641 /* Intersecting with the old var_off might have improved our bounds
1642 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
1643 * then new var_off is (0; 0x7f...fc) which improves our umax.
1644 */
1645 __reg_deduce_bounds(reg);
1646 __reg_bound_offset(reg);
1647 __update_reg_bounds(reg);
b03c9f9f
EC
1648}
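/* Editor's note: illustrative example with assumed 64-bit bounds (not
 * part of the original source). If smax_value = 0x100000000 (> S32_MAX),
 * __reg64_bound_s32() fails and the s32 bounds stay at their unbounded
 * defaults; if umin_value = 0 and umax_value = 100 both fit in u32, the
 * u32 bounds tighten to [0, 100].
 */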
1649
f1174f77 1650/* Mark a register as having a completely unknown (scalar) value. */
f54c7898
DB
1651static void __mark_reg_unknown(const struct bpf_verifier_env *env,
1652 struct bpf_reg_state *reg)
f1174f77 1653{
a9c676bc
AS
1654 /*
1655 * Clear type, id, off, and union(map_ptr, range) and
1656 * padding between 'type' and union
1657 */
1658 memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
f1174f77 1659 reg->type = SCALAR_VALUE;
f1174f77 1660 reg->var_off = tnum_unknown;
f4d7e40a 1661 reg->frameno = 0;
2c78ee89 1662 reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
b03c9f9f 1663 __mark_reg_unbounded(reg);
f1174f77
EC
1664}
1665
61bd5218
JK
1666static void mark_reg_unknown(struct bpf_verifier_env *env,
1667 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
1668{
1669 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 1670 verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
19ceb417
AS
1671 /* Something bad happened, let's kill all regs except FP */
1672 for (regno = 0; regno < BPF_REG_FP; regno++)
f54c7898 1673 __mark_reg_not_init(env, regs + regno);
f1174f77
EC
1674 return;
1675 }
f54c7898 1676 __mark_reg_unknown(env, regs + regno);
f1174f77
EC
1677}
1678
f54c7898
DB
1679static void __mark_reg_not_init(const struct bpf_verifier_env *env,
1680 struct bpf_reg_state *reg)
f1174f77 1681{
f54c7898 1682 __mark_reg_unknown(env, reg);
f1174f77
EC
1683 reg->type = NOT_INIT;
1684}
1685
61bd5218
JK
1686static void mark_reg_not_init(struct bpf_verifier_env *env,
1687 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
1688{
1689 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 1690 verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
19ceb417
AS
1691 /* Something bad happened, let's kill all regs except FP */
1692 for (regno = 0; regno < BPF_REG_FP; regno++)
f54c7898 1693 __mark_reg_not_init(env, regs + regno);
f1174f77
EC
1694 return;
1695 }
f54c7898 1696 __mark_reg_not_init(env, regs + regno);
a9789ef9
DB
1697}
1698
41c48f3a
AI
1699static void mark_btf_ld_reg(struct bpf_verifier_env *env,
1700 struct bpf_reg_state *regs, u32 regno,
22dc4a0f 1701 enum bpf_reg_type reg_type,
c6f1bfe8
YS
1702 struct btf *btf, u32 btf_id,
1703 enum bpf_type_flag flag)
41c48f3a
AI
1704{
1705 if (reg_type == SCALAR_VALUE) {
1706 mark_reg_unknown(env, regs, regno);
1707 return;
1708 }
1709 mark_reg_known_zero(env, regs, regno);
c6f1bfe8 1710 regs[regno].type = PTR_TO_BTF_ID | flag;
22dc4a0f 1711 regs[regno].btf = btf;
41c48f3a
AI
1712 regs[regno].btf_id = btf_id;
1713}
1714
5327ed3d 1715#define DEF_NOT_SUBREG (0)
61bd5218 1716static void init_reg_state(struct bpf_verifier_env *env,
f4d7e40a 1717 struct bpf_func_state *state)
17a52670 1718{
f4d7e40a 1719 struct bpf_reg_state *regs = state->regs;
17a52670
AS
1720 int i;
1721
dc503a8a 1722 for (i = 0; i < MAX_BPF_REG; i++) {
61bd5218 1723 mark_reg_not_init(env, regs, i);
dc503a8a 1724 regs[i].live = REG_LIVE_NONE;
679c782d 1725 regs[i].parent = NULL;
5327ed3d 1726 regs[i].subreg_def = DEF_NOT_SUBREG;
dc503a8a 1727 }
17a52670
AS
1728
1729 /* frame pointer */
f1174f77 1730 regs[BPF_REG_FP].type = PTR_TO_STACK;
61bd5218 1731 mark_reg_known_zero(env, regs, BPF_REG_FP);
f4d7e40a 1732 regs[BPF_REG_FP].frameno = state->frameno;
6760bf2d
DB
1733}
1734
f4d7e40a
AS
1735#define BPF_MAIN_FUNC (-1)
1736static void init_func_state(struct bpf_verifier_env *env,
1737 struct bpf_func_state *state,
1738 int callsite, int frameno, int subprogno)
1739{
1740 state->callsite = callsite;
1741 state->frameno = frameno;
1742 state->subprogno = subprogno;
1743 init_reg_state(env, state);
0f55f9ed 1744 mark_verifier_state_scratched(env);
f4d7e40a
AS
1745}
1746
bfc6bb74
AS
1747/* Similar to push_stack(), but for async callbacks */
1748static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
1749 int insn_idx, int prev_insn_idx,
1750 int subprog)
1751{
1752 struct bpf_verifier_stack_elem *elem;
1753 struct bpf_func_state *frame;
1754
1755 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
1756 if (!elem)
1757 goto err;
1758
1759 elem->insn_idx = insn_idx;
1760 elem->prev_insn_idx = prev_insn_idx;
1761 elem->next = env->head;
1762 elem->log_pos = env->log.len_used;
1763 env->head = elem;
1764 env->stack_size++;
1765 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
1766 verbose(env,
1767 "The sequence of %d jumps is too complex for async cb.\n",
1768 env->stack_size);
1769 goto err;
1770 }
 1771 /* Unlike push_stack(), do not copy_verifier_state().
 1772 * The caller's state doesn't matter:
 1773 * this is an async callback, which starts on a fresh stack.
 1774 * Initialize it similarly to do_check_common().
1775 */
1776 elem->st.branches = 1;
1777 frame = kzalloc(sizeof(*frame), GFP_KERNEL);
1778 if (!frame)
1779 goto err;
1780 init_func_state(env, frame,
1781 BPF_MAIN_FUNC /* callsite */,
1782 0 /* frameno within this callchain */,
1783 subprog /* subprog number within this prog */);
1784 elem->st.frame[0] = frame;
1785 return &elem->st;
1786err:
1787 free_verifier_state(env->cur_state, true);
1788 env->cur_state = NULL;
1789 /* pop all elements and return */
1790 while (!pop_stack(env, NULL, NULL, false));
1791 return NULL;
1792}
1793
1794
17a52670
AS
1795enum reg_arg_type {
1796 SRC_OP, /* register is used as source operand */
1797 DST_OP, /* register is used as destination operand */
1798 DST_OP_NO_MARK /* same as above, check only, don't mark */
1799};
1800
cc8b0b92
AS
1801static int cmp_subprogs(const void *a, const void *b)
1802{
9c8105bd
JW
1803 return ((struct bpf_subprog_info *)a)->start -
1804 ((struct bpf_subprog_info *)b)->start;
cc8b0b92
AS
1805}
1806
1807static int find_subprog(struct bpf_verifier_env *env, int off)
1808{
9c8105bd 1809 struct bpf_subprog_info *p;
cc8b0b92 1810
9c8105bd
JW
1811 p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1812 sizeof(env->subprog_info[0]), cmp_subprogs);
cc8b0b92
AS
1813 if (!p)
1814 return -ENOENT;
9c8105bd 1815 return p - env->subprog_info;
cc8b0b92
AS
1816
1817}
1818
1819static int add_subprog(struct bpf_verifier_env *env, int off)
1820{
1821 int insn_cnt = env->prog->len;
1822 int ret;
1823
1824 if (off >= insn_cnt || off < 0) {
1825 verbose(env, "call to invalid destination\n");
1826 return -EINVAL;
1827 }
1828 ret = find_subprog(env, off);
1829 if (ret >= 0)
282a0f46 1830 return ret;
4cb3d99c 1831 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
cc8b0b92
AS
1832 verbose(env, "too many subprograms\n");
1833 return -E2BIG;
1834 }
e6ac2450 1835 /* determine subprog starts. The end is one before the next starts */
9c8105bd
JW
1836 env->subprog_info[env->subprog_cnt++].start = off;
1837 sort(env->subprog_info, env->subprog_cnt,
1838 sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
282a0f46 1839 return env->subprog_cnt - 1;
cc8b0b92
AS
1840}
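/* Editor's note: subprog_info[] is re-sorted by 'start' on every
 * insertion so that find_subprog() can bsearch() it. E.g. (assumed
 * offsets) adding subprogs starting at insns 0, 40 and 12 leaves the
 * table ordered as {0, 12, 40}, and find_subprog(env, 12) returns 1.
 */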
1841
2357672c
KKD
1842#define MAX_KFUNC_DESCS 256
1843#define MAX_KFUNC_BTFS 256
1844
e6ac2450
MKL
1845struct bpf_kfunc_desc {
1846 struct btf_func_model func_model;
1847 u32 func_id;
1848 s32 imm;
2357672c
KKD
1849 u16 offset;
1850};
1851
1852struct bpf_kfunc_btf {
1853 struct btf *btf;
1854 struct module *module;
1855 u16 offset;
e6ac2450
MKL
1856};
1857
e6ac2450
MKL
1858struct bpf_kfunc_desc_tab {
1859 struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
1860 u32 nr_descs;
1861};
1862
2357672c
KKD
1863struct bpf_kfunc_btf_tab {
1864 struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
1865 u32 nr_descs;
1866};
1867
1868static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
e6ac2450
MKL
1869{
1870 const struct bpf_kfunc_desc *d0 = a;
1871 const struct bpf_kfunc_desc *d1 = b;
1872
1873 /* func_id is not greater than BTF_MAX_TYPE */
2357672c
KKD
1874 return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
1875}
1876
1877static int kfunc_btf_cmp_by_off(const void *a, const void *b)
1878{
1879 const struct bpf_kfunc_btf *d0 = a;
1880 const struct bpf_kfunc_btf *d1 = b;
1881
1882 return d0->offset - d1->offset;
e6ac2450
MKL
1883}
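/* Editor's note: the two comparators above keep the descriptor tables
 * ordered so that bsearch() works: kfunc descs sort by (func_id, offset),
 * e.g. (assumed ids) {id=3, off=0} < {id=3, off=1} < {id=7, off=0},
 * while module BTF descs sort by offset alone.
 */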
1884
1885static const struct bpf_kfunc_desc *
2357672c 1886find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
e6ac2450
MKL
1887{
1888 struct bpf_kfunc_desc desc = {
1889 .func_id = func_id,
2357672c 1890 .offset = offset,
e6ac2450
MKL
1891 };
1892 struct bpf_kfunc_desc_tab *tab;
1893
1894 tab = prog->aux->kfunc_tab;
1895 return bsearch(&desc, tab->descs, tab->nr_descs,
2357672c
KKD
1896 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
1897}
1898
1899static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
b202d844 1900 s16 offset)
2357672c
KKD
1901{
1902 struct bpf_kfunc_btf kf_btf = { .offset = offset };
1903 struct bpf_kfunc_btf_tab *tab;
1904 struct bpf_kfunc_btf *b;
1905 struct module *mod;
1906 struct btf *btf;
1907 int btf_fd;
1908
1909 tab = env->prog->aux->kfunc_btf_tab;
1910 b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
1911 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
1912 if (!b) {
1913 if (tab->nr_descs == MAX_KFUNC_BTFS) {
1914 verbose(env, "too many different module BTFs\n");
1915 return ERR_PTR(-E2BIG);
1916 }
1917
1918 if (bpfptr_is_null(env->fd_array)) {
1919 verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
1920 return ERR_PTR(-EPROTO);
1921 }
1922
1923 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
1924 offset * sizeof(btf_fd),
1925 sizeof(btf_fd)))
1926 return ERR_PTR(-EFAULT);
1927
1928 btf = btf_get_by_fd(btf_fd);
588cd7ef
KKD
1929 if (IS_ERR(btf)) {
1930 verbose(env, "invalid module BTF fd specified\n");
2357672c 1931 return btf;
588cd7ef 1932 }
2357672c
KKD
1933
1934 if (!btf_is_module(btf)) {
1935 verbose(env, "BTF fd for kfunc is not a module BTF\n");
1936 btf_put(btf);
1937 return ERR_PTR(-EINVAL);
1938 }
1939
1940 mod = btf_try_get_module(btf);
1941 if (!mod) {
1942 btf_put(btf);
1943 return ERR_PTR(-ENXIO);
1944 }
1945
1946 b = &tab->descs[tab->nr_descs++];
1947 b->btf = btf;
1948 b->module = mod;
1949 b->offset = offset;
1950
1951 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
1952 kfunc_btf_cmp_by_off, NULL);
1953 }
2357672c 1954 return b->btf;
e6ac2450
MKL
1955}
1956
2357672c
KKD
1957void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
1958{
1959 if (!tab)
1960 return;
1961
1962 while (tab->nr_descs--) {
1963 module_put(tab->descs[tab->nr_descs].module);
1964 btf_put(tab->descs[tab->nr_descs].btf);
1965 }
1966 kfree(tab);
1967}
1968
43bf0878 1969static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset)
2357672c 1970{
2357672c
KKD
1971 if (offset) {
1972 if (offset < 0) {
 1973 /* In the future, this can be relaxed to increase the limit
 1974 * of the fd index into fd_array, interpreted as u16.
1975 */
1976 verbose(env, "negative offset disallowed for kernel module function call\n");
1977 return ERR_PTR(-EINVAL);
1978 }
1979
b202d844 1980 return __find_kfunc_desc_btf(env, offset);
2357672c
KKD
1981 }
1982 return btf_vmlinux ?: ERR_PTR(-ENOENT);
e6ac2450
MKL
1983}
1984
2357672c 1985static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
e6ac2450
MKL
1986{
1987 const struct btf_type *func, *func_proto;
2357672c 1988 struct bpf_kfunc_btf_tab *btf_tab;
e6ac2450
MKL
1989 struct bpf_kfunc_desc_tab *tab;
1990 struct bpf_prog_aux *prog_aux;
1991 struct bpf_kfunc_desc *desc;
1992 const char *func_name;
2357672c 1993 struct btf *desc_btf;
8cbf062a 1994 unsigned long call_imm;
e6ac2450
MKL
1995 unsigned long addr;
1996 int err;
1997
1998 prog_aux = env->prog->aux;
1999 tab = prog_aux->kfunc_tab;
2357672c 2000 btf_tab = prog_aux->kfunc_btf_tab;
e6ac2450
MKL
2001 if (!tab) {
2002 if (!btf_vmlinux) {
2003 verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
2004 return -ENOTSUPP;
2005 }
2006
2007 if (!env->prog->jit_requested) {
2008 verbose(env, "JIT is required for calling kernel function\n");
2009 return -ENOTSUPP;
2010 }
2011
2012 if (!bpf_jit_supports_kfunc_call()) {
2013 verbose(env, "JIT does not support calling kernel function\n");
2014 return -ENOTSUPP;
2015 }
2016
2017 if (!env->prog->gpl_compatible) {
2018 verbose(env, "cannot call kernel function from non-GPL compatible program\n");
2019 return -EINVAL;
2020 }
2021
2022 tab = kzalloc(sizeof(*tab), GFP_KERNEL);
2023 if (!tab)
2024 return -ENOMEM;
2025 prog_aux->kfunc_tab = tab;
2026 }
2027
a5d82727
KKD
2028 /* func_id == 0 is always invalid, but instead of returning an error, be
 2029 * conservative and wait until the code elimination pass before returning
 2030 * an error, so that invalid calls that get pruned out may appear in BPF
 2031 * programs loaded from userspace. It is also required that offset be untouched
2032 * for such calls.
2033 */
2034 if (!func_id && !offset)
2035 return 0;
2036
2357672c
KKD
2037 if (!btf_tab && offset) {
2038 btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
2039 if (!btf_tab)
2040 return -ENOMEM;
2041 prog_aux->kfunc_btf_tab = btf_tab;
2042 }
2043
43bf0878 2044 desc_btf = find_kfunc_desc_btf(env, offset);
2357672c
KKD
2045 if (IS_ERR(desc_btf)) {
2046 verbose(env, "failed to find BTF for kernel function\n");
2047 return PTR_ERR(desc_btf);
2048 }
2049
2050 if (find_kfunc_desc(env->prog, func_id, offset))
e6ac2450
MKL
2051 return 0;
2052
2053 if (tab->nr_descs == MAX_KFUNC_DESCS) {
2054 verbose(env, "too many different kernel function calls\n");
2055 return -E2BIG;
2056 }
2057
2357672c 2058 func = btf_type_by_id(desc_btf, func_id);
e6ac2450
MKL
2059 if (!func || !btf_type_is_func(func)) {
2060 verbose(env, "kernel btf_id %u is not a function\n",
2061 func_id);
2062 return -EINVAL;
2063 }
2357672c 2064 func_proto = btf_type_by_id(desc_btf, func->type);
e6ac2450
MKL
2065 if (!func_proto || !btf_type_is_func_proto(func_proto)) {
2066 verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
2067 func_id);
2068 return -EINVAL;
2069 }
2070
2357672c 2071 func_name = btf_name_by_offset(desc_btf, func->name_off);
e6ac2450
MKL
2072 addr = kallsyms_lookup_name(func_name);
2073 if (!addr) {
2074 verbose(env, "cannot find address for kernel function %s\n",
2075 func_name);
2076 return -EINVAL;
2077 }
2078
8cbf062a
HT
2079 call_imm = BPF_CALL_IMM(addr);
2080 /* Check whether or not the relative offset overflows desc->imm */
2081 if ((unsigned long)(s32)call_imm != call_imm) {
2082 verbose(env, "address of kernel function %s is out of range\n",
2083 func_name);
2084 return -EINVAL;
2085 }
2086
e6ac2450
MKL
2087 desc = &tab->descs[tab->nr_descs++];
2088 desc->func_id = func_id;
8cbf062a 2089 desc->imm = call_imm;
2357672c
KKD
2090 desc->offset = offset;
2091 err = btf_distill_func_proto(&env->log, desc_btf,
e6ac2450
MKL
2092 func_proto, func_name,
2093 &desc->func_model);
2094 if (!err)
2095 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2357672c 2096 kfunc_desc_cmp_by_id_off, NULL);
e6ac2450
MKL
2097 return err;
2098}
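/* Editor's note: the (s32) round-trip check above rejects kernel
 * function addresses whose displacement from the BPF call base does not
 * fit in the signed 32-bit insn->imm field. E.g. (hypothetical) a kfunc
 * located 5 GiB away from the base would truncate when stored in
 * desc->imm and is therefore refused as "out of range".
 */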
2099
2100static int kfunc_desc_cmp_by_imm(const void *a, const void *b)
2101{
2102 const struct bpf_kfunc_desc *d0 = a;
2103 const struct bpf_kfunc_desc *d1 = b;
2104
2105 if (d0->imm > d1->imm)
2106 return 1;
2107 else if (d0->imm < d1->imm)
2108 return -1;
2109 return 0;
2110}
2111
2112static void sort_kfunc_descs_by_imm(struct bpf_prog *prog)
2113{
2114 struct bpf_kfunc_desc_tab *tab;
2115
2116 tab = prog->aux->kfunc_tab;
2117 if (!tab)
2118 return;
2119
2120 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
2121 kfunc_desc_cmp_by_imm, NULL);
2122}
2123
2124bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2125{
2126 return !!prog->aux->kfunc_tab;
2127}
2128
2129const struct btf_func_model *
2130bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2131 const struct bpf_insn *insn)
2132{
2133 const struct bpf_kfunc_desc desc = {
2134 .imm = insn->imm,
2135 };
2136 const struct bpf_kfunc_desc *res;
2137 struct bpf_kfunc_desc_tab *tab;
2138
2139 tab = prog->aux->kfunc_tab;
2140 res = bsearch(&desc, tab->descs, tab->nr_descs,
2141 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm);
2142
2143 return res ? &res->func_model : NULL;
2144}
2145
2146static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
cc8b0b92 2147{
9c8105bd 2148 struct bpf_subprog_info *subprog = env->subprog_info;
cc8b0b92 2149 struct bpf_insn *insn = env->prog->insnsi;
e6ac2450 2150 int i, ret, insn_cnt = env->prog->len;
cc8b0b92 2151
f910cefa
JW
2152 /* Add entry function. */
2153 ret = add_subprog(env, 0);
e6ac2450 2154 if (ret)
f910cefa
JW
2155 return ret;
2156
e6ac2450
MKL
2157 for (i = 0; i < insn_cnt; i++, insn++) {
2158 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) &&
2159 !bpf_pseudo_kfunc_call(insn))
cc8b0b92 2160 continue;
e6ac2450 2161
2c78ee89 2162 if (!env->bpf_capable) {
e6ac2450 2163 verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n");
cc8b0b92
AS
2164 return -EPERM;
2165 }
e6ac2450 2166
3990ed4c 2167 if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
e6ac2450 2168 ret = add_subprog(env, i + insn->imm + 1);
3990ed4c 2169 else
2357672c 2170 ret = add_kfunc_call(env, insn->imm, insn->off);
e6ac2450 2171
cc8b0b92
AS
2172 if (ret < 0)
2173 return ret;
2174 }
2175
4cb3d99c
JW
 2176 /* Add a fake 'exit' subprog to simplify the subprog iteration
 2177 * logic. 'subprog_cnt' should not be increased.
2178 */
2179 subprog[env->subprog_cnt].start = insn_cnt;
2180
06ee7115 2181 if (env->log.level & BPF_LOG_LEVEL2)
cc8b0b92 2182 for (i = 0; i < env->subprog_cnt; i++)
9c8105bd 2183 verbose(env, "func#%d @%d\n", i, subprog[i].start);
cc8b0b92 2184
e6ac2450
MKL
2185 return 0;
2186}
2187
2188static int check_subprogs(struct bpf_verifier_env *env)
2189{
2190 int i, subprog_start, subprog_end, off, cur_subprog = 0;
2191 struct bpf_subprog_info *subprog = env->subprog_info;
2192 struct bpf_insn *insn = env->prog->insnsi;
2193 int insn_cnt = env->prog->len;
2194
cc8b0b92 2195 /* now check that all jumps are within the same subprog */
4cb3d99c
JW
2196 subprog_start = subprog[cur_subprog].start;
2197 subprog_end = subprog[cur_subprog + 1].start;
cc8b0b92
AS
2198 for (i = 0; i < insn_cnt; i++) {
2199 u8 code = insn[i].code;
2200
7f6e4312
MF
2201 if (code == (BPF_JMP | BPF_CALL) &&
2202 insn[i].imm == BPF_FUNC_tail_call &&
2203 insn[i].src_reg != BPF_PSEUDO_CALL)
2204 subprog[cur_subprog].has_tail_call = true;
09b28d76
AS
2205 if (BPF_CLASS(code) == BPF_LD &&
2206 (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
2207 subprog[cur_subprog].has_ld_abs = true;
092ed096 2208 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
cc8b0b92
AS
2209 goto next;
2210 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
2211 goto next;
2212 off = i + insn[i].off + 1;
2213 if (off < subprog_start || off >= subprog_end) {
2214 verbose(env, "jump out of range from insn %d to %d\n", i, off);
2215 return -EINVAL;
2216 }
2217next:
2218 if (i == subprog_end - 1) {
2219 /* to avoid fall-through from one subprog into another
2220 * the last insn of the subprog should be either exit
2221 * or unconditional jump back
2222 */
2223 if (code != (BPF_JMP | BPF_EXIT) &&
2224 code != (BPF_JMP | BPF_JA)) {
2225 verbose(env, "last insn is not an exit or jmp\n");
2226 return -EINVAL;
2227 }
2228 subprog_start = subprog_end;
4cb3d99c
JW
2229 cur_subprog++;
2230 if (cur_subprog < env->subprog_cnt)
9c8105bd 2231 subprog_end = subprog[cur_subprog + 1].start;
cc8b0b92
AS
2232 }
2233 }
2234 return 0;
2235}
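/* Editor's note: an example of the fall-through rule above (assumed
 * program). If a subprog's last insn is
 *   r0 = 0
 * instead of "exit" or an unconditional "goto", execution would fall
 * through into the next subprog, so the verifier rejects the program
 * with "last insn is not an exit or jmp".
 */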
2236
679c782d
EC
2237/* Parentage chain of this register (or stack slot) should take care of all
2238 * issues like callee-saved registers, stack slot allocation time, etc.
2239 */
f4d7e40a 2240static int mark_reg_read(struct bpf_verifier_env *env,
679c782d 2241 const struct bpf_reg_state *state,
5327ed3d 2242 struct bpf_reg_state *parent, u8 flag)
f4d7e40a
AS
2243{
2244 bool writes = parent == state->parent; /* Observe write marks */
06ee7115 2245 int cnt = 0;
dc503a8a
EC
2246
2247 while (parent) {
2248 /* if read wasn't screened by an earlier write ... */
679c782d 2249 if (writes && state->live & REG_LIVE_WRITTEN)
dc503a8a 2250 break;
9242b5f5
AS
2251 if (parent->live & REG_LIVE_DONE) {
2252 verbose(env, "verifier BUG type %s var_off %lld off %d\n",
c25b2ae1 2253 reg_type_str(env, parent->type),
9242b5f5
AS
2254 parent->var_off.value, parent->off);
2255 return -EFAULT;
2256 }
5327ed3d
JW
 2257 /* The first condition is more likely to be true than the
 2258 * second, so check it first.
2259 */
2260 if ((parent->live & REG_LIVE_READ) == flag ||
2261 parent->live & REG_LIVE_READ64)
25af32da
AS
2262 /* The parentage chain never changes and
2263 * this parent was already marked as LIVE_READ.
2264 * There is no need to keep walking the chain again and
2265 * keep re-marking all parents as LIVE_READ.
2266 * This case happens when the same register is read
2267 * multiple times without writes into it in-between.
5327ed3d
JW
 2268 * Also, if the parent has the stronger REG_LIVE_READ64 set,
 2269 * there is no need to set the weaker REG_LIVE_READ32.
25af32da
AS
2270 */
2271 break;
dc503a8a 2272 /* ... then we depend on parent's value */
5327ed3d
JW
2273 parent->live |= flag;
2274 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
2275 if (flag == REG_LIVE_READ64)
2276 parent->live &= ~REG_LIVE_READ32;
dc503a8a
EC
2277 state = parent;
2278 parent = state->parent;
f4d7e40a 2279 writes = true;
06ee7115 2280 cnt++;
dc503a8a 2281 }
06ee7115
AS
2282
2283 if (env->longest_mark_read_walk < cnt)
2284 env->longest_mark_read_walk = cnt;
f4d7e40a 2285 return 0;
dc503a8a
EC
2286}
2287
5327ed3d
JW
 2288/* This function is supposed to be used by the following 32-bit optimization
 2289 * code only. It returns TRUE if the source or destination register operates
 2290 * on 64 bits, and FALSE otherwise.
2291 */
2292static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
2293 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
2294{
2295 u8 code, class, op;
2296
2297 code = insn->code;
2298 class = BPF_CLASS(code);
2299 op = BPF_OP(code);
2300 if (class == BPF_JMP) {
2301 /* BPF_EXIT for "main" will reach here. Return TRUE
2302 * conservatively.
2303 */
2304 if (op == BPF_EXIT)
2305 return true;
2306 if (op == BPF_CALL) {
 2307 /* A BPF-to-BPF call will reach here because caller-saved
 2308 * clobbers are marked with DST_OP_NO_MARK; we don't care
 2309 * about the register def since those registers are
 2310 * already marked as NOT_INIT.
2311 */
2312 if (insn->src_reg == BPF_PSEUDO_CALL)
2313 return false;
 2314 /* A helper call will reach here because of the arg type
 2315 * check; conservatively return TRUE.
2316 */
2317 if (t == SRC_OP)
2318 return true;
2319
2320 return false;
2321 }
2322 }
2323
2324 if (class == BPF_ALU64 || class == BPF_JMP ||
 2325 /* BPF_END always uses the BPF_ALU class. */
2326 (class == BPF_ALU && op == BPF_END && insn->imm == 64))
2327 return true;
2328
2329 if (class == BPF_ALU || class == BPF_JMP32)
2330 return false;
2331
2332 if (class == BPF_LDX) {
2333 if (t != SRC_OP)
2334 return BPF_SIZE(code) == BPF_DW;
2335 /* LDX source must be ptr. */
2336 return true;
2337 }
2338
2339 if (class == BPF_STX) {
83a28819
IL
2340 /* BPF_STX (including atomic variants) has multiple source
2341 * operands, one of which is a ptr. Check whether the caller is
2342 * asking about it.
2343 */
2344 if (t == SRC_OP && reg->type != SCALAR_VALUE)
5327ed3d
JW
2345 return true;
2346 return BPF_SIZE(code) == BPF_DW;
2347 }
2348
2349 if (class == BPF_LD) {
2350 u8 mode = BPF_MODE(code);
2351
2352 /* LD_IMM64 */
2353 if (mode == BPF_IMM)
2354 return true;
2355
2356 /* Both LD_IND and LD_ABS return 32-bit data. */
2357 if (t != SRC_OP)
2358 return false;
2359
2360 /* Implicit ctx ptr. */
2361 if (regno == BPF_REG_6)
2362 return true;
2363
2364 /* Explicit source could be any width. */
2365 return true;
2366 }
2367
2368 if (class == BPF_ST)
2369 /* The only source register for BPF_ST is a ptr. */
2370 return true;
2371
 2372 /* Conservatively return true by default. */
2373 return true;
2374}
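/* Editor's note: a few sample classifications by is_reg64() above
 * (illustrative):
 *   r1 += r2 (BPF_ALU64) -> true
 *   w1 += w2 (BPF_ALU) -> false, only a 32-bit sub-register is written
 *   r1 = *(u32 *)(r2 + 0) as DST_OP -> false (BPF_SIZE != BPF_DW)
 *   r1 = *(u64 *)(r2 + 0) as DST_OP -> true
 */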
2375
83a28819
IL
2376/* Return the regno defined by the insn, or -1. */
2377static int insn_def_regno(const struct bpf_insn *insn)
b325fbca 2378{
83a28819
IL
2379 switch (BPF_CLASS(insn->code)) {
2380 case BPF_JMP:
2381 case BPF_JMP32:
2382 case BPF_ST:
2383 return -1;
2384 case BPF_STX:
2385 if (BPF_MODE(insn->code) == BPF_ATOMIC &&
2386 (insn->imm & BPF_FETCH)) {
2387 if (insn->imm == BPF_CMPXCHG)
2388 return BPF_REG_0;
2389 else
2390 return insn->src_reg;
2391 } else {
2392 return -1;
2393 }
2394 default:
2395 return insn->dst_reg;
2396 }
b325fbca
JW
2397}
2398
2399/* Return TRUE if INSN has defined any 32-bit value explicitly. */
2400static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
2401{
83a28819
IL
2402 int dst_reg = insn_def_regno(insn);
2403
2404 if (dst_reg == -1)
b325fbca
JW
2405 return false;
2406
83a28819 2407 return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
b325fbca
JW
2408}
2409
5327ed3d
JW
2410static void mark_insn_zext(struct bpf_verifier_env *env,
2411 struct bpf_reg_state *reg)
2412{
2413 s32 def_idx = reg->subreg_def;
2414
2415 if (def_idx == DEF_NOT_SUBREG)
2416 return;
2417
2418 env->insn_aux_data[def_idx - 1].zext_dst = true;
 2419 /* The dst will be zero-extended, so it won't be a sub-register anymore. */
2420 reg->subreg_def = DEF_NOT_SUBREG;
2421}
2422
dc503a8a 2423static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
17a52670
AS
2424 enum reg_arg_type t)
2425{
f4d7e40a
AS
2426 struct bpf_verifier_state *vstate = env->cur_state;
2427 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5327ed3d 2428 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
c342dc10 2429 struct bpf_reg_state *reg, *regs = state->regs;
5327ed3d 2430 bool rw64;
dc503a8a 2431
17a52670 2432 if (regno >= MAX_BPF_REG) {
61bd5218 2433 verbose(env, "R%d is invalid\n", regno);
17a52670
AS
2434 return -EINVAL;
2435 }
2436
0f55f9ed
CL
2437 mark_reg_scratched(env, regno);
2438
c342dc10 2439 reg = &regs[regno];
5327ed3d 2440 rw64 = is_reg64(env, insn, regno, reg, t);
17a52670
AS
2441 if (t == SRC_OP) {
2442 /* check whether register used as source operand can be read */
c342dc10 2443 if (reg->type == NOT_INIT) {
61bd5218 2444 verbose(env, "R%d !read_ok\n", regno);
17a52670
AS
2445 return -EACCES;
2446 }
679c782d 2447 /* We don't need to worry about FP liveness because it's read-only */
c342dc10
JW
2448 if (regno == BPF_REG_FP)
2449 return 0;
2450
5327ed3d
JW
2451 if (rw64)
2452 mark_insn_zext(env, reg);
2453
2454 return mark_reg_read(env, reg, reg->parent,
2455 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
17a52670
AS
2456 } else {
2457 /* check whether register used as dest operand can be written to */
2458 if (regno == BPF_REG_FP) {
61bd5218 2459 verbose(env, "frame pointer is read only\n");
17a52670
AS
2460 return -EACCES;
2461 }
c342dc10 2462 reg->live |= REG_LIVE_WRITTEN;
5327ed3d 2463 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
17a52670 2464 if (t == DST_OP)
61bd5218 2465 mark_reg_unknown(env, regs, regno);
17a52670
AS
2466 }
2467 return 0;
2468}
2469
b5dc0163
AS
2470/* for any branch, call, exit record the history of jmps in the given state */
2471static int push_jmp_history(struct bpf_verifier_env *env,
2472 struct bpf_verifier_state *cur)
2473{
2474 u32 cnt = cur->jmp_history_cnt;
2475 struct bpf_idx_pair *p;
2476
2477 cnt++;
2478 p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
2479 if (!p)
2480 return -ENOMEM;
2481 p[cnt - 1].idx = env->insn_idx;
2482 p[cnt - 1].prev_idx = env->prev_insn_idx;
2483 cur->jmp_history = p;
2484 cur->jmp_history_cnt = cnt;
2485 return 0;
2486}
2487
 2488/* Backtrack one insn at a time. If idx is not at the top of the recorded
 2489 * history then the previous instruction came from straight-line execution.
2490 */
2491static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
2492 u32 *history)
2493{
2494 u32 cnt = *history;
2495
2496 if (cnt && st->jmp_history[cnt - 1].idx == i) {
2497 i = st->jmp_history[cnt - 1].prev_idx;
2498 (*history)--;
2499 } else {
2500 i--;
2501 }
2502 return i;
2503}
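/* Editor's note: a small walk with assumed indices. With jmp_history =
 * [{idx=5, prev_idx=2}], get_prev_insn_idx(st, 5, &h) returns 2 (the
 * jump source) and decrements the history count, while
 * get_prev_insn_idx(st, 4, &h) simply returns 3: insn 4 was reached by
 * straight-line execution.
 */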
2504
e6ac2450
MKL
2505static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
2506{
2507 const struct btf_type *func;
2357672c 2508 struct btf *desc_btf;
e6ac2450
MKL
2509
2510 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
2511 return NULL;
2512
43bf0878 2513 desc_btf = find_kfunc_desc_btf(data, insn->off);
2357672c
KKD
2514 if (IS_ERR(desc_btf))
2515 return "<error>";
2516
2517 func = btf_type_by_id(desc_btf, insn->imm);
2518 return btf_name_by_offset(desc_btf, func->name_off);
e6ac2450
MKL
2519}
2520
b5dc0163
AS
 2521/* For a given verifier state, backtrack_insn() is called from the last insn to
 2522 * the first insn. Its purpose is to compute a bitmask of registers and
 2523 * stack slots that need precision in the parent verifier state.
2524 */
2525static int backtrack_insn(struct bpf_verifier_env *env, int idx,
2526 u32 *reg_mask, u64 *stack_mask)
2527{
2528 const struct bpf_insn_cbs cbs = {
e6ac2450 2529 .cb_call = disasm_kfunc_name,
b5dc0163
AS
2530 .cb_print = verbose,
2531 .private_data = env,
2532 };
2533 struct bpf_insn *insn = env->prog->insnsi + idx;
2534 u8 class = BPF_CLASS(insn->code);
2535 u8 opcode = BPF_OP(insn->code);
2536 u8 mode = BPF_MODE(insn->code);
2537 u32 dreg = 1u << insn->dst_reg;
2538 u32 sreg = 1u << insn->src_reg;
2539 u32 spi;
2540
2541 if (insn->code == 0)
2542 return 0;
496f3324 2543 if (env->log.level & BPF_LOG_LEVEL2) {
b5dc0163
AS
2544 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
2545 verbose(env, "%d: ", idx);
2546 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
2547 }
2548
2549 if (class == BPF_ALU || class == BPF_ALU64) {
2550 if (!(*reg_mask & dreg))
2551 return 0;
2552 if (opcode == BPF_MOV) {
2553 if (BPF_SRC(insn->code) == BPF_X) {
2554 /* dreg = sreg
2555 * dreg needs precision after this insn
2556 * sreg needs precision before this insn
2557 */
2558 *reg_mask &= ~dreg;
2559 *reg_mask |= sreg;
2560 } else {
2561 /* dreg = K
2562 * dreg needs precision after this insn.
2563 * Corresponding register is already marked
2564 * as precise=true in this verifier state.
2565 * No further markings in parent are necessary
2566 */
2567 *reg_mask &= ~dreg;
2568 }
2569 } else {
2570 if (BPF_SRC(insn->code) == BPF_X) {
2571 /* dreg += sreg
2572 * both dreg and sreg need precision
2573 * before this insn
2574 */
2575 *reg_mask |= sreg;
2576 } /* else dreg += K
2577 * dreg still needs precision before this insn
2578 */
2579 }
2580 } else if (class == BPF_LDX) {
2581 if (!(*reg_mask & dreg))
2582 return 0;
2583 *reg_mask &= ~dreg;
2584
2585 /* scalars can only be spilled into stack w/o losing precision.
2586 * Load from any other memory can be zero extended.
2587 * The desire to keep that precision is already indicated
2588 * by 'precise' mark in corresponding register of this state.
2589 * No further tracking necessary.
2590 */
2591 if (insn->src_reg != BPF_REG_FP)
2592 return 0;
b5dc0163
AS
2593
2594 /* dreg = *(u64 *)[fp - off] was a fill from the stack.
2595 * that [fp - off] slot contains scalar that needs to be
2596 * tracked with precision
2597 */
2598 spi = (-insn->off - 1) / BPF_REG_SIZE;
2599 if (spi >= 64) {
2600 verbose(env, "BUG spi %d\n", spi);
2601 WARN_ONCE(1, "verifier backtracking bug");
2602 return -EFAULT;
2603 }
2604 *stack_mask |= 1ull << spi;
b3b50f05 2605 } else if (class == BPF_STX || class == BPF_ST) {
b5dc0163 2606 if (*reg_mask & dreg)
b3b50f05 2607 /* stx & st shouldn't be using _scalar_ dst_reg
b5dc0163
AS
2608 * to access memory. It means backtracking
2609 * encountered a case of pointer subtraction.
2610 */
2611 return -ENOTSUPP;
2612 /* scalars can only be spilled into stack */
2613 if (insn->dst_reg != BPF_REG_FP)
2614 return 0;
b5dc0163
AS
2615 spi = (-insn->off - 1) / BPF_REG_SIZE;
2616 if (spi >= 64) {
2617 verbose(env, "BUG spi %d\n", spi);
2618 WARN_ONCE(1, "verifier backtracking bug");
2619 return -EFAULT;
2620 }
2621 if (!(*stack_mask & (1ull << spi)))
2622 return 0;
2623 *stack_mask &= ~(1ull << spi);
b3b50f05
AN
2624 if (class == BPF_STX)
2625 *reg_mask |= sreg;
b5dc0163
AS
2626 } else if (class == BPF_JMP || class == BPF_JMP32) {
2627 if (opcode == BPF_CALL) {
2628 if (insn->src_reg == BPF_PSEUDO_CALL)
2629 return -ENOTSUPP;
2630 /* regular helper call sets R0 */
2631 *reg_mask &= ~1;
2632 if (*reg_mask & 0x3f) {
2633 /* if backtracing was looking for registers R1-R5
2634 * they should have been found already.
2635 */
2636 verbose(env, "BUG regs %x\n", *reg_mask);
2637 WARN_ONCE(1, "verifier backtracking bug");
2638 return -EFAULT;
2639 }
2640 } else if (opcode == BPF_EXIT) {
2641 return -ENOTSUPP;
2642 }
2643 } else if (class == BPF_LD) {
2644 if (!(*reg_mask & dreg))
2645 return 0;
2646 *reg_mask &= ~dreg;
2647 /* It's ld_imm64 or ld_abs or ld_ind.
2648 * For ld_imm64 no further tracking of precision
2649 * into parent is necessary
2650 */
2651 if (mode == BPF_IND || mode == BPF_ABS)
2652 /* to be analyzed */
2653 return -ENOTSUPP;
b5dc0163
AS
2654 }
2655 return 0;
2656}
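/* Editor's note: one illustrative backtrack step. If r5 needs precision
 * (reg_mask bit 5 set) and the insn being walked is
 *   r5 = r9
 * then the BPF_MOV with BPF_X case above clears bit 5 and sets bit 9:
 * from this insn upward the pass demands precision of r9 instead.
 */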
2657
2658/* the scalar precision tracking algorithm:
2659 * . at the start all registers have precise=false.
2660 * . scalar ranges are tracked as normal through alu and jmp insns.
2661 * . once precise value of the scalar register is used in:
2662 * . ptr + scalar alu
2663 * . if (scalar cond K|scalar)
2664 * . helper_call(.., scalar, ...) where ARG_CONST is expected
 2665 * backtrack through the verifier states and mark as precise all
 2666 * registers and stack slots with spilled constants that these
 2667 * scalar registers were computed from.
2668 * . during state pruning two registers (or spilled stack slots)
2669 * are equivalent if both are not precise.
2670 *
2671 * Note the verifier cannot simply walk register parentage chain,
2672 * since many different registers and stack slots could have been
2673 * used to compute single precise scalar.
2674 *
2675 * The approach of starting with precise=true for all registers and then
 2676 * backtrack to mark a register as not precise when the verifier detects
 2677 * that the program doesn't care about the specific value (e.g., when a
 2678 * helper takes a register as an ARG_ANYTHING parameter) is not safe.
2679 *
2680 * It's ok to walk single parentage chain of the verifier states.
2681 * It's possible that this backtracking will go all the way till 1st insn.
2682 * All other branches will be explored for needing precision later.
2683 *
2684 * The backtracking needs to deal with cases like:
2685 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
2686 * r9 -= r8
2687 * r5 = r9
2688 * if r5 > 0x79f goto pc+7
2689 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
2690 * r5 += 1
2691 * ...
2692 * call bpf_perf_event_output#25
2693 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
2694 *
2695 * and this case:
2696 * r6 = 1
2697 * call foo // uses callee's r6 inside to compute r0
2698 * r0 += r6
2699 * if r0 == 0 goto
2700 *
2701 * to track above reg_mask/stack_mask needs to be independent for each frame.
2702 *
2703 * Also if parent's curframe > frame where backtracking started,
2704 * the verifier need to mark registers in both frames, otherwise callees
2705 * may incorrectly prune callers. This is similar to
2706 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
2707 *
2708 * For now backtracking falls back into conservative marking.
2709 */
2710static void mark_all_scalars_precise(struct bpf_verifier_env *env,
2711 struct bpf_verifier_state *st)
2712{
2713 struct bpf_func_state *func;
2714 struct bpf_reg_state *reg;
2715 int i, j;
2716
2717 /* big hammer: mark all scalars precise in this path.
2718 * pop_stack may still get !precise scalars.
2719 */
2720 for (; st; st = st->parent)
2721 for (i = 0; i <= st->curframe; i++) {
2722 func = st->frame[i];
2723 for (j = 0; j < BPF_REG_FP; j++) {
2724 reg = &func->regs[j];
2725 if (reg->type != SCALAR_VALUE)
2726 continue;
2727 reg->precise = true;
2728 }
2729 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
27113c59 2730 if (!is_spilled_reg(&func->stack[j]))
b5dc0163
AS
2731 continue;
2732 reg = &func->stack[j].spilled_ptr;
2733 if (reg->type != SCALAR_VALUE)
2734 continue;
2735 reg->precise = true;
2736 }
2737 }
2738}
2739
a3ce685d
AS
2740static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
2741 int spi)
b5dc0163
AS
2742{
2743 struct bpf_verifier_state *st = env->cur_state;
2744 int first_idx = st->first_insn_idx;
2745 int last_idx = env->insn_idx;
2746 struct bpf_func_state *func;
2747 struct bpf_reg_state *reg;
a3ce685d
AS
2748 u32 reg_mask = regno >= 0 ? 1u << regno : 0;
2749 u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
b5dc0163 2750 bool skip_first = true;
a3ce685d 2751 bool new_marks = false;
b5dc0163
AS
2752 int i, err;
2753
2c78ee89 2754 if (!env->bpf_capable)
b5dc0163
AS
2755 return 0;
2756
2757 func = st->frame[st->curframe];
a3ce685d
AS
2758 if (regno >= 0) {
2759 reg = &func->regs[regno];
2760 if (reg->type != SCALAR_VALUE) {
2761 WARN_ONCE(1, "backtracing misuse");
2762 return -EFAULT;
2763 }
2764 if (!reg->precise)
2765 new_marks = true;
2766 else
2767 reg_mask = 0;
2768 reg->precise = true;
b5dc0163 2769 }
b5dc0163 2770
a3ce685d 2771 while (spi >= 0) {
27113c59 2772 if (!is_spilled_reg(&func->stack[spi])) {
a3ce685d
AS
2773 stack_mask = 0;
2774 break;
2775 }
2776 reg = &func->stack[spi].spilled_ptr;
2777 if (reg->type != SCALAR_VALUE) {
2778 stack_mask = 0;
2779 break;
2780 }
2781 if (!reg->precise)
2782 new_marks = true;
2783 else
2784 stack_mask = 0;
2785 reg->precise = true;
2786 break;
2787 }
2788
2789 if (!new_marks)
2790 return 0;
2791 if (!reg_mask && !stack_mask)
2792 return 0;
b5dc0163
AS
2793 for (;;) {
2794 DECLARE_BITMAP(mask, 64);
b5dc0163
AS
2795 u32 history = st->jmp_history_cnt;
2796
496f3324 2797 if (env->log.level & BPF_LOG_LEVEL2)
b5dc0163
AS
2798 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
2799 for (i = last_idx;;) {
2800 if (skip_first) {
2801 err = 0;
2802 skip_first = false;
2803 } else {
2804 err = backtrack_insn(env, i, &reg_mask, &stack_mask);
2805 }
2806 if (err == -ENOTSUPP) {
2807 mark_all_scalars_precise(env, st);
2808 return 0;
2809 } else if (err) {
2810 return err;
2811 }
2812 if (!reg_mask && !stack_mask)
2813 /* Found assignment(s) into tracked register in this state.
2814 * Since this state is already marked, just return.
2815 * Nothing to be tracked further in the parent state.
2816 */
2817 return 0;
2818 if (i == first_idx)
2819 break;
2820 i = get_prev_insn_idx(st, i, &history);
2821 if (i >= env->prog->len) {
2822 /* This can happen if backtracking reached insn 0
2823 * and there are still reg_mask or stack_mask
2824 * to backtrack.
2825 * It means the backtracking missed the spot where
 2826 * a particular register was initialized with a constant.
2827 */
2828 verbose(env, "BUG backtracking idx %d\n", i);
2829 WARN_ONCE(1, "verifier backtracking bug");
2830 return -EFAULT;
2831 }
2832 }
2833 st = st->parent;
2834 if (!st)
2835 break;
2836
a3ce685d 2837 new_marks = false;
b5dc0163
AS
2838 func = st->frame[st->curframe];
2839 bitmap_from_u64(mask, reg_mask);
2840 for_each_set_bit(i, mask, 32) {
2841 reg = &func->regs[i];
a3ce685d
AS
2842 if (reg->type != SCALAR_VALUE) {
2843 reg_mask &= ~(1u << i);
b5dc0163 2844 continue;
a3ce685d 2845 }
b5dc0163
AS
2846 if (!reg->precise)
2847 new_marks = true;
2848 reg->precise = true;
2849 }
2850
2851 bitmap_from_u64(mask, stack_mask);
2852 for_each_set_bit(i, mask, 64) {
2853 if (i >= func->allocated_stack / BPF_REG_SIZE) {
2339cd6c
AS
2854 /* the sequence of instructions:
2855 * 2: (bf) r3 = r10
2856 * 3: (7b) *(u64 *)(r3 -8) = r0
2857 * 4: (79) r4 = *(u64 *)(r10 -8)
2858 * doesn't contain jmps. It's backtracked
2859 * as a single block.
2860 * During backtracking insn 3 is not recognized as
2861 * stack access, so at the end of backtracking
2862 * stack slot fp-8 is still marked in stack_mask.
2863 * However the parent state may not have accessed
2864 * fp-8 and it's "unallocated" stack space.
 2865 * In such a case, fall back to conservative marking.
b5dc0163 2866 */
2339cd6c
AS
2867 mark_all_scalars_precise(env, st);
2868 return 0;
b5dc0163
AS
2869 }
2870
27113c59 2871 if (!is_spilled_reg(&func->stack[i])) {
a3ce685d 2872 stack_mask &= ~(1ull << i);
b5dc0163 2873 continue;
a3ce685d 2874 }
b5dc0163 2875 reg = &func->stack[i].spilled_ptr;
a3ce685d
AS
2876 if (reg->type != SCALAR_VALUE) {
2877 stack_mask &= ~(1ull << i);
b5dc0163 2878 continue;
a3ce685d 2879 }
b5dc0163
AS
2880 if (!reg->precise)
2881 new_marks = true;
2882 reg->precise = true;
2883 }
496f3324 2884 if (env->log.level & BPF_LOG_LEVEL2) {
2e576648 2885 verbose(env, "parent %s regs=%x stack=%llx marks:",
b5dc0163
AS
2886 new_marks ? "didn't have" : "already had",
2887 reg_mask, stack_mask);
2e576648 2888 print_verifier_state(env, func, true);
b5dc0163
AS
2889 }
2890
a3ce685d
AS
2891 if (!reg_mask && !stack_mask)
2892 break;
b5dc0163
AS
2893 if (!new_marks)
2894 break;
2895
2896 last_idx = st->last_insn_idx;
2897 first_idx = st->first_insn_idx;
2898 }
2899 return 0;
2900}
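/* Editor's note: mask encoding used above (illustrative). reg_mask bit n
 * means "rn still needs precision"; stack_mask bit n refers to the slot
 * at fp - 8 * (n + 1). So mark_chain_precision(env, 5) starts the walk
 * with reg_mask = 0x20, and a spilled constant at fp-8 corresponds to
 * stack_mask = 0x1.
 */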
2901
a3ce685d
AS
2902static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
2903{
2904 return __mark_chain_precision(env, regno, -1);
2905}
2906
2907static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
2908{
2909 return __mark_chain_precision(env, -1, spi);
2910}
b5dc0163 2911
1be7f75d
AS
2912static bool is_spillable_regtype(enum bpf_reg_type type)
2913{
c25b2ae1 2914 switch (base_type(type)) {
1be7f75d 2915 case PTR_TO_MAP_VALUE:
1be7f75d
AS
2916 case PTR_TO_STACK:
2917 case PTR_TO_CTX:
969bf05e 2918 case PTR_TO_PACKET:
de8f3a83 2919 case PTR_TO_PACKET_META:
969bf05e 2920 case PTR_TO_PACKET_END:
d58e468b 2921 case PTR_TO_FLOW_KEYS:
1be7f75d 2922 case CONST_PTR_TO_MAP:
c64b7983 2923 case PTR_TO_SOCKET:
46f8bc92 2924 case PTR_TO_SOCK_COMMON:
655a51e5 2925 case PTR_TO_TCP_SOCK:
fada7fdc 2926 case PTR_TO_XDP_SOCK:
65726b5b 2927 case PTR_TO_BTF_ID:
20b2aff4 2928 case PTR_TO_BUF:
744ea4e3 2929 case PTR_TO_MEM:
69c087ba
YS
2930 case PTR_TO_FUNC:
2931 case PTR_TO_MAP_KEY:
1be7f75d
AS
2932 return true;
2933 default:
2934 return false;
2935 }
2936}
2937
cc2b14d5
AS
2938/* Does this register contain a constant zero? */
2939static bool register_is_null(struct bpf_reg_state *reg)
2940{
2941 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
2942}
2943
f7cf25b2
AS
2944static bool register_is_const(struct bpf_reg_state *reg)
2945{
2946 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
2947}
2948
5689d49b
YS
2949static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
2950{
2951 return tnum_is_unknown(reg->var_off) &&
2952 reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
2953 reg->umin_value == 0 && reg->umax_value == U64_MAX &&
2954 reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
2955 reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
2956}
2957
2958static bool register_is_bounded(struct bpf_reg_state *reg)
2959{
2960 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
2961}
2962
6e7e63cb
JH
2963static bool __is_pointer_value(bool allow_ptr_leaks,
2964 const struct bpf_reg_state *reg)
2965{
2966 if (allow_ptr_leaks)
2967 return false;
2968
2969 return reg->type != SCALAR_VALUE;
2970}
2971
f7cf25b2 2972static void save_register_state(struct bpf_func_state *state,
354e8f19
MKL
2973 int spi, struct bpf_reg_state *reg,
2974 int size)
f7cf25b2
AS
2975{
2976 int i;
2977
2978 state->stack[spi].spilled_ptr = *reg;
354e8f19
MKL
2979 if (size == BPF_REG_SIZE)
2980 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
f7cf25b2 2981
354e8f19
MKL
2982 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
2983 state->stack[spi].slot_type[i - 1] = STACK_SPILL;
f7cf25b2 2984
354e8f19
MKL
2985 /* size < 8 bytes spill */
2986 for (; i; i--)
2987 scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
f7cf25b2
AS
2988}
2989
01f810ac 2990/* check_stack_{read,write}_fixed_off functions track spill/fill of registers;
17a52670
AS
2991 * stack boundary and alignment are checked in check_mem_access()
2992 */
01f810ac
AM
2993static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
2994 /* stack frame we're writing to */
2995 struct bpf_func_state *state,
2996 int off, int size, int value_regno,
2997 int insn_idx)
17a52670 2998{
f4d7e40a 2999 struct bpf_func_state *cur; /* state of the current function */
638f5b90 3000 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
b5dc0163 3001 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
f7cf25b2 3002 struct bpf_reg_state *reg = NULL;
638f5b90 3003
c69431aa 3004 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
638f5b90
AS
3005 if (err)
3006 return err;
9c399760
AS
3007 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
3008 * so it's aligned access and [off, off + size) are within stack limits
3009 */
638f5b90
AS
3010 if (!env->allow_ptr_leaks &&
3011 state->stack[spi].slot_type[0] == STACK_SPILL &&
3012 size != BPF_REG_SIZE) {
3013 verbose(env, "attempt to corrupt spilled pointer on stack\n");
3014 return -EACCES;
3015 }
17a52670 3016
f4d7e40a 3017 cur = env->cur_state->frame[env->cur_state->curframe];
f7cf25b2
AS
3018 if (value_regno >= 0)
3019 reg = &cur->regs[value_regno];
2039f26f
DB
3020 if (!env->bypass_spec_v4) {
3021 bool sanitize = reg && is_spillable_regtype(reg->type);
3022
3023 for (i = 0; i < size; i++) {
3024 if (state->stack[spi].slot_type[i] == STACK_INVALID) {
3025 sanitize = true;
3026 break;
3027 }
3028 }
3029
3030 if (sanitize)
3031 env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
3032 }
17a52670 3033
0f55f9ed 3034 mark_stack_slot_scratched(env, spi);
354e8f19 3035 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
2c78ee89 3036 !register_is_null(reg) && env->bpf_capable) {
b5dc0163
AS
3037 if (dst_reg != BPF_REG_FP) {
 3038 /* The backtracking logic can only recognize an explicit
 3039 * stack slot address like [fp - 8]. A spill of a
8fb33b60 3040 * scalar via a different register has to be conservative.
b5dc0163
AS
3041 * Backtrack from here and mark all registers as precise
3042 * that contributed into 'reg' being a constant.
3043 */
3044 err = mark_chain_precision(env, value_regno);
3045 if (err)
3046 return err;
3047 }
354e8f19 3048 save_register_state(state, spi, reg, size);
f7cf25b2 3049 } else if (reg && is_spillable_regtype(reg->type)) {
17a52670 3050 /* register containing pointer is being spilled into stack */
9c399760 3051 if (size != BPF_REG_SIZE) {
f7cf25b2 3052 verbose_linfo(env, insn_idx, "; ");
61bd5218 3053 verbose(env, "invalid size of register spill\n");
17a52670
AS
3054 return -EACCES;
3055 }
f7cf25b2 3056 if (state != cur && reg->type == PTR_TO_STACK) {
f4d7e40a
AS
3057 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
3058 return -EINVAL;
3059 }
354e8f19 3060 save_register_state(state, spi, reg, size);
9c399760 3061 } else {
cc2b14d5
AS
3062 u8 type = STACK_MISC;
3063
679c782d
EC
3064 /* regular write of data into stack destroys any spilled ptr */
3065 state->stack[spi].spilled_ptr.type = NOT_INIT;
0bae2d4d 3066 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
27113c59 3067 if (is_spilled_reg(&state->stack[spi]))
0bae2d4d 3068 for (i = 0; i < BPF_REG_SIZE; i++)
354e8f19 3069 scrub_spilled_slot(&state->stack[spi].slot_type[i]);
9c399760 3070
cc2b14d5
AS
 3071 /* only mark the slot as written if all 8 bytes were written,
 3072 * otherwise read propagation may incorrectly stop too soon
 3073 * when stack slots are partially written.
 3074 * This heuristic means that read propagation will be
 3075 * conservative, since it will add reg_live_read marks
 3076 * to stack slots all the way to the first state when a program
 3077 * writes+reads less than 8 bytes.
3078 */
3079 if (size == BPF_REG_SIZE)
3080 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
3081
 3082 /* when we zero-initialize stack slots, mark them as such */
b5dc0163
AS
3083 if (reg && register_is_null(reg)) {
3084 /* backtracking doesn't work for STACK_ZERO yet. */
3085 err = mark_chain_precision(env, value_regno);
3086 if (err)
3087 return err;
cc2b14d5 3088 type = STACK_ZERO;
b5dc0163 3089 }
cc2b14d5 3090
0bae2d4d 3091 /* Mark slots affected by this stack write. */
9c399760 3092 for (i = 0; i < size; i++)
638f5b90 3093 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
cc2b14d5 3094 type;
17a52670
AS
3095 }
3096 return 0;
3097}
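/* Editor's note: an illustrative spill handled by the function above.
 * For
 *   *(u64 *)(r10 - 8) = r1
 * with r1 = PTR_TO_CTX, all eight slot_type bytes of the fp-8 slot
 * become STACK_SPILL and spilled_ptr snapshots the full register state,
 * so a later fill
 *   r2 = *(u64 *)(r10 - 8)
 * can restore PTR_TO_CTX via check_stack_read_fixed_off().
 */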
3098
01f810ac
AM
3099/* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is
3100 * known to contain a variable offset.
3101 * This function checks whether the write is permitted and conservatively
3102 * tracks the effects of the write, considering that each stack slot in the
3103 * dynamic range is potentially written to.
3104 *
3105 * 'off' includes 'regno->off'.
3106 * 'value_regno' can be -1, meaning that an unknown value is being written to
3107 * the stack.
3108 *
3109 * Spilled pointers in range are not marked as written because we don't know
3110 * what's going to be actually written. This means that read propagation for
3111 * future reads cannot be terminated by this write.
3112 *
3113 * For privileged programs, uninitialized stack slots are considered
3114 * initialized by this write (even though we don't know exactly what offsets
3115 * are going to be written to). The idea is that we don't want the verifier to
3116 * reject future reads that access slots written to through variable offsets.
3117 */
3118static int check_stack_write_var_off(struct bpf_verifier_env *env,
3119 /* func where register points to */
3120 struct bpf_func_state *state,
3121 int ptr_regno, int off, int size,
3122 int value_regno, int insn_idx)
3123{
3124 struct bpf_func_state *cur; /* state of the current function */
3125 int min_off, max_off;
3126 int i, err;
3127 struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL;
3128 bool writing_zero = false;
3129 /* set if the fact that we're writing a zero is used to let any
3130 * stack slots remain STACK_ZERO
3131 */
3132 bool zero_used = false;
3133
3134 cur = env->cur_state->frame[env->cur_state->curframe];
3135 ptr_reg = &cur->regs[ptr_regno];
3136 min_off = ptr_reg->smin_value + off;
3137 max_off = ptr_reg->smax_value + off + size;
3138 if (value_regno >= 0)
3139 value_reg = &cur->regs[value_regno];
3140 if (value_reg && register_is_null(value_reg))
3141 writing_zero = true;
3142
c69431aa 3143 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
01f810ac
AM
3144 if (err)
3145 return err;
3146
3147
3148 /* Variable offset writes destroy any spilled pointers in range. */
3149 for (i = min_off; i < max_off; i++) {
3150 u8 new_type, *stype;
3151 int slot, spi;
3152
3153 slot = -i - 1;
3154 spi = slot / BPF_REG_SIZE;
3155 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
0f55f9ed 3156 mark_stack_slot_scratched(env, spi);
01f810ac
AM
3157
3158 if (!env->allow_ptr_leaks
3159 && *stype != NOT_INIT
3160 && *stype != SCALAR_VALUE) {
 3161 /* Reject the write if there are spilled pointers in
3162 * range. If we didn't reject here, the ptr status
3163 * would be erased below (even though not all slots are
3164 * actually overwritten), possibly opening the door to
3165 * leaks.
3166 */
3167 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d",
3168 insn_idx, i);
3169 return -EINVAL;
3170 }
3171
3172 /* Erase all spilled pointers. */
3173 state->stack[spi].spilled_ptr.type = NOT_INIT;
3174
3175 /* Update the slot type. */
3176 new_type = STACK_MISC;
3177 if (writing_zero && *stype == STACK_ZERO) {
3178 new_type = STACK_ZERO;
3179 zero_used = true;
3180 }
3181 /* If the slot is STACK_INVALID, we check whether it's OK to
3182 * pretend that it will be initialized by this write. The slot
3183 * might not actually be written to, and so if we mark it as
3184 * initialized future reads might leak uninitialized memory.
3185 * For privileged programs, we will accept such reads to slots
 3186 * that may or may not be written because, if we rejected
3187 * them, the error would be too confusing.
3188 */
3189 if (*stype == STACK_INVALID && !env->allow_uninit_stack) {
3190 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d",
3191 insn_idx, i);
3192 return -EINVAL;
3193 }
3194 *stype = new_type;
3195 }
3196 if (zero_used) {
3197 /* backtracking doesn't work for STACK_ZERO yet. */
3198 err = mark_chain_precision(env, value_regno);
3199 if (err)
3200 return err;
3201 }
3202 return 0;
3203}
3204
3205/* When register 'dst_regno' is assigned some values from stack[min_off,
3206 * max_off), we set the register's type according to the types of the
3207 * respective stack slots. If all the stack values are known to be zeros, then
3208 * so is the destination reg. Otherwise, the register is considered to be
3209 * SCALAR. This function does not deal with register filling; the caller must
3210 * ensure that all spilled registers in the stack range have been marked as
3211 * read.
3212 */
3213static void mark_reg_stack_read(struct bpf_verifier_env *env,
3214 /* func where src register points to */
3215 struct bpf_func_state *ptr_state,
3216 int min_off, int max_off, int dst_regno)
3217{
3218 struct bpf_verifier_state *vstate = env->cur_state;
3219 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3220 int i, slot, spi;
3221 u8 *stype;
3222 int zeros = 0;
3223
3224 for (i = min_off; i < max_off; i++) {
3225 slot = -i - 1;
3226 spi = slot / BPF_REG_SIZE;
3227 stype = ptr_state->stack[spi].slot_type;
3228 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO)
3229 break;
3230 zeros++;
3231 }
3232 if (zeros == max_off - min_off) {
3233 /* any access_size read into register is zero extended,
3234 * so the whole register == const_zero
3235 */
3236 __mark_reg_const_zero(&state->regs[dst_regno]);
3237 /* backtracking doesn't support STACK_ZERO yet,
 3238 * so mark the register precise here so that later
 3239 * backtracking can stop at this point.
3240 * Backtracking may not need this if this register
3241 * doesn't participate in pointer adjustment.
3242 * Forward propagation of precise flag is not
3243 * necessary either. This mark is only to stop
3244 * backtracking. Any register that contributed
3245 * to const 0 was marked precise before spill.
3246 */
3247 state->regs[dst_regno].precise = true;
3248 } else {
3249 /* have read misc data from the stack */
3250 mark_reg_unknown(env, state->regs, dst_regno);
3251 }
3252 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
3253}
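/* Editor's note: example of the zero/misc distinction above (assumed
 * stack contents). A 4-byte read of [fp-4, fp) where all four bytes are
 * STACK_ZERO makes dst_regno a known const-zero scalar; if any byte is
 * STACK_MISC instead, dst_regno becomes an unknown scalar.
 */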
3254
3255/* Read the stack at 'off' and put the results into the register indicated by
3256 * 'dst_regno'. It handles reg filling if the addressed stack slot is a
3257 * spilled reg.
3258 *
3259 * 'dst_regno' can be -1, meaning that the read value is not going to a
3260 * register.
3261 *
3262 * The access is assumed to be within the current stack bounds.
3263 */
3264static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
3265 /* func where src register points to */
3266 struct bpf_func_state *reg_state,
3267 int off, int size, int dst_regno)
17a52670 3268{
f4d7e40a
AS
3269 struct bpf_verifier_state *vstate = env->cur_state;
3270 struct bpf_func_state *state = vstate->frame[vstate->curframe];
638f5b90 3271 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
f7cf25b2 3272 struct bpf_reg_state *reg;
354e8f19 3273 u8 *stype, type;
17a52670 3274
f4d7e40a 3275 stype = reg_state->stack[spi].slot_type;
f7cf25b2 3276 reg = &reg_state->stack[spi].spilled_ptr;
17a52670 3277
27113c59 3278 if (is_spilled_reg(&reg_state->stack[spi])) {
f30d4968
MKL
3279 u8 spill_size = 1;
3280
3281 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
3282 spill_size++;
354e8f19 3283
f30d4968 3284 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
f7cf25b2
AS
3285 if (reg->type != SCALAR_VALUE) {
3286 verbose_linfo(env, env->insn_idx, "; ");
3287 verbose(env, "invalid size of register fill\n");
3288 return -EACCES;
3289 }
354e8f19
MKL
3290
3291 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
3292 if (dst_regno < 0)
3293 return 0;
3294
f30d4968 3295 if (!(off % BPF_REG_SIZE) && size == spill_size) {
354e8f19
MKL
3296 /* The earlier check_reg_arg() has decided the
3297 * subreg_def for this insn. Save it first.
3298 */
3299 s32 subreg_def = state->regs[dst_regno].subreg_def;
3300
3301 state->regs[dst_regno] = *reg;
3302 state->regs[dst_regno].subreg_def = subreg_def;
3303 } else {
3304 for (i = 0; i < size; i++) {
3305 type = stype[(slot - i) % BPF_REG_SIZE];
3306 if (type == STACK_SPILL)
3307 continue;
3308 if (type == STACK_MISC)
3309 continue;
3310 verbose(env, "invalid read from stack off %d+%d size %d\n",
3311 off, i, size);
3312 return -EACCES;
3313 }
01f810ac 3314 mark_reg_unknown(env, state->regs, dst_regno);
f7cf25b2 3315 }
354e8f19 3316 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
f7cf25b2 3317 return 0;
17a52670 3318 }
17a52670 3319
01f810ac 3320 if (dst_regno >= 0) {
17a52670 3321 /* restore register state from stack */
01f810ac 3322 state->regs[dst_regno] = *reg;
2f18f62e
AS
3323 /* mark reg as written since spilled pointer state likely
3324 * has its liveness marks cleared by is_state_visited()
3325 * which resets stack/reg liveness for state transitions
3326 */
01f810ac 3327 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
6e7e63cb 3328 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
01f810ac 3329 /* If dst_regno==-1, the caller is asking us whether
6e7e63cb
JH
3330 * it is acceptable to use this value as a SCALAR_VALUE
3331 * (e.g. for XADD).
3332 * We must not allow unprivileged callers to do that
3333 * with spilled pointers.
3334 */
3335 verbose(env, "leaking pointer from stack off %d\n",
3336 off);
3337 return -EACCES;
dc503a8a 3338 }
f7cf25b2 3339 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
17a52670
AS
3340 } else {
3341 for (i = 0; i < size; i++) {
01f810ac
AM
3342 type = stype[(slot - i) % BPF_REG_SIZE];
3343 if (type == STACK_MISC)
cc2b14d5 3344 continue;
01f810ac 3345 if (type == STACK_ZERO)
cc2b14d5 3346 continue;
cc2b14d5
AS
3347 verbose(env, "invalid read from stack off %d+%d size %d\n",
3348 off, i, size);
3349 return -EACCES;
3350 }
f7cf25b2 3351 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
01f810ac
AM
3352 if (dst_regno >= 0)
3353 mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
17a52670 3354 }
f7cf25b2 3355 return 0;
17a52670
AS
3356}
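/* Illustrative sketch (not part of verifier.c): an aligned 8-byte fill
 * restores the spilled register's complete state, pointer types
 * included, whereas a narrower fill of the same slot is accepted only
 * when the spilled register was a SCALAR_VALUE (and then yields an
 * unknown scalar):
 *
 *	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), // spill R1
 *	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), // R2 = saved R1 state
 *	BPF_LDX_MEM(BPF_W,  BPF_REG_3, BPF_REG_10, -8), // scalars only
 */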
3357
61df10c7 3358enum bpf_access_src {
01f810ac
AM
3359 ACCESS_DIRECT = 1, /* the access is performed by an instruction */
3360 ACCESS_HELPER = 2, /* the access is performed by a helper */
3361};
3362
3363static int check_stack_range_initialized(struct bpf_verifier_env *env,
3364 int regno, int off, int access_size,
3365 bool zero_size_allowed,
61df10c7 3366 enum bpf_access_src type,
01f810ac
AM
3367 struct bpf_call_arg_meta *meta);
3368
3369static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
3370{
3371 return cur_regs(env) + regno;
3372}
3373
3374/* Read the stack at 'ptr_regno + off' and put the result into the register
3375 * 'dst_regno'.
 3376 * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'),
3377 * but not its variable offset.
3378 * 'size' is assumed to be <= reg size and the access is assumed to be aligned.
3379 *
3380 * As opposed to check_stack_read_fixed_off, this function doesn't deal with
 3381 * filling registers (i.e. reads of a spilled register cannot be detected when
3382 * the offset is not fixed). We conservatively mark 'dst_regno' as containing
3383 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable
3384 * offset; for a fixed offset check_stack_read_fixed_off should be used
3385 * instead.
3386 */
3387static int check_stack_read_var_off(struct bpf_verifier_env *env,
3388 int ptr_regno, int off, int size, int dst_regno)
e4298d25 3389{
01f810ac
AM
3390 /* The state of the source register. */
3391 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3392 struct bpf_func_state *ptr_state = func(env, reg);
3393 int err;
3394 int min_off, max_off;
3395
3396 /* Note that we pass a NULL meta, so raw access will not be permitted.
e4298d25 3397 */
01f810ac
AM
3398 err = check_stack_range_initialized(env, ptr_regno, off, size,
3399 false, ACCESS_DIRECT, NULL);
3400 if (err)
3401 return err;
3402
3403 min_off = reg->smin_value + off;
3404 max_off = reg->smax_value + off;
3405 mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno);
3406 return 0;
3407}
3408
3409/* check_stack_read dispatches to check_stack_read_fixed_off or
3410 * check_stack_read_var_off.
3411 *
3412 * The caller must ensure that the offset falls within the allocated stack
3413 * bounds.
3414 *
3415 * 'dst_regno' is a register which will receive the value from the stack. It
3416 * can be -1, meaning that the read value is not going to a register.
3417 */
3418static int check_stack_read(struct bpf_verifier_env *env,
3419 int ptr_regno, int off, int size,
3420 int dst_regno)
3421{
3422 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3423 struct bpf_func_state *state = func(env, reg);
3424 int err;
3425 /* Some accesses are only permitted with a static offset. */
3426 bool var_off = !tnum_is_const(reg->var_off);
3427
3428 /* The offset is required to be static when reads don't go to a
3429 * register, in order to not leak pointers (see
3430 * check_stack_read_fixed_off).
3431 */
3432 if (dst_regno < 0 && var_off) {
e4298d25
DB
3433 char tn_buf[48];
3434
3435 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
01f810ac 3436 verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
e4298d25
DB
3437 tn_buf, off, size);
3438 return -EACCES;
3439 }
01f810ac
AM
3440 /* Variable offset is prohibited for unprivileged mode for simplicity
3441 * since it requires corresponding support in Spectre masking for stack
3442 * ALU. See also retrieve_ptr_limit().
3443 */
3444 if (!env->bypass_spec_v1 && var_off) {
3445 char tn_buf[48];
e4298d25 3446
01f810ac
AM
3447 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3448 verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n",
3449 ptr_regno, tn_buf);
e4298d25
DB
3450 return -EACCES;
3451 }
3452
01f810ac
AM
3453 if (!var_off) {
3454 off += reg->var_off.value;
3455 err = check_stack_read_fixed_off(env, state, off, size,
3456 dst_regno);
3457 } else {
3458 /* Variable offset stack reads need more conservative handling
3459 * than fixed offset ones. Note that dst_regno >= 0 on this
3460 * branch.
3461 */
3462 err = check_stack_read_var_off(env, ptr_regno, off, size,
3463 dst_regno);
3464 }
3465 return err;
3466}
3467
3468
3469/* check_stack_write dispatches to check_stack_write_fixed_off or
3470 * check_stack_write_var_off.
3471 *
3472 * 'ptr_regno' is the register used as a pointer into the stack.
3473 * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
3474 * 'value_regno' is the register whose value we're writing to the stack. It can
3475 * be -1, meaning that we're not writing from a register.
3476 *
3477 * The caller must ensure that the offset falls within the maximum stack size.
3478 */
3479static int check_stack_write(struct bpf_verifier_env *env,
3480 int ptr_regno, int off, int size,
3481 int value_regno, int insn_idx)
3482{
3483 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
3484 struct bpf_func_state *state = func(env, reg);
3485 int err;
3486
3487 if (tnum_is_const(reg->var_off)) {
3488 off += reg->var_off.value;
3489 err = check_stack_write_fixed_off(env, state, off, size,
3490 value_regno, insn_idx);
3491 } else {
 3492 /* Variable offset stack writes need more conservative handling
 3493 * than fixed offset ones.
3494 */
3495 err = check_stack_write_var_off(env, state,
3496 ptr_regno, off, size,
3497 value_regno, insn_idx);
3498 }
3499 return err;
e4298d25
DB
3500}
3501
591fe988
DB
3502static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
3503 int off, int size, enum bpf_access_type type)
3504{
3505 struct bpf_reg_state *regs = cur_regs(env);
3506 struct bpf_map *map = regs[regno].map_ptr;
3507 u32 cap = bpf_map_flags_to_cap(map);
3508
3509 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
3510 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
3511 map->value_size, off, size);
3512 return -EACCES;
3513 }
3514
3515 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
3516 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
3517 map->value_size, off, size);
3518 return -EACCES;
3519 }
3520
3521 return 0;
3522}
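/* Illustrative sketch (not part of verifier.c; 'ro_map' and 'key' are
 * hypothetical): for a map created with BPF_F_RDONLY_PROG,
 * bpf_map_flags_to_cap() drops BPF_MAP_CAN_WRITE, so a store through
 * the looked-up value pointer fails check_map_access_type():
 *
 *	u32 *val = bpf_map_lookup_elem(&ro_map, &key);
 *
 *	if (val)
 *		*val = 1;	// rejected: "write into map forbidden"
 */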
3523
457f4436
AN
3524/* check read/write into memory region (e.g., map value, ringbuf sample, etc) */
3525static int __check_mem_access(struct bpf_verifier_env *env, int regno,
3526 int off, int size, u32 mem_size,
3527 bool zero_size_allowed)
17a52670 3528{
457f4436
AN
3529 bool size_ok = size > 0 || (size == 0 && zero_size_allowed);
3530 struct bpf_reg_state *reg;
3531
3532 if (off >= 0 && size_ok && (u64)off + size <= mem_size)
3533 return 0;
17a52670 3534
457f4436
AN
3535 reg = &cur_regs(env)[regno];
3536 switch (reg->type) {
69c087ba
YS
3537 case PTR_TO_MAP_KEY:
3538 verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n",
3539 mem_size, off, size);
3540 break;
457f4436 3541 case PTR_TO_MAP_VALUE:
61bd5218 3542 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
457f4436
AN
3543 mem_size, off, size);
3544 break;
3545 case PTR_TO_PACKET:
3546 case PTR_TO_PACKET_META:
3547 case PTR_TO_PACKET_END:
3548 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
3549 off, size, regno, reg->id, off, mem_size);
3550 break;
3551 case PTR_TO_MEM:
3552 default:
3553 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n",
3554 mem_size, off, size);
17a52670 3555 }
457f4436
AN
3556
3557 return -EACCES;
17a52670
AS
3558}
3559
457f4436
AN
3560/* check read/write into a memory region with possible variable offset */
3561static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno,
3562 int off, int size, u32 mem_size,
3563 bool zero_size_allowed)
dbcfe5f7 3564{
f4d7e40a
AS
3565 struct bpf_verifier_state *vstate = env->cur_state;
3566 struct bpf_func_state *state = vstate->frame[vstate->curframe];
dbcfe5f7
GB
3567 struct bpf_reg_state *reg = &state->regs[regno];
3568 int err;
3569
457f4436 3570 /* We may have adjusted the register pointing to the memory region, so we
f1174f77
EC
3571 * need to try adding each of min_value and max_value to off
3572 * to make sure our theoretical access will be safe.
2e576648
CL
3573 *
3574 * The minimum value is only important with signed
dbcfe5f7
GB
3575 * comparisons where we can't assume the floor of a
 3576 * value is 0. If we are using signed variables for our
 3577 * indexes we need to make sure that whatever we use
 3578 * will have a set floor within our range.
3579 */
b7137c4e
DB
3580 if (reg->smin_value < 0 &&
3581 (reg->smin_value == S64_MIN ||
3582 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
3583 reg->smin_value + off < 0)) {
61bd5218 3584 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
dbcfe5f7
GB
3585 regno);
3586 return -EACCES;
3587 }
457f4436
AN
3588 err = __check_mem_access(env, regno, reg->smin_value + off, size,
3589 mem_size, zero_size_allowed);
dbcfe5f7 3590 if (err) {
457f4436 3591 verbose(env, "R%d min value is outside of the allowed memory range\n",
61bd5218 3592 regno);
dbcfe5f7
GB
3593 return err;
3594 }
3595
b03c9f9f
EC
3596 /* If we haven't set a max value then we need to bail since we can't be
3597 * sure we won't do bad things.
3598 * If reg->umax_value + off could overflow, treat that as unbounded too.
dbcfe5f7 3599 */
b03c9f9f 3600 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
457f4436 3601 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n",
dbcfe5f7
GB
3602 regno);
3603 return -EACCES;
3604 }
457f4436
AN
3605 err = __check_mem_access(env, regno, reg->umax_value + off, size,
3606 mem_size, zero_size_allowed);
3607 if (err) {
3608 verbose(env, "R%d max value is outside of the allowed memory range\n",
61bd5218 3609 regno);
457f4436
AN
3610 return err;
3611 }
3612
3613 return 0;
3614}
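/* Worked example (illustrative): a register with smin_value < 0 is
 * rejected up front, while a register bounded to [0, 16] accessing
 * 4 bytes at off 60 of a 64-byte region passes the smin probe
 * (60 + 0 + 4 <= 64) but fails the umax probe (60 + 16 + 4 > 64),
 * producing "max value is outside of the allowed memory range".
 */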
d83525ca 3615
e9147b44
KKD
3616static int __check_ptr_off_reg(struct bpf_verifier_env *env,
3617 const struct bpf_reg_state *reg, int regno,
3618 bool fixed_off_ok)
3619{
3620 /* Access to this pointer-typed register or passing it to a helper
3621 * is only allowed in its original, unmodified form.
3622 */
3623
3624 if (reg->off < 0) {
3625 verbose(env, "negative offset %s ptr R%d off=%d disallowed\n",
3626 reg_type_str(env, reg->type), regno, reg->off);
3627 return -EACCES;
3628 }
3629
3630 if (!fixed_off_ok && reg->off) {
3631 verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
3632 reg_type_str(env, reg->type), regno, reg->off);
3633 return -EACCES;
3634 }
3635
3636 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
3637 char tn_buf[48];
3638
3639 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3640 verbose(env, "variable %s access var_off=%s disallowed\n",
3641 reg_type_str(env, reg->type), tn_buf);
3642 return -EACCES;
3643 }
3644
3645 return 0;
3646}
3647
3648int check_ptr_off_reg(struct bpf_verifier_env *env,
3649 const struct bpf_reg_state *reg, int regno)
3650{
3651 return __check_ptr_off_reg(env, reg, regno, false);
3652}
3653
61df10c7
KKD
3654static int map_kptr_match_type(struct bpf_verifier_env *env,
3655 struct bpf_map_value_off_desc *off_desc,
3656 struct bpf_reg_state *reg, u32 regno)
3657{
3658 const char *targ_name = kernel_type_name(off_desc->kptr.btf, off_desc->kptr.btf_id);
6efe152d 3659 int perm_flags = PTR_MAYBE_NULL;
61df10c7
KKD
3660 const char *reg_name = "";
3661
6efe152d
KKD
 3662 /* Only the unreferenced case accepts untrusted pointers */
3663 if (off_desc->type == BPF_KPTR_UNREF)
3664 perm_flags |= PTR_UNTRUSTED;
3665
3666 if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags))
61df10c7
KKD
3667 goto bad_type;
3668
3669 if (!btf_is_kernel(reg->btf)) {
3670 verbose(env, "R%d must point to kernel BTF\n", regno);
3671 return -EINVAL;
3672 }
3673 /* We need to verify reg->type and reg->btf, before accessing reg->btf */
3674 reg_name = kernel_type_name(reg->btf, reg->btf_id);
3675
c0a5a21c
KKD
3676 /* For ref_ptr case, release function check should ensure we get one
3677 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the
3678 * normal store of unreferenced kptr, we must ensure var_off is zero.
3679 * Since ref_ptr cannot be accessed directly by BPF insns, checks for
3680 * reg->off and reg->ref_obj_id are not needed here.
3681 */
61df10c7
KKD
3682 if (__check_ptr_off_reg(env, reg, regno, true))
3683 return -EACCES;
3684
3685 /* A full type match is needed, as BTF can be vmlinux or module BTF, and
3686 * we also need to take into account the reg->off.
3687 *
3688 * We want to support cases like:
3689 *
3690 * struct foo {
3691 * struct bar br;
3692 * struct baz bz;
3693 * };
3694 *
3695 * struct foo *v;
3696 * v = func(); // PTR_TO_BTF_ID
3697 * val->foo = v; // reg->off is zero, btf and btf_id match type
3698 * val->bar = &v->br; // reg->off is still zero, but we need to retry with
3699 * // first member type of struct after comparison fails
3700 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked
3701 * // to match type
3702 *
3703 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off
2ab3b380
KKD
3704 * is zero. We must also ensure that btf_struct_ids_match does not walk
3705 * the struct to match type against first member of struct, i.e. reject
3706 * second case from above. Hence, when type is BPF_KPTR_REF, we set
3707 * strict mode to true for type match.
61df10c7
KKD
3708 */
3709 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
2ab3b380
KKD
3710 off_desc->kptr.btf, off_desc->kptr.btf_id,
3711 off_desc->type == BPF_KPTR_REF))
61df10c7
KKD
3712 goto bad_type;
3713 return 0;
3714bad_type:
3715 verbose(env, "invalid kptr access, R%d type=%s%s ", regno,
3716 reg_type_str(env, reg->type), reg_name);
6efe152d
KKD
3717 verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name);
3718 if (off_desc->type == BPF_KPTR_UNREF)
3719 verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED),
3720 targ_name);
3721 else
3722 verbose(env, "\n");
61df10c7
KKD
3723 return -EINVAL;
3724}
3725
3726static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
3727 int value_regno, int insn_idx,
3728 struct bpf_map_value_off_desc *off_desc)
3729{
3730 struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
3731 int class = BPF_CLASS(insn->code);
3732 struct bpf_reg_state *val_reg;
3733
3734 /* Things we already checked for in check_map_access and caller:
3735 * - Reject cases where variable offset may touch kptr
3736 * - size of access (must be BPF_DW)
3737 * - tnum_is_const(reg->var_off)
3738 * - off_desc->offset == off + reg->var_off.value
3739 */
3740 /* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */
3741 if (BPF_MODE(insn->code) != BPF_MEM) {
3742 verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n");
3743 return -EACCES;
3744 }
3745
6efe152d
KKD
3746 /* We only allow loading referenced kptr, since it will be marked as
3747 * untrusted, similar to unreferenced kptr.
3748 */
3749 if (class != BPF_LDX && off_desc->type == BPF_KPTR_REF) {
3750 verbose(env, "store to referenced kptr disallowed\n");
c0a5a21c
KKD
3751 return -EACCES;
3752 }
3753
61df10c7
KKD
3754 if (class == BPF_LDX) {
3755 val_reg = reg_state(env, value_regno);
3756 /* We can simply mark the value_regno receiving the pointer
3757 * value from map as PTR_TO_BTF_ID, with the correct type.
3758 */
3759 mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, off_desc->kptr.btf,
6efe152d 3760 off_desc->kptr.btf_id, PTR_MAYBE_NULL | PTR_UNTRUSTED);
61df10c7
KKD
3761 /* For mark_ptr_or_null_reg */
3762 val_reg->id = ++env->id_gen;
3763 } else if (class == BPF_STX) {
3764 val_reg = reg_state(env, value_regno);
3765 if (!register_is_null(val_reg) &&
3766 map_kptr_match_type(env, off_desc, val_reg, value_regno))
3767 return -EACCES;
3768 } else if (class == BPF_ST) {
3769 if (insn->imm) {
3770 verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n",
3771 off_desc->offset);
3772 return -EACCES;
3773 }
3774 } else {
3775 verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n");
3776 return -EACCES;
3777 }
3778 return 0;
3779}
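/* Illustrative sketch (not part of verifier.c; the struct and type-tag
 * names follow the kernel selftests and are assumptions): given a map
 * value such as
 *
 *	struct map_value {
 *		struct prog_test_ref_kfunc __kptr_ref *ptr;
 *	};
 *
 * a BPF_LDX of 'ptr' is marked PTR_TO_BTF_ID | PTR_MAYBE_NULL |
 * PTR_UNTRUSTED, while a direct BPF_STX store to it is rejected with
 * "store to referenced kptr disallowed"; referenced kptrs can only be
 * swapped in via the bpf_kptr_xchg() helper.
 */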
3780
457f4436
AN
3781/* check read/write into a map element with possible variable offset */
3782static int check_map_access(struct bpf_verifier_env *env, u32 regno,
61df10c7
KKD
3783 int off, int size, bool zero_size_allowed,
3784 enum bpf_access_src src)
457f4436
AN
3785{
3786 struct bpf_verifier_state *vstate = env->cur_state;
3787 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3788 struct bpf_reg_state *reg = &state->regs[regno];
3789 struct bpf_map *map = reg->map_ptr;
3790 int err;
3791
3792 err = check_mem_region_access(env, regno, off, size, map->value_size,
3793 zero_size_allowed);
3794 if (err)
3795 return err;
3796
3797 if (map_value_has_spin_lock(map)) {
3798 u32 lock = map->spin_lock_off;
d83525ca
AS
3799
3800 /* if any part of struct bpf_spin_lock can be touched by
3801 * load/store reject this program.
3802 * To check that [x1, x2) overlaps with [y1, y2)
3803 * it is sufficient to check x1 < y2 && y1 < x2.
3804 */
3805 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
3806 lock < reg->umax_value + off + size) {
3807 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
3808 return -EACCES;
3809 }
3810 }
68134668
AS
3811 if (map_value_has_timer(map)) {
3812 u32 t = map->timer_off;
3813
3814 if (reg->smin_value + off < t + sizeof(struct bpf_timer) &&
3815 t < reg->umax_value + off + size) {
3816 verbose(env, "bpf_timer cannot be accessed directly by load/store\n");
3817 return -EACCES;
3818 }
3819 }
61df10c7
KKD
3820 if (map_value_has_kptrs(map)) {
3821 struct bpf_map_value_off *tab = map->kptr_off_tab;
3822 int i;
3823
3824 for (i = 0; i < tab->nr_off; i++) {
3825 u32 p = tab->off[i].offset;
3826
3827 if (reg->smin_value + off < p + sizeof(u64) &&
3828 p < reg->umax_value + off + size) {
3829 if (src != ACCESS_DIRECT) {
3830 verbose(env, "kptr cannot be accessed indirectly by helper\n");
3831 return -EACCES;
3832 }
3833 if (!tnum_is_const(reg->var_off)) {
3834 verbose(env, "kptr access cannot have variable offset\n");
3835 return -EACCES;
3836 }
3837 if (p != off + reg->var_off.value) {
3838 verbose(env, "kptr access misaligned expected=%u off=%llu\n",
3839 p, off + reg->var_off.value);
3840 return -EACCES;
3841 }
3842 if (size != bpf_size_to_bytes(BPF_DW)) {
3843 verbose(env, "kptr access size must be BPF_DW\n");
3844 return -EACCES;
3845 }
3846 break;
3847 }
3848 }
3849 }
f1174f77 3850 return err;
dbcfe5f7
GB
3851}
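/* Illustrative sketch (not part of verifier.c): the half-open interval
 * test used above for the bpf_spin_lock, bpf_timer and kptr fields,
 * written as a standalone helper. [x1, x2) and [y1, y2) overlap iff
 * each interval starts before the other one ends.
 */
static inline bool ranges_overlap(u64 x1, u64 x2, u64 y1, u64 y2)
{
	return x1 < y2 && y1 < x2;
}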
3852
969bf05e
AS
3853#define MAX_PACKET_OFF 0xffff
3854
58e2af8b 3855static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
3a0af8fd
TG
3856 const struct bpf_call_arg_meta *meta,
3857 enum bpf_access_type t)
4acf6c0b 3858{
7e40781c
UP
3859 enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
3860
3861 switch (prog_type) {
5d66fa7d 3862 /* Program types only with direct read access go here! */
3a0af8fd
TG
3863 case BPF_PROG_TYPE_LWT_IN:
3864 case BPF_PROG_TYPE_LWT_OUT:
004d4b27 3865 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2dbb9b9e 3866 case BPF_PROG_TYPE_SK_REUSEPORT:
5d66fa7d 3867 case BPF_PROG_TYPE_FLOW_DISSECTOR:
d5563d36 3868 case BPF_PROG_TYPE_CGROUP_SKB:
3a0af8fd
TG
3869 if (t == BPF_WRITE)
3870 return false;
8731745e 3871 fallthrough;
5d66fa7d
DB
3872
3873 /* Program types with direct read + write access go here! */
36bbef52
DB
3874 case BPF_PROG_TYPE_SCHED_CLS:
3875 case BPF_PROG_TYPE_SCHED_ACT:
4acf6c0b 3876 case BPF_PROG_TYPE_XDP:
3a0af8fd 3877 case BPF_PROG_TYPE_LWT_XMIT:
8a31db56 3878 case BPF_PROG_TYPE_SK_SKB:
4f738adb 3879 case BPF_PROG_TYPE_SK_MSG:
36bbef52
DB
3880 if (meta)
3881 return meta->pkt_access;
3882
3883 env->seen_direct_write = true;
4acf6c0b 3884 return true;
0d01da6a
SF
3885
3886 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3887 if (t == BPF_WRITE)
3888 env->seen_direct_write = true;
3889
3890 return true;
3891
4acf6c0b
BB
3892 default:
3893 return false;
3894 }
3895}
3896
f1174f77 3897static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 3898 int size, bool zero_size_allowed)
f1174f77 3899{
638f5b90 3900 struct bpf_reg_state *regs = cur_regs(env);
f1174f77
EC
3901 struct bpf_reg_state *reg = &regs[regno];
3902 int err;
3903
3904 /* We may have added a variable offset to the packet pointer; but any
3905 * reg->range we have comes after that. We are only checking the fixed
3906 * offset.
3907 */
3908
3909 /* We don't allow negative numbers, because we aren't tracking enough
3910 * detail to prove they're safe.
3911 */
b03c9f9f 3912 if (reg->smin_value < 0) {
61bd5218 3913 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
f1174f77
EC
3914 regno);
3915 return -EACCES;
3916 }
6d94e741
AS
3917
3918 err = reg->range < 0 ? -EINVAL :
3919 __check_mem_access(env, regno, off, size, reg->range,
457f4436 3920 zero_size_allowed);
f1174f77 3921 if (err) {
61bd5218 3922 verbose(env, "R%d offset is outside of the packet\n", regno);
f1174f77
EC
3923 return err;
3924 }
e647815a 3925
457f4436 3926 /* __check_mem_access has made sure "off + size - 1" is within u16.
e647815a
JW
3927 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
3928 * otherwise find_good_pkt_pointers would have refused to set range info
457f4436 3929 * that __check_mem_access would have rejected this pkt access.
e647815a
JW
3930 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
3931 */
3932 env->prog->aux->max_pkt_offset =
3933 max_t(u32, env->prog->aux->max_pkt_offset,
3934 off + reg->umax_value + size - 1);
3935
f1174f77
EC
3936 return err;
3937}
3938
3939/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
31fd8581 3940static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
9e15db66 3941 enum bpf_access_type t, enum bpf_reg_type *reg_type,
22dc4a0f 3942 struct btf **btf, u32 *btf_id)
17a52670 3943{
f96da094
DB
3944 struct bpf_insn_access_aux info = {
3945 .reg_type = *reg_type,
9e15db66 3946 .log = &env->log,
f96da094 3947 };
31fd8581 3948
4f9218aa 3949 if (env->ops->is_valid_access &&
5e43f899 3950 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
f96da094
DB
3951 /* A non zero info.ctx_field_size indicates that this field is a
3952 * candidate for later verifier transformation to load the whole
3953 * field and then apply a mask when accessed with a narrower
3954 * access than actual ctx access size. A zero info.ctx_field_size
3955 * will only allow for whole field access and rejects any other
3956 * type of narrower access.
31fd8581 3957 */
23994631 3958 *reg_type = info.reg_type;
31fd8581 3959
c25b2ae1 3960 if (base_type(*reg_type) == PTR_TO_BTF_ID) {
22dc4a0f 3961 *btf = info.btf;
9e15db66 3962 *btf_id = info.btf_id;
22dc4a0f 3963 } else {
9e15db66 3964 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
22dc4a0f 3965 }
32bbe007
AS
3966 /* remember the offset of last byte accessed in ctx */
3967 if (env->prog->aux->max_ctx_offset < off + size)
3968 env->prog->aux->max_ctx_offset = off + size;
17a52670 3969 return 0;
32bbe007 3970 }
17a52670 3971
61bd5218 3972 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
17a52670
AS
3973 return -EACCES;
3974}
3975
d58e468b
PP
3976static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
3977 int size)
3978{
3979 if (size < 0 || off < 0 ||
3980 (u64)off + size > sizeof(struct bpf_flow_keys)) {
3981 verbose(env, "invalid access to flow keys off=%d size=%d\n",
3982 off, size);
3983 return -EACCES;
3984 }
3985 return 0;
3986}
3987
5f456649
MKL
3988static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
3989 u32 regno, int off, int size,
3990 enum bpf_access_type t)
c64b7983
JS
3991{
3992 struct bpf_reg_state *regs = cur_regs(env);
3993 struct bpf_reg_state *reg = &regs[regno];
5f456649 3994 struct bpf_insn_access_aux info = {};
46f8bc92 3995 bool valid;
c64b7983
JS
3996
3997 if (reg->smin_value < 0) {
3998 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
3999 regno);
4000 return -EACCES;
4001 }
4002
46f8bc92
MKL
4003 switch (reg->type) {
4004 case PTR_TO_SOCK_COMMON:
4005 valid = bpf_sock_common_is_valid_access(off, size, t, &info);
4006 break;
4007 case PTR_TO_SOCKET:
4008 valid = bpf_sock_is_valid_access(off, size, t, &info);
4009 break;
655a51e5
MKL
4010 case PTR_TO_TCP_SOCK:
4011 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
4012 break;
fada7fdc
JL
4013 case PTR_TO_XDP_SOCK:
4014 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
4015 break;
46f8bc92
MKL
4016 default:
4017 valid = false;
c64b7983
JS
4018 }
4019
5f456649 4020
46f8bc92
MKL
4021 if (valid) {
4022 env->insn_aux_data[insn_idx].ctx_field_size =
4023 info.ctx_field_size;
4024 return 0;
4025 }
4026
4027 verbose(env, "R%d invalid %s access off=%d size=%d\n",
c25b2ae1 4028 regno, reg_type_str(env, reg->type), off, size);
46f8bc92
MKL
4029
4030 return -EACCES;
c64b7983
JS
4031}
4032
4cabc5b1
DB
4033static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
4034{
2a159c6f 4035 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4cabc5b1
DB
4036}
4037
f37a8cb8
DB
4038static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
4039{
2a159c6f 4040 const struct bpf_reg_state *reg = reg_state(env, regno);
f37a8cb8 4041
46f8bc92
MKL
4042 return reg->type == PTR_TO_CTX;
4043}
4044
4045static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
4046{
4047 const struct bpf_reg_state *reg = reg_state(env, regno);
4048
4049 return type_is_sk_pointer(reg->type);
f37a8cb8
DB
4050}
4051
ca369602
DB
4052static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
4053{
2a159c6f 4054 const struct bpf_reg_state *reg = reg_state(env, regno);
ca369602
DB
4055
4056 return type_is_pkt_pointer(reg->type);
4057}
4058
4b5defde
DB
4059static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
4060{
4061 const struct bpf_reg_state *reg = reg_state(env, regno);
4062
 4063 /* Separate from is_ctx_reg() since we still want to allow BPF_ST here. */
4064 return reg->type == PTR_TO_FLOW_KEYS;
4065}
4066
61bd5218
JK
4067static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
4068 const struct bpf_reg_state *reg,
d1174416 4069 int off, int size, bool strict)
969bf05e 4070{
f1174f77 4071 struct tnum reg_off;
e07b98d9 4072 int ip_align;
d1174416
DM
4073
4074 /* Byte size accesses are always allowed. */
4075 if (!strict || size == 1)
4076 return 0;
4077
e4eda884
DM
4078 /* For platforms that do not have a Kconfig enabling
4079 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
4080 * NET_IP_ALIGN is universally set to '2'. And on platforms
4081 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
4082 * to this code only in strict mode where we want to emulate
4083 * the NET_IP_ALIGN==2 checking. Therefore use an
4084 * unconditional IP align value of '2'.
e07b98d9 4085 */
e4eda884 4086 ip_align = 2;
f1174f77
EC
4087
4088 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
4089 if (!tnum_is_aligned(reg_off, size)) {
4090 char tn_buf[48];
4091
4092 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218
JK
4093 verbose(env,
4094 "misaligned packet access off %d+%s+%d+%d size %d\n",
f1174f77 4095 ip_align, tn_buf, reg->off, off, size);
969bf05e
AS
4096 return -EACCES;
4097 }
79adffcd 4098
969bf05e
AS
4099 return 0;
4100}
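/* Worked example (illustrative): in strict mode a 4-byte packet load at
 * offset 12 checks 2 + 12 = 14, which is not 4-byte aligned, and is
 * rejected; the same load at offset 14 checks 2 + 14 = 16 and passes.
 * This mirrors NET_IP_ALIGN == 2 placing the IP header on a 4-byte
 * boundary behind the 14-byte Ethernet header.
 */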
4101
61bd5218
JK
4102static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
4103 const struct bpf_reg_state *reg,
f1174f77
EC
4104 const char *pointer_desc,
4105 int off, int size, bool strict)
79adffcd 4106{
f1174f77
EC
4107 struct tnum reg_off;
4108
4109 /* Byte size accesses are always allowed. */
4110 if (!strict || size == 1)
4111 return 0;
4112
4113 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
4114 if (!tnum_is_aligned(reg_off, size)) {
4115 char tn_buf[48];
4116
4117 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 4118 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
f1174f77 4119 pointer_desc, tn_buf, reg->off, off, size);
79adffcd
DB
4120 return -EACCES;
4121 }
4122
969bf05e
AS
4123 return 0;
4124}
4125
e07b98d9 4126static int check_ptr_alignment(struct bpf_verifier_env *env,
ca369602
DB
4127 const struct bpf_reg_state *reg, int off,
4128 int size, bool strict_alignment_once)
79adffcd 4129{
ca369602 4130 bool strict = env->strict_alignment || strict_alignment_once;
f1174f77 4131 const char *pointer_desc = "";
d1174416 4132
79adffcd
DB
4133 switch (reg->type) {
4134 case PTR_TO_PACKET:
de8f3a83
DB
4135 case PTR_TO_PACKET_META:
4136 /* Special case, because of NET_IP_ALIGN. Given metadata sits
4137 * right in front, treat it the very same way.
4138 */
61bd5218 4139 return check_pkt_ptr_alignment(env, reg, off, size, strict);
d58e468b
PP
4140 case PTR_TO_FLOW_KEYS:
4141 pointer_desc = "flow keys ";
4142 break;
69c087ba
YS
4143 case PTR_TO_MAP_KEY:
4144 pointer_desc = "key ";
4145 break;
f1174f77
EC
4146 case PTR_TO_MAP_VALUE:
4147 pointer_desc = "value ";
4148 break;
4149 case PTR_TO_CTX:
4150 pointer_desc = "context ";
4151 break;
4152 case PTR_TO_STACK:
4153 pointer_desc = "stack ";
01f810ac
AM
4154 /* The stack spill tracking logic in check_stack_write_fixed_off()
4155 * and check_stack_read_fixed_off() relies on stack accesses being
a5ec6ae1
JH
4156 * aligned.
4157 */
4158 strict = true;
f1174f77 4159 break;
c64b7983
JS
4160 case PTR_TO_SOCKET:
4161 pointer_desc = "sock ";
4162 break;
46f8bc92
MKL
4163 case PTR_TO_SOCK_COMMON:
4164 pointer_desc = "sock_common ";
4165 break;
655a51e5
MKL
4166 case PTR_TO_TCP_SOCK:
4167 pointer_desc = "tcp_sock ";
4168 break;
fada7fdc
JL
4169 case PTR_TO_XDP_SOCK:
4170 pointer_desc = "xdp_sock ";
4171 break;
79adffcd 4172 default:
f1174f77 4173 break;
79adffcd 4174 }
61bd5218
JK
4175 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
4176 strict);
79adffcd
DB
4177}
4178
f4d7e40a
AS
4179static int update_stack_depth(struct bpf_verifier_env *env,
4180 const struct bpf_func_state *func,
4181 int off)
4182{
9c8105bd 4183 u16 stack = env->subprog_info[func->subprogno].stack_depth;
f4d7e40a
AS
4184
4185 if (stack >= -off)
4186 return 0;
4187
4188 /* update known max for given subprogram */
9c8105bd 4189 env->subprog_info[func->subprogno].stack_depth = -off;
70a87ffe
AS
4190 return 0;
4191}
f4d7e40a 4192
70a87ffe
AS
4193/* starting from main bpf function walk all instructions of the function
4194 * and recursively walk all callees that given function can call.
4195 * Ignore jump and exit insns.
4196 * Since recursion is prevented by check_cfg() this algorithm
4197 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
4198 */
4199static int check_max_stack_depth(struct bpf_verifier_env *env)
4200{
9c8105bd
JW
4201 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
4202 struct bpf_subprog_info *subprog = env->subprog_info;
70a87ffe 4203 struct bpf_insn *insn = env->prog->insnsi;
ebf7d1f5 4204 bool tail_call_reachable = false;
70a87ffe
AS
4205 int ret_insn[MAX_CALL_FRAMES];
4206 int ret_prog[MAX_CALL_FRAMES];
ebf7d1f5 4207 int j;
f4d7e40a 4208
70a87ffe 4209process_func:
7f6e4312
MF
4210 /* protect against potential stack overflow that might happen when
4211 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
 4212 * depth for such cases down to 256 so that the worst case scenario
 4213 * would result in 8k of stack (32, the tail call limit, * 256 =
4214 * 8k).
4215 *
4216 * To get the idea what might happen, see an example:
4217 * func1 -> sub rsp, 128
4218 * subfunc1 -> sub rsp, 256
4219 * tailcall1 -> add rsp, 256
4220 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
4221 * subfunc2 -> sub rsp, 64
4222 * subfunc22 -> sub rsp, 128
4223 * tailcall2 -> add rsp, 128
4224 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
4225 *
4226 * tailcall will unwind the current stack frame but it will not get rid
4227 * of caller's stack as shown on the example above.
4228 */
4229 if (idx && subprog[idx].has_tail_call && depth >= 256) {
4230 verbose(env,
4231 "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n",
4232 depth);
4233 return -EACCES;
4234 }
70a87ffe
AS
4235 /* round up to 32-bytes, since this is granularity
4236 * of interpreter stack size
4237 */
9c8105bd 4238 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe 4239 if (depth > MAX_BPF_STACK) {
f4d7e40a 4240 verbose(env, "combined stack size of %d calls is %d. Too large\n",
70a87ffe 4241 frame + 1, depth);
f4d7e40a
AS
4242 return -EACCES;
4243 }
70a87ffe 4244continue_func:
4cb3d99c 4245 subprog_end = subprog[idx + 1].start;
70a87ffe 4246 for (; i < subprog_end; i++) {
7ddc80a4
AS
4247 int next_insn;
4248
69c087ba 4249 if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i))
70a87ffe
AS
4250 continue;
4251 /* remember insn and function to return to */
4252 ret_insn[frame] = i + 1;
9c8105bd 4253 ret_prog[frame] = idx;
70a87ffe
AS
4254
4255 /* find the callee */
7ddc80a4
AS
4256 next_insn = i + insn[i].imm + 1;
4257 idx = find_subprog(env, next_insn);
9c8105bd 4258 if (idx < 0) {
70a87ffe 4259 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
7ddc80a4 4260 next_insn);
70a87ffe
AS
4261 return -EFAULT;
4262 }
7ddc80a4
AS
4263 if (subprog[idx].is_async_cb) {
4264 if (subprog[idx].has_tail_call) {
4265 verbose(env, "verifier bug. subprog has tail_call and async cb\n");
4266 return -EFAULT;
4267 }
4268 /* async callbacks don't increase bpf prog stack size */
4269 continue;
4270 }
4271 i = next_insn;
ebf7d1f5
MF
4272
4273 if (subprog[idx].has_tail_call)
4274 tail_call_reachable = true;
4275
70a87ffe
AS
4276 frame++;
4277 if (frame >= MAX_CALL_FRAMES) {
927cb781
PC
4278 verbose(env, "the call stack of %d frames is too deep !\n",
4279 frame);
4280 return -E2BIG;
70a87ffe
AS
4281 }
4282 goto process_func;
4283 }
ebf7d1f5
MF
4284 /* if tail call got detected across bpf2bpf calls then mark each of the
4285 * currently present subprog frames as tail call reachable subprogs;
4286 * this info will be utilized by JIT so that we will be preserving the
4287 * tail call counter throughout bpf2bpf calls combined with tailcalls
4288 */
4289 if (tail_call_reachable)
4290 for (j = 0; j < frame; j++)
4291 subprog[ret_prog[j]].tail_call_reachable = true;
5dd0a6b8
DB
4292 if (subprog[0].tail_call_reachable)
4293 env->prog->aux->tail_call_reachable = true;
ebf7d1f5 4294
70a87ffe
AS
4295 /* end of for() loop means the last insn of the 'subprog'
4296 * was reached. Doesn't matter whether it was JA or EXIT
4297 */
4298 if (frame == 0)
4299 return 0;
9c8105bd 4300 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe
AS
4301 frame--;
4302 i = ret_insn[frame];
9c8105bd 4303 idx = ret_prog[frame];
70a87ffe 4304 goto continue_func;
f4d7e40a
AS
4305}
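/* Worked example (illustrative): each frame is charged
 * round_up(max(stack_depth, 1), 32) bytes, so a call chain whose
 * subprogs use 100, 40 and 1 bytes of stack accounts for
 * 128 + 64 + 32 = 224 bytes against the MAX_BPF_STACK limit of 512.
 */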
4306
19d28fbd 4307#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
4308static int get_callee_stack_depth(struct bpf_verifier_env *env,
4309 const struct bpf_insn *insn, int idx)
4310{
4311 int start = idx + insn->imm + 1, subprog;
4312
4313 subprog = find_subprog(env, start);
4314 if (subprog < 0) {
4315 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
4316 start);
4317 return -EFAULT;
4318 }
9c8105bd 4319 return env->subprog_info[subprog].stack_depth;
1ea47e01 4320}
19d28fbd 4321#endif
1ea47e01 4322
afbf21dc
YS
4323static int __check_buffer_access(struct bpf_verifier_env *env,
4324 const char *buf_info,
4325 const struct bpf_reg_state *reg,
4326 int regno, int off, int size)
9df1c28b
MM
4327{
4328 if (off < 0) {
4329 verbose(env,
4fc00b79 4330 "R%d invalid %s buffer access: off=%d, size=%d\n",
afbf21dc 4331 regno, buf_info, off, size);
9df1c28b
MM
4332 return -EACCES;
4333 }
4334 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4335 char tn_buf[48];
4336
4337 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4338 verbose(env,
4fc00b79 4339 "R%d invalid variable buffer offset: off=%d, var_off=%s\n",
9df1c28b
MM
4340 regno, off, tn_buf);
4341 return -EACCES;
4342 }
afbf21dc
YS
4343
4344 return 0;
4345}
4346
4347static int check_tp_buffer_access(struct bpf_verifier_env *env,
4348 const struct bpf_reg_state *reg,
4349 int regno, int off, int size)
4350{
4351 int err;
4352
4353 err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
4354 if (err)
4355 return err;
4356
9df1c28b
MM
4357 if (off + size > env->prog->aux->max_tp_access)
4358 env->prog->aux->max_tp_access = off + size;
4359
4360 return 0;
4361}
4362
afbf21dc
YS
4363static int check_buffer_access(struct bpf_verifier_env *env,
4364 const struct bpf_reg_state *reg,
4365 int regno, int off, int size,
4366 bool zero_size_allowed,
afbf21dc
YS
4367 u32 *max_access)
4368{
44e9a741 4369 const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
afbf21dc
YS
4370 int err;
4371
4372 err = __check_buffer_access(env, buf_info, reg, regno, off, size);
4373 if (err)
4374 return err;
4375
4376 if (off + size > *max_access)
4377 *max_access = off + size;
4378
4379 return 0;
4380}
4381
3f50f132
JF
 4382/* BPF architecture zero extends alu32 ops into 64-bit registers */
4383static void zext_32_to_64(struct bpf_reg_state *reg)
4384{
4385 reg->var_off = tnum_subreg(reg->var_off);
4386 __reg_assign_32_into_64(reg);
4387}
9df1c28b 4388
0c17d1d2
JH
4389/* truncate register to smaller size (in bytes)
4390 * must be called with size < BPF_REG_SIZE
4391 */
4392static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
4393{
4394 u64 mask;
4395
4396 /* clear high bits in bit representation */
4397 reg->var_off = tnum_cast(reg->var_off, size);
4398
4399 /* fix arithmetic bounds */
4400 mask = ((u64)1 << (size * 8)) - 1;
4401 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
4402 reg->umin_value &= mask;
4403 reg->umax_value &= mask;
4404 } else {
4405 reg->umin_value = 0;
4406 reg->umax_value = mask;
4407 }
4408 reg->smin_value = reg->umin_value;
4409 reg->smax_value = reg->umax_value;
3f50f132
JF
4410
 4411 /* If the size is smaller than the 32-bit register, the 32-bit
 4412 * register values are also truncated, so we push the 64-bit bounds
 4413 * into the 32-bit bounds. Sizes below 32 bits were truncated above.
4414 */
4415 if (size >= 4)
4416 return;
4417 __reg_combine_64_into_32(reg);
0c17d1d2
JH
4418}
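/* Worked example (illustrative): truncating to size == 2 keeps useful
 * bounds only when umin and umax agree on the discarded high bits.
 * [0x10000, 0x100ff] coerces to [0x0, 0xff], while [0x0, 0x10000]
 * must collapse to the full [0x0, 0xffff] range.
 */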
4419
a23740ec
AN
4420static bool bpf_map_is_rdonly(const struct bpf_map *map)
4421{
353050be
DB
 4422 /* A map is considered read-only if the following conditions are true:
4423 *
4424 * 1) BPF program side cannot change any of the map content. The
4425 * BPF_F_RDONLY_PROG flag is throughout the lifetime of a map
4426 * and was set at map creation time.
4427 * 2) The map value(s) have been initialized from user space by a
4428 * loader and then "frozen", such that no new map update/delete
4429 * operations from syscall side are possible for the rest of
4430 * the map's lifetime from that point onwards.
4431 * 3) Any parallel/pending map update/delete operations from syscall
4432 * side have been completed. Only after that point, it's safe to
4433 * assume that map value(s) are immutable.
4434 */
4435 return (map->map_flags & BPF_F_RDONLY_PROG) &&
4436 READ_ONCE(map->frozen) &&
4437 !bpf_map_write_active(map);
a23740ec
AN
4438}
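/* Userspace sketch (illustrative, assumes libbpf; map and variable
 * names are hypothetical): all three conditions line up once a map is
 * created with BPF_F_RDONLY_PROG, populated, and then frozen:
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .map_flags = BPF_F_RDONLY_PROG);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "ro_map", 4, 8, 1, &opts);
 *	__u32 key = 0;
 *	__u64 val = 42;
 *
 *	bpf_map_update_elem(fd, &key, &val, BPF_ANY);
 *	bpf_map_freeze(fd);	// no more syscall-side writes
 */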
4439
4440static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
4441{
4442 void *ptr;
4443 u64 addr;
4444 int err;
4445
4446 err = map->ops->map_direct_value_addr(map, &addr, off);
4447 if (err)
4448 return err;
2dedd7d2 4449 ptr = (void *)(long)addr + off;
a23740ec
AN
4450
4451 switch (size) {
4452 case sizeof(u8):
4453 *val = (u64)*(u8 *)ptr;
4454 break;
4455 case sizeof(u16):
4456 *val = (u64)*(u16 *)ptr;
4457 break;
4458 case sizeof(u32):
4459 *val = (u64)*(u32 *)ptr;
4460 break;
4461 case sizeof(u64):
4462 *val = *(u64 *)ptr;
4463 break;
4464 default:
4465 return -EINVAL;
4466 }
4467 return 0;
4468}
4469
9e15db66
AS
4470static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
4471 struct bpf_reg_state *regs,
4472 int regno, int off, int size,
4473 enum bpf_access_type atype,
4474 int value_regno)
4475{
4476 struct bpf_reg_state *reg = regs + regno;
22dc4a0f
AN
4477 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
4478 const char *tname = btf_name_by_offset(reg->btf, t->name_off);
c6f1bfe8 4479 enum bpf_type_flag flag = 0;
9e15db66
AS
4480 u32 btf_id;
4481 int ret;
4482
9e15db66
AS
4483 if (off < 0) {
4484 verbose(env,
4485 "R%d is ptr_%s invalid negative access: off=%d\n",
4486 regno, tname, off);
4487 return -EACCES;
4488 }
4489 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
4490 char tn_buf[48];
4491
4492 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4493 verbose(env,
4494 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
4495 regno, tname, off, tn_buf);
4496 return -EACCES;
4497 }
4498
c6f1bfe8
YS
4499 if (reg->type & MEM_USER) {
4500 verbose(env,
4501 "R%d is ptr_%s access user memory: off=%d\n",
4502 regno, tname, off);
4503 return -EACCES;
4504 }
4505
5844101a
HL
4506 if (reg->type & MEM_PERCPU) {
4507 verbose(env,
4508 "R%d is ptr_%s access percpu memory: off=%d\n",
4509 regno, tname, off);
4510 return -EACCES;
4511 }
4512
27ae7997 4513 if (env->ops->btf_struct_access) {
22dc4a0f 4514 ret = env->ops->btf_struct_access(&env->log, reg->btf, t,
c6f1bfe8 4515 off, size, atype, &btf_id, &flag);
27ae7997
MKL
4516 } else {
4517 if (atype != BPF_READ) {
4518 verbose(env, "only read is supported\n");
4519 return -EACCES;
4520 }
4521
22dc4a0f 4522 ret = btf_struct_access(&env->log, reg->btf, t, off, size,
c6f1bfe8 4523 atype, &btf_id, &flag);
27ae7997
MKL
4524 }
4525
9e15db66
AS
4526 if (ret < 0)
4527 return ret;
4528
6efe152d
KKD
4529 /* If this is an untrusted pointer, all pointers formed by walking it
4530 * also inherit the untrusted flag.
4531 */
4532 if (type_flag(reg->type) & PTR_UNTRUSTED)
4533 flag |= PTR_UNTRUSTED;
4534
41c48f3a 4535 if (atype == BPF_READ && value_regno >= 0)
c6f1bfe8 4536 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
41c48f3a
AI
4537
4538 return 0;
4539}
4540
4541static int check_ptr_to_map_access(struct bpf_verifier_env *env,
4542 struct bpf_reg_state *regs,
4543 int regno, int off, int size,
4544 enum bpf_access_type atype,
4545 int value_regno)
4546{
4547 struct bpf_reg_state *reg = regs + regno;
4548 struct bpf_map *map = reg->map_ptr;
c6f1bfe8 4549 enum bpf_type_flag flag = 0;
41c48f3a
AI
4550 const struct btf_type *t;
4551 const char *tname;
4552 u32 btf_id;
4553 int ret;
4554
4555 if (!btf_vmlinux) {
4556 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
4557 return -ENOTSUPP;
4558 }
4559
4560 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
4561 verbose(env, "map_ptr access not supported for map type %d\n",
4562 map->map_type);
4563 return -ENOTSUPP;
4564 }
4565
4566 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
4567 tname = btf_name_by_offset(btf_vmlinux, t->name_off);
4568
4569 if (!env->allow_ptr_to_map_access) {
4570 verbose(env,
4571 "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
4572 tname);
4573 return -EPERM;
9e15db66 4574 }
27ae7997 4575
41c48f3a
AI
4576 if (off < 0) {
4577 verbose(env, "R%d is %s invalid negative access: off=%d\n",
4578 regno, tname, off);
4579 return -EACCES;
4580 }
4581
4582 if (atype != BPF_READ) {
4583 verbose(env, "only read from %s is supported\n", tname);
4584 return -EACCES;
4585 }
4586
c6f1bfe8 4587 ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id, &flag);
41c48f3a
AI
4588 if (ret < 0)
4589 return ret;
4590
4591 if (value_regno >= 0)
c6f1bfe8 4592 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
41c48f3a 4593
9e15db66
AS
4594 return 0;
4595}
4596
01f810ac
AM
4597/* Check that the stack access at the given offset is within bounds. The
4598 * maximum valid offset is -1.
4599 *
4600 * The minimum valid offset is -MAX_BPF_STACK for writes, and
4601 * -state->allocated_stack for reads.
4602 */
4603static int check_stack_slot_within_bounds(int off,
4604 struct bpf_func_state *state,
4605 enum bpf_access_type t)
4606{
4607 int min_valid_off;
4608
4609 if (t == BPF_WRITE)
4610 min_valid_off = -MAX_BPF_STACK;
4611 else
4612 min_valid_off = -state->allocated_stack;
4613
4614 if (off < min_valid_off || off > -1)
4615 return -EACCES;
4616 return 0;
4617}
4618
4619/* Check that the stack access at 'regno + off' falls within the maximum stack
4620 * bounds.
4621 *
4622 * 'off' includes `regno->offset`, but not its dynamic part (if any).
4623 */
4624static int check_stack_access_within_bounds(
4625 struct bpf_verifier_env *env,
4626 int regno, int off, int access_size,
61df10c7 4627 enum bpf_access_src src, enum bpf_access_type type)
01f810ac
AM
4628{
4629 struct bpf_reg_state *regs = cur_regs(env);
4630 struct bpf_reg_state *reg = regs + regno;
4631 struct bpf_func_state *state = func(env, reg);
4632 int min_off, max_off;
4633 int err;
4634 char *err_extra;
4635
4636 if (src == ACCESS_HELPER)
4637 /* We don't know if helpers are reading or writing (or both). */
4638 err_extra = " indirect access to";
4639 else if (type == BPF_READ)
4640 err_extra = " read from";
4641 else
4642 err_extra = " write to";
4643
4644 if (tnum_is_const(reg->var_off)) {
4645 min_off = reg->var_off.value + off;
4646 if (access_size > 0)
4647 max_off = min_off + access_size - 1;
4648 else
4649 max_off = min_off;
4650 } else {
4651 if (reg->smax_value >= BPF_MAX_VAR_OFF ||
4652 reg->smin_value <= -BPF_MAX_VAR_OFF) {
4653 verbose(env, "invalid unbounded variable-offset%s stack R%d\n",
4654 err_extra, regno);
4655 return -EACCES;
4656 }
4657 min_off = reg->smin_value + off;
4658 if (access_size > 0)
4659 max_off = reg->smax_value + off + access_size - 1;
4660 else
4661 max_off = min_off;
4662 }
4663
4664 err = check_stack_slot_within_bounds(min_off, state, type);
4665 if (!err)
4666 err = check_stack_slot_within_bounds(max_off, state, type);
4667
4668 if (err) {
4669 if (tnum_is_const(reg->var_off)) {
4670 verbose(env, "invalid%s stack R%d off=%d size=%d\n",
4671 err_extra, regno, off, access_size);
4672 } else {
4673 char tn_buf[48];
4674
4675 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
4676 verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n",
4677 err_extra, regno, tn_buf, access_size);
4678 }
4679 }
4680 return err;
4681}
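/* Worked example (illustrative): for a pointer register with
 * smin_value == -16, smax_value == -8, off == -8 and access_size == 8,
 * the checked window is [min_off, max_off] = [-24, -9]; both ends must
 * pass check_stack_slot_within_bounds() for the access to be accepted.
 */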
41c48f3a 4682
17a52670
AS
4683/* check whether memory at (regno + off) is accessible for t = (read | write)
 4684 * if t==write, value_regno is the register whose value is stored into memory
 4685 * if t==read, value_regno is the register that will receive the value from memory
4686 * if t==write && value_regno==-1, some unknown value is stored into memory
4687 * if t==read && value_regno==-1, don't care what we read from memory
4688 */
ca369602
DB
4689static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
4690 int off, int bpf_size, enum bpf_access_type t,
4691 int value_regno, bool strict_alignment_once)
17a52670 4692{
638f5b90
AS
4693 struct bpf_reg_state *regs = cur_regs(env);
4694 struct bpf_reg_state *reg = regs + regno;
f4d7e40a 4695 struct bpf_func_state *state;
17a52670
AS
4696 int size, err = 0;
4697
4698 size = bpf_size_to_bytes(bpf_size);
4699 if (size < 0)
4700 return size;
4701
f1174f77 4702 /* alignment checks will add in reg->off themselves */
ca369602 4703 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
969bf05e
AS
4704 if (err)
4705 return err;
17a52670 4706
f1174f77
EC
4707 /* for access checks, reg->off is just part of off */
4708 off += reg->off;
4709
69c087ba
YS
4710 if (reg->type == PTR_TO_MAP_KEY) {
4711 if (t == BPF_WRITE) {
4712 verbose(env, "write to change key R%d not allowed\n", regno);
4713 return -EACCES;
4714 }
4715
4716 err = check_mem_region_access(env, regno, off, size,
4717 reg->map_ptr->key_size, false);
4718 if (err)
4719 return err;
4720 if (value_regno >= 0)
4721 mark_reg_unknown(env, regs, value_regno);
4722 } else if (reg->type == PTR_TO_MAP_VALUE) {
61df10c7
KKD
4723 struct bpf_map_value_off_desc *kptr_off_desc = NULL;
4724
1be7f75d
AS
4725 if (t == BPF_WRITE && value_regno >= 0 &&
4726 is_pointer_value(env, value_regno)) {
61bd5218 4727 verbose(env, "R%d leaks addr into map\n", value_regno);
1be7f75d
AS
4728 return -EACCES;
4729 }
591fe988
DB
4730 err = check_map_access_type(env, regno, off, size, t);
4731 if (err)
4732 return err;
61df10c7
KKD
4733 err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
4734 if (err)
4735 return err;
4736 if (tnum_is_const(reg->var_off))
4737 kptr_off_desc = bpf_map_kptr_off_contains(reg->map_ptr,
4738 off + reg->var_off.value);
4739 if (kptr_off_desc) {
4740 err = check_map_kptr_access(env, regno, value_regno, insn_idx,
4741 kptr_off_desc);
4742 } else if (t == BPF_READ && value_regno >= 0) {
a23740ec
AN
4743 struct bpf_map *map = reg->map_ptr;
4744
4745 /* if map is read-only, track its contents as scalars */
4746 if (tnum_is_const(reg->var_off) &&
4747 bpf_map_is_rdonly(map) &&
4748 map->ops->map_direct_value_addr) {
4749 int map_off = off + reg->var_off.value;
4750 u64 val = 0;
4751
4752 err = bpf_map_direct_read(map, map_off, size,
4753 &val);
4754 if (err)
4755 return err;
4756
4757 regs[value_regno].type = SCALAR_VALUE;
4758 __mark_reg_known(&regs[value_regno], val);
4759 } else {
4760 mark_reg_unknown(env, regs, value_regno);
4761 }
4762 }
34d3a78c
HL
4763 } else if (base_type(reg->type) == PTR_TO_MEM) {
4764 bool rdonly_mem = type_is_rdonly_mem(reg->type);
4765
4766 if (type_may_be_null(reg->type)) {
4767 verbose(env, "R%d invalid mem access '%s'\n", regno,
4768 reg_type_str(env, reg->type));
4769 return -EACCES;
4770 }
4771
4772 if (t == BPF_WRITE && rdonly_mem) {
4773 verbose(env, "R%d cannot write into %s\n",
4774 regno, reg_type_str(env, reg->type));
4775 return -EACCES;
4776 }
4777
457f4436
AN
4778 if (t == BPF_WRITE && value_regno >= 0 &&
4779 is_pointer_value(env, value_regno)) {
4780 verbose(env, "R%d leaks addr into mem\n", value_regno);
4781 return -EACCES;
4782 }
34d3a78c 4783
457f4436
AN
4784 err = check_mem_region_access(env, regno, off, size,
4785 reg->mem_size, false);
34d3a78c 4786 if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
457f4436 4787 mark_reg_unknown(env, regs, value_regno);
1a0dc1ac 4788 } else if (reg->type == PTR_TO_CTX) {
f1174f77 4789 enum bpf_reg_type reg_type = SCALAR_VALUE;
22dc4a0f 4790 struct btf *btf = NULL;
9e15db66 4791 u32 btf_id = 0;
19de99f7 4792
1be7f75d
AS
4793 if (t == BPF_WRITE && value_regno >= 0 &&
4794 is_pointer_value(env, value_regno)) {
61bd5218 4795 verbose(env, "R%d leaks addr into ctx\n", value_regno);
1be7f75d
AS
4796 return -EACCES;
4797 }
f1174f77 4798
be80a1d3 4799 err = check_ptr_off_reg(env, reg, regno);
58990d1f
DB
4800 if (err < 0)
4801 return err;
4802
c6f1bfe8
YS
4803 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
4804 &btf_id);
9e15db66
AS
4805 if (err)
4806 verbose_linfo(env, insn_idx, "; ");
969bf05e 4807 if (!err && t == BPF_READ && value_regno >= 0) {
f1174f77 4808 /* ctx access returns either a scalar, or a
de8f3a83
DB
4809 * PTR_TO_PACKET[_META,_END]. In the latter
4810 * case, we know the offset is zero.
f1174f77 4811 */
46f8bc92 4812 if (reg_type == SCALAR_VALUE) {
638f5b90 4813 mark_reg_unknown(env, regs, value_regno);
46f8bc92 4814 } else {
638f5b90 4815 mark_reg_known_zero(env, regs,
61bd5218 4816 value_regno);
c25b2ae1 4817 if (type_may_be_null(reg_type))
46f8bc92 4818 regs[value_regno].id = ++env->id_gen;
5327ed3d
JW
4819 /* A load of ctx field could have different
4820 * actual load size with the one encoded in the
4821 * insn. When the dst is PTR, it is for sure not
4822 * a sub-register.
4823 */
4824 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
c25b2ae1 4825 if (base_type(reg_type) == PTR_TO_BTF_ID) {
22dc4a0f 4826 regs[value_regno].btf = btf;
9e15db66 4827 regs[value_regno].btf_id = btf_id;
22dc4a0f 4828 }
46f8bc92 4829 }
638f5b90 4830 regs[value_regno].type = reg_type;
969bf05e 4831 }
17a52670 4832
f1174f77 4833 } else if (reg->type == PTR_TO_STACK) {
01f810ac
AM
4834 /* Basic bounds checks. */
4835 err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t);
e4298d25
DB
4836 if (err)
4837 return err;
8726679a 4838
f4d7e40a
AS
4839 state = func(env, reg);
4840 err = update_stack_depth(env, state, off);
4841 if (err)
4842 return err;
8726679a 4843
01f810ac
AM
4844 if (t == BPF_READ)
4845 err = check_stack_read(env, regno, off, size,
61bd5218 4846 value_regno);
01f810ac
AM
4847 else
4848 err = check_stack_write(env, regno, off, size,
4849 value_regno, insn_idx);
de8f3a83 4850 } else if (reg_is_pkt_pointer(reg)) {
3a0af8fd 4851 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
61bd5218 4852 verbose(env, "cannot write into packet\n");
969bf05e
AS
4853 return -EACCES;
4854 }
4acf6c0b
BB
4855 if (t == BPF_WRITE && value_regno >= 0 &&
4856 is_pointer_value(env, value_regno)) {
61bd5218
JK
4857 verbose(env, "R%d leaks addr into packet\n",
4858 value_regno);
4acf6c0b
BB
4859 return -EACCES;
4860 }
9fd29c08 4861 err = check_packet_access(env, regno, off, size, false);
969bf05e 4862 if (!err && t == BPF_READ && value_regno >= 0)
638f5b90 4863 mark_reg_unknown(env, regs, value_regno);
d58e468b
PP
4864 } else if (reg->type == PTR_TO_FLOW_KEYS) {
4865 if (t == BPF_WRITE && value_regno >= 0 &&
4866 is_pointer_value(env, value_regno)) {
4867 verbose(env, "R%d leaks addr into flow keys\n",
4868 value_regno);
4869 return -EACCES;
4870 }
4871
4872 err = check_flow_keys_access(env, off, size);
4873 if (!err && t == BPF_READ && value_regno >= 0)
4874 mark_reg_unknown(env, regs, value_regno);
46f8bc92 4875 } else if (type_is_sk_pointer(reg->type)) {
c64b7983 4876 if (t == BPF_WRITE) {
46f8bc92 4877 verbose(env, "R%d cannot write into %s\n",
c25b2ae1 4878 regno, reg_type_str(env, reg->type));
c64b7983
JS
4879 return -EACCES;
4880 }
5f456649 4881 err = check_sock_access(env, insn_idx, regno, off, size, t);
c64b7983
JS
4882 if (!err && value_regno >= 0)
4883 mark_reg_unknown(env, regs, value_regno);
9df1c28b
MM
4884 } else if (reg->type == PTR_TO_TP_BUFFER) {
4885 err = check_tp_buffer_access(env, reg, regno, off, size);
4886 if (!err && t == BPF_READ && value_regno >= 0)
4887 mark_reg_unknown(env, regs, value_regno);
bff61f6f
HL
4888 } else if (base_type(reg->type) == PTR_TO_BTF_ID &&
4889 !type_may_be_null(reg->type)) {
9e15db66
AS
4890 err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
4891 value_regno);
4892 } else if (reg->type == CONST_PTR_TO_MAP) {
4893 err = check_ptr_to_map_access(env, regs, regno, off, size, t,
4894 value_regno);
4895 } else if (base_type(reg->type) == PTR_TO_BUF) {
4896 bool rdonly_mem = type_is_rdonly_mem(reg->type);
4897 u32 *max_access;
4898
4899 if (rdonly_mem) {
4900 if (t == BPF_WRITE) {
4901 verbose(env, "R%d cannot write into %s\n",
4902 regno, reg_type_str(env, reg->type));
4903 return -EACCES;
4904 }
4905 max_access = &env->prog->aux->max_rdonly_access;
4906 } else {
20b2aff4 4907 max_access = &env->prog->aux->max_rdwr_access;
afbf21dc 4908 }
20b2aff4 4909
f6dfbe31 4910 err = check_buffer_access(env, reg, regno, off, size, false,
44e9a741 4911 max_access);
4912
4913 if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
afbf21dc 4914 mark_reg_unknown(env, regs, value_regno);
17a52670 4915 } else {
61bd5218 4916 verbose(env, "R%d invalid mem access '%s'\n", regno,
c25b2ae1 4917 reg_type_str(env, reg->type));
4918 return -EACCES;
4919 }
969bf05e 4920
f1174f77 4921 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
638f5b90 4922 regs[value_regno].type == SCALAR_VALUE) {
f1174f77 4923 /* b/h/w load zero-extends, mark upper bits as known 0 */
0c17d1d2 4924 coerce_reg_to_size(&regs[value_regno], size);
969bf05e 4925 }
4926 return err;
4927}
4928
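/* Editor's note -- an illustrative sketch, not part of verifier.c: what the
 * zero-extension rule above means for a program under verification
 * (assuming BPF C built with clang -target bpf; the names are made up):
 *
 *	char buf[64] = {};
 *	__u8 idx = *(__u8 *)data;	// 1-byte BPF_LDX: b/h/w loads
 *					// zero-extend, so after
 *					// coerce_reg_to_size() the upper
 *					// 56 bits are known zero, umax <= 0xff
 *	if (idx >= sizeof(buf))
 *		return 0;
 *	val = buf[idx];			// provably in-bounds
 *
 * Without marking the upper bits known-zero, idx would remain a fully
 * unknown 64-bit scalar and the indexed stack access would be rejected.
 */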
91c960b0 4929static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
17a52670 4930{
5ffa2550 4931 int load_reg;
4932 int err;
4933
4934 switch (insn->imm) {
4935 case BPF_ADD:
4936 case BPF_ADD | BPF_FETCH:
4937 case BPF_AND:
4938 case BPF_AND | BPF_FETCH:
4939 case BPF_OR:
4940 case BPF_OR | BPF_FETCH:
4941 case BPF_XOR:
4942 case BPF_XOR | BPF_FETCH:
4943 case BPF_XCHG:
4944 case BPF_CMPXCHG:
4945 break;
4946 default:
4947 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm);
4948 return -EINVAL;
4949 }
4950
4951 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) {
4952 verbose(env, "invalid atomic operand size\n");
4953 return -EINVAL;
4954 }
4955
4956 /* check src1 operand */
dc503a8a 4957 err = check_reg_arg(env, insn->src_reg, SRC_OP);
4958 if (err)
4959 return err;
4960
4961 /* check src2 operand */
dc503a8a 4962 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
4963 if (err)
4964 return err;
4965
4966 if (insn->imm == BPF_CMPXCHG) {
4967 /* Check comparison of R0 with memory location */
4968 const u32 aux_reg = BPF_REG_0;
4969
4970 err = check_reg_arg(env, aux_reg, SRC_OP);
4971 if (err)
4972 return err;
4973
4974 if (is_pointer_value(env, aux_reg)) {
4975 verbose(env, "R%d leaks addr into mem\n", aux_reg);
4976 return -EACCES;
4977 }
4978 }
4979
6bdf6abc 4980 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 4981 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
4982 return -EACCES;
4983 }
4984
ca369602 4985 if (is_ctx_reg(env, insn->dst_reg) ||
4b5defde 4986 is_pkt_reg(env, insn->dst_reg) ||
4987 is_flow_key_reg(env, insn->dst_reg) ||
4988 is_sk_reg(env, insn->dst_reg)) {
91c960b0 4989 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
2a159c6f 4990 insn->dst_reg,
c25b2ae1 4991 reg_type_str(env, reg_state(env, insn->dst_reg)->type));
4992 return -EACCES;
4993 }
4994
4995 if (insn->imm & BPF_FETCH) {
4996 if (insn->imm == BPF_CMPXCHG)
4997 load_reg = BPF_REG_0;
4998 else
4999 load_reg = insn->src_reg;
5000
5001 /* check and record load of old value */
5002 err = check_reg_arg(env, load_reg, DST_OP);
5003 if (err)
5004 return err;
5005 } else {
5006 /* This instruction accesses a memory location but doesn't
5007 * actually load it into a register.
5008 */
5009 load_reg = -1;
5010 }
5011
5012 /* Check whether we can read the memory, with second call for fetch
5013 * case to simulate the register fill.
5014 */
31fd8581 5015 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5016 BPF_SIZE(insn->code), BPF_READ, -1, true);
5017 if (!err && load_reg >= 0)
5018 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5019 BPF_SIZE(insn->code), BPF_READ, load_reg,
5020 true);
5021 if (err)
5022 return err;
5023
7d3baf0a 5024 /* Check whether we can write into the same memory. */
5025 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
5026 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
5027 if (err)
5028 return err;
5029
5ca419f2 5030 return 0;
5031}
5032
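/* Editor's note -- an illustrative sketch, not part of verifier.c: the
 * BPF_ATOMIC forms accepted above, as clang emits them for the __sync
 * builtins (assuming an atomics-capable target, e.g. -mcpu=v3):
 *
 *	static long counter;			// e.g. a .bss global
 *	__sync_fetch_and_add(&counter, 1);	// BPF_ADD, or
 *						// BPF_ADD | BPF_FETCH when
 *						// the old value is used
 *	old = __sync_val_compare_and_swap(&counter, 0, 1); // BPF_CMPXCHG
 *
 * BPF_CMPXCHG implicitly compares against and returns through R0, which
 * is why the code above validates BPF_REG_0 as an extra source operand
 * and why the fetched old value lands in R0 rather than in insn->src_reg.
 */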
5033/* When register 'regno' is used to read the stack (either directly or through
5034 * a helper function) make sure that it's within stack boundary and, depending
5035 * on the access type, that all elements of the stack are initialized.
5036 *
5037 * 'off' includes 'regno->off', but not its dynamic part (if any).
5038 *
5039 * All registers that have been spilled on the stack in the slots within the
5040 * read offsets are marked as read.
5041 */
5042static int check_stack_range_initialized(
5043 struct bpf_verifier_env *env, int regno, int off,
5044 int access_size, bool zero_size_allowed,
61df10c7 5045 enum bpf_access_src type, struct bpf_call_arg_meta *meta)
5046{
5047 struct bpf_reg_state *reg = reg_state(env, regno);
5048 struct bpf_func_state *state = func(env, reg);
5049 int err, min_off, max_off, i, j, slot, spi;
5050 char *err_extra = type == ACCESS_HELPER ? " indirect" : "";
5051 enum bpf_access_type bounds_check_type;
5052 /* Some accesses can write anything into the stack, others are
5053 * read-only.
5054 */
5055 bool clobber = false;
2011fccf 5056
5057 if (access_size == 0 && !zero_size_allowed) {
5058 verbose(env, "invalid zero-sized read\n");
5059 return -EACCES;
5060 }
2011fccf 5061
5062 if (type == ACCESS_HELPER) {
5063 /* The bounds checks for writes are more permissive than for
5064 * reads. However, if raw_mode is not set, we'll do extra
5065 * checks below.
5066 */
5067 bounds_check_type = BPF_WRITE;
5068 clobber = true;
5069 } else {
5070 bounds_check_type = BPF_READ;
5071 }
5072 err = check_stack_access_within_bounds(env, regno, off, access_size,
5073 type, bounds_check_type);
5074 if (err)
5075 return err;
5076
17a52670 5077
2011fccf 5078 if (tnum_is_const(reg->var_off)) {
01f810ac 5079 min_off = max_off = reg->var_off.value + off;
2011fccf 5080 } else {
5081 /* Variable offset is prohibited for unprivileged mode for
5082 * simplicity since it requires corresponding support in
5083 * Spectre masking for stack ALU.
5084 * See also retrieve_ptr_limit().
5085 */
2c78ee89 5086 if (!env->bypass_spec_v1) {
088ec26d 5087 char tn_buf[48];
f1174f77 5088
088ec26d 5089 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5090 verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n",
5091 regno, err_extra, tn_buf);
5092 return -EACCES;
5093 }
5094 /* Only an initialized buffer on the stack is allowed to be accessed
5095 * with a variable offset. With an uninitialized buffer it's hard to
5096 * guarantee that the whole memory is marked as initialized on
5097 * helper return, since the exact bounds are unknown, which may
5098 * cause uninitialized stack data to leak.
5099 */
5100 if (meta && meta->raw_mode)
5101 meta = NULL;
5102
5103 min_off = reg->smin_value + off;
5104 max_off = reg->smax_value + off;
5105 }
5106
5107 if (meta && meta->raw_mode) {
5108 meta->access_size = access_size;
5109 meta->regno = regno;
5110 return 0;
5111 }
5112
2011fccf 5113 for (i = min_off; i < max_off + access_size; i++) {
5114 u8 *stype;
5115
2011fccf 5116 slot = -i - 1;
638f5b90 5117 spi = slot / BPF_REG_SIZE;
5118 if (state->allocated_stack <= slot)
5119 goto err;
5120 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
5121 if (*stype == STACK_MISC)
5122 goto mark;
5123 if (*stype == STACK_ZERO) {
5124 if (clobber) {
5125 /* helper can write anything into the stack */
5126 *stype = STACK_MISC;
5127 }
cc2b14d5 5128 goto mark;
17a52670 5129 }
1d68f22b 5130
27113c59 5131 if (is_spilled_reg(&state->stack[spi]) &&
5844101a 5132 base_type(state->stack[spi].spilled_ptr.type) == PTR_TO_BTF_ID)
5133 goto mark;
5134
27113c59 5135 if (is_spilled_reg(&state->stack[spi]) &&
5136 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
5137 env->allow_ptr_leaks)) {
5138 if (clobber) {
5139 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
5140 for (j = 0; j < BPF_REG_SIZE; j++)
354e8f19 5141 scrub_spilled_slot(&state->stack[spi].slot_type[j]);
01f810ac 5142 }
5143 goto mark;
5144 }
5145
cc2b14d5 5146err:
2011fccf 5147 if (tnum_is_const(reg->var_off)) {
5148 verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n",
5149 err_extra, regno, min_off, i - min_off, access_size);
5150 } else {
5151 char tn_buf[48];
5152
5153 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5154 verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n",
5155 err_extra, regno, tn_buf, i - min_off, access_size);
2011fccf 5156 }
5157 return -EACCES;
5158mark:
5159 /* reading any byte out of 8-byte 'spill_slot' will cause
5160 * the whole slot to be marked as 'read'
5161 */
679c782d 5162 mark_reg_read(env, &state->stack[spi].spilled_ptr,
5163 state->stack[spi].spilled_ptr.parent,
5164 REG_LIVE_READ64);
17a52670 5165 }
2011fccf 5166 return update_stack_depth(env, state, min_off);
5167}
5168
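/* Editor's note -- an illustrative sketch, not part of verifier.c: the bug
 * class the loop above rejects. A helper that reads stack memory needs
 * every byte in [min_off, max_off + access_size) initialized (STACK_MISC,
 * STACK_ZERO, or a spilled register). 'events' below is a made-up
 * perf event array map:
 *
 *	char buf[16];				// never written
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      buf, sizeof(buf));
 *	// rejected: "invalid indirect read from stack ..."
 *
 * A raw_mode (MEM_UNINIT) argument, e.g. the destination buffer of
 * bpf_probe_read_kernel(), is exempt: the helper overwrites it, so only
 * the bounds are checked and meta->access_size is recorded above instead.
 */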
5169static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
5170 int access_size, bool zero_size_allowed,
5171 struct bpf_call_arg_meta *meta)
5172{
638f5b90 5173 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
20b2aff4 5174 u32 *max_access;
06c1c049 5175
20b2aff4 5176 switch (base_type(reg->type)) {
06c1c049 5177 case PTR_TO_PACKET:
de8f3a83 5178 case PTR_TO_PACKET_META:
5179 return check_packet_access(env, regno, reg->off, access_size,
5180 zero_size_allowed);
69c087ba 5181 case PTR_TO_MAP_KEY:
5182 if (meta && meta->raw_mode) {
5183 verbose(env, "R%d cannot write into %s\n", regno,
5184 reg_type_str(env, reg->type));
5185 return -EACCES;
5186 }
5187 return check_mem_region_access(env, regno, reg->off, access_size,
5188 reg->map_ptr->key_size, false);
06c1c049 5189 case PTR_TO_MAP_VALUE:
5190 if (check_map_access_type(env, regno, reg->off, access_size,
5191 meta && meta->raw_mode ? BPF_WRITE :
5192 BPF_READ))
5193 return -EACCES;
9fd29c08 5194 return check_map_access(env, regno, reg->off, access_size,
61df10c7 5195 zero_size_allowed, ACCESS_HELPER);
457f4436 5196 case PTR_TO_MEM:
5197 if (type_is_rdonly_mem(reg->type)) {
5198 if (meta && meta->raw_mode) {
5199 verbose(env, "R%d cannot write into %s\n", regno,
5200 reg_type_str(env, reg->type));
5201 return -EACCES;
5202 }
5203 }
5204 return check_mem_region_access(env, regno, reg->off,
5205 access_size, reg->mem_size,
5206 zero_size_allowed);
5207 case PTR_TO_BUF:
5208 if (type_is_rdonly_mem(reg->type)) {
5209 if (meta && meta->raw_mode) {
5210 verbose(env, "R%d cannot write into %s\n", regno,
5211 reg_type_str(env, reg->type));
20b2aff4 5212 return -EACCES;
97e6d7da 5213 }
20b2aff4 5214
5215 max_access = &env->prog->aux->max_rdonly_access;
5216 } else {
5217 max_access = &env->prog->aux->max_rdwr_access;
5218 }
5219 return check_buffer_access(env, reg, regno, reg->off,
5220 access_size, zero_size_allowed,
44e9a741 5221 max_access);
0d004c02 5222 case PTR_TO_STACK:
5223 return check_stack_range_initialized(
5224 env,
5225 regno, reg->off, access_size,
5226 zero_size_allowed, ACCESS_HELPER, meta);
5227 default: /* scalar_value or invalid ptr */
5228 /* Allow zero-byte read from NULL, regardless of pointer type */
5229 if (zero_size_allowed && access_size == 0 &&
5230 register_is_null(reg))
5231 return 0;
5232
5233 verbose(env, "R%d type=%s ", regno,
5234 reg_type_str(env, reg->type));
5235 verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK));
0d004c02 5236 return -EACCES;
5237 }
5238}
5239
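/* Editor's note -- an illustrative sketch, not part of verifier.c: the
 * default (scalar) branch above is the escape hatch for the "no data"
 * idiom. For a hypothetical helper whose memory argument is
 * PTR_MAYBE_NULL and whose size argument is ARG_CONST_SIZE_OR_ZERO:
 *
 *	bpf_some_helper(ctx, NULL, 0);	// verifies: zero-byte read from NULL
 *	bpf_some_helper(ctx, NULL, 8);	// rejected: "R2 type=scalar
 *					// expected=fp, ..."
 *
 * Every real pointer type is dispatched to its own bounds checker in the
 * switch; only known-NULL scalars with zero size reach this allowance.
 */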
5240static int check_mem_size_reg(struct bpf_verifier_env *env,
5241 struct bpf_reg_state *reg, u32 regno,
5242 bool zero_size_allowed,
5243 struct bpf_call_arg_meta *meta)
5244{
5245 int err;
5246
5247 /* This is used to refine r0 return value bounds for helpers
5248 * that enforce this value as an upper bound on return values.
5249 * See do_refine_retval_range() for helpers that can refine
5250 * the return value. The C type of the helper is u32, so we pull the
5251 * register bound from umax_value; if it is negative, the verifier
5252 * errors out. Only upper bounds can be learned because retval is an
5253 * int type and negative retvals are allowed.
5254 */
be77354a 5255 meta->msize_max_value = reg->umax_value;
5256
5257 /* The register is SCALAR_VALUE; the access check
5258 * happens using its boundaries.
5259 */
5260 if (!tnum_is_const(reg->var_off))
5261 /* For unprivileged variable accesses, disable raw
5262 * mode so that the program is required to
5263 * initialize all the memory that the helper could
5264 * just partially fill up.
5265 */
5266 meta = NULL;
5267
5268 if (reg->smin_value < 0) {
5269 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
5270 regno);
5271 return -EACCES;
5272 }
5273
5274 if (reg->umin_value == 0) {
5275 err = check_helper_mem_access(env, regno - 1, 0,
5276 zero_size_allowed,
5277 meta);
5278 if (err)
5279 return err;
5280 }
5281
5282 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
5283 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
5284 regno);
5285 return -EACCES;
5286 }
5287 err = check_helper_mem_access(env, regno - 1,
5288 reg->umax_value,
5289 zero_size_allowed, meta);
5290 if (!err)
5291 err = mark_chain_precision(env, regno);
5292 return err;
5293}
5294
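/* Editor's note -- an illustrative sketch, not part of verifier.c: the
 * bounding the code above demands of a variable size. load_len() is a
 * made-up stand-in for any unbounded u32 source:
 *
 *	char buf[64];
 *	__u32 len = load_len(ctx);		// umax unknown here
 *	if (len > sizeof(buf))			// now umax_value <= 64
 *		return 0;
 *	bpf_probe_read_kernel(buf, len, src);	// verifies
 *
 * Without the comparison, reg->umax_value >= BPF_MAX_VAR_SIZ and the call
 * fails with "unbounded memory access, use 'var &= const' or
 * 'if (var < const)'". Because umin may still be 0, the zero-sized access
 * is checked separately above.
 */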
5295int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
5296 u32 regno, u32 mem_size)
5297{
5298 bool may_be_null = type_may_be_null(reg->type);
5299 struct bpf_reg_state saved_reg;
5300 struct bpf_call_arg_meta meta;
5301 int err;
5302
5303 if (register_is_null(reg))
5304 return 0;
5305
5306 memset(&meta, 0, sizeof(meta));
5307 /* Assuming that the register contains a value check if the memory
5308 * access is safe. Temporarily save and restore the register's state as
5309 * the conversion shouldn't be visible to a caller.
5310 */
5311 if (may_be_null) {
5312 saved_reg = *reg;
e5069b9c 5313 mark_ptr_not_null_reg(reg);
5314 }
5315
5316 err = check_helper_mem_access(env, regno, mem_size, true, &meta);
5317 /* Check access for BPF_WRITE */
5318 meta.raw_mode = true;
5319 err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
5320
5321 if (may_be_null)
5322 *reg = saved_reg;
5323
5324 return err;
5325}
5326
5327int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
5328 u32 regno)
5329{
5330 struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1];
5331 bool may_be_null = type_may_be_null(mem_reg->type);
5332 struct bpf_reg_state saved_reg;
be77354a 5333 struct bpf_call_arg_meta meta;
5334 int err;
5335
5336 WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5);
5337
5338 memset(&meta, 0, sizeof(meta));
5339
5340 if (may_be_null) {
5341 saved_reg = *mem_reg;
5342 mark_ptr_not_null_reg(mem_reg);
5343 }
5344
5345 err = check_mem_size_reg(env, reg, regno, true, &meta);
5346 /* Check access for BPF_WRITE */
5347 meta.raw_mode = true;
5348 err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
5349
5350 if (may_be_null)
5351 *mem_reg = saved_reg;
5352 return err;
5353}
5354
5355/* Implementation details:
5356 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
5357 * Two bpf_map_lookups (even with the same key) will have different reg->id.
5358 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
5359 * value_or_null->value transition, since the verifier only cares about
5360 * the range of access to a valid map value pointer and doesn't care
5361 * about the actual address of the map element.
5362 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
5363 * reg->id > 0 after value_or_null->value transition. By doing so
5364 * two bpf_map_lookups will be considered two different pointers that
5365 * point to different bpf_spin_locks.
5366 * The verifier allows taking only one bpf_spin_lock at a time to avoid
5367 * dead-locks.
5368 * Since only one bpf_spin_lock is allowed the checks are simpler than
5369 * reg_is_refcounted() logic. The verifier needs to remember only
5370 * one spin_lock instead of array of acquired_refs.
5371 * cur_state->active_spin_lock remembers which map value element got locked
5372 * and clears it after bpf_spin_unlock.
5373 */
5374static int process_spin_lock(struct bpf_verifier_env *env, int regno,
5375 bool is_lock)
5376{
5377 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5378 struct bpf_verifier_state *cur = env->cur_state;
5379 bool is_const = tnum_is_const(reg->var_off);
5380 struct bpf_map *map = reg->map_ptr;
5381 u64 val = reg->var_off.value;
5382
5383 if (!is_const) {
5384 verbose(env,
5385 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
5386 regno);
5387 return -EINVAL;
5388 }
5389 if (!map->btf) {
5390 verbose(env,
5391 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
5392 map->name);
5393 return -EINVAL;
5394 }
5395 if (!map_value_has_spin_lock(map)) {
5396 if (map->spin_lock_off == -E2BIG)
5397 verbose(env,
5398 "map '%s' has more than one 'struct bpf_spin_lock'\n",
5399 map->name);
5400 else if (map->spin_lock_off == -ENOENT)
5401 verbose(env,
5402 "map '%s' doesn't have 'struct bpf_spin_lock'\n",
5403 map->name);
5404 else
5405 verbose(env,
5406 "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
5407 map->name);
5408 return -EINVAL;
5409 }
5410 if (map->spin_lock_off != val + reg->off) {
5411 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
5412 val + reg->off);
5413 return -EINVAL;
5414 }
5415 if (is_lock) {
5416 if (cur->active_spin_lock) {
5417 verbose(env,
5418 "Locking two bpf_spin_locks are not allowed\n");
5419 return -EINVAL;
5420 }
5421 cur->active_spin_lock = reg->id;
5422 } else {
5423 if (!cur->active_spin_lock) {
5424 verbose(env, "bpf_spin_unlock without taking a lock\n");
5425 return -EINVAL;
5426 }
5427 if (cur->active_spin_lock != reg->id) {
5428 verbose(env, "bpf_spin_unlock of different lock\n");
5429 return -EINVAL;
5430 }
5431 cur->active_spin_lock = 0;
5432 }
5433 return 0;
5434}
5435
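/* Editor's note -- an illustrative sketch, not part of verifier.c: the
 * layout and usage process_spin_lock() expects, in BPF C (map 'm' and the
 * struct are made up; the map must carry BTF):
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		int data;
 *	};
 *	...
 *	struct val *v = bpf_map_lookup_elem(&m, &key);
 *	if (!v)
 *		return 0;
 *	bpf_spin_lock(&v->lock);	// reg->id identifies this lookup
 *	v->data++;
 *	bpf_spin_unlock(&v->lock);	// must carry the same reg->id
 *
 * Unlocking through a pointer from a different lookup trips the
 * "bpf_spin_unlock of different lock" check above.
 */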
5436static int process_timer_func(struct bpf_verifier_env *env, int regno,
5437 struct bpf_call_arg_meta *meta)
5438{
5439 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5440 bool is_const = tnum_is_const(reg->var_off);
5441 struct bpf_map *map = reg->map_ptr;
5442 u64 val = reg->var_off.value;
5443
5444 if (!is_const) {
5445 verbose(env,
5446 "R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n",
5447 regno);
5448 return -EINVAL;
5449 }
5450 if (!map->btf) {
5451 verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n",
5452 map->name);
5453 return -EINVAL;
5454 }
5455 if (!map_value_has_timer(map)) {
5456 if (map->timer_off == -E2BIG)
5457 verbose(env,
5458 "map '%s' has more than one 'struct bpf_timer'\n",
5459 map->name);
5460 else if (map->timer_off == -ENOENT)
5461 verbose(env,
5462 "map '%s' doesn't have 'struct bpf_timer'\n",
5463 map->name);
5464 else
5465 verbose(env,
5466 "map '%s' is not a struct type or bpf_timer is mangled\n",
5467 map->name);
5468 return -EINVAL;
5469 }
5470 if (map->timer_off != val + reg->off) {
5471 verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n",
5472 val + reg->off, map->timer_off);
5473 return -EINVAL;
5474 }
5475 if (meta->map_ptr) {
5476 verbose(env, "verifier bug. Two map pointers in a timer helper\n");
5477 return -EFAULT;
5478 }
3e8ce298 5479 meta->map_uid = reg->map_uid;
b00628b1
AS
5480 meta->map_ptr = map;
5481 return 0;
5482}
5483
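/* Editor's note -- an illustrative sketch, not part of verifier.c: the
 * shape process_timer_func() validates ('timer_map' is made up):
 *
 *	struct elem {
 *		struct bpf_timer t;
 *	};
 *	...
 *	struct elem *e = bpf_map_lookup_elem(&timer_map, &key);
 *	if (e)
 *		bpf_timer_init(&e->t, &timer_map, CLOCK_MONOTONIC);
 *
 * The pointer must land exactly on the BTF-declared timer offset, or the
 * map->timer_off comparison above fails; initializing a timer from one
 * inner map and arming it against another is caught later via map_uid.
 */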
5484static int process_kptr_func(struct bpf_verifier_env *env, int regno,
5485 struct bpf_call_arg_meta *meta)
5486{
5487 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5488 struct bpf_map_value_off_desc *off_desc;
5489 struct bpf_map *map_ptr = reg->map_ptr;
5490 u32 kptr_off;
5491 int ret;
5492
5493 if (!tnum_is_const(reg->var_off)) {
5494 verbose(env,
5495 "R%d doesn't have constant offset. kptr has to be at the constant offset\n",
5496 regno);
5497 return -EINVAL;
5498 }
5499 if (!map_ptr->btf) {
5500 verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n",
5501 map_ptr->name);
5502 return -EINVAL;
5503 }
5504 if (!map_value_has_kptrs(map_ptr)) {
1ec5ee8c 5505 ret = PTR_ERR_OR_ZERO(map_ptr->kptr_off_tab);
5506 if (ret == -E2BIG)
5507 verbose(env, "map '%s' has more than %d kptr\n", map_ptr->name,
5508 BPF_MAP_VALUE_OFF_MAX);
5509 else if (ret == -EEXIST)
5510 verbose(env, "map '%s' has repeating kptr BTF tags\n", map_ptr->name);
5511 else
5512 verbose(env, "map '%s' has no valid kptr\n", map_ptr->name);
5513 return -EINVAL;
5514 }
5515
5516 meta->map_ptr = map_ptr;
5517 kptr_off = reg->off + reg->var_off.value;
5518 off_desc = bpf_map_kptr_off_contains(map_ptr, kptr_off);
5519 if (!off_desc) {
5520 verbose(env, "off=%d doesn't point to kptr\n", kptr_off);
5521 return -EACCES;
5522 }
5523 if (off_desc->type != BPF_KPTR_REF) {
5524 verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off);
5525 return -EACCES;
5526 }
5527 meta->kptr_off_desc = off_desc;
5528 return 0;
5529}
5530
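/* Editor's note -- an illustrative sketch, not part of verifier.c: the map
 * value layout process_kptr_func() checks for bpf_kptr_xchg(), using the
 * 5.19-era __kptr_ref BTF tag (the type and field names are made up):
 *
 *	struct map_value {
 *		struct prog_test_ref_kfunc __kptr_ref *ptr;
 *	};
 *	...
 *	old = bpf_kptr_xchg(&v->ptr, new);	// offset must hit the field
 *
 * The offset handed to the helper must match a BPF_KPTR_REF entry in the
 * map's kptr_off_tab; an unreferenced (__kptr) field is rejected with
 * "kptr isn't referenced kptr".
 */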
5531static bool arg_type_is_mem_size(enum bpf_arg_type type)
5532{
5533 return type == ARG_CONST_SIZE ||
5534 type == ARG_CONST_SIZE_OR_ZERO;
5535}
5536
5537static bool arg_type_is_alloc_size(enum bpf_arg_type type)
5538{
5539 return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
5540}
5541
5542static bool arg_type_is_int_ptr(enum bpf_arg_type type)
5543{
5544 return type == ARG_PTR_TO_INT ||
5545 type == ARG_PTR_TO_LONG;
5546}
5547
5548static bool arg_type_is_release(enum bpf_arg_type type)
5549{
5550 return type & OBJ_RELEASE;
5551}
5552
5553static bool arg_type_is_dynptr(enum bpf_arg_type type)
5554{
5555 return base_type(type) == ARG_PTR_TO_DYNPTR;
5556}
5557
5558static int int_ptr_type_to_size(enum bpf_arg_type type)
5559{
5560 if (type == ARG_PTR_TO_INT)
5561 return sizeof(u32);
5562 else if (type == ARG_PTR_TO_LONG)
5563 return sizeof(u64);
5564
5565 return -EINVAL;
5566}
5567
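/* Editor's note -- an illustrative sketch, not part of verifier.c:
 * ARG_PTR_TO_INT vs ARG_PTR_TO_LONG in use. bpf_strtol()'s result
 * pointer is ARG_PTR_TO_LONG, so the slot must hold a full 8 bytes:
 *
 *	long res;				// not int
 *	bpf_strtol(buf, sizeof(buf), 0, &res);
 *
 * int_ptr_type_to_size() supplies the 4- vs 8-byte access size that
 * check_helper_mem_access() and check_ptr_alignment() then enforce.
 */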
5568static int resolve_map_arg_type(struct bpf_verifier_env *env,
5569 const struct bpf_call_arg_meta *meta,
5570 enum bpf_arg_type *arg_type)
5571{
5572 if (!meta->map_ptr) {
5573 /* kernel subsystem misconfigured verifier */
5574 verbose(env, "invalid map_ptr to access map->type\n");
5575 return -EACCES;
5576 }
5577
5578 switch (meta->map_ptr->map_type) {
5579 case BPF_MAP_TYPE_SOCKMAP:
5580 case BPF_MAP_TYPE_SOCKHASH:
5581 if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
6550f2dd 5582 *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
5583 } else {
5584 verbose(env, "invalid arg_type for sockmap/sockhash\n");
5585 return -EINVAL;
5586 }
5587 break;
5588 case BPF_MAP_TYPE_BLOOM_FILTER:
5589 if (meta->func_id == BPF_FUNC_map_peek_elem)
5590 *arg_type = ARG_PTR_TO_MAP_VALUE;
5591 break;
5592 default:
5593 break;
5594 }
5595 return 0;
5596}
5597
5598struct bpf_reg_types {
5599 const enum bpf_reg_type types[10];
1df8f55a 5600 u32 *btf_id;
5601};
5602
5603static const struct bpf_reg_types map_key_value_types = {
5604 .types = {
5605 PTR_TO_STACK,
5606 PTR_TO_PACKET,
5607 PTR_TO_PACKET_META,
69c087ba 5608 PTR_TO_MAP_KEY,
5609 PTR_TO_MAP_VALUE,
5610 },
5611};
5612
5613static const struct bpf_reg_types sock_types = {
5614 .types = {
5615 PTR_TO_SOCK_COMMON,
5616 PTR_TO_SOCKET,
5617 PTR_TO_TCP_SOCK,
5618 PTR_TO_XDP_SOCK,
5619 },
5620};
5621
49a2a4d4 5622#ifdef CONFIG_NET
5623static const struct bpf_reg_types btf_id_sock_common_types = {
5624 .types = {
5625 PTR_TO_SOCK_COMMON,
5626 PTR_TO_SOCKET,
5627 PTR_TO_TCP_SOCK,
5628 PTR_TO_XDP_SOCK,
5629 PTR_TO_BTF_ID,
5630 },
5631 .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
5632};
49a2a4d4 5633#endif
1df8f55a 5634
5635static const struct bpf_reg_types mem_types = {
5636 .types = {
5637 PTR_TO_STACK,
5638 PTR_TO_PACKET,
5639 PTR_TO_PACKET_META,
69c087ba 5640 PTR_TO_MAP_KEY,
5641 PTR_TO_MAP_VALUE,
5642 PTR_TO_MEM,
a672b2e3 5643 PTR_TO_MEM | MEM_ALLOC,
20b2aff4 5644 PTR_TO_BUF,
f79e7ea5
LB
5645 },
5646};
5647
5648static const struct bpf_reg_types int_ptr_types = {
5649 .types = {
5650 PTR_TO_STACK,
5651 PTR_TO_PACKET,
5652 PTR_TO_PACKET_META,
69c087ba 5653 PTR_TO_MAP_KEY,
5654 PTR_TO_MAP_VALUE,
5655 },
5656};
5657
5658static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
5659static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
5660static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
a672b2e3 5661static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM | MEM_ALLOC } };
5662static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
5663static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
5664static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
5844101a 5665static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_BTF_ID | MEM_PERCPU } };
5666static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
5667static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
fff13c4b 5668static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } };
b00628b1 5669static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
c0a5a21c 5670static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } };
f79e7ea5 5671
0789e13b 5672static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
5673 [ARG_PTR_TO_MAP_KEY] = &map_key_value_types,
5674 [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types,
5675 [ARG_CONST_SIZE] = &scalar_types,
5676 [ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
5677 [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
5678 [ARG_CONST_MAP_PTR] = &const_map_ptr_types,
5679 [ARG_PTR_TO_CTX] = &context_types,
f79e7ea5 5680 [ARG_PTR_TO_SOCK_COMMON] = &sock_types,
49a2a4d4 5681#ifdef CONFIG_NET
1df8f55a 5682 [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types,
49a2a4d4 5683#endif
f79e7ea5 5684 [ARG_PTR_TO_SOCKET] = &fullsock_types,
5685 [ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
5686 [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
5687 [ARG_PTR_TO_MEM] = &mem_types,
f79e7ea5 5688 [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types,
5689 [ARG_PTR_TO_INT] = &int_ptr_types,
5690 [ARG_PTR_TO_LONG] = &int_ptr_types,
eaa6bcb7 5691 [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types,
69c087ba 5692 [ARG_PTR_TO_FUNC] = &func_ptr_types,
48946bd6 5693 [ARG_PTR_TO_STACK] = &stack_ptr_types,
fff13c4b 5694 [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types,
b00628b1 5695 [ARG_PTR_TO_TIMER] = &timer_types,
c0a5a21c 5696 [ARG_PTR_TO_KPTR] = &kptr_types,
97e03f52 5697 [ARG_PTR_TO_DYNPTR] = &stack_ptr_types,
5698};
5699
5700static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
a968d5e2 5701 enum bpf_arg_type arg_type,
5702 const u32 *arg_btf_id,
5703 struct bpf_call_arg_meta *meta)
5704{
5705 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
5706 enum bpf_reg_type expected, type = reg->type;
a968d5e2 5707 const struct bpf_reg_types *compatible;
5708 int i, j;
5709
48946bd6 5710 compatible = compatible_reg_types[base_type(arg_type)];
5711 if (!compatible) {
5712 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
5713 return -EFAULT;
5714 }
5715
5716 /* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY,
5717 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY
5718 *
5719 * Same for MAYBE_NULL:
5720 *
5721 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL,
5722 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL
5723 *
5724 * Therefore we fold these flags depending on the arg_type before comparison.
5725 */
5726 if (arg_type & MEM_RDONLY)
5727 type &= ~MEM_RDONLY;
5728 if (arg_type & PTR_MAYBE_NULL)
5729 type &= ~PTR_MAYBE_NULL;
5730
5731 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
5732 expected = compatible->types[i];
5733 if (expected == NOT_INIT)
5734 break;
5735
5736 if (type == expected)
a968d5e2 5737 goto found;
5738 }
5739
216e3cd2 5740 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type));
f79e7ea5 5741 for (j = 0; j + 1 < i; j++)
5742 verbose(env, "%s, ", reg_type_str(env, compatible->types[j]));
5743 verbose(env, "%s\n", reg_type_str(env, compatible->types[j]));
f79e7ea5 5744 return -EACCES;
5745
5746found:
216e3cd2 5747 if (reg->type == PTR_TO_BTF_ID) {
5748 /* For bpf_sk_release, it needs to match against first member
5749 * 'struct sock_common', hence make an exception for it. This
5750 * allows bpf_sk_release to work for multiple socket types.
5751 */
5752 bool strict_type_match = arg_type_is_release(arg_type) &&
5753 meta->func_id != BPF_FUNC_sk_release;
5754
5755 if (!arg_btf_id) {
5756 if (!compatible->btf_id) {
5757 verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
5758 return -EFAULT;
5759 }
5760 arg_btf_id = compatible->btf_id;
5761 }
5762
5763 if (meta->func_id == BPF_FUNC_kptr_xchg) {
5764 if (map_kptr_match_type(env, meta->kptr_off_desc, reg, regno))
5765 return -EACCES;
5766 } else if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
5767 btf_vmlinux, *arg_btf_id,
5768 strict_type_match)) {
a968d5e2 5769 verbose(env, "R%d is of type %s but %s is expected\n",
5770 regno, kernel_type_name(reg->btf, reg->btf_id),
5771 kernel_type_name(btf_vmlinux, *arg_btf_id));
5772 return -EACCES;
5773 }
5774 }
5775
5776 return 0;
5777}
5778
5779int check_func_arg_reg_off(struct bpf_verifier_env *env,
5780 const struct bpf_reg_state *reg, int regno,
8f14852e 5781 enum bpf_arg_type arg_type)
5782{
5783 enum bpf_reg_type type = reg->type;
8f14852e 5784 bool fixed_off_ok = false;
5785
5786 switch ((u32)type) {
25b35dd2 5787 /* Pointer types where reg offset is explicitly allowed: */
5788 case PTR_TO_STACK:
5789 if (arg_type_is_dynptr(arg_type) && reg->off % BPF_REG_SIZE) {
5790 verbose(env, "cannot pass in dynptr at an offset\n");
5791 return -EINVAL;
5792 }
5793 fallthrough;
5794 case PTR_TO_PACKET:
5795 case PTR_TO_PACKET_META:
5796 case PTR_TO_MAP_KEY:
5797 case PTR_TO_MAP_VALUE:
5798 case PTR_TO_MEM:
5799 case PTR_TO_MEM | MEM_RDONLY:
5800 case PTR_TO_MEM | MEM_ALLOC:
5801 case PTR_TO_BUF:
5802 case PTR_TO_BUF | MEM_RDONLY:
97e03f52 5803 case SCALAR_VALUE:
5804 /* Some of the argument types nevertheless require a
5805 * zero register offset.
5806 */
8f14852e 5807 if (base_type(arg_type) != ARG_PTR_TO_ALLOC_MEM)
25b35dd2
KKD
5808 return 0;
5809 break;
5810 /* All the rest must be rejected, except PTR_TO_BTF_ID which allows
5811 * fixed offset.
5812 */
5813 case PTR_TO_BTF_ID:
24d5bb80 5814 /* When a referenced PTR_TO_BTF_ID is passed to a release function,
5815 * its fixed offset must be 0. In the other cases, a fixed offset
5816 * can be non-zero.
24d5bb80 5817 */
8f14852e 5818 if (arg_type_is_release(arg_type) && reg->off) {
5819 verbose(env, "R%d must have zero offset when passed to release func\n",
5820 regno);
5821 return -EINVAL;
5822 }
5823 /* When the arg is a release pointer, fixed_off_ok must be false, but
5824 * we already checked and rejected reg->off != 0 above, so set
5825 * it to true to allow a fixed offset for all other cases.
24d5bb80 5826 */
5827 fixed_off_ok = true;
5828 break;
5829 default:
5830 break;
5831 }
5832 return __check_ptr_off_reg(env, reg, regno, fixed_off_ok);
5833}
5834
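/* Editor's note -- an illustrative sketch, not part of verifier.c: the
 * release-pointer rule above in program terms. bpf_sk_release() demands
 * reg->off == 0, so pointer arithmetic before release is rejected:
 *
 *	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple),
 *			       BPF_F_CURRENT_NETNS, 0);
 *	if (!sk)
 *		return 0;
 *	bpf_sk_release(sk);		// ok: fixed offset is 0
 *	// bpf_sk_release((void *)sk + 8) would fail with
 *	// "R1 must have zero offset when passed to release func"
 */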
5835static u32 stack_slot_get_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
5836{
5837 struct bpf_func_state *state = func(env, reg);
5838 int spi = get_spi(reg->off);
5839
5840 return state->stack[spi].spilled_ptr.id;
5841}
5842
5843static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
5844 struct bpf_call_arg_meta *meta,
5845 const struct bpf_func_proto *fn)
17a52670 5846{
af7ec138 5847 u32 regno = BPF_REG_1 + arg;
638f5b90 5848 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
af7ec138 5849 enum bpf_arg_type arg_type = fn->arg_type[arg];
f79e7ea5 5850 enum bpf_reg_type type = reg->type;
5851 int err = 0;
5852
80f1d68c 5853 if (arg_type == ARG_DONTCARE)
5854 return 0;
5855
5856 err = check_reg_arg(env, regno, SRC_OP);
5857 if (err)
5858 return err;
17a52670 5859
5860 if (arg_type == ARG_ANYTHING) {
5861 if (is_pointer_value(env, regno)) {
5862 verbose(env, "R%d leaks addr into helper function\n",
5863 regno);
5864 return -EACCES;
5865 }
80f1d68c 5866 return 0;
1be7f75d 5867 }
80f1d68c 5868
de8f3a83 5869 if (type_is_pkt_pointer(type) &&
3a0af8fd 5870 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
61bd5218 5871 verbose(env, "helper access to the packet is not allowed\n");
5872 return -EACCES;
5873 }
5874
16d1e00c 5875 if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
5876 err = resolve_map_arg_type(env, meta, &arg_type);
5877 if (err)
5878 return err;
5879 }
5880
48946bd6 5881 if (register_is_null(reg) && type_may_be_null(arg_type))
5882 /* A NULL register has a SCALAR_VALUE type, so skip
5883 * type checking.
5884 */
5885 goto skip_type_check;
5886
c0a5a21c 5887 err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg], meta);
5888 if (err)
5889 return err;
5890
8f14852e 5891 err = check_func_arg_reg_off(env, reg, regno, arg_type);
5892 if (err)
5893 return err;
d7b9454a 5894
fd1b0d60 5895skip_type_check:
8f14852e 5896 if (arg_type_is_release(arg_type)) {
5897 if (arg_type_is_dynptr(arg_type)) {
5898 struct bpf_func_state *state = func(env, reg);
5899 int spi = get_spi(reg->off);
5900
5901 if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
5902 !state->stack[spi].spilled_ptr.id) {
5903 verbose(env, "arg %d is an unacquired reference\n", regno);
5904 return -EINVAL;
5905 }
5906 } else if (!reg->ref_obj_id && !register_is_null(reg)) {
5907 verbose(env, "R%d must be referenced when passed to release function\n",
5908 regno);
5909 return -EINVAL;
5910 }
5911 if (meta->release_regno) {
5912 verbose(env, "verifier internal error: more than one release argument\n");
5913 return -EFAULT;
5914 }
5915 meta->release_regno = regno;
5916 }
5917
02f7c958 5918 if (reg->ref_obj_id) {
5919 if (meta->ref_obj_id) {
5920 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
5921 regno, reg->ref_obj_id,
5922 meta->ref_obj_id);
5923 return -EFAULT;
5924 }
5925 meta->ref_obj_id = reg->ref_obj_id;
5926 }
5927
5928 if (arg_type == ARG_CONST_MAP_PTR) {
5929 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
5930 if (meta->map_ptr) {
5931 /* Use map_uid (which is unique id of inner map) to reject:
5932 * inner_map1 = bpf_map_lookup_elem(outer_map, key1)
5933 * inner_map2 = bpf_map_lookup_elem(outer_map, key2)
5934 * if (inner_map1 && inner_map2) {
5935 * timer = bpf_map_lookup_elem(inner_map1);
5936 * if (timer)
5937 * // mismatch would have been allowed
5938 * bpf_timer_init(timer, inner_map2);
5939 * }
5940 *
5941 * Comparing map_ptr is enough to distinguish normal and outer maps.
5942 */
5943 if (meta->map_ptr != reg->map_ptr ||
5944 meta->map_uid != reg->map_uid) {
5945 verbose(env,
5946 "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n",
5947 meta->map_uid, reg->map_uid);
5948 return -EINVAL;
5949 }
b00628b1 5950 }
33ff9823 5951 meta->map_ptr = reg->map_ptr;
3e8ce298 5952 meta->map_uid = reg->map_uid;
5953 } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
5954 /* bpf_map_xxx(..., map_ptr, ..., key) call:
5955 * check that [key, key + map->key_size) are within
5956 * stack limits and initialized
5957 */
33ff9823 5958 if (!meta->map_ptr) {
5959 /* in function declaration map_ptr must come before
5960 * map_key, so that it's verified and known before
5961 * we have to check map_key here. Otherwise it means
5962 * that kernel subsystem misconfigured verifier
5963 */
61bd5218 5964 verbose(env, "invalid map_ptr to access map->key\n");
5965 return -EACCES;
5966 }
5967 err = check_helper_mem_access(env, regno,
5968 meta->map_ptr->key_size, false,
5969 NULL);
16d1e00c 5970 } else if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
5971 if (type_may_be_null(arg_type) && register_is_null(reg))
5972 return 0;
5973
5974 /* bpf_map_xxx(..., map_ptr, ..., value) call:
5975 * check [value, value + map->value_size) validity
5976 */
33ff9823 5977 if (!meta->map_ptr) {
17a52670 5978 /* kernel subsystem misconfigured verifier */
61bd5218 5979 verbose(env, "invalid map_ptr to access map->value\n");
5980 return -EACCES;
5981 }
16d1e00c 5982 meta->raw_mode = arg_type & MEM_UNINIT;
5983 err = check_helper_mem_access(env, regno,
5984 meta->map_ptr->value_size, false,
2ea864c5 5985 meta);
5986 } else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) {
5987 if (!reg->btf_id) {
5988 verbose(env, "Helper has invalid btf_id in R%d\n", regno);
5989 return -EACCES;
5990 }
22dc4a0f 5991 meta->ret_btf = reg->btf;
eaa6bcb7 5992 meta->ret_btf_id = reg->btf_id;
5993 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
5994 if (meta->func_id == BPF_FUNC_spin_lock) {
5995 if (process_spin_lock(env, regno, true))
5996 return -EACCES;
5997 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
5998 if (process_spin_lock(env, regno, false))
5999 return -EACCES;
6000 } else {
6001 verbose(env, "verifier internal error\n");
6002 return -EFAULT;
6003 }
6004 } else if (arg_type == ARG_PTR_TO_TIMER) {
6005 if (process_timer_func(env, regno, meta))
6006 return -EACCES;
6007 } else if (arg_type == ARG_PTR_TO_FUNC) {
6008 meta->subprogno = reg->subprogno;
16d1e00c 6009 } else if (base_type(arg_type) == ARG_PTR_TO_MEM) {
6010 /* The access to this pointer is only checked when we hit the
6011 * next is_mem_size argument below.
6012 */
16d1e00c 6013 meta->raw_mode = arg_type & MEM_UNINIT;
90133415 6014 } else if (arg_type_is_mem_size(arg_type)) {
39f19ebb 6015 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
17a52670 6016
d583691c 6017 err = check_mem_size_reg(env, reg, regno, zero_size_allowed, meta);
6018 } else if (arg_type_is_dynptr(arg_type)) {
6019 if (arg_type & MEM_UNINIT) {
6020 if (!is_dynptr_reg_valid_uninit(env, reg)) {
6021 verbose(env, "Dynptr has to be an uninitialized dynptr\n");
6022 return -EINVAL;
6023 }
6024
6025 /* We only support one dynptr being uninitialized at the moment,
6026 * which is sufficient for the helper functions we have right now.
6027 */
6028 if (meta->uninit_dynptr_regno) {
6029 verbose(env, "verifier internal error: multiple uninitialized dynptr args\n");
6030 return -EFAULT;
6031 }
6032
6033 meta->uninit_dynptr_regno = regno;
6034 } else if (!is_dynptr_reg_valid_init(env, reg, arg_type)) {
6035 const char *err_extra = "";
6036
6037 switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
6038 case DYNPTR_TYPE_LOCAL:
6039 err_extra = "local ";
6040 break;
6041 case DYNPTR_TYPE_RINGBUF:
6042 err_extra = "ringbuf ";
6043 break;
6044 default:
6045 break;
6046 }
bc34dee6 6047
6048 verbose(env, "Expected an initialized %sdynptr as arg #%d\n",
6049 err_extra, arg + 1);
6050 return -EINVAL;
6051 }
6052 } else if (arg_type_is_alloc_size(arg_type)) {
6053 if (!tnum_is_const(reg->var_off)) {
28a8add6 6054 verbose(env, "R%d is not a known constant'\n",
6055 regno);
6056 return -EACCES;
6057 }
6058 meta->mem_size = reg->var_off.value;
6059 } else if (arg_type_is_int_ptr(arg_type)) {
6060 int size = int_ptr_type_to_size(arg_type);
6061
6062 err = check_helper_mem_access(env, regno, size, false, meta);
6063 if (err)
6064 return err;
6065 err = check_ptr_alignment(env, reg, 0, size, true);
6066 } else if (arg_type == ARG_PTR_TO_CONST_STR) {
6067 struct bpf_map *map = reg->map_ptr;
6068 int map_off;
6069 u64 map_addr;
6070 char *str_ptr;
6071
a8fad73e 6072 if (!bpf_map_is_rdonly(map)) {
6073 verbose(env, "R%d does not point to a readonly map'\n", regno);
6074 return -EACCES;
6075 }
6076
6077 if (!tnum_is_const(reg->var_off)) {
6078 verbose(env, "R%d is not a constant address'\n", regno);
6079 return -EACCES;
6080 }
6081
6082 if (!map->ops->map_direct_value_addr) {
6083 verbose(env, "no direct value access support for this map type\n");
6084 return -EACCES;
6085 }
6086
6087 err = check_map_access(env, regno, reg->off,
6088 map->value_size - reg->off, false,
6089 ACCESS_HELPER);
6090 if (err)
6091 return err;
6092
6093 map_off = reg->off + reg->var_off.value;
6094 err = map->ops->map_direct_value_addr(map, &map_addr, map_off);
6095 if (err) {
6096 verbose(env, "direct value access on string failed\n");
6097 return err;
6098 }
6099
6100 str_ptr = (char *)(long)(map_addr);
6101 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) {
6102 verbose(env, "string is not zero-terminated\n");
6103 return -EINVAL;
6104 }
6105 } else if (arg_type == ARG_PTR_TO_KPTR) {
6106 if (process_kptr_func(env, regno, meta))
6107 return -EACCES;
6108 }
6109
6110 return err;
6111}
6112
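/* Editor's note -- an illustrative sketch, not part of verifier.c: the
 * ARG_PTR_TO_CONST_STR path above as a program sees it. bpf_snprintf()'s
 * format string must live in a read-only map at a constant offset and be
 * NUL-terminated ('out' and 'args' are made-up locals):
 *
 *	static const char fmt[] = "pid=%d comm=%s";	// .rodata map
 *	...
 *	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *
 * A writable map fails "does not point to a readonly map", a variable
 * offset fails "is not a constant address", and a string without a
 * trailing NUL in range fails "string is not zero-terminated".
 */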
6113static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
6114{
6115 enum bpf_attach_type eatype = env->prog->expected_attach_type;
7e40781c 6116 enum bpf_prog_type type = resolve_prog_type(env->prog);
6117
6118 if (func_id != BPF_FUNC_map_update_elem)
6119 return false;
6120
6121 /* It's not possible to get access to a locked struct sock in these
6122 * contexts, so updating is safe.
6123 */
6124 switch (type) {
6125 case BPF_PROG_TYPE_TRACING:
6126 if (eatype == BPF_TRACE_ITER)
6127 return true;
6128 break;
6129 case BPF_PROG_TYPE_SOCKET_FILTER:
6130 case BPF_PROG_TYPE_SCHED_CLS:
6131 case BPF_PROG_TYPE_SCHED_ACT:
6132 case BPF_PROG_TYPE_XDP:
6133 case BPF_PROG_TYPE_SK_REUSEPORT:
6134 case BPF_PROG_TYPE_FLOW_DISSECTOR:
6135 case BPF_PROG_TYPE_SK_LOOKUP:
6136 return true;
6137 default:
6138 break;
6139 }
6140
6141 verbose(env, "cannot update sockmap in this context\n");
6142 return false;
6143}
6144
6145static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
6146{
6147 return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);
6148}
6149
6150static int check_map_func_compatibility(struct bpf_verifier_env *env,
6151 struct bpf_map *map, int func_id)
35578d79 6152{
6153 if (!map)
6154 return 0;
6155
6156 /* We need a two way check, first is from map perspective ... */
6157 switch (map->map_type) {
6158 case BPF_MAP_TYPE_PROG_ARRAY:
6159 if (func_id != BPF_FUNC_tail_call)
6160 goto error;
6161 break;
6162 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
6163 if (func_id != BPF_FUNC_perf_event_read &&
908432ca 6164 func_id != BPF_FUNC_perf_event_output &&
a7658e1a 6165 func_id != BPF_FUNC_skb_output &&
6166 func_id != BPF_FUNC_perf_event_read_value &&
6167 func_id != BPF_FUNC_xdp_output)
6168 goto error;
6169 break;
6170 case BPF_MAP_TYPE_RINGBUF:
6171 if (func_id != BPF_FUNC_ringbuf_output &&
6172 func_id != BPF_FUNC_ringbuf_reserve &&
6173 func_id != BPF_FUNC_ringbuf_query &&
6174 func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
6175 func_id != BPF_FUNC_ringbuf_submit_dynptr &&
6176 func_id != BPF_FUNC_ringbuf_discard_dynptr)
6177 goto error;
6178 break;
6179 case BPF_MAP_TYPE_STACK_TRACE:
6180 if (func_id != BPF_FUNC_get_stackid)
6181 goto error;
6182 break;
4ed8ec52 6183 case BPF_MAP_TYPE_CGROUP_ARRAY:
60747ef4 6184 if (func_id != BPF_FUNC_skb_under_cgroup &&
60d20f91 6185 func_id != BPF_FUNC_current_task_under_cgroup)
6186 goto error;
6187 break;
cd339431 6188 case BPF_MAP_TYPE_CGROUP_STORAGE:
b741f163 6189 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
6190 if (func_id != BPF_FUNC_get_local_storage)
6191 goto error;
6192 break;
546ac1ff 6193 case BPF_MAP_TYPE_DEVMAP:
6f9d451a 6194 case BPF_MAP_TYPE_DEVMAP_HASH:
6195 if (func_id != BPF_FUNC_redirect_map &&
6196 func_id != BPF_FUNC_map_lookup_elem)
6197 goto error;
6198 break;
6199 /* Restrict bpf side of cpumap and xskmap, open when use-cases
6200 * appear.
6201 */
6202 case BPF_MAP_TYPE_CPUMAP:
6203 if (func_id != BPF_FUNC_redirect_map)
6204 goto error;
6205 break;
6206 case BPF_MAP_TYPE_XSKMAP:
6207 if (func_id != BPF_FUNC_redirect_map &&
6208 func_id != BPF_FUNC_map_lookup_elem)
6209 goto error;
6210 break;
56f668df 6211 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
bcc6b1b7 6212 case BPF_MAP_TYPE_HASH_OF_MAPS:
6213 if (func_id != BPF_FUNC_map_lookup_elem)
6214 goto error;
16a43625 6215 break;
6216 case BPF_MAP_TYPE_SOCKMAP:
6217 if (func_id != BPF_FUNC_sk_redirect_map &&
6218 func_id != BPF_FUNC_sock_map_update &&
4f738adb 6219 func_id != BPF_FUNC_map_delete_elem &&
9fed9000 6220 func_id != BPF_FUNC_msg_redirect_map &&
64d85290 6221 func_id != BPF_FUNC_sk_select_reuseport &&
6222 func_id != BPF_FUNC_map_lookup_elem &&
6223 !may_update_sockmap(env, func_id))
6224 goto error;
6225 break;
6226 case BPF_MAP_TYPE_SOCKHASH:
6227 if (func_id != BPF_FUNC_sk_redirect_hash &&
6228 func_id != BPF_FUNC_sock_hash_update &&
6229 func_id != BPF_FUNC_map_delete_elem &&
9fed9000 6230 func_id != BPF_FUNC_msg_redirect_hash &&
64d85290 6231 func_id != BPF_FUNC_sk_select_reuseport &&
6232 func_id != BPF_FUNC_map_lookup_elem &&
6233 !may_update_sockmap(env, func_id))
6234 goto error;
6235 break;
6236 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
6237 if (func_id != BPF_FUNC_sk_select_reuseport)
6238 goto error;
6239 break;
6240 case BPF_MAP_TYPE_QUEUE:
6241 case BPF_MAP_TYPE_STACK:
6242 if (func_id != BPF_FUNC_map_peek_elem &&
6243 func_id != BPF_FUNC_map_pop_elem &&
6244 func_id != BPF_FUNC_map_push_elem)
6245 goto error;
6246 break;
6247 case BPF_MAP_TYPE_SK_STORAGE:
6248 if (func_id != BPF_FUNC_sk_storage_get &&
6249 func_id != BPF_FUNC_sk_storage_delete)
6250 goto error;
6251 break;
6252 case BPF_MAP_TYPE_INODE_STORAGE:
6253 if (func_id != BPF_FUNC_inode_storage_get &&
6254 func_id != BPF_FUNC_inode_storage_delete)
6255 goto error;
6256 break;
6257 case BPF_MAP_TYPE_TASK_STORAGE:
6258 if (func_id != BPF_FUNC_task_storage_get &&
6259 func_id != BPF_FUNC_task_storage_delete)
6260 goto error;
6261 break;
6262 case BPF_MAP_TYPE_BLOOM_FILTER:
6263 if (func_id != BPF_FUNC_map_peek_elem &&
6264 func_id != BPF_FUNC_map_push_elem)
6265 goto error;
6266 break;
6267 default:
6268 break;
6269 }
6270
6271 /* ... and second from the function itself. */
6272 switch (func_id) {
6273 case BPF_FUNC_tail_call:
6274 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
6275 goto error;
6276 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
6277 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
6278 return -EINVAL;
6279 }
6280 break;
6281 case BPF_FUNC_perf_event_read:
6282 case BPF_FUNC_perf_event_output:
908432ca 6283 case BPF_FUNC_perf_event_read_value:
a7658e1a 6284 case BPF_FUNC_skb_output:
d831ee84 6285 case BPF_FUNC_xdp_output:
6286 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
6287 goto error;
6288 break;
6289 case BPF_FUNC_ringbuf_output:
6290 case BPF_FUNC_ringbuf_reserve:
6291 case BPF_FUNC_ringbuf_query:
6292 case BPF_FUNC_ringbuf_reserve_dynptr:
6293 case BPF_FUNC_ringbuf_submit_dynptr:
6294 case BPF_FUNC_ringbuf_discard_dynptr:
6295 if (map->map_type != BPF_MAP_TYPE_RINGBUF)
6296 goto error;
6297 break;
6298 case BPF_FUNC_get_stackid:
6299 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
6300 goto error;
6301 break;
60d20f91 6302 case BPF_FUNC_current_task_under_cgroup:
747ea55e 6303 case BPF_FUNC_skb_under_cgroup:
6304 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
6305 goto error;
6306 break;
97f91a7c 6307 case BPF_FUNC_redirect_map:
9c270af3 6308 if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
6f9d451a 6309 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
6310 map->map_type != BPF_MAP_TYPE_CPUMAP &&
6311 map->map_type != BPF_MAP_TYPE_XSKMAP)
6312 goto error;
6313 break;
174a79ff 6314 case BPF_FUNC_sk_redirect_map:
4f738adb 6315 case BPF_FUNC_msg_redirect_map:
81110384 6316 case BPF_FUNC_sock_map_update:
6317 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
6318 goto error;
6319 break;
6320 case BPF_FUNC_sk_redirect_hash:
6321 case BPF_FUNC_msg_redirect_hash:
6322 case BPF_FUNC_sock_hash_update:
6323 if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
6324 goto error;
6325 break;
cd339431 6326 case BPF_FUNC_get_local_storage:
6327 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
6328 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
6329 goto error;
6330 break;
2dbb9b9e 6331 case BPF_FUNC_sk_select_reuseport:
6332 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
6333 map->map_type != BPF_MAP_TYPE_SOCKMAP &&
6334 map->map_type != BPF_MAP_TYPE_SOCKHASH)
6335 goto error;
6336 break;
f1a2e44a 6337 case BPF_FUNC_map_pop_elem:
6338 if (map->map_type != BPF_MAP_TYPE_QUEUE &&
6339 map->map_type != BPF_MAP_TYPE_STACK)
6340 goto error;
6341 break;
6342 case BPF_FUNC_map_peek_elem:
6343 case BPF_FUNC_map_push_elem:
6344 if (map->map_type != BPF_MAP_TYPE_QUEUE &&
6345 map->map_type != BPF_MAP_TYPE_STACK &&
6346 map->map_type != BPF_MAP_TYPE_BLOOM_FILTER)
6347 goto error;
6348 break;
6349 case BPF_FUNC_map_lookup_percpu_elem:
6350 if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY &&
6351 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
6352 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH)
6353 goto error;
6354 break;
6355 case BPF_FUNC_sk_storage_get:
6356 case BPF_FUNC_sk_storage_delete:
6357 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
6358 goto error;
6359 break;
6360 case BPF_FUNC_inode_storage_get:
6361 case BPF_FUNC_inode_storage_delete:
6362 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
6363 goto error;
6364 break;
6365 case BPF_FUNC_task_storage_get:
6366 case BPF_FUNC_task_storage_delete:
6367 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
6368 goto error;
6369 break;
6370 default:
6371 break;
6372 }
6373
6374 return 0;
6aff67c8 6375error:
61bd5218 6376 verbose(env, "cannot pass map_type %d into func %s#%d\n",
ebb676da 6377 map->map_type, func_id_name(func_id), func_id);
6aff67c8 6378 return -EINVAL;
6379}
6380
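/* Editor's note -- an illustrative sketch, not part of verifier.c: the
 * two-way pairing above in action against a ringbuf map ('rb' and 'e'
 * are made up):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_RINGBUF);
 *		__uint(max_entries, 4096);
 *	} rb SEC(".maps");
 *	...
 *	bpf_ringbuf_output(&rb, &e, sizeof(e), 0);	// ok
 *	bpf_map_lookup_elem(&rb, &key);			// rejected:
 *	// "cannot pass map_type 27 into func bpf_map_lookup_elem#1"
 *
 * (27 is BPF_MAP_TYPE_RINGBUF's enum value; the exact number is
 * incidental.)
 */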
90133415 6381static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
6382{
6383 int count = 0;
6384
39f19ebb 6385 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 6386 count++;
39f19ebb 6387 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 6388 count++;
39f19ebb 6389 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 6390 count++;
39f19ebb 6391 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 6392 count++;
39f19ebb 6393 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
6394 count++;
6395
6396 /* We only support one arg being in raw mode at the moment,
6397 * which is sufficient for the helper functions we have
6398 * right now.
6399 */
6400 return count <= 1;
6401}
6402
6403static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
6404 enum bpf_arg_type arg_next)
6405{
6406 return (base_type(arg_curr) == ARG_PTR_TO_MEM) !=
6407 arg_type_is_mem_size(arg_next);
6408}
6409
6410static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
6411{
6412 /* bpf_xxx(..., buf, len) call will access 'len'
6413 * bytes from memory 'buf'. Both arg types need
6414 * to be paired, so make sure there's no buggy
6415 * helper function specification.
6416 */
6417 if (arg_type_is_mem_size(fn->arg1_type) ||
16d1e00c 6418 base_type(fn->arg5_type) == ARG_PTR_TO_MEM ||
6419 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
6420 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
6421 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
6422 check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
6423 return false;
6424
6425 return true;
6426}
6427
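/* Editor's note -- an illustrative sketch, not part of verifier.c: the
 * (buf, len) pairing check_arg_pair_ok() enforces on a helper's proto:
 *
 *	.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,	// the buffer
 *	.arg2_type = ARG_CONST_SIZE_OR_ZERO,		// its length
 *
 * A mem argument must be immediately followed by its size argument, and
 * a size argument must be preceded by a mem pointer. A proto declaring
 * only half of the pair is a kernel-side bug, surfaced at check time as
 * "kernel subsystem misconfigured func", not a program error.
 */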
1b986589 6428static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
6429{
6430 int count = 0;
6431
1b986589 6432 if (arg_type_may_be_refcounted(fn->arg1_type))
fd978bf7 6433 count++;
1b986589 6434 if (arg_type_may_be_refcounted(fn->arg2_type))
fd978bf7 6435 count++;
1b986589 6436 if (arg_type_may_be_refcounted(fn->arg3_type))
fd978bf7 6437 count++;
1b986589 6438 if (arg_type_may_be_refcounted(fn->arg4_type))
fd978bf7 6439 count++;
1b986589 6440 if (arg_type_may_be_refcounted(fn->arg5_type))
6441 count++;
6442
6443 /* A reference acquiring function cannot acquire
6444 * another refcounted ptr.
6445 */
64d85290 6446 if (may_be_acquire_function(func_id) && count)
6447 return false;
6448
6449 /* We only support one arg being unreferenced at the moment,
6450 * which is sufficient for the helper functions we have right now.
6451 */
6452 return count <= 1;
6453}
6454
6455static bool check_btf_id_ok(const struct bpf_func_proto *fn)
6456{
6457 int i;
6458
1df8f55a 6459 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
c0a5a21c 6460 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
6461 return false;
6462
c0a5a21c 6463 if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
6464 return false;
6465 }
6466
6467 return true;
6468}
6469
6470static int check_func_proto(const struct bpf_func_proto *fn, int func_id,
6471 struct bpf_call_arg_meta *meta)
6472{
6473 return check_raw_mode_ok(fn) &&
fd978bf7 6474 check_arg_pair_ok(fn) &&
9436ef6e 6475 check_btf_id_ok(fn) &&
1b986589 6476 check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
6477}
6478
6479/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
6480 * are now invalid, so turn them into unknown SCALAR_VALUE.
f1174f77 6481 */
6482static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
6483 struct bpf_func_state *state)
969bf05e 6484{
58e2af8b 6485 struct bpf_reg_state *regs = state->regs, *reg;
6486 int i;
6487
6488 for (i = 0; i < MAX_BPF_REG; i++)
de8f3a83 6489 if (reg_is_pkt_pointer_any(&regs[i]))
61bd5218 6490 mark_reg_unknown(env, regs, i);
969bf05e 6491
6492 bpf_for_each_spilled_reg(i, state, reg) {
6493 if (!reg)
969bf05e 6494 continue;
de8f3a83 6495 if (reg_is_pkt_pointer_any(reg))
f54c7898 6496 __mark_reg_unknown(env, reg);
6497 }
6498}
6499
f4d7e40a
AS
6500static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
6501{
6502 struct bpf_verifier_state *vstate = env->cur_state;
6503 int i;
6504
6505 for (i = 0; i <= vstate->curframe; i++)
6506 __clear_all_pkt_pointers(env, vstate->frame[i]);
6507}
6508
6d94e741
AS
6509enum {
6510 AT_PKT_END = -1,
6511 BEYOND_PKT_END = -2,
6512};
6513
6514static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
6515{
6516 struct bpf_func_state *state = vstate->frame[vstate->curframe];
6517 struct bpf_reg_state *reg = &state->regs[regn];
6518
6519 if (reg->type != PTR_TO_PACKET)
6520 /* PTR_TO_PACKET_META is not supported yet */
6521 return;
6522
6523 /* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
6524 * How far beyond pkt_end it goes is unknown.
6525 * if (!range_open) it's the case of pkt >= pkt_end
6526 * if (range_open) it's the case of pkt > pkt_end
6527 * hence this pointer is at least 1 byte bigger than pkt_end
6528 */
6529 if (range_open)
6530 reg->range = BEYOND_PKT_END;
6531 else
6532 reg->range = AT_PKT_END;
6533}
6534
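/* Editor's sketch of the program pattern tracked above (an XDP-style
 * bounds check; 'ctx' is assumed to be a struct xdp_md pointer):
 *
 *	void *data = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *
 *	if (data + 64 > data_end)	// 'pkt > pkt_end': range_open
 *		return XDP_DROP;
 *	// fall-through: 64 bytes of packet data are provably in bounds
 *
 * In the taken branch the compared pointer is recorded as BEYOND_PKT_END
 * (AT_PKT_END for the >= form), letting later comparisons against
 * pkt_end be evaluated without re-deriving the range.
 */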
fd978bf7 6535static void release_reg_references(struct bpf_verifier_env *env,
1b986589
MKL
6536 struct bpf_func_state *state,
6537 int ref_obj_id)
fd978bf7
JS
6538{
6539 struct bpf_reg_state *regs = state->regs, *reg;
6540 int i;
6541
6542 for (i = 0; i < MAX_BPF_REG; i++)
1b986589 6543 if (regs[i].ref_obj_id == ref_obj_id)
fd978bf7
JS
6544 mark_reg_unknown(env, regs, i);
6545
6546 bpf_for_each_spilled_reg(i, state, reg) {
6547 if (!reg)
6548 continue;
1b986589 6549 if (reg->ref_obj_id == ref_obj_id)
f54c7898 6550 __mark_reg_unknown(env, reg);
fd978bf7
JS
6551 }
6552}
6553
6554/* The pointer with the specified id has released its reference to kernel
6555 * resources. Identify all copies of the same pointer and clear the reference.
6556 */
6557static int release_reference(struct bpf_verifier_env *env,
1b986589 6558 int ref_obj_id)
fd978bf7
JS
6559{
6560 struct bpf_verifier_state *vstate = env->cur_state;
1b986589 6561 int err;
fd978bf7
JS
6562 int i;
6563
1b986589
MKL
6564 err = release_reference_state(cur_func(env), ref_obj_id);
6565 if (err)
6566 return err;
6567
fd978bf7 6568 for (i = 0; i <= vstate->curframe; i++)
1b986589 6569 release_reg_references(env, vstate->frame[i], ref_obj_id);
fd978bf7 6570
1b986589 6571 return 0;
fd978bf7
JS
6572}
6573
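/* Editor's sketch of why every copy must be scrubbed: both registers
 * below carry the same ref_obj_id, so releasing through one name
 * invalidates the other (program-side snippet; tuple setup elided):
 *
 *	struct bpf_sock *sk, *copy;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), -1, 0);
 *	if (!sk)
 *		return 0;
 *	copy = sk;
 *	bpf_sk_release(sk);
 *	return copy->src_port;	// rejected: 'copy' is now a scalar
 */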
51c39bb1
AS
6574static void clear_caller_saved_regs(struct bpf_verifier_env *env,
6575 struct bpf_reg_state *regs)
6576{
6577 int i;
6578
6579 /* after the call registers r0 - r5 were scratched */
6580 for (i = 0; i < CALLER_SAVED_REGS; i++) {
6581 mark_reg_not_init(env, regs, caller_saved[i]);
6582 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6583 }
6584}
6585
14351375
YS
6586typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env,
6587 struct bpf_func_state *caller,
6588 struct bpf_func_state *callee,
6589 int insn_idx);
6590
6591static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6592 int *insn_idx, int subprog,
6593 set_callee_state_fn set_callee_state_cb)
f4d7e40a
AS
6594{
6595 struct bpf_verifier_state *state = env->cur_state;
51c39bb1 6596 struct bpf_func_info_aux *func_info_aux;
f4d7e40a 6597 struct bpf_func_state *caller, *callee;
14351375 6598 int err;
51c39bb1 6599 bool is_global = false;
f4d7e40a 6600
aada9ce6 6601 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
f4d7e40a 6602 verbose(env, "the call stack of %d frames is too deep\n",
aada9ce6 6603 state->curframe + 2);
f4d7e40a
AS
6604 return -E2BIG;
6605 }
6606
f4d7e40a
AS
6607 caller = state->frame[state->curframe];
6608 if (state->frame[state->curframe + 1]) {
6609 verbose(env, "verifier bug. Frame %d already allocated\n",
6610 state->curframe + 1);
6611 return -EFAULT;
6612 }
6613
51c39bb1
AS
6614 func_info_aux = env->prog->aux->func_info_aux;
6615 if (func_info_aux)
6616 is_global = func_info_aux[subprog].linkage == BTF_FUNC_GLOBAL;
34747c41 6617 err = btf_check_subprog_arg_match(env, subprog, caller->regs);
51c39bb1
AS
6618 if (err == -EFAULT)
6619 return err;
6620 if (is_global) {
6621 if (err) {
6622 verbose(env, "Caller passes invalid args into func#%d\n",
6623 subprog);
6624 return err;
6625 } else {
6626 if (env->log.level & BPF_LOG_LEVEL)
6627 verbose(env,
6628 "Func#%d is global and valid. Skipping.\n",
6629 subprog);
6630 clear_caller_saved_regs(env, caller->regs);
6631
45159b27 6632 /* All global functions return a 64-bit SCALAR_VALUE */
51c39bb1 6633 mark_reg_unknown(env, caller->regs, BPF_REG_0);
45159b27 6634 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
51c39bb1
AS
6635
6636 /* continue with next insn after call */
6637 return 0;
6638 }
6639 }
6640
bfc6bb74 6641 if (insn->code == (BPF_JMP | BPF_CALL) &&
a5bebc4f 6642 insn->src_reg == 0 &&
bfc6bb74
AS
6643 insn->imm == BPF_FUNC_timer_set_callback) {
6644 struct bpf_verifier_state *async_cb;
6645
6646 /* there is no real recursion here. timer callbacks are async */
7ddc80a4 6647 env->subprog_info[subprog].is_async_cb = true;
bfc6bb74
AS
6648 async_cb = push_async_cb(env, env->subprog_info[subprog].start,
6649 *insn_idx, subprog);
6650 if (!async_cb)
6651 return -EFAULT;
6652 callee = async_cb->frame[0];
6653 callee->async_entry_cnt = caller->async_entry_cnt + 1;
6654
6655 /* Convert bpf_timer_set_callback() args into timer callback args */
6656 err = set_callee_state_cb(env, caller, callee, *insn_idx);
6657 if (err)
6658 return err;
6659
6660 clear_caller_saved_regs(env, caller->regs);
6661 mark_reg_unknown(env, caller->regs, BPF_REG_0);
6662 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
6663 /* continue with next insn after call */
6664 return 0;
6665 }
6666
f4d7e40a
AS
6667 callee = kzalloc(sizeof(*callee), GFP_KERNEL);
6668 if (!callee)
6669 return -ENOMEM;
6670 state->frame[state->curframe + 1] = callee;
6671
6672 /* callee cannot access r0, r6 - r9 for reading and has to write
6673 * into its own stack before reading from it.
6674 * callee can read/write into caller's stack
6675 */
6676 init_func_state(env, callee,
6677 /* remember the callsite, it will be used by bpf_exit */
6678 *insn_idx /* callsite */,
6679 state->curframe + 1 /* frameno within this callchain */,
f910cefa 6680 subprog /* subprog number within this prog */);
f4d7e40a 6681
fd978bf7 6682 /* Transfer references to the callee */
c69431aa 6683 err = copy_reference_state(callee, caller);
fd978bf7
JS
6684 if (err)
6685 return err;
6686
14351375
YS
6687 err = set_callee_state_cb(env, caller, callee, *insn_idx);
6688 if (err)
6689 return err;
f4d7e40a 6690
51c39bb1 6691 clear_caller_saved_regs(env, caller->regs);
f4d7e40a
AS
6692
6693 /* only increment it after check_reg_arg() finished */
6694 state->curframe++;
6695
6696 /* and go analyze first insn of the callee */
14351375 6697 *insn_idx = env->subprog_info[subprog].start - 1;
f4d7e40a 6698
06ee7115 6699 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a 6700 verbose(env, "caller:\n");
0f55f9ed 6701 print_verifier_state(env, caller, true);
f4d7e40a 6702 verbose(env, "callee:\n");
0f55f9ed 6703 print_verifier_state(env, callee, true);
f4d7e40a
AS
6704 }
6705 return 0;
6706}
6707
314ee05e
YS
6708int map_set_for_each_callback_args(struct bpf_verifier_env *env,
6709 struct bpf_func_state *caller,
6710 struct bpf_func_state *callee)
6711{
6712 /* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn,
6713 * void *callback_ctx, u64 flags);
6714 * callback_fn(struct bpf_map *map, void *key, void *value,
6715 * void *callback_ctx);
6716 */
6717 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
6718
6719 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
6720 __mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6721 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr;
6722
6723 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
6724 __mark_reg_known_zero(&callee->regs[BPF_REG_3]);
6725 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr;
6726
6727 /* pointer to stack or null */
6728 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3];
6729
6730 /* unused */
6731 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6732 return 0;
6733}
6734
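/* Editor's sketch of the program-side shape these register assignments
 * describe (map and callback names are hypothetical):
 *
 *	static long count_cb(struct bpf_map *map, u32 *key, u64 *value,
 *			     void *ctx)
 *	{
 *		// key/value arrive as PTR_TO_MAP_KEY / PTR_TO_MAP_VALUE
 *		return 0;	// 0 = continue, 1 = stop iterating
 *	}
 *	...
 *	bpf_for_each_map_elem(&my_array, count_cb, &cb_ctx, 0);
 *
 * The [0, 1] return-value range is enforced in prepare_func_exit().
 */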
14351375
YS
6735static int set_callee_state(struct bpf_verifier_env *env,
6736 struct bpf_func_state *caller,
6737 struct bpf_func_state *callee, int insn_idx)
6738{
6739 int i;
6740
6741 /* copy r1 - r5 args that callee can access. The copy includes parent
 6742	 * pointers, which connect us up to the liveness chain
6743 */
6744 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
6745 callee->regs[i] = caller->regs[i];
6746 return 0;
6747}
6748
6749static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
6750 int *insn_idx)
6751{
6752 int subprog, target_insn;
6753
6754 target_insn = *insn_idx + insn->imm + 1;
6755 subprog = find_subprog(env, target_insn);
6756 if (subprog < 0) {
6757 verbose(env, "verifier bug. No program starts at insn %d\n",
6758 target_insn);
6759 return -EFAULT;
6760 }
6761
6762 return __check_func_call(env, insn, insn_idx, subprog, set_callee_state);
6763}
6764
69c087ba
YS
6765static int set_map_elem_callback_state(struct bpf_verifier_env *env,
6766 struct bpf_func_state *caller,
6767 struct bpf_func_state *callee,
6768 int insn_idx)
6769{
6770 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx];
6771 struct bpf_map *map;
6772 int err;
6773
6774 if (bpf_map_ptr_poisoned(insn_aux)) {
6775 verbose(env, "tail_call abusing map_ptr\n");
6776 return -EINVAL;
6777 }
6778
6779 map = BPF_MAP_PTR(insn_aux->map_ptr_state);
6780 if (!map->ops->map_set_for_each_callback_args ||
6781 !map->ops->map_for_each_callback) {
6782 verbose(env, "callback function not allowed for map\n");
6783 return -ENOTSUPP;
6784 }
6785
6786 err = map->ops->map_set_for_each_callback_args(env, caller, callee);
6787 if (err)
6788 return err;
6789
6790 callee->in_callback_fn = true;
6791 return 0;
6792}
6793
e6f2dd0f
JK
6794static int set_loop_callback_state(struct bpf_verifier_env *env,
6795 struct bpf_func_state *caller,
6796 struct bpf_func_state *callee,
6797 int insn_idx)
6798{
6799 /* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx,
6800 * u64 flags);
6801 * callback_fn(u32 index, void *callback_ctx);
6802 */
6803 callee->regs[BPF_REG_1].type = SCALAR_VALUE;
6804 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3];
6805
6806 /* unused */
6807 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]);
6808 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6809 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6810
6811 callee->in_callback_fn = true;
6812 return 0;
6813}
6814
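/* Editor's sketch of the matching program side (callback name is
 * hypothetical):
 *
 *	static long loop_cb(u32 index, void *ctx)
 *	{
 *		return 0;	// 0 = next iteration, 1 = break
 *	}
 *	...
 *	bpf_loop(100, loop_cb, &cb_ctx, 0);	// flags must be 0
 */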
b00628b1
AS
6815static int set_timer_callback_state(struct bpf_verifier_env *env,
6816 struct bpf_func_state *caller,
6817 struct bpf_func_state *callee,
6818 int insn_idx)
6819{
6820 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr;
6821
6822 /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn);
6823 * callback_fn(struct bpf_map *map, void *key, void *value);
6824 */
6825 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP;
6826 __mark_reg_known_zero(&callee->regs[BPF_REG_1]);
6827 callee->regs[BPF_REG_1].map_ptr = map_ptr;
6828
6829 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY;
6830 __mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6831 callee->regs[BPF_REG_2].map_ptr = map_ptr;
6832
6833 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE;
6834 __mark_reg_known_zero(&callee->regs[BPF_REG_3]);
6835 callee->regs[BPF_REG_3].map_ptr = map_ptr;
6836
6837 /* unused */
6838 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6839 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
bfc6bb74 6840 callee->in_async_callback_fn = true;
b00628b1
AS
6841 return 0;
6842}
6843
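/* Editor's sketch of the usage this models, assuming a hypothetical map
 * value type that embeds the timer:
 *
 *	struct elem { struct bpf_timer t; };
 *
 *	static int timer_cb(void *map, int *key, struct elem *val)
 *	{
 *		return 0;
 *	}
 *	...
 *	bpf_timer_init(&val->t, &timer_map, CLOCK_MONOTONIC);
 *	bpf_timer_set_callback(&val->t, timer_cb);
 *	bpf_timer_start(&val->t, 1000000, 0);	// fire in 1 ms
 */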
7c7e3d31
SL
6844static int set_find_vma_callback_state(struct bpf_verifier_env *env,
6845 struct bpf_func_state *caller,
6846 struct bpf_func_state *callee,
6847 int insn_idx)
6848{
6849 /* bpf_find_vma(struct task_struct *task, u64 addr,
6850 * void *callback_fn, void *callback_ctx, u64 flags)
6851 * (callback_fn)(struct task_struct *task,
6852 * struct vm_area_struct *vma, void *callback_ctx);
6853 */
6854 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
6855
6856 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
6857 __mark_reg_known_zero(&callee->regs[BPF_REG_2]);
6858 callee->regs[BPF_REG_2].btf = btf_vmlinux;
d19ddb47 6860	callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
7c7e3d31
SL
6860
6861 /* pointer to stack or null */
6862 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
6863
6864 /* unused */
6865 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
6866 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
6867 callee->in_callback_fn = true;
6868 return 0;
6869}
6870
f4d7e40a
AS
6871static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
6872{
6873 struct bpf_verifier_state *state = env->cur_state;
6874 struct bpf_func_state *caller, *callee;
6875 struct bpf_reg_state *r0;
fd978bf7 6876 int err;
f4d7e40a
AS
6877
6878 callee = state->frame[state->curframe];
6879 r0 = &callee->regs[BPF_REG_0];
6880 if (r0->type == PTR_TO_STACK) {
6881 /* technically it's ok to return caller's stack pointer
6882 * (or caller's caller's pointer) back to the caller,
 6883	 * since these pointers are valid. Only the current stack
 6884	 * pointer becomes invalid as soon as the function exits,
 6885	 * but let's be conservative.
6886 */
6887 verbose(env, "cannot return stack pointer to the caller\n");
6888 return -EINVAL;
6889 }
6890
6891 state->curframe--;
6892 caller = state->frame[state->curframe];
69c087ba
YS
6893 if (callee->in_callback_fn) {
6894 /* enforce R0 return value range [0, 1]. */
6895 struct tnum range = tnum_range(0, 1);
6896
6897 if (r0->type != SCALAR_VALUE) {
6898 verbose(env, "R0 not a scalar value\n");
6899 return -EACCES;
6900 }
6901 if (!tnum_in(range, r0->var_off)) {
6902 verbose_invalid_scalar(env, r0, &range, "callback return", "R0");
6903 return -EINVAL;
6904 }
6905 } else {
6906 /* return to the caller whatever r0 had in the callee */
6907 caller->regs[BPF_REG_0] = *r0;
6908 }
f4d7e40a 6909
fd978bf7 6910 /* Transfer references to the caller */
c69431aa 6911 err = copy_reference_state(caller, callee);
fd978bf7
JS
6912 if (err)
6913 return err;
6914
f4d7e40a 6915 *insn_idx = callee->callsite + 1;
06ee7115 6916 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a 6917 verbose(env, "returning from callee:\n");
0f55f9ed 6918 print_verifier_state(env, callee, true);
f4d7e40a 6919 verbose(env, "to caller at %d:\n", *insn_idx);
0f55f9ed 6920 print_verifier_state(env, caller, true);
f4d7e40a
AS
6921 }
6922 /* clear everything in the callee */
6923 free_func_state(callee);
6924 state->frame[state->curframe + 1] = NULL;
6925 return 0;
6926}
6927
849fa506
YS
6928static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
6929 int func_id,
6930 struct bpf_call_arg_meta *meta)
6931{
6932 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
6933
6934 if (ret_type != RET_INTEGER ||
6935 (func_id != BPF_FUNC_get_stack &&
fd0b88f7 6936 func_id != BPF_FUNC_get_task_stack &&
47cc0ed5
DB
6937 func_id != BPF_FUNC_probe_read_str &&
6938 func_id != BPF_FUNC_probe_read_kernel_str &&
6939 func_id != BPF_FUNC_probe_read_user_str))
849fa506
YS
6940 return;
6941
10060503 6942 ret_reg->smax_value = meta->msize_max_value;
fa123ac0 6943 ret_reg->s32_max_value = meta->msize_max_value;
b0270958
AS
6944 ret_reg->smin_value = -MAX_ERRNO;
6945 ret_reg->s32_min_value = -MAX_ERRNO;
849fa506
YS
6946 __reg_deduce_bounds(ret_reg);
6947 __reg_bound_offset(ret_reg);
10060503 6948 __update_reg_bounds(ret_reg);
849fa506
YS
6949}
6950
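/* Editor's sketch of the payoff: after the refinement the verifier knows
 * the return value of e.g. bpf_get_stack() lies in
 * [-MAX_ERRNO, sizeof(buf)], so a single sign check is enough before
 * using it as a length or offset:
 *
 *	long n = bpf_get_stack(ctx, buf, sizeof(buf), 0);
 *
 *	if (n < 0)
 *		return 0;
 *	// n is provably <= sizeof(buf); no extra upper-bound test needed
 */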
c93552c4
DB
6951static int
6952record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
6953 int func_id, int insn_idx)
6954{
6955 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
591fe988 6956 struct bpf_map *map = meta->map_ptr;
c93552c4
DB
6957
6958 if (func_id != BPF_FUNC_tail_call &&
09772d92
DB
6959 func_id != BPF_FUNC_map_lookup_elem &&
6960 func_id != BPF_FUNC_map_update_elem &&
f1a2e44a
MV
6961 func_id != BPF_FUNC_map_delete_elem &&
6962 func_id != BPF_FUNC_map_push_elem &&
6963 func_id != BPF_FUNC_map_pop_elem &&
69c087ba 6964 func_id != BPF_FUNC_map_peek_elem &&
e6a4750f 6965 func_id != BPF_FUNC_for_each_map_elem &&
07343110
FZ
6966 func_id != BPF_FUNC_redirect_map &&
6967 func_id != BPF_FUNC_map_lookup_percpu_elem)
c93552c4 6968 return 0;
09772d92 6969
591fe988 6970 if (map == NULL) {
c93552c4
DB
6971 verbose(env, "kernel subsystem misconfigured verifier\n");
6972 return -EINVAL;
6973 }
6974
591fe988
DB
6975 /* In case of read-only, some additional restrictions
6976 * need to be applied in order to prevent altering the
6977 * state of the map from program side.
6978 */
6979 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
6980 (func_id == BPF_FUNC_map_delete_elem ||
6981 func_id == BPF_FUNC_map_update_elem ||
6982 func_id == BPF_FUNC_map_push_elem ||
6983 func_id == BPF_FUNC_map_pop_elem)) {
6984 verbose(env, "write into map forbidden\n");
6985 return -EACCES;
6986 }
6987
d2e4c1e6 6988 if (!BPF_MAP_PTR(aux->map_ptr_state))
c93552c4 6989 bpf_map_ptr_store(aux, meta->map_ptr,
2c78ee89 6990 !meta->map_ptr->bypass_spec_v1);
d2e4c1e6 6991 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
c93552c4 6992 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
2c78ee89 6993 !meta->map_ptr->bypass_spec_v1);
c93552c4
DB
6994 return 0;
6995}
6996
d2e4c1e6
DB
6997static int
6998record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
6999 int func_id, int insn_idx)
7000{
7001 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
7002 struct bpf_reg_state *regs = cur_regs(env), *reg;
7003 struct bpf_map *map = meta->map_ptr;
7004 struct tnum range;
7005 u64 val;
cc52d914 7006 int err;
d2e4c1e6
DB
7007
7008 if (func_id != BPF_FUNC_tail_call)
7009 return 0;
7010 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
7011 verbose(env, "kernel subsystem misconfigured verifier\n");
7012 return -EINVAL;
7013 }
7014
7015 range = tnum_range(0, map->max_entries - 1);
7016 reg = &regs[BPF_REG_3];
7017
7018 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
7019 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
7020 return 0;
7021 }
7022
cc52d914
DB
7023 err = mark_chain_precision(env, BPF_REG_3);
7024 if (err)
7025 return err;
7026
d2e4c1e6
DB
7027 val = reg->var_off.value;
7028 if (bpf_map_key_unseen(aux))
7029 bpf_map_key_store(aux, val);
7030 else if (!bpf_map_key_poisoned(aux) &&
7031 bpf_map_key_immediate(aux) != val)
7032 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
7033 return 0;
7034}
7035
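/* Editor's note: a constant, precision-tracked index such as
 *
 *	bpf_tail_call(ctx, &prog_array, 2);
 *
 * records key 2 here, which later allows the tail call to be patched
 * into a direct jump. A variable or out-of-range index stores
 * BPF_MAP_KEY_POISON and keeps the generic, slower path.
 */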
fd978bf7
JS
7036static int check_reference_leak(struct bpf_verifier_env *env)
7037{
7038 struct bpf_func_state *state = cur_func(env);
7039 int i;
7040
7041 for (i = 0; i < state->acquired_refs; i++) {
7042 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
7043 state->refs[i].id, state->refs[i].insn_idx);
7044 }
7045 return state->acquired_refs ? -EINVAL : 0;
7046}
7047
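/* Editor's sketch of a program rejected by this check: the acquired
 * socket is still held when the program exits (or tail-calls), so the
 * verifier prints "Unreleased reference id=%d alloc_insn=%d":
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), -1, 0);
 *	if (!sk)
 *		return 0;
 *	return 0;	// leak: bpf_sk_release(sk) is missing
 */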
7b15523a
FR
7048static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
7049 struct bpf_reg_state *regs)
7050{
7051 struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
7052 struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
7053 struct bpf_map *fmt_map = fmt_reg->map_ptr;
7054 int err, fmt_map_off, num_args;
7055 u64 fmt_addr;
7056 char *fmt;
7057
7058 /* data must be an array of u64 */
7059 if (data_len_reg->var_off.value % 8)
7060 return -EINVAL;
7061 num_args = data_len_reg->var_off.value / 8;
7062
7063 /* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
7064 * and map_direct_value_addr is set.
7065 */
7066 fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
7067 err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
7068 fmt_map_off);
8e8ee109
FR
7069 if (err) {
7070 verbose(env, "verifier bug\n");
7071 return -EFAULT;
7072 }
7b15523a
FR
7073 fmt = (char *)(long)fmt_addr + fmt_map_off;
7074
 7075	/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, so we
7076 * can focus on validating the format specifiers.
7077 */
48cac3f4 7078 err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args);
7b15523a
FR
7079 if (err < 0)
7080 verbose(env, "Invalid format string\n");
7081
7082 return err;
7083}
7084
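/* Editor's sketch of a call shape that satisfies these checks: the
 * format string is a global const (so it lands in read-only map data at
 * a constant offset) and the data array holds one u64 per argument:
 *
 *	static const char fmt[] = "pid %d ts %llu\n";
 *	u64 args[2] = { pid, ts };
 *	char out[64];
 *
 *	bpf_snprintf(out, sizeof(out), fmt, args, sizeof(args));
 *
 * sizeof(args) == 16 keeps data_len a multiple of 8, as required above.
 */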
9b99edca
JO
7085static int check_get_func_ip(struct bpf_verifier_env *env)
7086{
9b99edca
JO
7087 enum bpf_prog_type type = resolve_prog_type(env->prog);
7088 int func_id = BPF_FUNC_get_func_ip;
7089
7090 if (type == BPF_PROG_TYPE_TRACING) {
f92c1e18 7091 if (!bpf_prog_has_trampoline(env->prog)) {
9b99edca
JO
7092 verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
7093 func_id_name(func_id), func_id);
7094 return -ENOTSUPP;
7095 }
7096 return 0;
9ffd9f3f
JO
7097 } else if (type == BPF_PROG_TYPE_KPROBE) {
7098 return 0;
9b99edca
JO
7099 }
7100
7101 verbose(env, "func %s#%d not supported for program type %d\n",
7102 func_id_name(func_id), func_id, type);
7103 return -ENOTSUPP;
7104}
7105
69c087ba
YS
7106static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7107 int *insn_idx_p)
17a52670 7108{
17a52670 7109 const struct bpf_func_proto *fn = NULL;
3c480732 7110 enum bpf_return_type ret_type;
c25b2ae1 7111 enum bpf_type_flag ret_flag;
638f5b90 7112 struct bpf_reg_state *regs;
33ff9823 7113 struct bpf_call_arg_meta meta;
69c087ba 7114 int insn_idx = *insn_idx_p;
969bf05e 7115 bool changes_data;
69c087ba 7116 int i, err, func_id;
17a52670
AS
7117
7118 /* find function prototype */
69c087ba 7119 func_id = insn->imm;
17a52670 7120 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
61bd5218
JK
7121 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
7122 func_id);
17a52670
AS
7123 return -EINVAL;
7124 }
7125
00176a34 7126 if (env->ops->get_func_proto)
5e43f899 7127 fn = env->ops->get_func_proto(func_id, env->prog);
17a52670 7128 if (!fn) {
61bd5218
JK
7129 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
7130 func_id);
17a52670
AS
7131 return -EINVAL;
7132 }
7133
7134 /* eBPF programs must be GPL compatible to use GPL-ed functions */
24701ece 7135 if (!env->prog->gpl_compatible && fn->gpl_only) {
3fe2867c 7136 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
17a52670
AS
7137 return -EINVAL;
7138 }
7139
eae2e83e
JO
7140 if (fn->allowed && !fn->allowed(env->prog)) {
7141 verbose(env, "helper call is not allowed in probe\n");
7142 return -EINVAL;
7143 }
7144
04514d13 7145 /* With LD_ABS/IND some JITs save/restore skb from r1. */
17bedab2 7146 changes_data = bpf_helper_changes_pkt_data(fn->func);
04514d13
DB
7147 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
7148 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
7149 func_id_name(func_id), func_id);
7150 return -EINVAL;
7151 }
969bf05e 7152
33ff9823 7153 memset(&meta, 0, sizeof(meta));
36bbef52 7154 meta.pkt_access = fn->pkt_access;
33ff9823 7155
8f14852e 7156 err = check_func_proto(fn, func_id, &meta);
435faee1 7157 if (err) {
61bd5218 7158 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
ebb676da 7159 func_id_name(func_id), func_id);
435faee1
DB
7160 return err;
7161 }
7162
d83525ca 7163 meta.func_id = func_id;
17a52670 7164 /* check args */
523a4cf4 7165 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
af7ec138 7166 err = check_func_arg(env, i, &meta, fn);
a7658e1a
AS
7167 if (err)
7168 return err;
7169 }
17a52670 7170
c93552c4
DB
7171 err = record_func_map(env, &meta, func_id, insn_idx);
7172 if (err)
7173 return err;
7174
d2e4c1e6
DB
7175 err = record_func_key(env, &meta, func_id, insn_idx);
7176 if (err)
7177 return err;
7178
435faee1
DB
 7179	/* Mark slots with STACK_MISC in case of raw mode; the stack offset
 7180	 * is inferred from register state.
7181 */
7182 for (i = 0; i < meta.access_size; i++) {
ca369602
DB
7183 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
7184 BPF_WRITE, -1, false);
435faee1
DB
7185 if (err)
7186 return err;
7187 }
7188
8f14852e
KKD
7189 regs = cur_regs(env);
7190
97e03f52
JK
7191 if (meta.uninit_dynptr_regno) {
7192 /* we write BPF_DW bits (8 bytes) at a time */
7193 for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) {
7194 err = check_mem_access(env, insn_idx, meta.uninit_dynptr_regno,
7195 i, BPF_DW, BPF_WRITE, -1, false);
7196 if (err)
7197 return err;
7198 }
7199
7200 err = mark_stack_slots_dynptr(env, &regs[meta.uninit_dynptr_regno],
7201 fn->arg_type[meta.uninit_dynptr_regno - BPF_REG_1],
7202 insn_idx);
7203 if (err)
7204 return err;
7205 }
7206
8f14852e
KKD
7207 if (meta.release_regno) {
7208 err = -EINVAL;
97e03f52
JK
7209 if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1]))
7210 err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
7211 else if (meta.ref_obj_id)
8f14852e
KKD
7212 err = release_reference(env, meta.ref_obj_id);
 7213		/* meta.ref_obj_id can only be 0 if the register that is meant to be
 7214		 * released is NULL (and the register number must be > R0).
7215 */
7216 else if (register_is_null(&regs[meta.release_regno]))
7217 err = 0;
46f8bc92
MKL
7218 if (err) {
7219 verbose(env, "func %s#%d reference has not been acquired before\n",
7220 func_id_name(func_id), func_id);
fd978bf7 7221 return err;
46f8bc92 7222 }
fd978bf7
JS
7223 }
7224
e6f2dd0f
JK
7225 switch (func_id) {
7226 case BPF_FUNC_tail_call:
7227 err = check_reference_leak(env);
7228 if (err) {
7229 verbose(env, "tail_call would lead to reference leak\n");
7230 return err;
7231 }
7232 break;
7233 case BPF_FUNC_get_local_storage:
 7234		/* check that the flags argument in get_local_storage(map, flags) is 0;
7235 * this is required because get_local_storage() can't return an error.
7236 */
7237 if (!register_is_null(&regs[BPF_REG_2])) {
7238 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
7239 return -EINVAL;
7240 }
7241 break;
7242 case BPF_FUNC_for_each_map_elem:
69c087ba
YS
7243 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7244 set_map_elem_callback_state);
e6f2dd0f
JK
7245 break;
7246 case BPF_FUNC_timer_set_callback:
b00628b1
AS
7247 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7248 set_timer_callback_state);
e6f2dd0f
JK
7249 break;
7250 case BPF_FUNC_find_vma:
7c7e3d31
SL
7251 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7252 set_find_vma_callback_state);
e6f2dd0f
JK
7253 break;
7254 case BPF_FUNC_snprintf:
7b15523a 7255 err = check_bpf_snprintf_call(env, regs);
e6f2dd0f
JK
7256 break;
7257 case BPF_FUNC_loop:
7258 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
7259 set_loop_callback_state);
7260 break;
263ae152
JK
7261 case BPF_FUNC_dynptr_from_mem:
7262 if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
7263 verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
7264 reg_type_str(env, regs[BPF_REG_1].type));
7265 return -EACCES;
7266 }
7b15523a
FR
7267 }
7268
e6f2dd0f
JK
7269 if (err)
7270 return err;
7271
17a52670 7272 /* reset caller saved regs */
dc503a8a 7273 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 7274 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
7275 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
7276 }
17a52670 7277
5327ed3d
JW
7278 /* helper call returns 64-bit value. */
7279 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
7280
dc503a8a 7281 /* update return register (already marked as written above) */
3c480732 7282 ret_type = fn->ret_type;
c25b2ae1 7283 ret_flag = type_flag(fn->ret_type);
3c480732 7284 if (ret_type == RET_INTEGER) {
f1174f77 7285 /* sets type to SCALAR_VALUE */
61bd5218 7286 mark_reg_unknown(env, regs, BPF_REG_0);
3c480732 7287 } else if (ret_type == RET_VOID) {
17a52670 7288 regs[BPF_REG_0].type = NOT_INIT;
3c480732 7289 } else if (base_type(ret_type) == RET_PTR_TO_MAP_VALUE) {
f1174f77 7290 /* There is no offset yet applied, variable or fixed */
61bd5218 7291 mark_reg_known_zero(env, regs, BPF_REG_0);
17a52670
AS
7292 /* remember map_ptr, so that check_map_access()
7293 * can check 'value_size' boundary of memory access
7294 * to map element returned from bpf_map_lookup_elem()
7295 */
33ff9823 7296 if (meta.map_ptr == NULL) {
61bd5218
JK
7297 verbose(env,
7298 "kernel subsystem misconfigured verifier\n");
17a52670
AS
7299 return -EINVAL;
7300 }
33ff9823 7301 regs[BPF_REG_0].map_ptr = meta.map_ptr;
3e8ce298 7302 regs[BPF_REG_0].map_uid = meta.map_uid;
c25b2ae1
HL
7303 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag;
7304 if (!type_may_be_null(ret_type) &&
7305 map_value_has_spin_lock(meta.map_ptr)) {
7306 regs[BPF_REG_0].id = ++env->id_gen;
4d31f301 7307 }
3c480732 7308 } else if (base_type(ret_type) == RET_PTR_TO_SOCKET) {
c64b7983 7309 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae1 7310 regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag;
3c480732 7311 } else if (base_type(ret_type) == RET_PTR_TO_SOCK_COMMON) {
85a51f8c 7312 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae1 7313 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag;
3c480732 7314 } else if (base_type(ret_type) == RET_PTR_TO_TCP_SOCK) {
655a51e5 7315 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae1 7316 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag;
3c480732 7317 } else if (base_type(ret_type) == RET_PTR_TO_ALLOC_MEM) {
457f4436 7318 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae1 7319 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
457f4436 7320 regs[BPF_REG_0].mem_size = meta.mem_size;
3c480732 7321 } else if (base_type(ret_type) == RET_PTR_TO_MEM_OR_BTF_ID) {
eaa6bcb7
HL
7322 const struct btf_type *t;
7323
7324 mark_reg_known_zero(env, regs, BPF_REG_0);
22dc4a0f 7325 t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
eaa6bcb7
HL
7326 if (!btf_type_is_struct(t)) {
7327 u32 tsize;
7328 const struct btf_type *ret;
7329 const char *tname;
7330
7331 /* resolve the type size of ksym. */
22dc4a0f 7332 ret = btf_resolve_size(meta.ret_btf, t, &tsize);
eaa6bcb7 7333 if (IS_ERR(ret)) {
22dc4a0f 7334 tname = btf_name_by_offset(meta.ret_btf, t->name_off);
eaa6bcb7
HL
7335 verbose(env, "unable to resolve the size of type '%s': %ld\n",
7336 tname, PTR_ERR(ret));
7337 return -EINVAL;
7338 }
c25b2ae1 7339 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag;
eaa6bcb7
HL
7340 regs[BPF_REG_0].mem_size = tsize;
7341 } else {
34d3a78c
HL
7342 /* MEM_RDONLY may be carried from ret_flag, but it
 7343			 * doesn't apply to PTR_TO_BTF_ID. Fold it; otherwise
7344 * it will confuse the check of PTR_TO_BTF_ID in
7345 * check_mem_access().
7346 */
7347 ret_flag &= ~MEM_RDONLY;
7348
c25b2ae1 7349 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
22dc4a0f 7350 regs[BPF_REG_0].btf = meta.ret_btf;
eaa6bcb7
HL
7351 regs[BPF_REG_0].btf_id = meta.ret_btf_id;
7352 }
3c480732 7353 } else if (base_type(ret_type) == RET_PTR_TO_BTF_ID) {
c0a5a21c 7354 struct btf *ret_btf;
af7ec138
YS
7355 int ret_btf_id;
7356
7357 mark_reg_known_zero(env, regs, BPF_REG_0);
c25b2ae1 7358 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag;
c0a5a21c
KKD
7359 if (func_id == BPF_FUNC_kptr_xchg) {
7360 ret_btf = meta.kptr_off_desc->kptr.btf;
7361 ret_btf_id = meta.kptr_off_desc->kptr.btf_id;
7362 } else {
7363 ret_btf = btf_vmlinux;
7364 ret_btf_id = *fn->ret_btf_id;
7365 }
af7ec138 7366 if (ret_btf_id == 0) {
3c480732
HL
7367 verbose(env, "invalid return type %u of func %s#%d\n",
7368 base_type(ret_type), func_id_name(func_id),
7369 func_id);
af7ec138
YS
7370 return -EINVAL;
7371 }
c0a5a21c 7372 regs[BPF_REG_0].btf = ret_btf;
af7ec138 7373 regs[BPF_REG_0].btf_id = ret_btf_id;
17a52670 7374 } else {
3c480732
HL
7375 verbose(env, "unknown return type %u of func %s#%d\n",
7376 base_type(ret_type), func_id_name(func_id), func_id);
17a52670
AS
7377 return -EINVAL;
7378 }
04fd61ab 7379
c25b2ae1 7380 if (type_may_be_null(regs[BPF_REG_0].type))
93c230e3
MKL
7381 regs[BPF_REG_0].id = ++env->id_gen;
7382
0f3adc28 7383 if (is_ptr_cast_function(func_id)) {
1b986589
MKL
7384 /* For release_reference() */
7385 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
64d85290 7386 } else if (is_acquire_function(func_id, meta.map_ptr)) {
0f3adc28
LB
7387 int id = acquire_reference_state(env, insn_idx);
7388
7389 if (id < 0)
7390 return id;
7391 /* For mark_ptr_or_null_reg() */
7392 regs[BPF_REG_0].id = id;
7393 /* For release_reference() */
7394 regs[BPF_REG_0].ref_obj_id = id;
34d4ef57
JK
7395 } else if (func_id == BPF_FUNC_dynptr_data) {
7396 int dynptr_id = 0, i;
7397
7398 /* Find the id of the dynptr we're acquiring a reference to */
7399 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
7400 if (arg_type_is_dynptr(fn->arg_type[i])) {
7401 if (dynptr_id) {
7402 verbose(env, "verifier internal error: multiple dynptr args in func\n");
7403 return -EFAULT;
7404 }
7405 dynptr_id = stack_slot_get_id(env, &regs[BPF_REG_1 + i]);
7406 }
7407 }
7408 /* For release_reference() */
7409 regs[BPF_REG_0].ref_obj_id = dynptr_id;
0f3adc28 7410 }
1b986589 7411
849fa506
YS
7412 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
7413
61bd5218 7414 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
35578d79
KX
7415 if (err)
7416 return err;
04fd61ab 7417
fa28dcb8
SL
7418 if ((func_id == BPF_FUNC_get_stack ||
7419 func_id == BPF_FUNC_get_task_stack) &&
7420 !env->prog->has_callchain_buf) {
c195651e
YS
7421 const char *err_str;
7422
7423#ifdef CONFIG_PERF_EVENTS
7424 err = get_callchain_buffers(sysctl_perf_event_max_stack);
7425 err_str = "cannot get callchain buffer for func %s#%d\n";
7426#else
7427 err = -ENOTSUPP;
7428 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
7429#endif
7430 if (err) {
7431 verbose(env, err_str, func_id_name(func_id), func_id);
7432 return err;
7433 }
7434
7435 env->prog->has_callchain_buf = true;
7436 }
7437
5d99cb2c
SL
7438 if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack)
7439 env->prog->call_get_stack = true;
7440
9b99edca
JO
7441 if (func_id == BPF_FUNC_get_func_ip) {
7442 if (check_get_func_ip(env))
7443 return -ENOTSUPP;
7444 env->prog->call_get_func_ip = true;
7445 }
7446
969bf05e
AS
7447 if (changes_data)
7448 clear_all_pkt_pointers(env);
7449 return 0;
7450}
7451
e6ac2450
MKL
7452/* mark_btf_func_reg_size() is used when the reg size is determined by
7453 * the BTF func_proto's return value size and argument.
7454 */
7455static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno,
7456 size_t reg_size)
7457{
7458 struct bpf_reg_state *reg = &cur_regs(env)[regno];
7459
7460 if (regno == BPF_REG_0) {
7461 /* Function return value */
7462 reg->live |= REG_LIVE_WRITTEN;
7463 reg->subreg_def = reg_size == sizeof(u64) ?
7464 DEF_NOT_SUBREG : env->insn_idx + 1;
7465 } else {
7466 /* Function argument */
7467 if (reg_size == sizeof(u64)) {
7468 mark_insn_zext(env, reg);
7469 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
7470 } else {
7471 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32);
7472 }
7473 }
7474}
7475
5c073f26
KKD
7476static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
7477 int *insn_idx_p)
e6ac2450
MKL
7478{
7479 const struct btf_type *t, *func, *func_proto, *ptr_type;
7480 struct bpf_reg_state *regs = cur_regs(env);
7481 const char *func_name, *ptr_type_name;
7482 u32 i, nargs, func_id, ptr_type_id;
5c073f26 7483 int err, insn_idx = *insn_idx_p;
e6ac2450 7484 const struct btf_param *args;
2357672c 7485 struct btf *desc_btf;
5c073f26 7486 bool acq;
e6ac2450 7487
a5d82727
KKD
7488 /* skip for now, but return error when we find this in fixup_kfunc_call */
7489 if (!insn->imm)
7490 return 0;
7491
43bf0878 7492 desc_btf = find_kfunc_desc_btf(env, insn->off);
2357672c
KKD
7493 if (IS_ERR(desc_btf))
7494 return PTR_ERR(desc_btf);
7495
e6ac2450 7496 func_id = insn->imm;
2357672c
KKD
7497 func = btf_type_by_id(desc_btf, func_id);
7498 func_name = btf_name_by_offset(desc_btf, func->name_off);
7499 func_proto = btf_type_by_id(desc_btf, func->type);
e6ac2450 7500
b202d844
KKD
7501 if (!btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
7502 BTF_KFUNC_TYPE_CHECK, func_id)) {
e6ac2450
MKL
7503 verbose(env, "calling kernel function %s is not allowed\n",
7504 func_name);
7505 return -EACCES;
7506 }
7507
5c073f26
KKD
7508 acq = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
7509 BTF_KFUNC_TYPE_ACQUIRE, func_id);
7510
e6ac2450 7511 /* Check the arguments */
2357672c 7512 err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs);
5c073f26 7513 if (err < 0)
e6ac2450 7514 return err;
5c073f26
KKD
7515 /* In case of release function, we get register number of refcounted
7516 * PTR_TO_BTF_ID back from btf_check_kfunc_arg_match, do the release now
7517 */
7518 if (err) {
7519 err = release_reference(env, regs[err].ref_obj_id);
7520 if (err) {
7521 verbose(env, "kfunc %s#%d reference has not been acquired before\n",
7522 func_name, func_id);
7523 return err;
7524 }
7525 }
e6ac2450
MKL
7526
7527 for (i = 0; i < CALLER_SAVED_REGS; i++)
7528 mark_reg_not_init(env, regs, caller_saved[i]);
7529
7530 /* Check return type */
2357672c 7531 t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
5c073f26
KKD
7532
7533 if (acq && !btf_type_is_ptr(t)) {
7534 verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n");
7535 return -EINVAL;
7536 }
7537
e6ac2450
MKL
7538 if (btf_type_is_scalar(t)) {
7539 mark_reg_unknown(env, regs, BPF_REG_0);
7540 mark_btf_func_reg_size(env, BPF_REG_0, t->size);
7541 } else if (btf_type_is_ptr(t)) {
2357672c 7542 ptr_type = btf_type_skip_modifiers(desc_btf, t->type,
e6ac2450
MKL
7543 &ptr_type_id);
7544 if (!btf_type_is_struct(ptr_type)) {
2357672c 7545 ptr_type_name = btf_name_by_offset(desc_btf,
e6ac2450
MKL
7546 ptr_type->name_off);
7547 verbose(env, "kernel function %s returns pointer type %s %s is not supported\n",
7548 func_name, btf_type_str(ptr_type),
7549 ptr_type_name);
7550 return -EINVAL;
7551 }
7552 mark_reg_known_zero(env, regs, BPF_REG_0);
2357672c 7553 regs[BPF_REG_0].btf = desc_btf;
e6ac2450
MKL
7554 regs[BPF_REG_0].type = PTR_TO_BTF_ID;
7555 regs[BPF_REG_0].btf_id = ptr_type_id;
5c073f26
KKD
7556 if (btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
7557 BTF_KFUNC_TYPE_RET_NULL, func_id)) {
7558 regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
7559 /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
7560 regs[BPF_REG_0].id = ++env->id_gen;
7561 }
e6ac2450 7562 mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
5c073f26
KKD
7563 if (acq) {
7564 int id = acquire_reference_state(env, insn_idx);
7565
7566 if (id < 0)
7567 return id;
7568 regs[BPF_REG_0].id = id;
7569 regs[BPF_REG_0].ref_obj_id = id;
7570 }
e6ac2450
MKL
7571 } /* else { add_kfunc_call() ensures it is btf_type_is_void(t) } */
7572
7573 nargs = btf_type_vlen(func_proto);
7574 args = (const struct btf_param *)(func_proto + 1);
7575 for (i = 0; i < nargs; i++) {
7576 u32 regno = i + 1;
7577
2357672c 7578 t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
e6ac2450
MKL
7579 if (btf_type_is_ptr(t))
7580 mark_btf_func_reg_size(env, regno, sizeof(void *));
7581 else
7582 /* scalar. ensured by btf_check_kfunc_arg_match() */
7583 mark_btf_func_reg_size(env, regno, t->size);
7584 }
7585
7586 return 0;
7587}
7588
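/* Editor's sketch of the program side of an acquire/release kfunc pair.
 * The externs below come from the kernel's bpf test infrastructure and
 * are shown only to illustrate the pairing enforced above:
 *
 *	extern struct prog_test_ref_kfunc *
 *	bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym;
 *	extern void
 *	bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
 *
 * An acquire kfunc must return a pointer (checked above), and the
 * reference it installs must later reach the matching release kfunc or
 * check_reference_leak() fails the program.
 */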
b03c9f9f
EC
7589static bool signed_add_overflows(s64 a, s64 b)
7590{
7591 /* Do the add in u64, where overflow is well-defined */
7592 s64 res = (s64)((u64)a + (u64)b);
7593
7594 if (b < 0)
7595 return res > a;
7596 return res < a;
7597}
7598
bc895e8b 7599static bool signed_add32_overflows(s32 a, s32 b)
3f50f132
JF
7600{
7601 /* Do the add in u32, where overflow is well-defined */
7602 s32 res = (s32)((u32)a + (u32)b);
7603
7604 if (b < 0)
7605 return res > a;
7606 return res < a;
7607}
7608
bc895e8b 7609static bool signed_sub_overflows(s64 a, s64 b)
b03c9f9f
EC
7610{
7611 /* Do the sub in u64, where overflow is well-defined */
7612 s64 res = (s64)((u64)a - (u64)b);
7613
7614 if (b < 0)
7615 return res < a;
7616 return res > a;
969bf05e
AS
7617}
7618
3f50f132
JF
7619static bool signed_sub32_overflows(s32 a, s32 b)
7620{
bc895e8b 7621 /* Do the sub in u32, where overflow is well-defined */
3f50f132
JF
7622 s32 res = (s32)((u32)a - (u32)b);
7623
7624 if (b < 0)
7625 return res < a;
7626 return res > a;
7627}
7628
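/* Editor's worked example for the pattern shared by the four helpers
 * above: with a = S64_MAX and b = 1, the u64 addition wraps to S64_MIN,
 * so res < a while b > 0 and overflow is reported; with b = -1 the
 * result S64_MAX - 1 is less than a, exactly what a negative addend
 * should produce, so no overflow is reported. The sub variants mirror
 * this with the comparison directions flipped.
 */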
bb7f0f98
AS
7629static bool check_reg_sane_offset(struct bpf_verifier_env *env,
7630 const struct bpf_reg_state *reg,
7631 enum bpf_reg_type type)
7632{
7633 bool known = tnum_is_const(reg->var_off);
7634 s64 val = reg->var_off.value;
7635 s64 smin = reg->smin_value;
7636
7637 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
7638 verbose(env, "math between %s pointer and %lld is not allowed\n",
c25b2ae1 7639 reg_type_str(env, type), val);
bb7f0f98
AS
7640 return false;
7641 }
7642
7643 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
7644 verbose(env, "%s pointer offset %d is not allowed\n",
c25b2ae1 7645 reg_type_str(env, type), reg->off);
bb7f0f98
AS
7646 return false;
7647 }
7648
7649 if (smin == S64_MIN) {
7650 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
c25b2ae1 7651 reg_type_str(env, type));
bb7f0f98
AS
7652 return false;
7653 }
7654
7655 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
7656 verbose(env, "value %lld makes %s pointer be out of bounds\n",
c25b2ae1 7657 smin, reg_type_str(env, type));
bb7f0f98
AS
7658 return false;
7659 }
7660
7661 return true;
7662}
7663
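/* Editor's worked example: BPF_MAX_VAR_OFF caps every component, so a
 * program doing
 *
 *	val = bpf_map_lookup_elem(&m, &k);
 *	if (!val)
 *		return 0;
 *	val += (1ULL << 30);	// known constant >= BPF_MAX_VAR_OFF
 *
 * fails the first test here with "math between map_value pointer and
 * 1073741824 is not allowed".
 */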
979d63d5
DB
7664static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
7665{
7666 return &env->insn_aux_data[env->insn_idx];
7667}
7668
a6aaece0
DB
7669enum {
7670 REASON_BOUNDS = -1,
7671 REASON_TYPE = -2,
7672 REASON_PATHS = -3,
7673 REASON_LIMIT = -4,
7674 REASON_STACK = -5,
7675};
7676
979d63d5 7677static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
bb01a1bb 7678 u32 *alu_limit, bool mask_to_left)
979d63d5 7679{
7fedb63a 7680 u32 max = 0, ptr_limit = 0;
979d63d5
DB
7681
7682 switch (ptr_reg->type) {
7683 case PTR_TO_STACK:
1b1597e6 7684 /* Offset 0 is out-of-bounds, but acceptable start for the
7fedb63a
DB
7685 * left direction, see BPF_REG_FP. Also, unknown scalar
7686 * offset where we would need to deal with min/max bounds is
7687 * currently prohibited for unprivileged.
1b1597e6
PK
7688 */
7689 max = MAX_BPF_STACK + mask_to_left;
7fedb63a 7690 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
b658bbb8 7691 break;
979d63d5 7692 case PTR_TO_MAP_VALUE:
1b1597e6 7693 max = ptr_reg->map_ptr->value_size;
7fedb63a
DB
7694 ptr_limit = (mask_to_left ?
7695 ptr_reg->smin_value :
7696 ptr_reg->umax_value) + ptr_reg->off;
b658bbb8 7697 break;
979d63d5 7698 default:
a6aaece0 7699 return REASON_TYPE;
979d63d5 7700 }
b658bbb8
DB
7701
7702 if (ptr_limit >= max)
a6aaece0 7703 return REASON_LIMIT;
b658bbb8
DB
7704 *alu_limit = ptr_limit;
7705 return 0;
979d63d5
DB
7706}
7707
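/* Editor's worked example: for a PTR_TO_STACK pointer at fp-16
 * (var_off 0, off = -16) with mask_to_left set, ptr_limit becomes
 * -(0 + -16) = 16 against max = MAX_BPF_STACK + 1, i.e. the sanitized
 * ALU op may move the pointer at most 16 bytes further down before the
 * masking emitted in the patched program clamps it.
 */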
d3bd7413
DB
7708static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
7709 const struct bpf_insn *insn)
7710{
2c78ee89 7711 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K;
d3bd7413
DB
7712}
7713
7714static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
7715 u32 alu_state, u32 alu_limit)
7716{
7717 /* If we arrived here from different branches with different
7718 * state or limits to sanitize, then this won't work.
7719 */
7720 if (aux->alu_state &&
7721 (aux->alu_state != alu_state ||
7722 aux->alu_limit != alu_limit))
a6aaece0 7723 return REASON_PATHS;
d3bd7413 7724
e6ac5933 7725 /* Corresponding fixup done in do_misc_fixups(). */
d3bd7413
DB
7726 aux->alu_state = alu_state;
7727 aux->alu_limit = alu_limit;
7728 return 0;
7729}
7730
7731static int sanitize_val_alu(struct bpf_verifier_env *env,
7732 struct bpf_insn *insn)
7733{
7734 struct bpf_insn_aux_data *aux = cur_aux(env);
7735
7736 if (can_skip_alu_sanitation(env, insn))
7737 return 0;
7738
7739 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
7740}
7741
f5288193
DB
7742static bool sanitize_needed(u8 opcode)
7743{
7744 return opcode == BPF_ADD || opcode == BPF_SUB;
7745}
7746
3d0220f6
DB
7747struct bpf_sanitize_info {
7748 struct bpf_insn_aux_data aux;
bb01a1bb 7749 bool mask_to_left;
3d0220f6
DB
7750};
7751
9183671a
DB
7752static struct bpf_verifier_state *
7753sanitize_speculative_path(struct bpf_verifier_env *env,
7754 const struct bpf_insn *insn,
7755 u32 next_idx, u32 curr_idx)
7756{
7757 struct bpf_verifier_state *branch;
7758 struct bpf_reg_state *regs;
7759
7760 branch = push_stack(env, next_idx, curr_idx, true);
7761 if (branch && insn) {
7762 regs = branch->frame[branch->curframe]->regs;
7763 if (BPF_SRC(insn->code) == BPF_K) {
7764 mark_reg_unknown(env, regs, insn->dst_reg);
7765 } else if (BPF_SRC(insn->code) == BPF_X) {
7766 mark_reg_unknown(env, regs, insn->dst_reg);
7767 mark_reg_unknown(env, regs, insn->src_reg);
7768 }
7769 }
7770 return branch;
7771}
7772
979d63d5
DB
7773static int sanitize_ptr_alu(struct bpf_verifier_env *env,
7774 struct bpf_insn *insn,
7775 const struct bpf_reg_state *ptr_reg,
6f55b2f2 7776 const struct bpf_reg_state *off_reg,
979d63d5 7777 struct bpf_reg_state *dst_reg,
3d0220f6 7778 struct bpf_sanitize_info *info,
7fedb63a 7779 const bool commit_window)
979d63d5 7780{
3d0220f6 7781 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
979d63d5 7782 struct bpf_verifier_state *vstate = env->cur_state;
801c6058 7783 bool off_is_imm = tnum_is_const(off_reg->var_off);
6f55b2f2 7784 bool off_is_neg = off_reg->smin_value < 0;
979d63d5
DB
7785 bool ptr_is_dst_reg = ptr_reg == dst_reg;
7786 u8 opcode = BPF_OP(insn->code);
7787 u32 alu_state, alu_limit;
7788 struct bpf_reg_state tmp;
7789 bool ret;
f232326f 7790 int err;
979d63d5 7791
d3bd7413 7792 if (can_skip_alu_sanitation(env, insn))
979d63d5
DB
7793 return 0;
7794
7795 /* We already marked aux for masking from non-speculative
7796 * paths, thus we got here in the first place. We only care
7797 * to explore bad access from here.
7798 */
7799 if (vstate->speculative)
7800 goto do_sim;
7801
bb01a1bb
DB
7802 if (!commit_window) {
7803 if (!tnum_is_const(off_reg->var_off) &&
7804 (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
7805 return REASON_BOUNDS;
7806
7807 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
7808 (opcode == BPF_SUB && !off_is_neg);
7809 }
7810
7811 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
f232326f
PK
7812 if (err < 0)
7813 return err;
7814
7fedb63a
DB
7815 if (commit_window) {
7816 /* In commit phase we narrow the masking window based on
7817 * the observed pointer move after the simulated operation.
7818 */
3d0220f6
DB
7819 alu_state = info->aux.alu_state;
7820 alu_limit = abs(info->aux.alu_limit - alu_limit);
7fedb63a
DB
7821 } else {
7822 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
801c6058 7823 alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
7fedb63a
DB
7824 alu_state |= ptr_is_dst_reg ?
7825 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
e042aa53
DB
7826
7827 /* Limit pruning on unknown scalars to enable deep search for
7828 * potential masking differences from other program paths.
7829 */
7830 if (!off_is_imm)
7831 env->explore_alu_limits = true;
7fedb63a
DB
7832 }
7833
f232326f
PK
7834 err = update_alu_sanitation_state(aux, alu_state, alu_limit);
7835 if (err < 0)
7836 return err;
979d63d5 7837do_sim:
7fedb63a
DB
7838 /* If we're in commit phase, we're done here given we already
7839 * pushed the truncated dst_reg into the speculative verification
7840 * stack.
a7036191
DB
7841 *
7842 * Also, when register is a known constant, we rewrite register-based
7843 * operation to immediate-based, and thus do not need masking (and as
7844 * a consequence, do not need to simulate the zero-truncation either).
7fedb63a 7845 */
a7036191 7846 if (commit_window || off_is_imm)
7fedb63a
DB
7847 return 0;
7848
979d63d5
DB
7849 /* Simulate and find potential out-of-bounds access under
7850 * speculative execution from truncation as a result of
7851 * masking when off was not within expected range. If off
7852 * sits in dst, then we temporarily need to move ptr there
7853 * to simulate dst (== 0) +/-= ptr. Needed, for example,
7854 * for cases where we use K-based arithmetic in one direction
7855 * and truncated reg-based in the other in order to explore
7856 * bad access.
7857 */
7858 if (!ptr_is_dst_reg) {
7859 tmp = *dst_reg;
7860 *dst_reg = *ptr_reg;
7861 }
9183671a
DB
7862 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
7863 env->insn_idx);
0803278b 7864 if (!ptr_is_dst_reg && ret)
979d63d5 7865 *dst_reg = tmp;
a6aaece0
DB
7866 return !ret ? REASON_STACK : 0;
7867}
7868
fe9a5ca7
DB
7869static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
7870{
7871 struct bpf_verifier_state *vstate = env->cur_state;
7872
7873 /* If we simulate paths under speculation, we don't update the
7874 * insn as 'seen' such that when we verify unreachable paths in
7875 * the non-speculative domain, sanitize_dead_code() can still
7876 * rewrite/sanitize them.
7877 */
7878 if (!vstate->speculative)
7879 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
7880}
7881
a6aaece0
DB
7882static int sanitize_err(struct bpf_verifier_env *env,
7883 const struct bpf_insn *insn, int reason,
7884 const struct bpf_reg_state *off_reg,
7885 const struct bpf_reg_state *dst_reg)
7886{
7887 static const char *err = "pointer arithmetic with it prohibited for !root";
7888 const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
7889 u32 dst = insn->dst_reg, src = insn->src_reg;
7890
7891 switch (reason) {
7892 case REASON_BOUNDS:
7893 verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
7894 off_reg == dst_reg ? dst : src, err);
7895 break;
7896 case REASON_TYPE:
7897 verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
7898 off_reg == dst_reg ? src : dst, err);
7899 break;
7900 case REASON_PATHS:
7901 verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
7902 dst, op, err);
7903 break;
7904 case REASON_LIMIT:
7905 verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
7906 dst, op, err);
7907 break;
7908 case REASON_STACK:
7909 verbose(env, "R%d could not be pushed for speculative verification, %s\n",
7910 dst, err);
7911 break;
7912 default:
7913 verbose(env, "verifier internal error: unknown reason (%d)\n",
7914 reason);
7915 break;
7916 }
7917
7918 return -EACCES;
979d63d5
DB
7919}
7920
01f810ac
AM
7921/* check that stack access falls within stack limits and that 'reg' doesn't
7922 * have a variable offset.
7923 *
7924 * Variable offset is prohibited for unprivileged mode for simplicity since it
7925 * requires corresponding support in Spectre masking for stack ALU. See also
7926 * retrieve_ptr_limit().
 7927 *
7929 * 'off' includes 'reg->off'.
7930 */
7931static int check_stack_access_for_ptr_arithmetic(
7932 struct bpf_verifier_env *env,
7933 int regno,
7934 const struct bpf_reg_state *reg,
7935 int off)
7936{
7937 if (!tnum_is_const(reg->var_off)) {
7938 char tn_buf[48];
7939
7940 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
7941 verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n",
7942 regno, tn_buf, off);
7943 return -EACCES;
7944 }
7945
7946 if (off >= 0 || off < -MAX_BPF_STACK) {
7947 verbose(env, "R%d stack pointer arithmetic goes out of range, "
7948 "prohibited for !root; off=%d\n", regno, off);
7949 return -EACCES;
7950 }
7951
7952 return 0;
7953}
7954
073815b7
DB
7955static int sanitize_check_bounds(struct bpf_verifier_env *env,
7956 const struct bpf_insn *insn,
7957 const struct bpf_reg_state *dst_reg)
7958{
7959 u32 dst = insn->dst_reg;
7960
7961 /* For unprivileged we require that resulting offset must be in bounds
7962 * in order to be able to sanitize access later on.
7963 */
7964 if (env->bypass_spec_v1)
7965 return 0;
7966
7967 switch (dst_reg->type) {
7968 case PTR_TO_STACK:
7969 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
7970 dst_reg->off + dst_reg->var_off.value))
7971 return -EACCES;
7972 break;
7973 case PTR_TO_MAP_VALUE:
61df10c7 7974 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) {
073815b7
DB
7975 verbose(env, "R%d pointer arithmetic of map value goes out of range, "
7976 "prohibited for !root\n", dst);
7977 return -EACCES;
7978 }
7979 break;
7980 default:
7981 break;
7982 }
7983
7984 return 0;
7985}
01f810ac 7986
f1174f77 7987/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
f1174f77
EC
7988 * Caller should also handle BPF_MOV case separately.
7989 * If we return -EACCES, caller may want to try again treating pointer as a
7990 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
7991 */
7992static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
7993 struct bpf_insn *insn,
7994 const struct bpf_reg_state *ptr_reg,
7995 const struct bpf_reg_state *off_reg)
969bf05e 7996{
f4d7e40a
AS
7997 struct bpf_verifier_state *vstate = env->cur_state;
7998 struct bpf_func_state *state = vstate->frame[vstate->curframe];
7999 struct bpf_reg_state *regs = state->regs, *dst_reg;
f1174f77 8000 bool known = tnum_is_const(off_reg->var_off);
b03c9f9f
EC
8001 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
8002 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
8003 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
8004 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
3d0220f6 8005 struct bpf_sanitize_info info = {};
        u8 opcode = BPF_OP(insn->code);
        u32 dst = insn->dst_reg;
        int ret;

        dst_reg = &regs[dst];

        if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
            smin_val > smax_val || umin_val > umax_val) {
                /* Taint dst register if offset had invalid bounds derived from
                 * e.g. dead branches.
                 */
                __mark_reg_unknown(env, dst_reg);
                return 0;
        }

        if (BPF_CLASS(insn->code) != BPF_ALU64) {
                /* 32-bit ALU ops on pointers produce (meaningless) scalars */
                if (opcode == BPF_SUB && env->allow_ptr_leaks) {
                        __mark_reg_unknown(env, dst_reg);
                        return 0;
                }

                verbose(env,
                        "R%d 32-bit pointer arithmetic prohibited\n",
                        dst);
                return -EACCES;
        }

        if (ptr_reg->type & PTR_MAYBE_NULL) {
                verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
                        dst, reg_type_str(env, ptr_reg->type));
                return -EACCES;
        }

        switch (base_type(ptr_reg->type)) {
        case CONST_PTR_TO_MAP:
                /* smin_val represents the known value */
                if (known && smin_val == 0 && opcode == BPF_ADD)
                        break;
                fallthrough;
        case PTR_TO_PACKET_END:
        case PTR_TO_SOCKET:
        case PTR_TO_SOCK_COMMON:
        case PTR_TO_TCP_SOCK:
        case PTR_TO_XDP_SOCK:
                verbose(env, "R%d pointer arithmetic on %s prohibited\n",
                        dst, reg_type_str(env, ptr_reg->type));
                return -EACCES;
        default:
                break;
        }

        /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
         * The id may be overwritten later if we create a new variable offset.
         */
        dst_reg->type = ptr_reg->type;
        dst_reg->id = ptr_reg->id;

        if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
            !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
                return -EINVAL;

        /* pointer types do not carry 32-bit bounds at the moment. */
        __mark_reg32_unbounded(dst_reg);

        if (sanitize_needed(opcode)) {
                ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
                                       &info, false);
                if (ret < 0)
                        return sanitize_err(env, insn, ret, off_reg, dst_reg);
        }

        switch (opcode) {
        case BPF_ADD:
                /* We can take a fixed offset as long as it doesn't overflow
                 * the s32 'off' field
                 */
                if (known && (ptr_reg->off + smin_val ==
                              (s64)(s32)(ptr_reg->off + smin_val))) {
                        /* pointer += K. Accumulate it into fixed offset */
                        dst_reg->smin_value = smin_ptr;
                        dst_reg->smax_value = smax_ptr;
                        dst_reg->umin_value = umin_ptr;
                        dst_reg->umax_value = umax_ptr;
                        dst_reg->var_off = ptr_reg->var_off;
                        dst_reg->off = ptr_reg->off + smin_val;
                        dst_reg->raw = ptr_reg->raw;
                        break;
                }
                /* A new variable offset is created. Note that off_reg->off
                 * == 0, since it's a scalar.
                 * dst_reg gets the pointer type and since some positive
                 * integer value was added to the pointer, give it a new 'id'
                 * if it's a PTR_TO_PACKET.
                 * this creates a new 'base' pointer, off_reg (variable) gets
                 * added into the variable offset, and we copy the fixed offset
                 * from ptr_reg.
                 */
                if (signed_add_overflows(smin_ptr, smin_val) ||
                    signed_add_overflows(smax_ptr, smax_val)) {
                        dst_reg->smin_value = S64_MIN;
                        dst_reg->smax_value = S64_MAX;
                } else {
                        dst_reg->smin_value = smin_ptr + smin_val;
                        dst_reg->smax_value = smax_ptr + smax_val;
                }
                if (umin_ptr + umin_val < umin_ptr ||
                    umax_ptr + umax_val < umax_ptr) {
                        dst_reg->umin_value = 0;
                        dst_reg->umax_value = U64_MAX;
                } else {
                        dst_reg->umin_value = umin_ptr + umin_val;
                        dst_reg->umax_value = umax_ptr + umax_val;
                }
                dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
                dst_reg->off = ptr_reg->off;
                dst_reg->raw = ptr_reg->raw;
                if (reg_is_pkt_pointer(ptr_reg)) {
                        dst_reg->id = ++env->id_gen;
                        /* something was added to pkt_ptr, set range to zero */
                        memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
                }
                break;
        case BPF_SUB:
                if (dst_reg == off_reg) {
                        /* scalar -= pointer. Creates an unknown scalar */
                        verbose(env, "R%d tried to subtract pointer from scalar\n",
                                dst);
                        return -EACCES;
                }
                /* We don't allow subtraction from FP, because (according to
                 * the test_verifier.c test "invalid fp arithmetic") JITs might
                 * not be able to deal with it.
                 */
                if (ptr_reg->type == PTR_TO_STACK) {
                        verbose(env, "R%d subtraction from stack pointer prohibited\n",
                                dst);
                        return -EACCES;
                }
                if (known && (ptr_reg->off - smin_val ==
                              (s64)(s32)(ptr_reg->off - smin_val))) {
                        /* pointer -= K. Subtract it from fixed offset */
                        dst_reg->smin_value = smin_ptr;
                        dst_reg->smax_value = smax_ptr;
                        dst_reg->umin_value = umin_ptr;
                        dst_reg->umax_value = umax_ptr;
                        dst_reg->var_off = ptr_reg->var_off;
                        dst_reg->id = ptr_reg->id;
                        dst_reg->off = ptr_reg->off - smin_val;
                        dst_reg->raw = ptr_reg->raw;
                        break;
                }
                /* A new variable offset is created. If the subtrahend is known
                 * nonnegative, then any reg->range we had before is still good.
                 */
                if (signed_sub_overflows(smin_ptr, smax_val) ||
                    signed_sub_overflows(smax_ptr, smin_val)) {
                        /* Overflow possible, we know nothing */
                        dst_reg->smin_value = S64_MIN;
                        dst_reg->smax_value = S64_MAX;
                } else {
                        dst_reg->smin_value = smin_ptr - smax_val;
                        dst_reg->smax_value = smax_ptr - smin_val;
                }
                if (umin_ptr < umax_val) {
                        /* Overflow possible, we know nothing */
                        dst_reg->umin_value = 0;
                        dst_reg->umax_value = U64_MAX;
                } else {
                        /* Cannot overflow (as long as bounds are consistent) */
                        dst_reg->umin_value = umin_ptr - umax_val;
                        dst_reg->umax_value = umax_ptr - umin_val;
                }
                dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
                dst_reg->off = ptr_reg->off;
                dst_reg->raw = ptr_reg->raw;
                if (reg_is_pkt_pointer(ptr_reg)) {
                        dst_reg->id = ++env->id_gen;
                        /* A negative subtrahend may have moved the pointer
                         * forward, so the old range is no longer safe; set
                         * it to zero.
                         */
                        if (smin_val < 0)
                                memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
                }
                break;
        case BPF_AND:
        case BPF_OR:
        case BPF_XOR:
                /* bitwise ops on pointers are troublesome, prohibit. */
                verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
                        dst, bpf_alu_string[opcode >> 4]);
                return -EACCES;
        default:
                /* other operators (e.g. MUL,LSH) produce non-pointer results */
                verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
                        dst, bpf_alu_string[opcode >> 4]);
                return -EACCES;
        }

        if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
                return -EINVAL;

        __update_reg_bounds(dst_reg);
        __reg_deduce_bounds(dst_reg);
        __reg_bound_offset(dst_reg);

        if (sanitize_check_bounds(env, insn, dst_reg) < 0)
                return -EACCES;
        if (sanitize_needed(opcode)) {
                ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
                                       &info, true);
                if (ret < 0)
                        return sanitize_err(env, insn, ret, off_reg, dst_reg);
        }

        return 0;
}

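/* Example of the two BPF_ADD paths above: with a known offset,
 *
 *   r1 = r10;       // r1 = fp, off = 0
 *   r1 += -16;      // src is a known scalar, smin_val == smax_val == -16
 *
 * the constant folds into the fixed offset and r1 becomes fp-16, with
 * var_off and the scalar bounds copied from the pointer. Adding an
 * unknown scalar in [0, 64] instead keeps the pointer's fixed offset and
 * accumulates [0, 64] into the result's umin/umax bounds and var_off
 * (packet pointers additionally get a fresh id).
 */
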
static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
                                 struct bpf_reg_state *src_reg)
{
        s32 smin_val = src_reg->s32_min_value;
        s32 smax_val = src_reg->s32_max_value;
        u32 umin_val = src_reg->u32_min_value;
        u32 umax_val = src_reg->u32_max_value;

        if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
            signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
                dst_reg->s32_min_value = S32_MIN;
                dst_reg->s32_max_value = S32_MAX;
        } else {
                dst_reg->s32_min_value += smin_val;
                dst_reg->s32_max_value += smax_val;
        }
        if (dst_reg->u32_min_value + umin_val < umin_val ||
            dst_reg->u32_max_value + umax_val < umax_val) {
                dst_reg->u32_min_value = 0;
                dst_reg->u32_max_value = U32_MAX;
        } else {
                dst_reg->u32_min_value += umin_val;
                dst_reg->u32_max_value += umax_val;
        }
}

static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
                               struct bpf_reg_state *src_reg)
{
        s64 smin_val = src_reg->smin_value;
        s64 smax_val = src_reg->smax_value;
        u64 umin_val = src_reg->umin_value;
        u64 umax_val = src_reg->umax_value;

        if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
            signed_add_overflows(dst_reg->smax_value, smax_val)) {
                dst_reg->smin_value = S64_MIN;
                dst_reg->smax_value = S64_MAX;
        } else {
                dst_reg->smin_value += smin_val;
                dst_reg->smax_value += smax_val;
        }
        if (dst_reg->umin_value + umin_val < umin_val ||
            dst_reg->umax_value + umax_val < umax_val) {
                dst_reg->umin_value = 0;
                dst_reg->umax_value = U64_MAX;
        } else {
                dst_reg->umin_value += umin_val;
                dst_reg->umax_value += umax_val;
        }
}

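/* Example: adding a scalar in [3, 5] to one in [10, 20] gives [13, 25] in
 * both the signed and unsigned domains. If dst_reg->smax_value were
 * S64_MAX - 1 and smax_val were 2, signed_add_overflows() fires and the
 * signed bounds widen to [S64_MIN, S64_MAX]; precision may be recovered
 * later from the tnum via __update_reg_bounds().
 */
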
static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
                                 struct bpf_reg_state *src_reg)
{
        s32 smin_val = src_reg->s32_min_value;
        s32 smax_val = src_reg->s32_max_value;
        u32 umin_val = src_reg->u32_min_value;
        u32 umax_val = src_reg->u32_max_value;

        if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
            signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
                /* Overflow possible, we know nothing */
                dst_reg->s32_min_value = S32_MIN;
                dst_reg->s32_max_value = S32_MAX;
        } else {
                dst_reg->s32_min_value -= smax_val;
                dst_reg->s32_max_value -= smin_val;
        }
        if (dst_reg->u32_min_value < umax_val) {
                /* Overflow possible, we know nothing */
                dst_reg->u32_min_value = 0;
                dst_reg->u32_max_value = U32_MAX;
        } else {
                /* Cannot overflow (as long as bounds are consistent) */
                dst_reg->u32_min_value -= umax_val;
                dst_reg->u32_max_value -= umin_val;
        }
}

static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
                               struct bpf_reg_state *src_reg)
{
        s64 smin_val = src_reg->smin_value;
        s64 smax_val = src_reg->smax_value;
        u64 umin_val = src_reg->umin_value;
        u64 umax_val = src_reg->umax_value;

        if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
            signed_sub_overflows(dst_reg->smax_value, smin_val)) {
                /* Overflow possible, we know nothing */
                dst_reg->smin_value = S64_MIN;
                dst_reg->smax_value = S64_MAX;
        } else {
                dst_reg->smin_value -= smax_val;
                dst_reg->smax_value -= smin_val;
        }
        if (dst_reg->umin_value < umax_val) {
                /* Overflow possible, we know nothing */
                dst_reg->umin_value = 0;
                dst_reg->umax_value = U64_MAX;
        } else {
                /* Cannot overflow (as long as bounds are consistent) */
                dst_reg->umin_value -= umax_val;
                dst_reg->umax_value -= umin_val;
        }
}

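/* Example: [10, 20] - [3, 5] gives smin_value = 10 - 5 = 5 and
 * smax_value = 20 - 3 = 17. For the unsigned bounds, [2, 20] - [3, 5]
 * has umin_value (2) < umax_val (5), so the subtraction may wrap and the
 * unsigned bounds collapse to [0, U64_MAX].
 */
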
static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg,
                                 struct bpf_reg_state *src_reg)
{
        s32 smin_val = src_reg->s32_min_value;
        u32 umin_val = src_reg->u32_min_value;
        u32 umax_val = src_reg->u32_max_value;

        if (smin_val < 0 || dst_reg->s32_min_value < 0) {
                /* Ain't nobody got time to multiply that sign */
                __mark_reg32_unbounded(dst_reg);
                return;
        }
        /* Both values are positive, so we can work with unsigned and
         * copy the result to signed (unless it exceeds S32_MAX).
         */
        if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) {
                /* Potential overflow, we know nothing */
                __mark_reg32_unbounded(dst_reg);
                return;
        }
        dst_reg->u32_min_value *= umin_val;
        dst_reg->u32_max_value *= umax_val;
        if (dst_reg->u32_max_value > S32_MAX) {
                /* Overflow possible, we know nothing */
                dst_reg->s32_min_value = S32_MIN;
                dst_reg->s32_max_value = S32_MAX;
        } else {
                dst_reg->s32_min_value = dst_reg->u32_min_value;
                dst_reg->s32_max_value = dst_reg->u32_max_value;
        }
}

static void scalar_min_max_mul(struct bpf_reg_state *dst_reg,
                               struct bpf_reg_state *src_reg)
{
        s64 smin_val = src_reg->smin_value;
        u64 umin_val = src_reg->umin_value;
        u64 umax_val = src_reg->umax_value;

        if (smin_val < 0 || dst_reg->smin_value < 0) {
                /* Ain't nobody got time to multiply that sign */
                __mark_reg64_unbounded(dst_reg);
                return;
        }
        /* Both values are positive, so we can work with unsigned and
         * copy the result to signed (unless it exceeds S64_MAX).
         */
        if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
                /* Potential overflow, we know nothing */
                __mark_reg64_unbounded(dst_reg);
                return;
        }
        dst_reg->umin_value *= umin_val;
        dst_reg->umax_value *= umax_val;
        if (dst_reg->umax_value > S64_MAX) {
                /* Overflow possible, we know nothing */
                dst_reg->smin_value = S64_MIN;
                dst_reg->smax_value = S64_MAX;
        } else {
                dst_reg->smin_value = dst_reg->umin_value;
                dst_reg->smax_value = dst_reg->umax_value;
        }
}

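/* Example: [2, 3] * [4, 5] gives [8, 15]. The U32_MAX guard is what makes
 * the u64 multiplication safe: with both operands at most 2^32 - 1 the
 * product is below 2^64, so umin_value * umin_val and
 * umax_value * umax_val cannot wrap. The 32-bit variant uses U16_MAX for
 * the same reason.
 */
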
static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
                                 struct bpf_reg_state *src_reg)
{
        bool src_known = tnum_subreg_is_const(src_reg->var_off);
        bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
        struct tnum var32_off = tnum_subreg(dst_reg->var_off);
        s32 smin_val = src_reg->s32_min_value;
        u32 umax_val = src_reg->u32_max_value;

        if (src_known && dst_known) {
                __mark_reg32_known(dst_reg, var32_off.value);
                return;
        }

        /* We get our minimum from the var_off, since that's inherently
         * bitwise. Our maximum is the minimum of the operands' maxima.
         */
        dst_reg->u32_min_value = var32_off.value;
        dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val);
        if (dst_reg->s32_min_value < 0 || smin_val < 0) {
                /* Lose signed bounds when ANDing negative numbers,
                 * ain't nobody got time for that.
                 */
                dst_reg->s32_min_value = S32_MIN;
                dst_reg->s32_max_value = S32_MAX;
        } else {
                /* ANDing two positives gives a positive, so safe to
                 * cast result into s32.
                 */
                dst_reg->s32_min_value = dst_reg->u32_min_value;
                dst_reg->s32_max_value = dst_reg->u32_max_value;
        }
}

static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
                               struct bpf_reg_state *src_reg)
{
        bool src_known = tnum_is_const(src_reg->var_off);
        bool dst_known = tnum_is_const(dst_reg->var_off);
        s64 smin_val = src_reg->smin_value;
        u64 umax_val = src_reg->umax_value;

        if (src_known && dst_known) {
                __mark_reg_known(dst_reg, dst_reg->var_off.value);
                return;
        }

        /* We get our minimum from the var_off, since that's inherently
         * bitwise. Our maximum is the minimum of the operands' maxima.
         */
        dst_reg->umin_value = dst_reg->var_off.value;
        dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
        if (dst_reg->smin_value < 0 || smin_val < 0) {
                /* Lose signed bounds when ANDing negative numbers,
                 * ain't nobody got time for that.
                 */
                dst_reg->smin_value = S64_MIN;
                dst_reg->smax_value = S64_MAX;
        } else {
                /* ANDing two positives gives a positive, so safe to
                 * cast result into s64.
                 */
                dst_reg->smin_value = dst_reg->umin_value;
                dst_reg->smax_value = dst_reg->umax_value;
        }
        /* We may learn something more from the var_off */
        __update_reg_bounds(dst_reg);
}

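/* Example: "r1 &= 0xff" with r1 otherwise unknown: the caller's tnum_and()
 * leaves var_off with value 0 and mask 0xff, so umin_value becomes 0 and
 * umax_value becomes min(U64_MAX, 0xff) = 0xff, i.e. r1 is in [0, 0xff]
 * with the upper 56 bits known zero.
 */
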
static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
                                struct bpf_reg_state *src_reg)
{
        bool src_known = tnum_subreg_is_const(src_reg->var_off);
        bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
        struct tnum var32_off = tnum_subreg(dst_reg->var_off);
        s32 smin_val = src_reg->s32_min_value;
        u32 umin_val = src_reg->u32_min_value;

        if (src_known && dst_known) {
                __mark_reg32_known(dst_reg, var32_off.value);
                return;
        }

        /* We get our maximum from the var_off, and our minimum is the
         * maximum of the operands' minima
         */
        dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val);
        dst_reg->u32_max_value = var32_off.value | var32_off.mask;
        if (dst_reg->s32_min_value < 0 || smin_val < 0) {
                /* Lose signed bounds when ORing negative numbers,
                 * ain't nobody got time for that.
                 */
                dst_reg->s32_min_value = S32_MIN;
                dst_reg->s32_max_value = S32_MAX;
        } else {
                /* ORing two positives gives a positive, so safe to
                 * cast result into s32.
                 */
                dst_reg->s32_min_value = dst_reg->u32_min_value;
                dst_reg->s32_max_value = dst_reg->u32_max_value;
        }
}

static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
                              struct bpf_reg_state *src_reg)
{
        bool src_known = tnum_is_const(src_reg->var_off);
        bool dst_known = tnum_is_const(dst_reg->var_off);
        s64 smin_val = src_reg->smin_value;
        u64 umin_val = src_reg->umin_value;

        if (src_known && dst_known) {
                __mark_reg_known(dst_reg, dst_reg->var_off.value);
                return;
        }

        /* We get our maximum from the var_off, and our minimum is the
         * maximum of the operands' minima
         */
        dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
        dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
        if (dst_reg->smin_value < 0 || smin_val < 0) {
                /* Lose signed bounds when ORing negative numbers,
                 * ain't nobody got time for that.
                 */
                dst_reg->smin_value = S64_MIN;
                dst_reg->smax_value = S64_MAX;
        } else {
                /* ORing two positives gives a positive, so safe to
                 * cast result into s64.
                 */
                dst_reg->smin_value = dst_reg->umin_value;
                dst_reg->smax_value = dst_reg->umax_value;
        }
        /* We may learn something more from the var_off */
        __update_reg_bounds(dst_reg);
}

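/* Example: "r1 |= 0x3" with r1 in [16, 31] (var_off value 16, mask 15):
 * tnum_or() yields value 19, mask 12, so umax_value = 19 | 12 = 31, and
 * __update_reg_bounds() then raises umin_value from max(16, 3) = 16 to
 * var_off.value = 19, leaving r1 in [19, 31].
 */
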
static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
                                 struct bpf_reg_state *src_reg)
{
        bool src_known = tnum_subreg_is_const(src_reg->var_off);
        bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
        struct tnum var32_off = tnum_subreg(dst_reg->var_off);
        s32 smin_val = src_reg->s32_min_value;

        if (src_known && dst_known) {
                __mark_reg32_known(dst_reg, var32_off.value);
                return;
        }

        /* We get both minimum and maximum from the var32_off. */
        dst_reg->u32_min_value = var32_off.value;
        dst_reg->u32_max_value = var32_off.value | var32_off.mask;

        if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
                /* XORing two positive sign numbers gives a positive,
                 * so safe to cast u32 result into s32.
                 */
                dst_reg->s32_min_value = dst_reg->u32_min_value;
                dst_reg->s32_max_value = dst_reg->u32_max_value;
        } else {
                dst_reg->s32_min_value = S32_MIN;
                dst_reg->s32_max_value = S32_MAX;
        }
}

static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
                               struct bpf_reg_state *src_reg)
{
        bool src_known = tnum_is_const(src_reg->var_off);
        bool dst_known = tnum_is_const(dst_reg->var_off);
        s64 smin_val = src_reg->smin_value;

        if (src_known && dst_known) {
                /* dst_reg->var_off.value has been updated earlier */
                __mark_reg_known(dst_reg, dst_reg->var_off.value);
                return;
        }

        /* We get both minimum and maximum from the var_off. */
        dst_reg->umin_value = dst_reg->var_off.value;
        dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;

        if (dst_reg->smin_value >= 0 && smin_val >= 0) {
                /* XORing two positive sign numbers gives a positive,
                 * so safe to cast u64 result into s64.
                 */
                dst_reg->smin_value = dst_reg->umin_value;
                dst_reg->smax_value = dst_reg->umax_value;
        } else {
                dst_reg->smin_value = S64_MIN;
                dst_reg->smax_value = S64_MAX;
        }

        __update_reg_bounds(dst_reg);
}

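/* Example: with both operands known, say 0b1010 ^ 0b0110, the caller's
 * tnum_xor() already produced the constant 0b1100 and we just mark the
 * register known. Otherwise the bounds come straight from the tnum:
 * umin_value = var_off.value and umax_value = var_off.value | var_off.mask.
 */
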
static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
                                   u64 umin_val, u64 umax_val)
{
        /* We lose all sign bit information (except what we can pick
         * up from var_off)
         */
        dst_reg->s32_min_value = S32_MIN;
        dst_reg->s32_max_value = S32_MAX;
        /* If we might shift our top bit out, then we know nothing */
        if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) {
                dst_reg->u32_min_value = 0;
                dst_reg->u32_max_value = U32_MAX;
        } else {
                dst_reg->u32_min_value <<= umin_val;
                dst_reg->u32_max_value <<= umax_val;
        }
}

static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
                                 struct bpf_reg_state *src_reg)
{
        u32 umax_val = src_reg->u32_max_value;
        u32 umin_val = src_reg->u32_min_value;
        /* u32 alu operation will zext upper bits */
        struct tnum subreg = tnum_subreg(dst_reg->var_off);

        __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);
        dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val));
        /* Not required but being careful mark reg64 bounds as unknown so
         * that we are forced to pick them up from tnum and zext later and
         * if some path skips this step we are still safe.
         */
        __mark_reg64_unbounded(dst_reg);
        __update_reg32_bounds(dst_reg);
}

static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg,
                                   u64 umin_val, u64 umax_val)
{
        /* Special case <<32 because it is a common compiler pattern to sign
         * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are
         * positive we know this shift will also be positive so we can track
         * bounds correctly. Otherwise we lose all sign bit information except
         * what we can pick up from var_off. Perhaps we can generalize this
         * later to shifts of any length.
         */
        if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0)
                dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32;
        else
                dst_reg->smax_value = S64_MAX;

        if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0)
                dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32;
        else
                dst_reg->smin_value = S64_MIN;

        /* If we might shift our top bit out, then we know nothing */
        if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
                dst_reg->umin_value = 0;
                dst_reg->umax_value = U64_MAX;
        } else {
                dst_reg->umin_value <<= umin_val;
                dst_reg->umax_value <<= umax_val;
        }
}

static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg,
                               struct bpf_reg_state *src_reg)
{
        u64 umax_val = src_reg->umax_value;
        u64 umin_val = src_reg->umin_value;

        /* scalar64 calc uses 32bit unshifted bounds so must be called first */
        __scalar64_min_max_lsh(dst_reg, umin_val, umax_val);
        __scalar32_min_max_lsh(dst_reg, umin_val, umax_val);

        dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
        /* We may learn something more from the var_off */
        __update_reg_bounds(dst_reg);
}

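/* Example: the common sign-extension idiom "r1 <<= 32; r1 s>>= 32" is
 * tracked precisely when the 32-bit bounds are nonnegative: with s32
 * bounds [0, 100], the shift left by 32 gives smin_value = 0 and
 * smax_value = 100LL << 32, instead of losing all sign information.
 */
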
static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg,
                                 struct bpf_reg_state *src_reg)
{
        struct tnum subreg = tnum_subreg(dst_reg->var_off);
        u32 umax_val = src_reg->u32_max_value;
        u32 umin_val = src_reg->u32_min_value;

        /* BPF_RSH is an unsigned shift. If the value in dst_reg might
         * be negative, then either:
         * 1) src_reg might be zero, so the sign bit of the result is
         *    unknown, so we lose our signed bounds
         * 2) it's known negative, thus the unsigned bounds capture the
         *    signed bounds
         * 3) the signed bounds cross zero, so they tell us nothing
         *    about the result
         * If the value in dst_reg is known nonnegative, then again the
         * unsigned bounds capture the signed bounds.
         * Thus, in all cases it suffices to blow away our signed bounds
         * and rely on inferring new ones from the unsigned bounds and
         * var_off of the result.
         */
        dst_reg->s32_min_value = S32_MIN;
        dst_reg->s32_max_value = S32_MAX;

        dst_reg->var_off = tnum_rshift(subreg, umin_val);
        dst_reg->u32_min_value >>= umax_val;
        dst_reg->u32_max_value >>= umin_val;

        __mark_reg64_unbounded(dst_reg);
        __update_reg32_bounds(dst_reg);
}

static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg,
                               struct bpf_reg_state *src_reg)
{
        u64 umax_val = src_reg->umax_value;
        u64 umin_val = src_reg->umin_value;

        /* BPF_RSH is an unsigned shift. If the value in dst_reg might
         * be negative, then either:
         * 1) src_reg might be zero, so the sign bit of the result is
         *    unknown, so we lose our signed bounds
         * 2) it's known negative, thus the unsigned bounds capture the
         *    signed bounds
         * 3) the signed bounds cross zero, so they tell us nothing
         *    about the result
         * If the value in dst_reg is known nonnegative, then again the
         * unsigned bounds capture the signed bounds.
         * Thus, in all cases it suffices to blow away our signed bounds
         * and rely on inferring new ones from the unsigned bounds and
         * var_off of the result.
         */
        dst_reg->smin_value = S64_MIN;
        dst_reg->smax_value = S64_MAX;
        dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
        dst_reg->umin_value >>= umax_val;
        dst_reg->umax_value >>= umin_val;

        /* It's not easy to operate on alu32 bounds here because it depends
         * on bits being shifted in. Take easy way out and mark unbounded
         * so we can recalculate later from tnum.
         */
        __mark_reg32_unbounded(dst_reg);
        __update_reg_bounds(dst_reg);
}

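/* Example: "r1 >>= 4" with r1 in [0x100, 0x1ff] gives
 * umin_value = 0x100 >> 4 = 0x10 and umax_value = 0x1ff >> 4 = 0x1f.
 * The signed bounds are discarded and re-derived from these unsigned
 * bounds and the shifted tnum.
 */
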
static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg,
                                  struct bpf_reg_state *src_reg)
{
        u64 umin_val = src_reg->u32_min_value;

        /* Upon reaching here, src_known is true and
         * umax_val is equal to umin_val.
         */
        dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val);
        dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val);

        dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32);

        /* blow away the dst_reg umin_value/umax_value and rely on
         * dst_reg var_off to refine the result.
         */
        dst_reg->u32_min_value = 0;
        dst_reg->u32_max_value = U32_MAX;

        __mark_reg64_unbounded(dst_reg);
        __update_reg32_bounds(dst_reg);
}

static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg,
                                struct bpf_reg_state *src_reg)
{
        u64 umin_val = src_reg->umin_value;

        /* Upon reaching here, src_known is true and umax_val is equal
         * to umin_val.
         */
        dst_reg->smin_value >>= umin_val;
        dst_reg->smax_value >>= umin_val;

        dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64);

        /* blow away the dst_reg umin_value/umax_value and rely on
         * dst_reg var_off to refine the result.
         */
        dst_reg->umin_value = 0;
        dst_reg->umax_value = U64_MAX;

        /* It's not easy to operate on alu32 bounds here because it depends
         * on bits being shifted in from upper 32-bits. Take easy way out
         * and mark unbounded so we can recalculate later from tnum.
         */
        __mark_reg32_unbounded(dst_reg);
        __update_reg_bounds(dst_reg);
}

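/* Example: "r1 s>>= 4" (src is known, so umin_val == umax_val == 4) with
 * r1 in signed range [-64, 64] gives smin_value = -64 >> 4 = -4 and
 * smax_value = 64 >> 4 = 4; the unsigned bounds are reset and later
 * refined from the arithmetically shifted tnum.
 */
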
/* WARNING: This function does calculations on 64-bit values, but the actual
 * execution may occur on 32-bit values. Therefore, things like bitshifts
 * need extra checks in the 32-bit case.
 */
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                                      struct bpf_insn *insn,
                                      struct bpf_reg_state *dst_reg,
                                      struct bpf_reg_state src_reg)
{
        struct bpf_reg_state *regs = cur_regs(env);
        u8 opcode = BPF_OP(insn->code);
        bool src_known;
        s64 smin_val, smax_val;
        u64 umin_val, umax_val;
        s32 s32_min_val, s32_max_val;
        u32 u32_min_val, u32_max_val;
        u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
        bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
        int ret;

        smin_val = src_reg.smin_value;
        smax_val = src_reg.smax_value;
        umin_val = src_reg.umin_value;
        umax_val = src_reg.umax_value;

        s32_min_val = src_reg.s32_min_value;
        s32_max_val = src_reg.s32_max_value;
        u32_min_val = src_reg.u32_min_value;
        u32_max_val = src_reg.u32_max_value;

        if (alu32) {
                src_known = tnum_subreg_is_const(src_reg.var_off);
                if ((src_known &&
                     (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
                    s32_min_val > s32_max_val || u32_min_val > u32_max_val) {
                        /* Taint dst register if offset had invalid bounds
                         * derived from e.g. dead branches.
                         */
                        __mark_reg_unknown(env, dst_reg);
                        return 0;
                }
        } else {
                src_known = tnum_is_const(src_reg.var_off);
                if ((src_known &&
                     (smin_val != smax_val || umin_val != umax_val)) ||
                    smin_val > smax_val || umin_val > umax_val) {
                        /* Taint dst register if offset had invalid bounds
                         * derived from e.g. dead branches.
                         */
                        __mark_reg_unknown(env, dst_reg);
                        return 0;
                }
        }

        if (!src_known &&
            opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
                __mark_reg_unknown(env, dst_reg);
                return 0;
        }

        if (sanitize_needed(opcode)) {
                ret = sanitize_val_alu(env, insn);
                if (ret < 0)
                        return sanitize_err(env, insn, ret, NULL, NULL);
        }

        /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
         * There are two classes of instructions: For the first class we track
         * both alu32 and alu64 sign/unsigned bounds independently; this
         * provides the greatest amount of precision when alu operations are
         * mixed with jmp32 operations. These operations are BPF_ADD, BPF_SUB,
         * BPF_MUL, BPF_AND, BPF_OR, and BPF_XOR. This is possible because
         * these ops have fairly easy to understand and calculate behavior in
         * both 32-bit and 64-bit alu ops. See alu32 verifier tests for
         * examples. The second class of operations, BPF_LSH, BPF_RSH, and
         * BPF_ARSH, however are not so easy with regards to tracking
         * sign/unsigned bounds because the bits may cross subreg boundaries
         * in the alu64 case. When this happens we mark the reg unbounded in
         * the subreg bound space and use the resulting tnum to calculate an
         * approximation of the sign/unsigned bounds.
         */
        switch (opcode) {
        case BPF_ADD:
                scalar32_min_max_add(dst_reg, &src_reg);
                scalar_min_max_add(dst_reg, &src_reg);
                dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
                break;
        case BPF_SUB:
                scalar32_min_max_sub(dst_reg, &src_reg);
                scalar_min_max_sub(dst_reg, &src_reg);
                dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
                break;
        case BPF_MUL:
                dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
                scalar32_min_max_mul(dst_reg, &src_reg);
                scalar_min_max_mul(dst_reg, &src_reg);
                break;
        case BPF_AND:
                dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
                scalar32_min_max_and(dst_reg, &src_reg);
                scalar_min_max_and(dst_reg, &src_reg);
                break;
        case BPF_OR:
                dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
                scalar32_min_max_or(dst_reg, &src_reg);
                scalar_min_max_or(dst_reg, &src_reg);
                break;
        case BPF_XOR:
                dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
                scalar32_min_max_xor(dst_reg, &src_reg);
                scalar_min_max_xor(dst_reg, &src_reg);
                break;
        case BPF_LSH:
                if (umax_val >= insn_bitness) {
                        /* Shifts greater than 31 or 63 are undefined.
                         * This includes shifts by a negative number.
                         */
                        mark_reg_unknown(env, regs, insn->dst_reg);
                        break;
                }
                if (alu32)
                        scalar32_min_max_lsh(dst_reg, &src_reg);
                else
                        scalar_min_max_lsh(dst_reg, &src_reg);
                break;
        case BPF_RSH:
                if (umax_val >= insn_bitness) {
                        /* Shifts greater than 31 or 63 are undefined.
                         * This includes shifts by a negative number.
                         */
                        mark_reg_unknown(env, regs, insn->dst_reg);
                        break;
                }
                if (alu32)
                        scalar32_min_max_rsh(dst_reg, &src_reg);
                else
                        scalar_min_max_rsh(dst_reg, &src_reg);
                break;
        case BPF_ARSH:
                if (umax_val >= insn_bitness) {
                        /* Shifts greater than 31 or 63 are undefined.
                         * This includes shifts by a negative number.
                         */
                        mark_reg_unknown(env, regs, insn->dst_reg);
                        break;
                }
                if (alu32)
                        scalar32_min_max_arsh(dst_reg, &src_reg);
                else
                        scalar_min_max_arsh(dst_reg, &src_reg);
                break;
        default:
                mark_reg_unknown(env, regs, insn->dst_reg);
                break;
        }

        /* ALU32 ops are zero extended into 64bit register */
        if (alu32)
                zext_32_to_64(dst_reg);

        __update_reg_bounds(dst_reg);
        __reg_deduce_bounds(dst_reg);
        __reg_bound_offset(dst_reg);
        return 0;
}

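/* Example: for the alu32 instruction "w1 += w2" the 32-bit bounds are
 * computed first; zext_32_to_64() then zero-extends them, so a u32 result
 * of [5, 10] becomes the 64-bit range [5, 10] with the upper 32 bits
 * known zero.
 */
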
/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
 * and var_off.
 */
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                                   struct bpf_insn *insn)
{
        struct bpf_verifier_state *vstate = env->cur_state;
        struct bpf_func_state *state = vstate->frame[vstate->curframe];
        struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
        struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
        u8 opcode = BPF_OP(insn->code);
        int err;

        dst_reg = &regs[insn->dst_reg];
        src_reg = NULL;
        if (dst_reg->type != SCALAR_VALUE)
                ptr_reg = dst_reg;
        else
                /* Make sure ID is cleared otherwise dst_reg min/max could be
                 * incorrectly propagated into other registers by
                 * find_equal_scalars()
                 */
                dst_reg->id = 0;
        if (BPF_SRC(insn->code) == BPF_X) {
                src_reg = &regs[insn->src_reg];
                if (src_reg->type != SCALAR_VALUE) {
                        if (dst_reg->type != SCALAR_VALUE) {
                                /* Combining two pointers by any ALU op yields
                                 * an arbitrary scalar. Disallow all math except
                                 * pointer subtraction
                                 */
                                if (opcode == BPF_SUB && env->allow_ptr_leaks) {
                                        mark_reg_unknown(env, regs, insn->dst_reg);
                                        return 0;
                                }
                                verbose(env, "R%d pointer %s pointer prohibited\n",
                                        insn->dst_reg,
                                        bpf_alu_string[opcode >> 4]);
                                return -EACCES;
                        } else {
                                /* scalar += pointer
                                 * This is legal, but we have to reverse our
                                 * src/dest handling in computing the range
                                 */
                                err = mark_chain_precision(env, insn->dst_reg);
                                if (err)
                                        return err;
                                return adjust_ptr_min_max_vals(env, insn,
                                                               src_reg, dst_reg);
                        }
                } else if (ptr_reg) {
                        /* pointer += scalar */
                        err = mark_chain_precision(env, insn->src_reg);
                        if (err)
                                return err;
                        return adjust_ptr_min_max_vals(env, insn,
                                                       dst_reg, src_reg);
                }
        } else {
                /* Pretend the src is a reg with a known value, since we only
                 * need to be able to read from this state.
                 */
                off_reg.type = SCALAR_VALUE;
                __mark_reg_known(&off_reg, insn->imm);
                src_reg = &off_reg;
                if (ptr_reg) /* pointer += K */
                        return adjust_ptr_min_max_vals(env, insn,
                                                       ptr_reg, src_reg);
        }

        /* Got here implies adding two SCALAR_VALUEs */
        if (WARN_ON_ONCE(ptr_reg)) {
                print_verifier_state(env, state, true);
                verbose(env, "verifier internal error: unexpected ptr_reg\n");
                return -EINVAL;
        }
        if (WARN_ON(!src_reg)) {
                print_verifier_state(env, state, true);
                verbose(env, "verifier internal error: no src_reg\n");
                return -EINVAL;
        }
        return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
}

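/* Example of the dispatch above: "r1 += r2" with r1 a scalar and r2 a map
 * value pointer is the "scalar += pointer" case, so the operands are
 * swapped before calling adjust_ptr_min_max_vals(). "r1 += 4" with r1 a
 * pointer fabricates off_reg as a known scalar 4 and takes the
 * "pointer += K" path.
 */
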
/* check validity of 32-bit and 64-bit arithmetic operations */
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
        struct bpf_reg_state *regs = cur_regs(env);
        u8 opcode = BPF_OP(insn->code);
        int err;

        if (opcode == BPF_END || opcode == BPF_NEG) {
                if (opcode == BPF_NEG) {
                        if (BPF_SRC(insn->code) != 0 ||
                            insn->src_reg != BPF_REG_0 ||
                            insn->off != 0 || insn->imm != 0) {
                                verbose(env, "BPF_NEG uses reserved fields\n");
                                return -EINVAL;
                        }
                } else {
                        if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
                            (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
                            BPF_CLASS(insn->code) == BPF_ALU64) {
                                verbose(env, "BPF_END uses reserved fields\n");
                                return -EINVAL;
                        }
                }

                /* check src operand */
                err = check_reg_arg(env, insn->dst_reg, SRC_OP);
                if (err)
                        return err;

                if (is_pointer_value(env, insn->dst_reg)) {
                        verbose(env, "R%d pointer arithmetic prohibited\n",
                                insn->dst_reg);
                        return -EACCES;
                }

                /* check dest operand */
                err = check_reg_arg(env, insn->dst_reg, DST_OP);
                if (err)
                        return err;

        } else if (opcode == BPF_MOV) {

                if (BPF_SRC(insn->code) == BPF_X) {
                        if (insn->imm != 0 || insn->off != 0) {
                                verbose(env, "BPF_MOV uses reserved fields\n");
                                return -EINVAL;
                        }

                        /* check src operand */
                        err = check_reg_arg(env, insn->src_reg, SRC_OP);
                        if (err)
                                return err;
                } else {
                        if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
                                verbose(env, "BPF_MOV uses reserved fields\n");
                                return -EINVAL;
                        }
                }

                /* check dest operand, mark as required later */
                err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
                if (err)
                        return err;

                if (BPF_SRC(insn->code) == BPF_X) {
                        struct bpf_reg_state *src_reg = regs + insn->src_reg;
                        struct bpf_reg_state *dst_reg = regs + insn->dst_reg;

                        if (BPF_CLASS(insn->code) == BPF_ALU64) {
                                /* case: R1 = R2
                                 * copy register state to dest reg
                                 */
                                if (src_reg->type == SCALAR_VALUE && !src_reg->id)
                                        /* Assign src and dst registers the same ID
                                         * that will be used by find_equal_scalars()
                                         * to propagate min/max range.
                                         */
                                        src_reg->id = ++env->id_gen;
                                *dst_reg = *src_reg;
                                dst_reg->live |= REG_LIVE_WRITTEN;
                                dst_reg->subreg_def = DEF_NOT_SUBREG;
                        } else {
                                /* R1 = (u32) R2 */
                                if (is_pointer_value(env, insn->src_reg)) {
                                        verbose(env,
                                                "R%d partial copy of pointer\n",
                                                insn->src_reg);
                                        return -EACCES;
                                } else if (src_reg->type == SCALAR_VALUE) {
                                        *dst_reg = *src_reg;
                                        /* Make sure ID is cleared otherwise
                                         * dst_reg min/max could be incorrectly
                                         * propagated into src_reg by
                                         * find_equal_scalars()
                                         */
                                        dst_reg->id = 0;
                                        dst_reg->live |= REG_LIVE_WRITTEN;
                                        dst_reg->subreg_def = env->insn_idx + 1;
                                } else {
                                        mark_reg_unknown(env, regs,
                                                         insn->dst_reg);
                                }
                                zext_32_to_64(dst_reg);

                                __update_reg_bounds(dst_reg);
                                __reg_deduce_bounds(dst_reg);
                                __reg_bound_offset(dst_reg);
                        }
                } else {
                        /* case: R = imm
                         * remember the value we stored into this reg
                         */
                        /* clear any state __mark_reg_known doesn't set */
                        mark_reg_unknown(env, regs, insn->dst_reg);
                        regs[insn->dst_reg].type = SCALAR_VALUE;
                        if (BPF_CLASS(insn->code) == BPF_ALU64) {
                                __mark_reg_known(regs + insn->dst_reg,
                                                 insn->imm);
                        } else {
                                __mark_reg_known(regs + insn->dst_reg,
                                                 (u32)insn->imm);
                        }
                }

        } else if (opcode > BPF_END) {
                verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
                return -EINVAL;

        } else {        /* all other ALU ops: and, sub, xor, add, ... */

                if (BPF_SRC(insn->code) == BPF_X) {
                        if (insn->imm != 0 || insn->off != 0) {
                                verbose(env, "BPF_ALU uses reserved fields\n");
                                return -EINVAL;
                        }
                        /* check src1 operand */
                        err = check_reg_arg(env, insn->src_reg, SRC_OP);
                        if (err)
                                return err;
                } else {
                        if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
                                verbose(env, "BPF_ALU uses reserved fields\n");
                                return -EINVAL;
                        }
                }

                /* check src2 operand */
                err = check_reg_arg(env, insn->dst_reg, SRC_OP);
                if (err)
                        return err;

                if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
                    BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
                        verbose(env, "div by zero\n");
                        return -EINVAL;
                }

                if ((opcode == BPF_LSH || opcode == BPF_RSH ||
                     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
                        int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;

                        if (insn->imm < 0 || insn->imm >= size) {
                                verbose(env, "invalid shift %d\n", insn->imm);
                                return -EINVAL;
                        }
                }

                /* check dest operand */
                err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
                if (err)
                        return err;

                return adjust_reg_min_max_vals(env, insn);
        }

        return 0;
}

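/* Example: the 32-bit instruction "w1 <<= 32" is rejected here with
 * "invalid shift 32", since for BPF_ALU the permitted immediate shift
 * range is [0, 31]; the 64-bit "r1 <<= 32" is fine.
 */
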
static void __find_good_pkt_pointers(struct bpf_func_state *state,
                                     struct bpf_reg_state *dst_reg,
                                     enum bpf_reg_type type, int new_range)
{
        struct bpf_reg_state *reg;
        int i;

        for (i = 0; i < MAX_BPF_REG; i++) {
                reg = &state->regs[i];
                if (reg->type == type && reg->id == dst_reg->id)
                        /* keep the maximum range already checked */
                        reg->range = max(reg->range, new_range);
        }

        bpf_for_each_spilled_reg(i, state, reg) {
                if (!reg)
                        continue;
                if (reg->type == type && reg->id == dst_reg->id)
                        reg->range = max(reg->range, new_range);
        }
}

static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
                                   struct bpf_reg_state *dst_reg,
                                   enum bpf_reg_type type,
                                   bool range_right_open)
{
        int new_range, i;

        if (dst_reg->off < 0 ||
            (dst_reg->off == 0 && range_right_open))
                /* This doesn't give us any range */
                return;

        if (dst_reg->umax_value > MAX_PACKET_OFF ||
            dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
                /* Risk of overflow. For instance, ptr + (1<<63) may be less
                 * than pkt_end, but that's because it's also less than pkt.
                 */
                return;

        new_range = dst_reg->off;
        if (range_right_open)
                new_range++;

        /* Examples for register markings:
         *
         * pkt_data in dst register:
         *
         *   r2 = r3;
         *   r2 += 8;
         *   if (r2 > pkt_end) goto <handle exception>
         *   <access okay>
         *
         *   r2 = r3;
         *   r2 += 8;
         *   if (r2 < pkt_end) goto <access okay>
         *   <handle exception>
         *
         *   Where:
         *     r2 == dst_reg, pkt_end == src_reg
         *     r2=pkt(id=n,off=8,r=0)
         *     r3=pkt(id=n,off=0,r=0)
         *
         * pkt_data in src register:
         *
         *   r2 = r3;
         *   r2 += 8;
         *   if (pkt_end >= r2) goto <access okay>
         *   <handle exception>
         *
         *   r2 = r3;
         *   r2 += 8;
         *   if (pkt_end <= r2) goto <handle exception>
         *   <access okay>
         *
         *   Where:
         *     pkt_end == dst_reg, r2 == src_reg
         *     r2=pkt(id=n,off=8,r=0)
         *     r3=pkt(id=n,off=0,r=0)
         *
         * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
         * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
         * and [r3, r3 + 8-1) respectively is safe to access depending on
         * the check.
         */

        /* If our ids match, then we must have the same max_value. And we
         * don't care about the other reg's fixed offset, since if it's too big
         * the range won't allow anything.
         * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
         */
        for (i = 0; i <= vstate->curframe; i++)
                __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
                                         new_range);
}

static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
{
        struct tnum subreg = tnum_subreg(reg->var_off);
        s32 sval = (s32)val;

        switch (opcode) {
        case BPF_JEQ:
                if (tnum_is_const(subreg))
                        return !!tnum_equals_const(subreg, val);
                break;
        case BPF_JNE:
                if (tnum_is_const(subreg))
                        return !tnum_equals_const(subreg, val);
                break;
        case BPF_JSET:
                if ((~subreg.mask & subreg.value) & val)
                        return 1;
                if (!((subreg.mask | subreg.value) & val))
                        return 0;
                break;
        case BPF_JGT:
                if (reg->u32_min_value > val)
                        return 1;
                else if (reg->u32_max_value <= val)
                        return 0;
                break;
        case BPF_JSGT:
                if (reg->s32_min_value > sval)
                        return 1;
                else if (reg->s32_max_value <= sval)
                        return 0;
                break;
        case BPF_JLT:
                if (reg->u32_max_value < val)
                        return 1;
                else if (reg->u32_min_value >= val)
                        return 0;
                break;
        case BPF_JSLT:
                if (reg->s32_max_value < sval)
                        return 1;
                else if (reg->s32_min_value >= sval)
                        return 0;
                break;
        case BPF_JGE:
                if (reg->u32_min_value >= val)
                        return 1;
                else if (reg->u32_max_value < val)
                        return 0;
                break;
        case BPF_JSGE:
                if (reg->s32_min_value >= sval)
                        return 1;
                else if (reg->s32_max_value < sval)
                        return 0;
                break;
        case BPF_JLE:
                if (reg->u32_max_value <= val)
                        return 1;
                else if (reg->u32_min_value > val)
                        return 0;
                break;
        case BPF_JSLE:
                if (reg->s32_max_value <= sval)
                        return 1;
                else if (reg->s32_min_value > sval)
                        return 0;
                break;
        }

        return -1;
}

static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
{
        s64 sval = (s64)val;

        switch (opcode) {
        case BPF_JEQ:
                if (tnum_is_const(reg->var_off))
                        return !!tnum_equals_const(reg->var_off, val);
                break;
        case BPF_JNE:
                if (tnum_is_const(reg->var_off))
                        return !tnum_equals_const(reg->var_off, val);
                break;
        case BPF_JSET:
                if ((~reg->var_off.mask & reg->var_off.value) & val)
                        return 1;
                if (!((reg->var_off.mask | reg->var_off.value) & val))
                        return 0;
                break;
        case BPF_JGT:
                if (reg->umin_value > val)
                        return 1;
                else if (reg->umax_value <= val)
                        return 0;
                break;
        case BPF_JSGT:
                if (reg->smin_value > sval)
                        return 1;
                else if (reg->smax_value <= sval)
                        return 0;
                break;
        case BPF_JLT:
                if (reg->umax_value < val)
                        return 1;
                else if (reg->umin_value >= val)
                        return 0;
                break;
        case BPF_JSLT:
                if (reg->smax_value < sval)
                        return 1;
                else if (reg->smin_value >= sval)
                        return 0;
                break;
        case BPF_JGE:
                if (reg->umin_value >= val)
                        return 1;
                else if (reg->umax_value < val)
                        return 0;
                break;
        case BPF_JSGE:
                if (reg->smin_value >= sval)
                        return 1;
                else if (reg->smax_value < sval)
                        return 0;
                break;
        case BPF_JLE:
                if (reg->umax_value <= val)
                        return 1;
                else if (reg->umin_value > val)
                        return 0;
                break;
        case BPF_JSLE:
                if (reg->smax_value <= sval)
                        return 1;
                else if (reg->smin_value > sval)
                        return 0;
                break;
        }

        return -1;
}

/* compute branch direction of the expression "if (reg opcode val) goto target;"
 * and return:
 *  1 - branch will be taken and "goto target" will be executed
 *  0 - branch will not be taken and fall-through to next insn
 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value
 *      range [0,10]
 */
static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
                           bool is_jmp32)
{
        if (__is_pointer_value(false, reg)) {
                if (!reg_type_not_null(reg->type))
                        return -1;

                /* A valid (non-NULL) pointer can never equal zero, so
                 * tests against zero have a known outcome and can direct
                 * the branch.
                 */
                if (val != 0)
                        return -1;

                switch (opcode) {
                case BPF_JEQ:
                        return 0;
                case BPF_JNE:
                        return 1;
                default:
                        return -1;
                }
        }

        if (is_jmp32)
                return is_branch32_taken(reg, val, opcode);
        return is_branch64_taken(reg, val, opcode);
}

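/* Example: for a register known to lie in [5, 10], "if r1 > 4" always
 * takes the branch (umin_value > 4, return 1) and "if r1 > 10" never
 * does (umax_value <= 10, return 0); "if r1 < 7" returns -1 since both
 * outcomes are possible.
 */
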
static int flip_opcode(u32 opcode)
{
        /* How can we transform "a <op> b" into "b <op> a"? */
        static const u8 opcode_flip[16] = {
                /* these stay the same */
                [BPF_JEQ  >> 4] = BPF_JEQ,
                [BPF_JNE  >> 4] = BPF_JNE,
                [BPF_JSET >> 4] = BPF_JSET,
                /* these swap "lesser" and "greater" (L and G in the opcodes) */
                [BPF_JGE  >> 4] = BPF_JLE,
                [BPF_JGT  >> 4] = BPF_JLT,
                [BPF_JLE  >> 4] = BPF_JGE,
                [BPF_JLT  >> 4] = BPF_JGT,
                [BPF_JSGE >> 4] = BPF_JSLE,
                [BPF_JSGT >> 4] = BPF_JSLT,
                [BPF_JSLE >> 4] = BPF_JSGE,
                [BPF_JSLT >> 4] = BPF_JSGT
        };
        return opcode_flip[opcode >> 4];
}

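/* Example: when the constant sits in dst_reg, "if 5 s> r1" is rewritten
 * as "if r1 s< 5" by flipping BPF_JSGT to BPF_JSLT, so the bounds logic
 * only has to handle the variable-on-the-left form.
 */
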
static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
                                   struct bpf_reg_state *src_reg,
                                   u8 opcode)
{
        struct bpf_reg_state *pkt;

        if (src_reg->type == PTR_TO_PACKET_END) {
                pkt = dst_reg;
        } else if (dst_reg->type == PTR_TO_PACKET_END) {
                pkt = src_reg;
                opcode = flip_opcode(opcode);
        } else {
                return -1;
        }

        if (pkt->range >= 0)
                return -1;

        switch (opcode) {
        case BPF_JLE:
                /* pkt <= pkt_end */
                fallthrough;
        case BPF_JGT:
                /* pkt > pkt_end */
                if (pkt->range == BEYOND_PKT_END)
                        /* pkt has at least one extra byte beyond pkt_end */
                        return opcode == BPF_JGT;
                break;
        case BPF_JLT:
                /* pkt < pkt_end */
                fallthrough;
        case BPF_JGE:
                /* pkt >= pkt_end */
                if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
                        return opcode == BPF_JGE;
                break;
        }
        return -1;
}

/* Adjusts the register min/max values in the case that the dst_reg is the
 * variable register that we are working on, and src_reg is a constant or we're
 * simply doing a BPF_K check.
 * In JEQ/JNE cases we also adjust the var_off values.
 */
static void reg_set_min_max(struct bpf_reg_state *true_reg,
                            struct bpf_reg_state *false_reg,
                            u64 val, u32 val32,
                            u8 opcode, bool is_jmp32)
{
        struct tnum false_32off = tnum_subreg(false_reg->var_off);
        struct tnum false_64off = false_reg->var_off;
        struct tnum true_32off = tnum_subreg(true_reg->var_off);
        struct tnum true_64off = true_reg->var_off;
        s64 sval = (s64)val;
        s32 sval32 = (s32)val32;

        /* If the dst_reg is a pointer, we can't learn anything about its
         * variable offset from the compare (unless src_reg were a pointer into
         * the same object, but we don't bother with that).
         * Since false_reg and true_reg have the same type by construction, we
         * only need to check one of them for pointerness.
         */
        if (__is_pointer_value(false, false_reg))
                return;

        switch (opcode) {
        case BPF_JEQ:
        case BPF_JNE:
        {
                struct bpf_reg_state *reg =
                        opcode == BPF_JEQ ? true_reg : false_reg;

                /* JEQ/JNE comparison doesn't change the register equivalence.
                 * r1 = r2;
                 * if (r1 == 42) goto label;
                 * ...
                 * label: // here both r1 and r2 are known to be 42.
                 *
                 * Hence when marking register as known preserve its ID.
                 */
                if (is_jmp32)
                        __mark_reg32_known(reg, val32);
                else
                        ___mark_reg_known(reg, val);
                break;
        }
        case BPF_JSET:
                if (is_jmp32) {
                        false_32off = tnum_and(false_32off, tnum_const(~val32));
                        if (is_power_of_2(val32))
                                true_32off = tnum_or(true_32off,
                                                     tnum_const(val32));
                } else {
                        false_64off = tnum_and(false_64off, tnum_const(~val));
                        if (is_power_of_2(val))
                                true_64off = tnum_or(true_64off,
                                                     tnum_const(val));
                }
                break;
        case BPF_JGE:
        case BPF_JGT:
        {
                if (is_jmp32) {
                        u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1;
                        u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32;

                        false_reg->u32_max_value = min(false_reg->u32_max_value,
                                                       false_umax);
                        true_reg->u32_min_value = max(true_reg->u32_min_value,
                                                      true_umin);
                } else {
                        u64 false_umax = opcode == BPF_JGT ? val : val - 1;
                        u64 true_umin = opcode == BPF_JGT ? val + 1 : val;

                        false_reg->umax_value = min(false_reg->umax_value, false_umax);
                        true_reg->umin_value = max(true_reg->umin_value, true_umin);
                }
                break;
        }
        case BPF_JSGE:
        case BPF_JSGT:
        {
                if (is_jmp32) {
                        s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1;
                        s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32;

                        false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax);
                        true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin);
                } else {
                        s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
                        s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;

                        false_reg->smax_value = min(false_reg->smax_value, false_smax);
                        true_reg->smin_value = max(true_reg->smin_value, true_smin);
                }
                break;
        }
        case BPF_JLE:
        case BPF_JLT:
        {
                if (is_jmp32) {
                        u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1;
                        u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32;

                        false_reg->u32_min_value = max(false_reg->u32_min_value,
                                                       false_umin);
                        true_reg->u32_max_value = min(true_reg->u32_max_value,
                                                      true_umax);
                } else {
                        u64 false_umin = opcode == BPF_JLT ? val : val + 1;
                        u64 true_umax = opcode == BPF_JLT ? val - 1 : val;

                        false_reg->umin_value = max(false_reg->umin_value, false_umin);
                        true_reg->umax_value = min(true_reg->umax_value, true_umax);
                }
                break;
        }
        case BPF_JSLE:
        case BPF_JSLT:
        {
                if (is_jmp32) {
                        s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1;
                        s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32;

                        false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin);
                        true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax);
                } else {
                        s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
                        s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;

                        false_reg->smin_value = max(false_reg->smin_value, false_smin);
                        true_reg->smax_value = min(true_reg->smax_value, true_smax);
                }
                break;
        }
        default:
                return;
        }

        if (is_jmp32) {
                false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off),
                                             tnum_subreg(false_32off));
                true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
                                            tnum_subreg(true_32off));
                __reg_combine_32_into_64(false_reg);
                __reg_combine_32_into_64(true_reg);
        } else {
                false_reg->var_off = false_64off;
                true_reg->var_off = true_64off;
                __reg_combine_64_into_32(false_reg);
                __reg_combine_64_into_32(true_reg);
        }
}

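/* Example: for "if r1 > 7 goto L" with r1 previously in [0, 100], the
 * true branch tightens umin_value to max(0, 8) = 8 and the false branch
 * tightens umax_value to min(100, 7) = 7.
 */
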
f1174f77
EC
9708/* Same as above, but for the case that dst_reg holds a constant and src_reg is
9709 * the variable reg.
48461135
JB
9710 */
9711static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
3f50f132
JF
9712 struct bpf_reg_state *false_reg,
9713 u64 val, u32 val32,
092ed096 9714 u8 opcode, bool is_jmp32)
48461135 9715{
6d94e741 9716 opcode = flip_opcode(opcode);
0fc31b10
JH
9717 /* This uses zero as "not present in table"; luckily the zero opcode,
9718 * BPF_JA, can't get here.
b03c9f9f 9719 */
0fc31b10 9720 if (opcode)
3f50f132 9721 reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32);
f1174f77
EC
9722}
9723
9724/* Regs are known to be equal, so intersect their min/max/var_off */
9725static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
9726 struct bpf_reg_state *dst_reg)
9727{
b03c9f9f
EC
9728 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
9729 dst_reg->umin_value);
9730 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
9731 dst_reg->umax_value);
9732 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
9733 dst_reg->smin_value);
9734 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
9735 dst_reg->smax_value);
f1174f77
EC
9736 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
9737 dst_reg->var_off);
b03c9f9f
EC
9738 /* We might have learned new bounds from the var_off. */
9739 __update_reg_bounds(src_reg);
9740 __update_reg_bounds(dst_reg);
9741 /* We might have learned something about the sign bit. */
9742 __reg_deduce_bounds(src_reg);
9743 __reg_deduce_bounds(dst_reg);
9744 /* We might have learned some bits from the bounds. */
9745 __reg_bound_offset(src_reg);
9746 __reg_bound_offset(dst_reg);
9747 /* Intersecting with the old var_off might have improved our bounds
9748 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
9749 * then new var_off is (0; 0x7f...fc) which improves our umax.
9750 */
9751 __update_reg_bounds(src_reg);
9752 __update_reg_bounds(dst_reg);
f1174f77
EC
9753}
9754
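/* Editorial worked example for __reg_combine_min_max(): if src is known
 * to lie in [0, 10] and dst in [5, 100], a taken "if (src == dst)"
 * means both must lie in the intersection:
 *
 *   umin = max(0, 5)    = 5
 *   umax = min(10, 100) = 10
 *
 * The same max-of-mins / min-of-maxes rule applies to the signed
 * bounds, and tnum_intersect() keeps only bit patterns that both
 * var_offs allow; the __update/__deduce/__bound helpers then let each
 * piece of knowledge refine the others.
 */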
9755static void reg_combine_min_max(struct bpf_reg_state *true_src,
9756 struct bpf_reg_state *true_dst,
9757 struct bpf_reg_state *false_src,
9758 struct bpf_reg_state *false_dst,
9759 u8 opcode)
9760{
9761 switch (opcode) {
9762 case BPF_JEQ:
9763 __reg_combine_min_max(true_src, true_dst);
9764 break;
9765 case BPF_JNE:
9766 __reg_combine_min_max(false_src, false_dst);
b03c9f9f 9767 break;
4cabc5b1 9768 }
48461135
JB
9769}
9770
fd978bf7
JS
9771static void mark_ptr_or_null_reg(struct bpf_func_state *state,
9772 struct bpf_reg_state *reg, u32 id,
840b9615 9773 bool is_null)
57a09bf0 9774{
c25b2ae1 9775 if (type_may_be_null(reg->type) && reg->id == id &&
93c230e3 9776 !WARN_ON_ONCE(!reg->id)) {
b03c9f9f
EC
9777 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
9778 !tnum_equals_const(reg->var_off, 0) ||
f1174f77 9779 reg->off)) {
e60b0d12
DB
9780 /* Old offset (both fixed and variable parts) should
9781 * have been known-zero, because we don't allow pointer
9782 * arithmetic on pointers that might be NULL. If we
9783 * see this happening, don't convert the register.
9784 */
9785 return;
f1174f77
EC
9786 }
9787 if (is_null) {
9788 reg->type = SCALAR_VALUE;
1b986589
MKL
 9789 /* We no longer need id and ref_obj_id from this point
 9790 * onwards, so reset them to give state pruning a chance
 9791 * to take effect.
 9792 */
9793 reg->id = 0;
9794 reg->ref_obj_id = 0;
4ddb7416
DB
9795
9796 return;
9797 }
9798
9799 mark_ptr_not_null_reg(reg);
9800
9801 if (!reg_may_point_to_spin_lock(reg)) {
1b986589
MKL
9802 /* For not-NULL ptr, reg->ref_obj_id will be reset
9803 * in release_reg_references().
9804 *
9805 * reg->id is still used by spin_lock ptr. Other
9806 * than spin_lock ptr type, reg->id can be reset.
fd978bf7
JS
9807 */
9808 reg->id = 0;
56f668df 9809 }
57a09bf0
TG
9810 }
9811}
9812
c6a9efa1
PC
9813static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
9814 bool is_null)
9815{
9816 struct bpf_reg_state *reg;
9817 int i;
9818
9819 for (i = 0; i < MAX_BPF_REG; i++)
9820 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
9821
9822 bpf_for_each_spilled_reg(i, state, reg) {
9823 if (!reg)
9824 continue;
9825 mark_ptr_or_null_reg(state, reg, id, is_null);
9826 }
9827}
9828
57a09bf0
TG
9829/* The logic is similar to find_good_pkt_pointers(); the two could
9830 * eventually be folded together.
9831 */
840b9615
JS
9832static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
9833 bool is_null)
57a09bf0 9834{
f4d7e40a 9835 struct bpf_func_state *state = vstate->frame[vstate->curframe];
c6a9efa1 9836 struct bpf_reg_state *regs = state->regs;
1b986589 9837 u32 ref_obj_id = regs[regno].ref_obj_id;
a08dd0da 9838 u32 id = regs[regno].id;
c6a9efa1 9839 int i;
57a09bf0 9840
1b986589
MKL
9841 if (ref_obj_id && ref_obj_id == id && is_null)
9842 /* regs[regno] is in the " == NULL" branch.
9843 * No one could have freed the reference state before
9844 * doing the NULL check.
9845 */
9846 WARN_ON_ONCE(release_reference_state(state, id));
fd978bf7 9847
c6a9efa1
PC
9848 for (i = 0; i <= vstate->curframe; i++)
9849 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
57a09bf0
TG
9850}
9851
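/* Editorial example (illustrative BPF assembly) of the pattern the two
 * functions above handle:
 *
 *   r0 = bpf_map_lookup_elem(r1, r2)  ; PTR_TO_MAP_VALUE_OR_NULL, id=N
 *   r6 = r0                           ; r6 inherits id=N
 *   if r0 == 0 goto drop              ; the "== NULL" branch
 *   *(u64 *)(r6 + 0) = 1              ; safe: r6 was marked not-NULL too
 *
 * Every register and spilled slot carrying id=N is converted in both
 * branches: to a known-zero scalar on the NULL side and to a not-NULL
 * pointer on the other side.
 */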
5beca081
DB
9852static bool try_match_pkt_pointers(const struct bpf_insn *insn,
9853 struct bpf_reg_state *dst_reg,
9854 struct bpf_reg_state *src_reg,
9855 struct bpf_verifier_state *this_branch,
9856 struct bpf_verifier_state *other_branch)
9857{
9858 if (BPF_SRC(insn->code) != BPF_X)
9859 return false;
9860
092ed096
JW
9861 /* Pointers are always 64-bit. */
9862 if (BPF_CLASS(insn->code) == BPF_JMP32)
9863 return false;
9864
5beca081
DB
9865 switch (BPF_OP(insn->code)) {
9866 case BPF_JGT:
9867 if ((dst_reg->type == PTR_TO_PACKET &&
9868 src_reg->type == PTR_TO_PACKET_END) ||
9869 (dst_reg->type == PTR_TO_PACKET_META &&
9870 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9871 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
9872 find_good_pkt_pointers(this_branch, dst_reg,
9873 dst_reg->type, false);
6d94e741 9874 mark_pkt_end(other_branch, insn->dst_reg, true);
5beca081
DB
9875 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
9876 src_reg->type == PTR_TO_PACKET) ||
9877 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9878 src_reg->type == PTR_TO_PACKET_META)) {
9879 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
9880 find_good_pkt_pointers(other_branch, src_reg,
9881 src_reg->type, true);
6d94e741 9882 mark_pkt_end(this_branch, insn->src_reg, false);
5beca081
DB
9883 } else {
9884 return false;
9885 }
9886 break;
9887 case BPF_JLT:
9888 if ((dst_reg->type == PTR_TO_PACKET &&
9889 src_reg->type == PTR_TO_PACKET_END) ||
9890 (dst_reg->type == PTR_TO_PACKET_META &&
9891 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9892 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
9893 find_good_pkt_pointers(other_branch, dst_reg,
9894 dst_reg->type, true);
6d94e741 9895 mark_pkt_end(this_branch, insn->dst_reg, false);
5beca081
DB
9896 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
9897 src_reg->type == PTR_TO_PACKET) ||
9898 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9899 src_reg->type == PTR_TO_PACKET_META)) {
9900 /* pkt_end < pkt_data', pkt_data < pkt_meta' */
9901 find_good_pkt_pointers(this_branch, src_reg,
9902 src_reg->type, false);
6d94e741 9903 mark_pkt_end(other_branch, insn->src_reg, true);
5beca081
DB
9904 } else {
9905 return false;
9906 }
9907 break;
9908 case BPF_JGE:
9909 if ((dst_reg->type == PTR_TO_PACKET &&
9910 src_reg->type == PTR_TO_PACKET_END) ||
9911 (dst_reg->type == PTR_TO_PACKET_META &&
9912 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9913 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
9914 find_good_pkt_pointers(this_branch, dst_reg,
9915 dst_reg->type, true);
6d94e741 9916 mark_pkt_end(other_branch, insn->dst_reg, false);
5beca081
DB
9917 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
9918 src_reg->type == PTR_TO_PACKET) ||
9919 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9920 src_reg->type == PTR_TO_PACKET_META)) {
9921 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
9922 find_good_pkt_pointers(other_branch, src_reg,
9923 src_reg->type, false);
6d94e741 9924 mark_pkt_end(this_branch, insn->src_reg, true);
5beca081
DB
9925 } else {
9926 return false;
9927 }
9928 break;
9929 case BPF_JLE:
9930 if ((dst_reg->type == PTR_TO_PACKET &&
9931 src_reg->type == PTR_TO_PACKET_END) ||
9932 (dst_reg->type == PTR_TO_PACKET_META &&
9933 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
9934 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
9935 find_good_pkt_pointers(other_branch, dst_reg,
9936 dst_reg->type, false);
6d94e741 9937 mark_pkt_end(this_branch, insn->dst_reg, true);
5beca081
DB
9938 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
9939 src_reg->type == PTR_TO_PACKET) ||
9940 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
9941 src_reg->type == PTR_TO_PACKET_META)) {
9942 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
9943 find_good_pkt_pointers(this_branch, src_reg,
9944 src_reg->type, true);
6d94e741 9945 mark_pkt_end(other_branch, insn->src_reg, false);
5beca081
DB
9946 } else {
9947 return false;
9948 }
9949 break;
9950 default:
9951 return false;
9952 }
9953
9954 return true;
9955}
9956
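/* Editorial example (illustrative BPF assembly): the classic packet
 * bounds check try_match_pkt_pointers() recognizes:
 *
 *   r2 = *(u32 *)(r1 + offsetof(xdp_md, data))      ; PTR_TO_PACKET
 *   r3 = *(u32 *)(r1 + offsetof(xdp_md, data_end))  ; PTR_TO_PACKET_END
 *   r2 += 8
 *   if r2 > r3 goto out                             ; BPF_JGT case above
 *
 * On the fall-through path pkt' <= pkt_end, so the first 8 bytes of
 * the packet are provably in bounds and find_good_pkt_pointers()
 * extends the safe range of every packet pointer sharing the same id.
 */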
75748837
AS
9957static void find_equal_scalars(struct bpf_verifier_state *vstate,
9958 struct bpf_reg_state *known_reg)
9959{
9960 struct bpf_func_state *state;
9961 struct bpf_reg_state *reg;
9962 int i, j;
9963
9964 for (i = 0; i <= vstate->curframe; i++) {
9965 state = vstate->frame[i];
9966 for (j = 0; j < MAX_BPF_REG; j++) {
9967 reg = &state->regs[j];
9968 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
9969 *reg = *known_reg;
9970 }
9971
9972 bpf_for_each_spilled_reg(j, state, reg) {
9973 if (!reg)
9974 continue;
9975 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
9976 *reg = *known_reg;
9977 }
9978 }
9979}
9980
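/* Editorial example for find_equal_scalars(): after
 *
 *   r6 = r1               ; both scalars now share one id
 *   if r6 > 10 goto out
 *
 * the fall-through branch knows r6 <= 10; because r1 carries the same
 * id, its state is overwritten with the same refined bounds, in every
 * frame and in spilled stack slots alike.
 */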
58e2af8b 9981static int check_cond_jmp_op(struct bpf_verifier_env *env,
17a52670
AS
9982 struct bpf_insn *insn, int *insn_idx)
9983{
f4d7e40a
AS
9984 struct bpf_verifier_state *this_branch = env->cur_state;
9985 struct bpf_verifier_state *other_branch;
9986 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
fb8d251e 9987 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
17a52670 9988 u8 opcode = BPF_OP(insn->code);
092ed096 9989 bool is_jmp32;
fb8d251e 9990 int pred = -1;
17a52670
AS
9991 int err;
9992
092ed096
JW
9993 /* Only conditional jumps are expected to reach here. */
9994 if (opcode == BPF_JA || opcode > BPF_JSLE) {
9995 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
17a52670
AS
9996 return -EINVAL;
9997 }
9998
9999 if (BPF_SRC(insn->code) == BPF_X) {
10000 if (insn->imm != 0) {
092ed096 10001 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
10002 return -EINVAL;
10003 }
10004
10005 /* check src1 operand */
dc503a8a 10006 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
10007 if (err)
10008 return err;
1be7f75d
AS
10009
10010 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 10011 verbose(env, "R%d pointer comparison prohibited\n",
1be7f75d
AS
10012 insn->src_reg);
10013 return -EACCES;
10014 }
fb8d251e 10015 src_reg = &regs[insn->src_reg];
17a52670
AS
10016 } else {
10017 if (insn->src_reg != BPF_REG_0) {
092ed096 10018 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
10019 return -EINVAL;
10020 }
10021 }
10022
10023 /* check src2 operand */
dc503a8a 10024 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
10025 if (err)
10026 return err;
10027
1a0dc1ac 10028 dst_reg = &regs[insn->dst_reg];
092ed096 10029 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1a0dc1ac 10030
3f50f132
JF
10031 if (BPF_SRC(insn->code) == BPF_K) {
10032 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
10033 } else if (src_reg->type == SCALAR_VALUE &&
10034 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
10035 pred = is_branch_taken(dst_reg,
10036 tnum_subreg(src_reg->var_off).value,
10037 opcode,
10038 is_jmp32);
10039 } else if (src_reg->type == SCALAR_VALUE &&
10040 !is_jmp32 && tnum_is_const(src_reg->var_off)) {
10041 pred = is_branch_taken(dst_reg,
10042 src_reg->var_off.value,
10043 opcode,
10044 is_jmp32);
6d94e741
AS
10045 } else if (reg_is_pkt_pointer_any(dst_reg) &&
10046 reg_is_pkt_pointer_any(src_reg) &&
10047 !is_jmp32) {
10048 pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
3f50f132
JF
10049 }
10050
b5dc0163 10051 if (pred >= 0) {
cac616db
JF
10052 /* If we get here with a dst_reg pointer type, it is because
10053 * is_branch_taken() above special-cased the comparison against 0.
10054 */
10055 if (!__is_pointer_value(false, dst_reg))
10056 err = mark_chain_precision(env, insn->dst_reg);
6d94e741
AS
10057 if (BPF_SRC(insn->code) == BPF_X && !err &&
10058 !__is_pointer_value(false, src_reg))
b5dc0163
AS
10059 err = mark_chain_precision(env, insn->src_reg);
10060 if (err)
10061 return err;
10062 }
9183671a 10063
fb8d251e 10064 if (pred == 1) {
9183671a
DB
10065 /* Only follow the goto, ignore fall-through. If needed, push
10066 * the fall-through branch for simulation under speculative
10067 * execution.
10068 */
10069 if (!env->bypass_spec_v1 &&
10070 !sanitize_speculative_path(env, insn, *insn_idx + 1,
10071 *insn_idx))
10072 return -EFAULT;
fb8d251e
AS
10073 *insn_idx += insn->off;
10074 return 0;
10075 } else if (pred == 0) {
9183671a
DB
10076 /* Only follow the fall-through branch, since that's where the
10077 * program will go. If needed, push the goto branch for
10078 * simulation under speculative execution.
fb8d251e 10079 */
9183671a
DB
10080 if (!env->bypass_spec_v1 &&
10081 !sanitize_speculative_path(env, insn,
10082 *insn_idx + insn->off + 1,
10083 *insn_idx))
10084 return -EFAULT;
fb8d251e 10085 return 0;
17a52670
AS
10086 }
10087
979d63d5
DB
10088 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
10089 false);
17a52670
AS
10090 if (!other_branch)
10091 return -EFAULT;
f4d7e40a 10092 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
17a52670 10093
48461135
JB
10094 /* detect if we are comparing against a constant value so we can adjust
10095 * our min/max values for our dst register.
f1174f77
EC
10096 * this is only legit if both are scalars (or pointers to the same
10097 * object, I suppose, but we don't support that right now), because
10098 * otherwise the different base pointers mean the offsets aren't
10099 * comparable.
48461135
JB
10100 */
10101 if (BPF_SRC(insn->code) == BPF_X) {
092ed096 10102 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
092ed096 10103
f1174f77 10104 if (dst_reg->type == SCALAR_VALUE &&
092ed096
JW
10105 src_reg->type == SCALAR_VALUE) {
10106 if (tnum_is_const(src_reg->var_off) ||
3f50f132
JF
10107 (is_jmp32 &&
10108 tnum_is_const(tnum_subreg(src_reg->var_off))))
f4d7e40a 10109 reg_set_min_max(&other_branch_regs[insn->dst_reg],
092ed096 10110 dst_reg,
3f50f132
JF
10111 src_reg->var_off.value,
10112 tnum_subreg(src_reg->var_off).value,
092ed096
JW
10113 opcode, is_jmp32);
10114 else if (tnum_is_const(dst_reg->var_off) ||
3f50f132
JF
10115 (is_jmp32 &&
10116 tnum_is_const(tnum_subreg(dst_reg->var_off))))
f4d7e40a 10117 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
092ed096 10118 src_reg,
3f50f132
JF
10119 dst_reg->var_off.value,
10120 tnum_subreg(dst_reg->var_off).value,
092ed096
JW
10121 opcode, is_jmp32);
10122 else if (!is_jmp32 &&
10123 (opcode == BPF_JEQ || opcode == BPF_JNE))
f1174f77 10124 /* Comparing for equality, we can combine knowledge */
f4d7e40a
AS
10125 reg_combine_min_max(&other_branch_regs[insn->src_reg],
10126 &other_branch_regs[insn->dst_reg],
092ed096 10127 src_reg, dst_reg, opcode);
e688c3db
AS
10128 if (src_reg->id &&
10129 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
75748837
AS
10130 find_equal_scalars(this_branch, src_reg);
10131 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
10132 }
10133
f1174f77
EC
10134 }
10135 } else if (dst_reg->type == SCALAR_VALUE) {
f4d7e40a 10136 reg_set_min_max(&other_branch_regs[insn->dst_reg],
3f50f132
JF
10137 dst_reg, insn->imm, (u32)insn->imm,
10138 opcode, is_jmp32);
48461135
JB
10139 }
10140
e688c3db
AS
10141 if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
10142 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
75748837
AS
10143 find_equal_scalars(this_branch, dst_reg);
10144 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
10145 }
10146
092ed096
JW
10147 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
10148 * NOTE: the optimizations below are related to pointer comparisons,
10149 * which will never be JMP32.
10150 */
10151 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
1a0dc1ac 10152 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
c25b2ae1 10153 type_may_be_null(dst_reg->type)) {
840b9615 10154 /* Mark all identical registers in each branch as either
57a09bf0
TG
10155 * safe or unknown depending on whether the R == 0 or R != 0 branch is taken.
10156 */
840b9615
JS
10157 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
10158 opcode == BPF_JNE);
10159 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
10160 opcode == BPF_JEQ);
5beca081
DB
10161 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
10162 this_branch, other_branch) &&
10163 is_pointer_value(env, insn->dst_reg)) {
61bd5218
JK
10164 verbose(env, "R%d pointer comparison prohibited\n",
10165 insn->dst_reg);
1be7f75d 10166 return -EACCES;
17a52670 10167 }
06ee7115 10168 if (env->log.level & BPF_LOG_LEVEL)
2e576648 10169 print_insn_state(env, this_branch->frame[this_branch->curframe]);
17a52670
AS
10170 return 0;
10171}
10172
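/* Editorial sketch (not kernel code) of the pred computation above,
 * reduced to a 64-bit BPF_JEQ against a constant: 1 means the branch
 * is always taken, 0 never taken, and -1 that both paths must be
 * explored (and, for pred >= 0, precision is propagated back).
 */
static int sketch_branch_taken_jeq(unsigned long long umin,
				   unsigned long long umax,
				   unsigned long long K)
{
	if (umin == umax)		/* register is a known constant */
		return umin == K;
	if (K < umin || K > umax)	/* K cannot match any possible value */
		return 0;
	return -1;			/* genuinely unknown */
}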
17a52670 10173/* verify BPF_LD_IMM64 instruction */
58e2af8b 10174static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 10175{
d8eca5bb 10176 struct bpf_insn_aux_data *aux = cur_aux(env);
638f5b90 10177 struct bpf_reg_state *regs = cur_regs(env);
4976b718 10178 struct bpf_reg_state *dst_reg;
d8eca5bb 10179 struct bpf_map *map;
17a52670
AS
10180 int err;
10181
10182 if (BPF_SIZE(insn->code) != BPF_DW) {
61bd5218 10183 verbose(env, "invalid BPF_LD_IMM insn\n");
17a52670
AS
10184 return -EINVAL;
10185 }
10186 if (insn->off != 0) {
61bd5218 10187 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
17a52670
AS
10188 return -EINVAL;
10189 }
10190
dc503a8a 10191 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
10192 if (err)
10193 return err;
10194
4976b718 10195 dst_reg = &regs[insn->dst_reg];
6b173873 10196 if (insn->src_reg == 0) {
6b173873
JK
10197 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
10198
4976b718 10199 dst_reg->type = SCALAR_VALUE;
b03c9f9f 10200 __mark_reg_known(&regs[insn->dst_reg], imm);
17a52670 10201 return 0;
6b173873 10202 }
17a52670 10203
d400a6cf
DB
10204 /* All special src_reg cases are listed below. From this point onwards
10205 * we either succeed and assign a corresponding dst_reg->type after
10206 * zeroing the offset, or fail and reject the program.
10207 */
10208 mark_reg_known_zero(env, regs, insn->dst_reg);
4976b718 10209
d400a6cf 10210 if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
4976b718 10211 dst_reg->type = aux->btf_var.reg_type;
34d3a78c 10212 switch (base_type(dst_reg->type)) {
4976b718
HL
10213 case PTR_TO_MEM:
10214 dst_reg->mem_size = aux->btf_var.mem_size;
10215 break;
10216 case PTR_TO_BTF_ID:
22dc4a0f 10217 dst_reg->btf = aux->btf_var.btf;
4976b718
HL
10218 dst_reg->btf_id = aux->btf_var.btf_id;
10219 break;
10220 default:
10221 verbose(env, "bpf verifier is misconfigured\n");
10222 return -EFAULT;
10223 }
10224 return 0;
10225 }
10226
69c087ba
YS
10227 if (insn->src_reg == BPF_PSEUDO_FUNC) {
10228 struct bpf_prog_aux *aux = env->prog->aux;
3990ed4c
MKL
10229 u32 subprogno = find_subprog(env,
10230 env->insn_idx + insn->imm + 1);
69c087ba
YS
10231
10232 if (!aux->func_info) {
10233 verbose(env, "missing btf func_info\n");
10234 return -EINVAL;
10235 }
10236 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) {
10237 verbose(env, "callback function not static\n");
10238 return -EINVAL;
10239 }
10240
10241 dst_reg->type = PTR_TO_FUNC;
10242 dst_reg->subprogno = subprogno;
10243 return 0;
10244 }
10245
d8eca5bb 10246 map = env->used_maps[aux->map_index];
4976b718 10247 dst_reg->map_ptr = map;
d8eca5bb 10248
387544bf
AS
10249 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
10250 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
4976b718
HL
10251 dst_reg->type = PTR_TO_MAP_VALUE;
10252 dst_reg->off = aux->map_off;
d8eca5bb 10253 if (map_value_has_spin_lock(map))
4976b718 10254 dst_reg->id = ++env->id_gen;
387544bf
AS
10255 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
10256 insn->src_reg == BPF_PSEUDO_MAP_IDX) {
4976b718 10257 dst_reg->type = CONST_PTR_TO_MAP;
d8eca5bb
DB
10258 } else {
10259 verbose(env, "bpf verifier is misconfigured\n");
10260 return -EINVAL;
10261 }
17a52670 10262
17a52670
AS
10263 return 0;
10264}
10265
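/* Editorial sketch (standalone, assumes <stdint.h>): how the 64-bit
 * immediate of BPF_LD_IMM64 is assembled from the imm fields of the
 * two-instruction pair, matching the expression in check_ld_imm().
 */
#include <stdint.h>

static uint64_t sketch_ld_imm64(int32_t imm_lo, int32_t imm_hi)
{
	/* low half is zero-extended, high half supplies bits 63..32 */
	return ((uint64_t)imm_hi << 32) | (uint32_t)imm_lo;
}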
96be4325
DB
10266static bool may_access_skb(enum bpf_prog_type type)
10267{
10268 switch (type) {
10269 case BPF_PROG_TYPE_SOCKET_FILTER:
10270 case BPF_PROG_TYPE_SCHED_CLS:
94caee8c 10271 case BPF_PROG_TYPE_SCHED_ACT:
96be4325
DB
10272 return true;
10273 default:
10274 return false;
10275 }
10276}
10277
ddd872bc
AS
10278/* verify safety of LD_ABS|LD_IND instructions:
10279 * - they can only appear in the programs where ctx == skb
10280 * - since they are wrappers of function calls, they scratch R1-R5 registers,
10281 * preserve R6-R9, and store return value into R0
10282 *
10283 * Implicit input:
10284 * ctx == skb == R6 == CTX
10285 *
10286 * Explicit input:
10287 * SRC == any register
10288 * IMM == 32-bit immediate
10289 *
10290 * Output:
10291 * R0 - 8/16/32-bit skb data converted to cpu endianness
10292 */
58e2af8b 10293static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
ddd872bc 10294{
638f5b90 10295 struct bpf_reg_state *regs = cur_regs(env);
6d4f151a 10296 static const int ctx_reg = BPF_REG_6;
ddd872bc 10297 u8 mode = BPF_MODE(insn->code);
ddd872bc
AS
10298 int i, err;
10299
7e40781c 10300 if (!may_access_skb(resolve_prog_type(env->prog))) {
61bd5218 10301 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
ddd872bc
AS
10302 return -EINVAL;
10303 }
10304
e0cea7ce
DB
10305 if (!env->ops->gen_ld_abs) {
10306 verbose(env, "bpf verifier is misconfigured\n");
10307 return -EINVAL;
10308 }
10309
ddd872bc 10310 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
d82bccc6 10311 BPF_SIZE(insn->code) == BPF_DW ||
ddd872bc 10312 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
61bd5218 10313 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
ddd872bc
AS
10314 return -EINVAL;
10315 }
10316
10317 /* check whether implicit source operand (register R6) is readable */
6d4f151a 10318 err = check_reg_arg(env, ctx_reg, SRC_OP);
ddd872bc
AS
10319 if (err)
10320 return err;
10321
fd978bf7
JS
10322 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
10323 * gen_ld_abs() may terminate the program at runtime, leading to
10324 * reference leak.
10325 */
10326 err = check_reference_leak(env);
10327 if (err) {
10328 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
10329 return err;
10330 }
10331
d83525ca
AS
10332 if (env->cur_state->active_spin_lock) {
10333 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
10334 return -EINVAL;
10335 }
10336
6d4f151a 10337 if (regs[ctx_reg].type != PTR_TO_CTX) {
61bd5218
JK
10338 verbose(env,
10339 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
ddd872bc
AS
10340 return -EINVAL;
10341 }
10342
10343 if (mode == BPF_IND) {
10344 /* check explicit source operand */
dc503a8a 10345 err = check_reg_arg(env, insn->src_reg, SRC_OP);
ddd872bc
AS
10346 if (err)
10347 return err;
10348 }
10349
be80a1d3 10350 err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
6d4f151a
DB
10351 if (err < 0)
10352 return err;
10353
ddd872bc 10354 /* reset caller saved regs to unreadable */
dc503a8a 10355 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 10356 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
10357 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
10358 }
ddd872bc
AS
10359
10360 /* mark destination R0 register as readable, since it contains
dc503a8a
EC
10361 * the value fetched from the packet.
10362 * Already marked as written above.
ddd872bc 10363 */
61bd5218 10364 mark_reg_unknown(env, regs, BPF_REG_0);
5327ed3d
JW
10365 /* ld_abs loads up to 32 bits of skb data. */
10366 regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
ddd872bc
AS
10367 return 0;
10368}
10369
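/* Editorial example (illustrative, classic socket-filter usage): with
 * the skb in R6 as required above, LD_ABS can fetch the IPv4 protocol
 * byte (14-byte Ethernet header + offset 9, assuming no VLAN tag):
 *
 *   r6 = r1                    ; ctx (skb) must live in R6
 *   r0 = *(u8 *)skb[23]        ; BPF_LD | BPF_ABS | BPF_B, imm = 23
 *   if r0 != 6 goto drop       ; 6 == IPPROTO_TCP
 *
 * R1-R5 are scratched as described, and R0 is treated as a 32-bit
 * subregister definition (the data is at most 32 bits), hence the
 * subreg_def marking above.
 */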
390ee7e2
AS
10370static int check_return_code(struct bpf_verifier_env *env)
10371{
5cf1e914 10372 struct tnum enforce_attach_type_range = tnum_unknown;
27ae7997 10373 const struct bpf_prog *prog = env->prog;
390ee7e2
AS
10374 struct bpf_reg_state *reg;
10375 struct tnum range = tnum_range(0, 1);
7e40781c 10376 enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
27ae7997 10377 int err;
bfc6bb74
AS
10378 struct bpf_func_state *frame = env->cur_state->frame[0];
10379 const bool is_subprog = frame->subprogno;
27ae7997 10380
9e4e01df 10381 /* LSM and struct_ops func-ptr's return type could be "void" */
f782e2c3
DB
10382 if (!is_subprog &&
10383 (prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
7e40781c 10384 prog_type == BPF_PROG_TYPE_LSM) &&
27ae7997
MKL
10385 !prog->aux->attach_func_proto->type)
10386 return 0;
10387
8fb33b60 10388 /* eBPF calling convention is such that R0 is used
27ae7997
MKL
10389 * to return the value from eBPF program.
10390 * Make sure that it's readable at this time
10391 * of bpf_exit, which means that program wrote
10392 * something into it earlier
10393 */
10394 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
10395 if (err)
10396 return err;
10397
10398 if (is_pointer_value(env, BPF_REG_0)) {
10399 verbose(env, "R0 leaks addr as return value\n");
10400 return -EACCES;
10401 }
390ee7e2 10402
f782e2c3 10403 reg = cur_regs(env) + BPF_REG_0;
bfc6bb74
AS
10404
10405 if (frame->in_async_callback_fn) {
10406 /* enforce return zero from async callbacks like timer */
10407 if (reg->type != SCALAR_VALUE) {
10408 verbose(env, "In async callback the register R0 is not a known value (%s)\n",
c25b2ae1 10409 reg_type_str(env, reg->type));
bfc6bb74
AS
10410 return -EINVAL;
10411 }
10412
10413 if (!tnum_in(tnum_const(0), reg->var_off)) {
10414 verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
10415 return -EINVAL;
10416 }
10417 return 0;
10418 }
10419
f782e2c3
DB
10420 if (is_subprog) {
10421 if (reg->type != SCALAR_VALUE) {
10422 verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n",
c25b2ae1 10423 reg_type_str(env, reg->type));
f782e2c3
DB
10424 return -EINVAL;
10425 }
10426 return 0;
10427 }
10428
7e40781c 10429 switch (prog_type) {
983695fa
DB
10430 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
10431 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
1b66d253
DB
10432 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
10433 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME ||
10434 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME ||
10435 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME ||
10436 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME)
983695fa 10437 range = tnum_range(1, 1);
77241217
SF
10438 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND ||
10439 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND)
10440 range = tnum_range(0, 3);
ed4ed404 10441 break;
390ee7e2 10442 case BPF_PROG_TYPE_CGROUP_SKB:
5cf1e914 10443 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
10444 range = tnum_range(0, 3);
10445 enforce_attach_type_range = tnum_range(2, 3);
10446 }
ed4ed404 10447 break;
390ee7e2
AS
10448 case BPF_PROG_TYPE_CGROUP_SOCK:
10449 case BPF_PROG_TYPE_SOCK_OPS:
ebc614f6 10450 case BPF_PROG_TYPE_CGROUP_DEVICE:
7b146ceb 10451 case BPF_PROG_TYPE_CGROUP_SYSCTL:
0d01da6a 10452 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
390ee7e2 10453 break;
15ab09bd
AS
10454 case BPF_PROG_TYPE_RAW_TRACEPOINT:
10455 if (!env->prog->aux->attach_btf_id)
10456 return 0;
10457 range = tnum_const(0);
10458 break;
15d83c4d 10459 case BPF_PROG_TYPE_TRACING:
e92888c7
YS
10460 switch (env->prog->expected_attach_type) {
10461 case BPF_TRACE_FENTRY:
10462 case BPF_TRACE_FEXIT:
10463 range = tnum_const(0);
10464 break;
10465 case BPF_TRACE_RAW_TP:
10466 case BPF_MODIFY_RETURN:
15d83c4d 10467 return 0;
2ec0616e
DB
10468 case BPF_TRACE_ITER:
10469 break;
e92888c7
YS
10470 default:
10471 return -ENOTSUPP;
10472 }
15d83c4d 10473 break;
e9ddbb77
JS
10474 case BPF_PROG_TYPE_SK_LOOKUP:
10475 range = tnum_range(SK_DROP, SK_PASS);
10476 break;
e92888c7
YS
10477 case BPF_PROG_TYPE_EXT:
10478 /* freplace program can return anything as its return value
10479 * depends on the to-be-replaced kernel func or bpf program.
10480 */
390ee7e2
AS
10481 default:
10482 return 0;
10483 }
10484
390ee7e2 10485 if (reg->type != SCALAR_VALUE) {
61bd5218 10486 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
c25b2ae1 10487 reg_type_str(env, reg->type));
390ee7e2
AS
10488 return -EINVAL;
10489 }
10490
10491 if (!tnum_in(range, reg->var_off)) {
bc2591d6 10492 verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
390ee7e2
AS
10493 return -EINVAL;
10494 }
5cf1e914 10495
10496 if (!tnum_is_unknown(enforce_attach_type_range) &&
10497 tnum_in(enforce_attach_type_range, reg->var_off))
10498 env->prog->enforce_expected_attach_type = 1;
390ee7e2
AS
10499 return 0;
10500}
10501
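/* Editorial worked example: the range checks above use tnum_in(). For
 * BPF_CGROUP_INET_EGRESS the allowed range is [0, 3], so a program
 * ending in "r0 = 4; exit" has var_off == const 4, tnum_in(range,
 * var_off) fails, and the load is rejected; "r0 = 2; exit" passes and,
 * because 2 falls inside enforce_attach_type_range [2, 3], also sets
 * enforce_expected_attach_type on the prog.
 */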
475fb78f
AS
10502/* non-recursive DFS pseudo code
10503 * 1 procedure DFS-iterative(G,v):
10504 * 2 label v as discovered
10505 * 3 let S be a stack
10506 * 4 S.push(v)
10507 * 5 while S is not empty
10508 * 6 t <- S.pop()
10509 * 7 if t is what we're looking for:
10510 * 8 return t
10511 * 9 for all edges e in G.adjacentEdges(t) do
10512 * 10 if edge e is already labelled
10513 * 11 continue with the next edge
10514 * 12 w <- G.adjacentVertex(t,e)
10515 * 13 if vertex w is not discovered and not explored
10516 * 14 label e as tree-edge
10517 * 15 label w as discovered
10518 * 16 S.push(w)
10519 * 17 continue at 5
10520 * 18 else if vertex w is discovered
10521 * 19 label e as back-edge
10522 * 20 else
10523 * 21 // vertex w is explored
10524 * 22 label e as forward- or cross-edge
10525 * 23 label t as explored
10526 * 24 S.pop()
10527 *
10528 * convention:
10529 * 0x10 - discovered
10530 * 0x11 - discovered and fall-through edge labelled
10531 * 0x12 - discovered and fall-through and branch edges labelled
10532 * 0x20 - explored
10533 */
10534
10535enum {
10536 DISCOVERED = 0x10,
10537 EXPLORED = 0x20,
10538 FALLTHROUGH = 1,
10539 BRANCH = 2,
10540};
10541
dc2a4ebc
AS
10542static u32 state_htab_size(struct bpf_verifier_env *env)
10543{
10544 return env->prog->len;
10545}
10546
5d839021
AS
10547static struct bpf_verifier_state_list **explored_state(
10548 struct bpf_verifier_env *env,
10549 int idx)
10550{
dc2a4ebc
AS
10551 struct bpf_verifier_state *cur = env->cur_state;
10552 struct bpf_func_state *state = cur->frame[cur->curframe];
10553
10554 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
5d839021
AS
10555}
10556
10557static void init_explored_state(struct bpf_verifier_env *env, int idx)
10558{
a8f500af 10559 env->insn_aux_data[idx].prune_point = true;
5d839021 10560}
f1bca824 10561
59e2e27d
WAF
10562enum {
10563 DONE_EXPLORING = 0,
10564 KEEP_EXPLORING = 1,
10565};
10566
475fb78f
AS
10567/* t, w, e - match pseudo-code above:
10568 * t - index of current instruction
10569 * w - next instruction
10570 * e - edge
10571 */
2589726d
AS
10572static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
10573 bool loop_ok)
475fb78f 10574{
7df737e9
AS
10575 int *insn_stack = env->cfg.insn_stack;
10576 int *insn_state = env->cfg.insn_state;
10577
475fb78f 10578 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
59e2e27d 10579 return DONE_EXPLORING;
475fb78f
AS
10580
10581 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
59e2e27d 10582 return DONE_EXPLORING;
475fb78f
AS
10583
10584 if (w < 0 || w >= env->prog->len) {
d9762e84 10585 verbose_linfo(env, t, "%d: ", t);
61bd5218 10586 verbose(env, "jump out of range from insn %d to %d\n", t, w);
475fb78f
AS
10587 return -EINVAL;
10588 }
10589
f1bca824
AS
10590 if (e == BRANCH)
10591 /* mark branch target for state pruning */
5d839021 10592 init_explored_state(env, w);
f1bca824 10593
475fb78f
AS
10594 if (insn_state[w] == 0) {
10595 /* tree-edge */
10596 insn_state[t] = DISCOVERED | e;
10597 insn_state[w] = DISCOVERED;
7df737e9 10598 if (env->cfg.cur_stack >= env->prog->len)
475fb78f 10599 return -E2BIG;
7df737e9 10600 insn_stack[env->cfg.cur_stack++] = w;
59e2e27d 10601 return KEEP_EXPLORING;
475fb78f 10602 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
2c78ee89 10603 if (loop_ok && env->bpf_capable)
59e2e27d 10604 return DONE_EXPLORING;
d9762e84
MKL
10605 verbose_linfo(env, t, "%d: ", t);
10606 verbose_linfo(env, w, "%d: ", w);
61bd5218 10607 verbose(env, "back-edge from insn %d to %d\n", t, w);
475fb78f
AS
10608 return -EINVAL;
10609 } else if (insn_state[w] == EXPLORED) {
10610 /* forward- or cross-edge */
10611 insn_state[t] = DISCOVERED | e;
10612 } else {
61bd5218 10613 verbose(env, "insn state internal bug\n");
475fb78f
AS
10614 return -EFAULT;
10615 }
59e2e27d
WAF
10616 return DONE_EXPLORING;
10617}
10618
efdb22de
YS
10619static int visit_func_call_insn(int t, int insn_cnt,
10620 struct bpf_insn *insns,
10621 struct bpf_verifier_env *env,
10622 bool visit_callee)
10623{
10624 int ret;
10625
10626 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
10627 if (ret)
10628 return ret;
10629
10630 if (t + 1 < insn_cnt)
10631 init_explored_state(env, t + 1);
10632 if (visit_callee) {
10633 init_explored_state(env, t);
86fc6ee6
AS
10634 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
10635 /* It's ok to allow recursion from CFG point of
10636 * view. __check_func_call() will do the actual
10637 * check.
10638 */
10639 bpf_pseudo_func(insns + t));
efdb22de
YS
10640 }
10641 return ret;
10642}
10643
59e2e27d
WAF
10644/* Visits the instruction at index t and returns one of the following:
10645 * < 0 - an error occurred
10646 * DONE_EXPLORING - the instruction was fully explored
10647 * KEEP_EXPLORING - there is still work to be done before it is fully explored
10648 */
10649static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
10650{
10651 struct bpf_insn *insns = env->prog->insnsi;
10652 int ret;
10653
69c087ba
YS
10654 if (bpf_pseudo_func(insns + t))
10655 return visit_func_call_insn(t, insn_cnt, insns, env, true);
10656
59e2e27d
WAF
10657 /* All non-branch instructions have a single fall-through edge. */
10658 if (BPF_CLASS(insns[t].code) != BPF_JMP &&
10659 BPF_CLASS(insns[t].code) != BPF_JMP32)
10660 return push_insn(t, t + 1, FALLTHROUGH, env, false);
10661
10662 switch (BPF_OP(insns[t].code)) {
10663 case BPF_EXIT:
10664 return DONE_EXPLORING;
10665
10666 case BPF_CALL:
bfc6bb74
AS
10667 if (insns[t].imm == BPF_FUNC_timer_set_callback)
10668 /* Mark this call insn to trigger is_state_visited() check
10669 * before call itself is processed by __check_func_call().
10670 * Otherwise new async state will be pushed for further
10671 * exploration.
10672 */
10673 init_explored_state(env, t);
efdb22de
YS
10674 return visit_func_call_insn(t, insn_cnt, insns, env,
10675 insns[t].src_reg == BPF_PSEUDO_CALL);
59e2e27d
WAF
10676
10677 case BPF_JA:
10678 if (BPF_SRC(insns[t].code) != BPF_K)
10679 return -EINVAL;
10680
10681 /* unconditional jump with single edge */
10682 ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
10683 true);
10684 if (ret)
10685 return ret;
10686
10687 /* unconditional jmp is not a good pruning point,
10688 * but it's marked, since backtracking needs
10689 * to record jmp history in is_state_visited().
10690 */
10691 init_explored_state(env, t + insns[t].off + 1);
10692 /* tell verifier to check for equivalent states
10693 * after every call and jump
10694 */
10695 if (t + 1 < insn_cnt)
10696 init_explored_state(env, t + 1);
10697
10698 return ret;
10699
10700 default:
10701 /* conditional jump with two edges */
10702 init_explored_state(env, t);
10703 ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
10704 if (ret)
10705 return ret;
10706
10707 return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
10708 }
475fb78f
AS
10709}
10710
10711/* non-recursive depth-first-search to detect loops in BPF program
10712 * loop == back-edge in directed graph
10713 */
58e2af8b 10714static int check_cfg(struct bpf_verifier_env *env)
475fb78f 10715{
475fb78f 10716 int insn_cnt = env->prog->len;
7df737e9 10717 int *insn_stack, *insn_state;
475fb78f 10718 int ret = 0;
59e2e27d 10719 int i;
475fb78f 10720
7df737e9 10721 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f
AS
10722 if (!insn_state)
10723 return -ENOMEM;
10724
7df737e9 10725 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f 10726 if (!insn_stack) {
71dde681 10727 kvfree(insn_state);
475fb78f
AS
10728 return -ENOMEM;
10729 }
10730
10731 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
10732 insn_stack[0] = 0; /* 0 is the first instruction */
7df737e9 10733 env->cfg.cur_stack = 1;
475fb78f 10734
59e2e27d
WAF
10735 while (env->cfg.cur_stack > 0) {
10736 int t = insn_stack[env->cfg.cur_stack - 1];
475fb78f 10737
59e2e27d
WAF
10738 ret = visit_insn(t, insn_cnt, env);
10739 switch (ret) {
10740 case DONE_EXPLORING:
10741 insn_state[t] = EXPLORED;
10742 env->cfg.cur_stack--;
10743 break;
10744 case KEEP_EXPLORING:
10745 break;
10746 default:
10747 if (ret > 0) {
10748 verbose(env, "visit_insn internal bug\n");
10749 ret = -EFAULT;
475fb78f 10750 }
475fb78f 10751 goto err_free;
59e2e27d 10752 }
475fb78f
AS
10753 }
10754
59e2e27d 10755 if (env->cfg.cur_stack < 0) {
61bd5218 10756 verbose(env, "pop stack internal bug\n");
475fb78f
AS
10757 ret = -EFAULT;
10758 goto err_free;
10759 }
475fb78f 10760
475fb78f
AS
10761 for (i = 0; i < insn_cnt; i++) {
10762 if (insn_state[i] != EXPLORED) {
61bd5218 10763 verbose(env, "unreachable insn %d\n", i);
475fb78f
AS
10764 ret = -EINVAL;
10765 goto err_free;
10766 }
10767 }
10768 ret = 0; /* cfg looks good */
10769
10770err_free:
71dde681
AS
10771 kvfree(insn_state);
10772 kvfree(insn_stack);
7df737e9 10773 env->cfg.insn_state = env->cfg.insn_stack = NULL;
475fb78f
AS
10774 return ret;
10775}
10776
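/* Editorial example (illustrative BPF assembly) of a loop that
 * check_cfg() rejects when bounded loops are not permitted
 * (!env->bpf_capable):
 *
 *   0: r0 = 0
 *   1: r0 += 1
 *   2: if r0 < 10 goto -2     ; branch edge back to insn 1
 *   3: exit
 *
 * The DFS reaches insn 1 again while it is still DISCOVERED, so
 * push_insn() reports "back-edge from insn 2 to 1".
 */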
09b28d76
AS
10777static int check_abnormal_return(struct bpf_verifier_env *env)
10778{
10779 int i;
10780
10781 for (i = 1; i < env->subprog_cnt; i++) {
10782 if (env->subprog_info[i].has_ld_abs) {
10783 verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
10784 return -EINVAL;
10785 }
10786 if (env->subprog_info[i].has_tail_call) {
10787 verbose(env, "tail_call is not allowed in subprogs without BTF\n");
10788 return -EINVAL;
10789 }
10790 }
10791 return 0;
10792}
10793
838e9690
YS
10794/* The minimum supported BTF func info size */
10795#define MIN_BPF_FUNCINFO_SIZE 8
10796#define MAX_FUNCINFO_REC_SIZE 252
10797
c454a46b
MKL
10798static int check_btf_func(struct bpf_verifier_env *env,
10799 const union bpf_attr *attr,
af2ac3e1 10800 bpfptr_t uattr)
838e9690 10801{
09b28d76 10802 const struct btf_type *type, *func_proto, *ret_type;
d0b2818e 10803 u32 i, nfuncs, urec_size, min_size;
838e9690 10804 u32 krec_size = sizeof(struct bpf_func_info);
c454a46b 10805 struct bpf_func_info *krecord;
8c1b6e69 10806 struct bpf_func_info_aux *info_aux = NULL;
c454a46b
MKL
10807 struct bpf_prog *prog;
10808 const struct btf *btf;
af2ac3e1 10809 bpfptr_t urecord;
d0b2818e 10810 u32 prev_offset = 0;
09b28d76 10811 bool scalar_return;
e7ed83d6 10812 int ret = -ENOMEM;
838e9690
YS
10813
10814 nfuncs = attr->func_info_cnt;
09b28d76
AS
10815 if (!nfuncs) {
10816 if (check_abnormal_return(env))
10817 return -EINVAL;
838e9690 10818 return 0;
09b28d76 10819 }
838e9690
YS
10820
10821 if (nfuncs != env->subprog_cnt) {
10822 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
10823 return -EINVAL;
10824 }
10825
10826 urec_size = attr->func_info_rec_size;
10827 if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
10828 urec_size > MAX_FUNCINFO_REC_SIZE ||
10829 urec_size % sizeof(u32)) {
10830 verbose(env, "invalid func info rec size %u\n", urec_size);
10831 return -EINVAL;
10832 }
10833
c454a46b
MKL
10834 prog = env->prog;
10835 btf = prog->aux->btf;
838e9690 10836
af2ac3e1 10837 urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
838e9690
YS
10838 min_size = min_t(u32, krec_size, urec_size);
10839
ba64e7d8 10840 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
c454a46b
MKL
10841 if (!krecord)
10842 return -ENOMEM;
8c1b6e69
AS
10843 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
10844 if (!info_aux)
10845 goto err_free;
ba64e7d8 10846
838e9690
YS
10847 for (i = 0; i < nfuncs; i++) {
10848 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
10849 if (ret) {
10850 if (ret == -E2BIG) {
10851 verbose(env, "nonzero trailing record in func info");
10852 /* set the size the kernel expects so the loader can
10853 * zero out the rest of the record.
10854 */
af2ac3e1
AS
10855 if (copy_to_bpfptr_offset(uattr,
10856 offsetof(union bpf_attr, func_info_rec_size),
10857 &min_size, sizeof(min_size)))
838e9690
YS
10858 ret = -EFAULT;
10859 }
c454a46b 10860 goto err_free;
838e9690
YS
10861 }
10862
af2ac3e1 10863 if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
838e9690 10864 ret = -EFAULT;
c454a46b 10865 goto err_free;
838e9690
YS
10866 }
10867
d30d42e0 10868 /* check insn_off */
09b28d76 10869 ret = -EINVAL;
838e9690 10870 if (i == 0) {
d30d42e0 10871 if (krecord[i].insn_off) {
838e9690 10872 verbose(env,
d30d42e0
MKL
10873 "nonzero insn_off %u for the first func info record",
10874 krecord[i].insn_off);
c454a46b 10875 goto err_free;
838e9690 10876 }
d30d42e0 10877 } else if (krecord[i].insn_off <= prev_offset) {
838e9690
YS
10878 verbose(env,
10879 "same or smaller insn offset (%u) than previous func info record (%u)",
d30d42e0 10880 krecord[i].insn_off, prev_offset);
c454a46b 10881 goto err_free;
838e9690
YS
10882 }
10883
d30d42e0 10884 if (env->subprog_info[i].start != krecord[i].insn_off) {
838e9690 10885 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
c454a46b 10886 goto err_free;
838e9690
YS
10887 }
10888
10889 /* check type_id */
ba64e7d8 10890 type = btf_type_by_id(btf, krecord[i].type_id);
51c39bb1 10891 if (!type || !btf_type_is_func(type)) {
838e9690 10892 verbose(env, "invalid type id %d in func info",
ba64e7d8 10893 krecord[i].type_id);
c454a46b 10894 goto err_free;
838e9690 10895 }
51c39bb1 10896 info_aux[i].linkage = BTF_INFO_VLEN(type->info);
09b28d76
AS
10897
10898 func_proto = btf_type_by_id(btf, type->type);
10899 if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
10900 /* btf_func_check() already verified it during BTF load */
10901 goto err_free;
10902 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
10903 scalar_return =
10904 btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type);
10905 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
10906 verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
10907 goto err_free;
10908 }
10909 if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
10910 verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
10911 goto err_free;
10912 }
10913
d30d42e0 10914 prev_offset = krecord[i].insn_off;
af2ac3e1 10915 bpfptr_add(&urecord, urec_size);
838e9690
YS
10916 }
10917
ba64e7d8
YS
10918 prog->aux->func_info = krecord;
10919 prog->aux->func_info_cnt = nfuncs;
8c1b6e69 10920 prog->aux->func_info_aux = info_aux;
838e9690
YS
10921 return 0;
10922
c454a46b 10923err_free:
ba64e7d8 10924 kvfree(krecord);
8c1b6e69 10925 kfree(info_aux);
838e9690
YS
10926 return ret;
10927}
10928
ba64e7d8
YS
10929static void adjust_btf_func(struct bpf_verifier_env *env)
10930{
8c1b6e69 10931 struct bpf_prog_aux *aux = env->prog->aux;
ba64e7d8
YS
10932 int i;
10933
8c1b6e69 10934 if (!aux->func_info)
ba64e7d8
YS
10935 return;
10936
10937 for (i = 0; i < env->subprog_cnt; i++)
8c1b6e69 10938 aux->func_info[i].insn_off = env->subprog_info[i].start;
ba64e7d8
YS
10939}
10940
1b773d00 10941#define MIN_BPF_LINEINFO_SIZE offsetofend(struct bpf_line_info, line_col)
c454a46b
MKL
10942#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
10943
10944static int check_btf_line(struct bpf_verifier_env *env,
10945 const union bpf_attr *attr,
af2ac3e1 10946 bpfptr_t uattr)
c454a46b
MKL
10947{
10948 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
10949 struct bpf_subprog_info *sub;
10950 struct bpf_line_info *linfo;
10951 struct bpf_prog *prog;
10952 const struct btf *btf;
af2ac3e1 10953 bpfptr_t ulinfo;
c454a46b
MKL
10954 int err;
10955
10956 nr_linfo = attr->line_info_cnt;
10957 if (!nr_linfo)
10958 return 0;
0e6491b5
BC
10959 if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info))
10960 return -EINVAL;
c454a46b
MKL
10961
10962 rec_size = attr->line_info_rec_size;
10963 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
10964 rec_size > MAX_LINEINFO_REC_SIZE ||
10965 rec_size & (sizeof(u32) - 1))
10966 return -EINVAL;
10967
10968 /* Need to zero it in case userspace passes in
10969 * a smaller bpf_line_info object.
10970 */
10971 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
10972 GFP_KERNEL | __GFP_NOWARN);
10973 if (!linfo)
10974 return -ENOMEM;
10975
10976 prog = env->prog;
10977 btf = prog->aux->btf;
10978
10979 s = 0;
10980 sub = env->subprog_info;
af2ac3e1 10981 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
c454a46b
MKL
10982 expected_size = sizeof(struct bpf_line_info);
10983 ncopy = min_t(u32, expected_size, rec_size);
10984 for (i = 0; i < nr_linfo; i++) {
10985 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
10986 if (err) {
10987 if (err == -E2BIG) {
10988 verbose(env, "nonzero trailing record in line_info");
af2ac3e1
AS
10989 if (copy_to_bpfptr_offset(uattr,
10990 offsetof(union bpf_attr, line_info_rec_size),
10991 &expected_size, sizeof(expected_size)))
c454a46b
MKL
10992 err = -EFAULT;
10993 }
10994 goto err_free;
10995 }
10996
af2ac3e1 10997 if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
c454a46b
MKL
10998 err = -EFAULT;
10999 goto err_free;
11000 }
11001
11002 /*
11003 * Check insn_off to ensure
11004 * 1) strictly increasing AND
11005 * 2) bounded by prog->len
11006 *
11007 * The linfo[0].insn_off == 0 check is logically handled by
11008 * the later "missing bpf_line_info for func..." case,
11009 * because the first line info record must belong to the
11010 * first subprog, and the first subprog always has
11011 * subprog_info[0].start == 0.
11012 */
11013 if ((i && linfo[i].insn_off <= prev_offset) ||
11014 linfo[i].insn_off >= prog->len) {
11015 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
11016 i, linfo[i].insn_off, prev_offset,
11017 prog->len);
11018 err = -EINVAL;
11019 goto err_free;
11020 }
11021
fdbaa0be
MKL
11022 if (!prog->insnsi[linfo[i].insn_off].code) {
11023 verbose(env,
11024 "Invalid insn code at line_info[%u].insn_off\n",
11025 i);
11026 err = -EINVAL;
11027 goto err_free;
11028 }
11029
23127b33
MKL
11030 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
11031 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
c454a46b
MKL
11032 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
11033 err = -EINVAL;
11034 goto err_free;
11035 }
11036
11037 if (s != env->subprog_cnt) {
11038 if (linfo[i].insn_off == sub[s].start) {
11039 sub[s].linfo_idx = i;
11040 s++;
11041 } else if (sub[s].start < linfo[i].insn_off) {
11042 verbose(env, "missing bpf_line_info for func#%u\n", s);
11043 err = -EINVAL;
11044 goto err_free;
11045 }
11046 }
11047
11048 prev_offset = linfo[i].insn_off;
af2ac3e1 11049 bpfptr_add(&ulinfo, rec_size);
c454a46b
MKL
11050 }
11051
11052 if (s != env->subprog_cnt) {
11053 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
11054 env->subprog_cnt - s, s);
11055 err = -EINVAL;
11056 goto err_free;
11057 }
11058
11059 prog->aux->linfo = linfo;
11060 prog->aux->nr_linfo = nr_linfo;
11061
11062 return 0;
11063
11064err_free:
11065 kvfree(linfo);
11066 return err;
11067}
11068
fbd94c7a
AS
11069#define MIN_CORE_RELO_SIZE sizeof(struct bpf_core_relo)
11070#define MAX_CORE_RELO_SIZE MAX_FUNCINFO_REC_SIZE
11071
11072static int check_core_relo(struct bpf_verifier_env *env,
11073 const union bpf_attr *attr,
11074 bpfptr_t uattr)
11075{
11076 u32 i, nr_core_relo, ncopy, expected_size, rec_size;
11077 struct bpf_core_relo core_relo = {};
11078 struct bpf_prog *prog = env->prog;
11079 const struct btf *btf = prog->aux->btf;
11080 struct bpf_core_ctx ctx = {
11081 .log = &env->log,
11082 .btf = btf,
11083 };
11084 bpfptr_t u_core_relo;
11085 int err;
11086
11087 nr_core_relo = attr->core_relo_cnt;
11088 if (!nr_core_relo)
11089 return 0;
11090 if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo))
11091 return -EINVAL;
11092
11093 rec_size = attr->core_relo_rec_size;
11094 if (rec_size < MIN_CORE_RELO_SIZE ||
11095 rec_size > MAX_CORE_RELO_SIZE ||
11096 rec_size % sizeof(u32))
11097 return -EINVAL;
11098
11099 u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel);
11100 expected_size = sizeof(struct bpf_core_relo);
11101 ncopy = min_t(u32, expected_size, rec_size);
11102
11103 /* Unlike func_info and line_info, copy and apply each CO-RE
11104 * relocation record one at a time.
11105 */
11106 for (i = 0; i < nr_core_relo; i++) {
11107 /* future proofing when sizeof(bpf_core_relo) changes */
11108 err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size);
11109 if (err) {
11110 if (err == -E2BIG) {
11111 verbose(env, "nonzero trailing record in core_relo");
11112 if (copy_to_bpfptr_offset(uattr,
11113 offsetof(union bpf_attr, core_relo_rec_size),
11114 &expected_size, sizeof(expected_size)))
11115 err = -EFAULT;
11116 }
11117 break;
11118 }
11119
11120 if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) {
11121 err = -EFAULT;
11122 break;
11123 }
11124
11125 if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) {
11126 verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n",
11127 i, core_relo.insn_off, prog->len);
11128 err = -EINVAL;
11129 break;
11130 }
11131
11132 err = bpf_core_apply(&ctx, &core_relo, i,
11133 &prog->insnsi[core_relo.insn_off / 8]);
11134 if (err)
11135 break;
11136 bpfptr_add(&u_core_relo, rec_size);
11137 }
11138 return err;
11139}
11140
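/* Editorial note: unlike func_info and line_info, which count
 * instructions, core_relo.insn_off is measured in bytes, hence the
 * "% 8" validity check and "/ 8" conversion above
 * (sizeof(struct bpf_insn) == 8). A relocation for the third
 * instruction therefore carries insn_off == 16.
 */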
c454a46b
MKL
11141static int check_btf_info(struct bpf_verifier_env *env,
11142 const union bpf_attr *attr,
af2ac3e1 11143 bpfptr_t uattr)
c454a46b
MKL
11144{
11145 struct btf *btf;
11146 int err;
11147
09b28d76
AS
11148 if (!attr->func_info_cnt && !attr->line_info_cnt) {
11149 if (check_abnormal_return(env))
11150 return -EINVAL;
c454a46b 11151 return 0;
09b28d76 11152 }
c454a46b
MKL
11153
11154 btf = btf_get_by_fd(attr->prog_btf_fd);
11155 if (IS_ERR(btf))
11156 return PTR_ERR(btf);
350a5c4d
AS
11157 if (btf_is_kernel(btf)) {
11158 btf_put(btf);
11159 return -EACCES;
11160 }
c454a46b
MKL
11161 env->prog->aux->btf = btf;
11162
11163 err = check_btf_func(env, attr, uattr);
11164 if (err)
11165 return err;
11166
11167 err = check_btf_line(env, attr, uattr);
11168 if (err)
11169 return err;
11170
fbd94c7a
AS
11171 err = check_core_relo(env, attr, uattr);
11172 if (err)
11173 return err;
11174
c454a46b 11175 return 0;
ba64e7d8
YS
11176}
11177
f1174f77
EC
11178/* check %cur's range satisfies %old's */
11179static bool range_within(struct bpf_reg_state *old,
11180 struct bpf_reg_state *cur)
11181{
b03c9f9f
EC
11182 return old->umin_value <= cur->umin_value &&
11183 old->umax_value >= cur->umax_value &&
11184 old->smin_value <= cur->smin_value &&
fd675184
DB
11185 old->smax_value >= cur->smax_value &&
11186 old->u32_min_value <= cur->u32_min_value &&
11187 old->u32_max_value >= cur->u32_max_value &&
11188 old->s32_min_value <= cur->s32_min_value &&
11189 old->s32_max_value >= cur->s32_max_value;
f1174f77
EC
11190}
11191
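/* Editorial worked example: rold [0, 100] and rcur [10, 20] satisfy
 * range_within(), since every value rcur may hold was already assumed
 * possible for rold; rcur [50, 200] does not, because an access proven
 * safe for values up to 100 has not been proven safe for 200.
 */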
f1174f77
EC
11192/* If in the old state two registers had the same id, then they need to have
11193 * the same id in the new state as well. But that id could be different from
11194 * the old state, so we need to track the mapping from old to new ids.
11195 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
11196 * regs with old id 5 must also have new id 9 for the new state to be safe. But
11197 * regs with a different old id could still have new id 9, we don't care about
11198 * that.
11199 * So we look through our idmap to see if this old id has been seen before. If
11200 * so, we require the new id to match; otherwise, we add the id pair to the map.
969bf05e 11201 */
c9e73e3d 11202static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
969bf05e 11203{
f1174f77 11204 unsigned int i;
969bf05e 11205
c9e73e3d 11206 for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
f1174f77
EC
11207 if (!idmap[i].old) {
11208 /* Reached an empty slot; haven't seen this id before */
11209 idmap[i].old = old_id;
11210 idmap[i].cur = cur_id;
11211 return true;
11212 }
11213 if (idmap[i].old == old_id)
11214 return idmap[i].cur == cur_id;
11215 }
11216 /* We ran out of idmap slots, which should be impossible */
11217 WARN_ON_ONCE(1);
11218 return false;
11219}
11220
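/* Editorial sketch of the idmap contract: the first pairing fixes the
 * mapping and later queries must agree with it.
 *
 *   check_ids(5, 9, idmap) -> true   (records 5 -> 9)
 *   check_ids(5, 9, idmap) -> true   (consistent with the mapping)
 *   check_ids(5, 7, idmap) -> false  (5 is already mapped to 9)
 *   check_ids(6, 9, idmap) -> true   (different old id; allowed)
 */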
9242b5f5
AS
11221static void clean_func_state(struct bpf_verifier_env *env,
11222 struct bpf_func_state *st)
11223{
11224 enum bpf_reg_liveness live;
11225 int i, j;
11226
11227 for (i = 0; i < BPF_REG_FP; i++) {
11228 live = st->regs[i].live;
11229 /* liveness must not touch this register anymore */
11230 st->regs[i].live |= REG_LIVE_DONE;
11231 if (!(live & REG_LIVE_READ))
11232 /* since the register is unused, clear its state
11233 * to make further comparison simpler
11234 */
f54c7898 11235 __mark_reg_not_init(env, &st->regs[i]);
9242b5f5
AS
11236 }
11237
11238 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
11239 live = st->stack[i].spilled_ptr.live;
11240 /* liveness must not touch this stack slot anymore */
11241 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
11242 if (!(live & REG_LIVE_READ)) {
f54c7898 11243 __mark_reg_not_init(env, &st->stack[i].spilled_ptr);
9242b5f5
AS
11244 for (j = 0; j < BPF_REG_SIZE; j++)
11245 st->stack[i].slot_type[j] = STACK_INVALID;
11246 }
11247 }
11248}
11249
11250static void clean_verifier_state(struct bpf_verifier_env *env,
11251 struct bpf_verifier_state *st)
11252{
11253 int i;
11254
11255 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
11256 /* all regs in this state in all frames were already marked */
11257 return;
11258
11259 for (i = 0; i <= st->curframe; i++)
11260 clean_func_state(env, st->frame[i]);
11261}
11262
11263/* the parentage chains form a tree.
11264 * the verifier states are added to state lists at given insn and
11265 * pushed into state stack for future exploration.
11266 * when the verifier reaches bpf_exit insn some of the verifier states
11267 * stored in the state lists have their final liveness state already,
11268 * but a lot of states will get revised from liveness point of view when
11269 * the verifier explores other branches.
11270 * Example:
11271 * 1: r0 = 1
11272 * 2: if r1 == 100 goto pc+1
11273 * 3: r0 = 2
11274 * 4: exit
11275 * when the verifier reaches exit insn the register r0 in the state list of
11276 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
11277 * of insn 2 and goes exploring further. At the insn 4 it will walk the
11278 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
11279 *
11280 * Since the verifier pushes the branch states as it sees them while exploring
11281 * the program the condition of walking the branch instruction for the second
11282 * time means that all states below this branch were already explored and
8fb33b60 11283 * their final liveness marks are already propagated.
9242b5f5
AS
11284 * Hence when the verifier completes the search of the state list in is_state_visited()
11285 * we can call this clean_live_states() function to mark all liveness states
11286 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
11287 * will not be used.
11288 * This function also clears registers and stack slots that were not
11289 * read (!REG_LIVE_READ) to simplify state merging.
11290 *
11291 * An important note: walking the same branch instruction in the callee
11292 * doesn't mean that the states are DONE. The verifier has to compare
11293 * the callsites as well.
11294 */
11295static void clean_live_states(struct bpf_verifier_env *env, int insn,
11296 struct bpf_verifier_state *cur)
11297{
11298 struct bpf_verifier_state_list *sl;
11299 int i;
11300
5d839021 11301 sl = *explored_state(env, insn);
a8f500af 11302 while (sl) {
2589726d
AS
11303 if (sl->state.branches)
11304 goto next;
dc2a4ebc
AS
11305 if (sl->state.insn_idx != insn ||
11306 sl->state.curframe != cur->curframe)
9242b5f5
AS
11307 goto next;
11308 for (i = 0; i <= cur->curframe; i++)
11309 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
11310 goto next;
11311 clean_verifier_state(env, &sl->state);
11312next:
11313 sl = sl->next;
11314 }
11315}
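/* A minimal sketch (hypothetical helper, not part of the verifier) of
 * the invariant that the REG_LIVE_DONE marking above establishes: once a
 * state has been cleaned, frame[0]->regs[0] carries the DONE mark for
 * the whole state, and its parentage pointers must never be walked again.
 */
static bool state_is_done_sketch(const struct bpf_verifier_state *st)
{
	return st->frame[0]->regs[0].live & REG_LIVE_DONE;
}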
11316
f1174f77 11317/* Returns true if (rold safe implies rcur safe) */
e042aa53
DB
11318static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
11319 struct bpf_reg_state *rcur, struct bpf_id_pair *idmap)
f1174f77 11320{
f4d7e40a
AS
11321 bool equal;
11322
dc503a8a
EC
11323 if (!(rold->live & REG_LIVE_READ))
11324 /* explored state didn't use this */
11325 return true;
11326
679c782d 11327 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
f4d7e40a
AS
11328
11329 if (rold->type == PTR_TO_STACK)
11330 /* two stack pointers are equal only if they're pointing to
11331 * the same stack frame, since fp-8 in foo != fp-8 in bar
11332 */
11333 return equal && rold->frameno == rcur->frameno;
11334
11335 if (equal)
969bf05e
AS
11336 return true;
11337
f1174f77
EC
11338 if (rold->type == NOT_INIT)
11339 /* explored state can't have used this */
969bf05e 11340 return true;
f1174f77
EC
11341 if (rcur->type == NOT_INIT)
11342 return false;
c25b2ae1 11343 switch (base_type(rold->type)) {
f1174f77 11344 case SCALAR_VALUE:
e042aa53
DB
11345 if (env->explore_alu_limits)
11346 return false;
f1174f77 11347 if (rcur->type == SCALAR_VALUE) {
b5dc0163
AS
11348 if (!rold->precise && !rcur->precise)
11349 return true;
f1174f77
EC
11350 /* new val must satisfy old val knowledge */
11351 return range_within(rold, rcur) &&
11352 tnum_in(rold->var_off, rcur->var_off);
11353 } else {
179d1c56
JH
11354 /* We're trying to use a pointer in place of a scalar.
11355 * Even if the scalar was unbounded, this could lead to
11356 * pointer leaks because scalars are allowed to leak
11357 * while pointers are not. We could make this safe in
11358 * special cases if root is calling us, but it's
11359 * probably not worth the hassle.
f1174f77 11360 */
179d1c56 11361 return false;
f1174f77 11362 }
69c087ba 11363 case PTR_TO_MAP_KEY:
f1174f77 11364 case PTR_TO_MAP_VALUE:
c25b2ae1
HL
11365 /* a PTR_TO_MAP_VALUE could be safe to use as a
11366 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
11367 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
11368 * checked, doing so could have affected others with the same
11369 * id, and we can't check for that because we lost the id when
11370 * we converted to a PTR_TO_MAP_VALUE.
11371 */
11372 if (type_may_be_null(rold->type)) {
11373 if (!type_may_be_null(rcur->type))
11374 return false;
11375 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
11376 return false;
11377 /* Check our ids match any regs they're supposed to */
11378 return check_ids(rold->id, rcur->id, idmap);
11379 }
11380
1b688a19
EC
11381 /* If the new min/max/var_off satisfy the old ones and
11382 * everything else matches, we are OK.
d83525ca
AS
11383 * 'id' is not compared, since it's only used for maps with
11384 * bpf_spin_lock inside map element and in such cases if
11385 * the rest of the prog is valid for one map element then
11386 * it's valid for all map elements regardless of the key
11387 * used in bpf_map_lookup()
1b688a19
EC
11388 */
11389 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
11390 range_within(rold, rcur) &&
11391 tnum_in(rold->var_off, rcur->var_off);
de8f3a83 11392 case PTR_TO_PACKET_META:
f1174f77 11393 case PTR_TO_PACKET:
de8f3a83 11394 if (rcur->type != rold->type)
f1174f77
EC
11395 return false;
11396 /* We must have at least as much range as the old ptr
11397 * did, so that any accesses which were safe before are
11398 * still safe. This is true even if old range < old off,
11399 * since someone could have accessed through (ptr - k), or
11400 * even done ptr -= k in a register, to get a safe access.
11401 */
11402 if (rold->range > rcur->range)
11403 return false;
11404 /* If the offsets don't match, we can't trust our alignment;
11405 * nor can we be sure that we won't fall out of range.
11406 */
11407 if (rold->off != rcur->off)
11408 return false;
11409 /* id relations must be preserved */
11410 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
11411 return false;
11412 /* new val must satisfy old val knowledge */
11413 return range_within(rold, rcur) &&
11414 tnum_in(rold->var_off, rcur->var_off);
11415 case PTR_TO_CTX:
11416 case CONST_PTR_TO_MAP:
f1174f77 11417 case PTR_TO_PACKET_END:
d58e468b 11418 case PTR_TO_FLOW_KEYS:
c64b7983 11419 case PTR_TO_SOCKET:
46f8bc92 11420 case PTR_TO_SOCK_COMMON:
655a51e5 11421 case PTR_TO_TCP_SOCK:
fada7fdc 11422 case PTR_TO_XDP_SOCK:
f1174f77
EC
11423 /* Only valid matches are exact, which memcmp() above
11424 * would have accepted
11425 */
11426 default:
11427 /* Don't know what's going on, just say it's not safe */
11428 return false;
11429 }
969bf05e 11430
f1174f77
EC
11431 /* Shouldn't get here; if we do, say it's not safe */
11432 WARN_ON_ONCE(1);
969bf05e
AS
11433 return false;
11434}
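/* Minimal sketches (hypothetical helpers, not the kernel's own) of the
 * two checks regsafe() relies on for SCALAR_VALUE above: range_within()
 * is assumed to verify that every numeric bound of the old (explored)
 * register contains the new one (the real helper also covers the 32-bit
 * bounds), and tnum_in() that every bit the old tnum knew is still known
 * and equal in the new one.
 */
static bool range_within_sketch(const struct bpf_reg_state *old,
				const struct bpf_reg_state *cur)
{
	return old->umin_value <= cur->umin_value &&
	       old->umax_value >= cur->umax_value &&
	       old->smin_value <= cur->smin_value &&
	       old->smax_value >= cur->smax_value;
}

static bool tnum_in_sketch(struct tnum old, struct tnum cur)
{
	/* cur must not have unknown bits that old considered known */
	if (cur.mask & ~old.mask)
		return false;
	/* on the bits old knew, the two values must agree */
	cur.value &= ~old.mask;
	return old.value == cur.value;
}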
11435
e042aa53
DB
11436static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
11437 struct bpf_func_state *cur, struct bpf_id_pair *idmap)
638f5b90
AS
11438{
11439 int i, spi;
11440
638f5b90
AS
11441 /* walk slots of the explored stack and ignore any additional
11442 * slots in the current stack, since explored(safe) state
11443 * didn't use them
11444 */
11445 for (i = 0; i < old->allocated_stack; i++) {
11446 spi = i / BPF_REG_SIZE;
11447
b233920c
AS
11448 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
11449 i += BPF_REG_SIZE - 1;
cc2b14d5 11450 /* explored state didn't use this */
fd05e57b 11451 continue;
b233920c 11452 }
cc2b14d5 11453
638f5b90
AS
11454 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
11455 continue;
19e2dbb7
AS
11456
11457 /* explored stack has more populated slots than current stack
11458 * and these slots were used
11459 */
11460 if (i >= cur->allocated_stack)
11461 return false;
11462
cc2b14d5
AS
11463 /* if old state was safe with misc data in the stack
11464 * it will be safe with zero-initialized stack.
11465 * The opposite is not true
11466 */
11467 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
11468 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
11469 continue;
638f5b90
AS
11470 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
11471 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
11472 /* Ex: old explored (safe) state has STACK_SPILL in
b8c1a309 11473 * this stack slot, but current has STACK_MISC ->
638f5b90
AS
11474 * these verifier states are not equivalent,
11475 * return false to continue verification of this path
11476 */
11477 return false;
27113c59 11478 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1)
638f5b90 11479 continue;
27113c59 11480 if (!is_spilled_reg(&old->stack[spi]))
638f5b90 11481 continue;
e042aa53
DB
11482 if (!regsafe(env, &old->stack[spi].spilled_ptr,
11483 &cur->stack[spi].spilled_ptr, idmap))
638f5b90
AS
11484 /* when explored and current stack slot are both storing
11485 * spilled registers, check that stored pointers types
11486 * are the same as well.
11487 * Ex: explored safe path could have stored
11488 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
11489 * but current path has stored:
11490 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
11491 * such verifier states are not equivalent.
11492 * return false to continue verification of this path
11493 */
11494 return false;
11495 }
11496 return true;
11497}
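/* A hypothetical predicate (sketch only) distilling the slot-type rule
 * used above: an old STACK_MISC slot may be matched by a new STACK_ZERO
 * slot, since a zeroed slot is strictly more constrained than arbitrary
 * bytes; otherwise the slot types must be identical.
 */
static bool slot_type_covers_sketch(u8 old_type, u8 cur_type)
{
	if (old_type == STACK_MISC && cur_type == STACK_ZERO)
		return true;
	return old_type == cur_type;
}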
11498
fd978bf7
JS
11499static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
11500{
11501 if (old->acquired_refs != cur->acquired_refs)
11502 return false;
11503 return !memcmp(old->refs, cur->refs,
11504 sizeof(*old->refs) * old->acquired_refs);
11505}
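/* Sketch (helper name hypothetical) of what the memcmp() in refsafe()
 * effectively compares per entry: each acquired reference is an
 * (id, insn_idx) pair, and the old and current states must hold the
 * same sequence of pairs for pruning to be safe.
 */
static bool ref_entry_equal_sketch(const struct bpf_reference_state *a,
				   const struct bpf_reference_state *b)
{
	return a->id == b->id && a->insn_idx == b->insn_idx;
}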
11506
f1bca824
AS
11507/* compare two verifier states
11508 *
11509 * all states stored in state_list are known to be valid, since
11510 * verifier reached 'bpf_exit' instruction through them
11511 *
11512 * this function is called when the verifier explores different branches of
11513 * execution popped from the state stack. If it sees an old state that has
11514 * more strict register state and more strict stack state, then this execution
11515 * branch doesn't need to be explored further, since verifier already
11516 * concluded that more strict state leads to valid finish.
11517 *
11518 * Therefore two states are equivalent if register state is more conservative
11519 * and explored stack state is more conservative than the current one.
11520 * Example:
11521 * explored current
11522 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
11523 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
11524 *
11525 * In other words, if the current stack state (the one being explored) has more
11526 * valid slots than the old one that already passed validation, it means
11527 * the verifier can stop exploring and conclude that the current state is valid too
11528 *
11529 * Similarly with registers. If explored state has register type as invalid
11530 * whereas register type in current state is meaningful, it means that
11531 * the current state will reach 'bpf_exit' instruction safely
11532 */
c9e73e3d 11533static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
f4d7e40a 11534 struct bpf_func_state *cur)
f1bca824
AS
11535{
11536 int i;
11537
c9e73e3d
LB
11538 memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
11539 for (i = 0; i < MAX_BPF_REG; i++)
e042aa53
DB
11540 if (!regsafe(env, &old->regs[i], &cur->regs[i],
11541 env->idmap_scratch))
c9e73e3d 11542 return false;
f1bca824 11543
e042aa53 11544 if (!stacksafe(env, old, cur, env->idmap_scratch))
c9e73e3d 11545 return false;
fd978bf7
JS
11546
11547 if (!refsafe(old, cur))
c9e73e3d
LB
11548 return false;
11549
11550 return true;
f1bca824
AS
11551}
11552
f4d7e40a
AS
11553static bool states_equal(struct bpf_verifier_env *env,
11554 struct bpf_verifier_state *old,
11555 struct bpf_verifier_state *cur)
11556{
11557 int i;
11558
11559 if (old->curframe != cur->curframe)
11560 return false;
11561
979d63d5
DB
11562 /* Verification state from speculative execution simulation
11563 * must never prune a non-speculative execution one.
11564 */
11565 if (old->speculative && !cur->speculative)
11566 return false;
11567
d83525ca
AS
11568 if (old->active_spin_lock != cur->active_spin_lock)
11569 return false;
11570
f4d7e40a
AS
11571 /* for states to be equal callsites have to be the same
11572 * and all frame states need to be equivalent
11573 */
11574 for (i = 0; i <= old->curframe; i++) {
11575 if (old->frame[i]->callsite != cur->frame[i]->callsite)
11576 return false;
c9e73e3d 11577 if (!func_states_equal(env, old->frame[i], cur->frame[i]))
f4d7e40a
AS
11578 return false;
11579 }
11580 return true;
11581}
11582
5327ed3d
JW
11583/* Return 0 if no propagation happened. Return negative error code if error
11584 * happened. Otherwise, return the propagated bit.
11585 */
55e7f3b5
JW
11586static int propagate_liveness_reg(struct bpf_verifier_env *env,
11587 struct bpf_reg_state *reg,
11588 struct bpf_reg_state *parent_reg)
11589{
5327ed3d
JW
11590 u8 parent_flag = parent_reg->live & REG_LIVE_READ;
11591 u8 flag = reg->live & REG_LIVE_READ;
55e7f3b5
JW
11592 int err;
11593
5327ed3d
JW
11594 /* When we get here, the read flags of PARENT_REG or REG could be any of
11595 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
11596 * for propagation if PARENT_REG already has the strongest REG_LIVE_READ64.
11597 */
11598 if (parent_flag == REG_LIVE_READ64 ||
11599 /* Or if there is no read flag from REG. */
11600 !flag ||
11601 /* Or if the read flag from REG is the same as PARENT_REG. */
11602 parent_flag == flag)
55e7f3b5
JW
11603 return 0;
11604
5327ed3d 11605 err = mark_reg_read(env, reg, parent_reg, flag);
55e7f3b5
JW
11606 if (err)
11607 return err;
11608
5327ed3d 11609 return flag;
55e7f3b5
JW
11610}
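/* Hypothetical helper (sketch only) making the flag ordering used above
 * explicit: REG_LIVE_NONE < REG_LIVE_READ32 < REG_LIVE_READ64.
 * propagate_liveness_reg() pushes a mark to the parent only when the
 * parent's rank is lower than the child's.
 */
static int read_flag_rank_sketch(u8 live)
{
	if (live & REG_LIVE_READ64)
		return 2;
	if (live & REG_LIVE_READ32)
		return 1;
	return 0;
}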
11611
8e9cd9ce 11612/* A write screens off any subsequent reads; but write marks come from the
f4d7e40a
AS
11613 * straight-line code between a state and its parent. When we arrive at an
11614 * equivalent state (jump target or such) we didn't arrive by the straight-line
11615 * code, so read marks in the state must propagate to the parent regardless
11616 * of the state's write marks. That's what 'parent == state->parent' comparison
679c782d 11617 * in mark_reg_read() is for.
8e9cd9ce 11618 */
f4d7e40a
AS
11619static int propagate_liveness(struct bpf_verifier_env *env,
11620 const struct bpf_verifier_state *vstate,
11621 struct bpf_verifier_state *vparent)
dc503a8a 11622{
3f8cafa4 11623 struct bpf_reg_state *state_reg, *parent_reg;
f4d7e40a 11624 struct bpf_func_state *state, *parent;
3f8cafa4 11625 int i, frame, err = 0;
dc503a8a 11626
f4d7e40a
AS
11627 if (vparent->curframe != vstate->curframe) {
11628 WARN(1, "propagate_live: parent frame %d current frame %d\n",
11629 vparent->curframe, vstate->curframe);
11630 return -EFAULT;
11631 }
dc503a8a
EC
11632 /* Propagate read liveness of registers... */
11633 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
83d16312 11634 for (frame = 0; frame <= vstate->curframe; frame++) {
3f8cafa4
JW
11635 parent = vparent->frame[frame];
11636 state = vstate->frame[frame];
11637 parent_reg = parent->regs;
11638 state_reg = state->regs;
83d16312
JK
11639 /* We don't need to worry about FP liveness, it's read-only */
11640 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
55e7f3b5
JW
11641 err = propagate_liveness_reg(env, &state_reg[i],
11642 &parent_reg[i]);
5327ed3d 11643 if (err < 0)
3f8cafa4 11644 return err;
5327ed3d
JW
11645 if (err == REG_LIVE_READ64)
11646 mark_insn_zext(env, &parent_reg[i]);
dc503a8a 11647 }
f4d7e40a 11648
1b04aee7 11649 /* Propagate stack slots. */
f4d7e40a
AS
11650 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
11651 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
3f8cafa4
JW
11652 parent_reg = &parent->stack[i].spilled_ptr;
11653 state_reg = &state->stack[i].spilled_ptr;
55e7f3b5
JW
11654 err = propagate_liveness_reg(env, state_reg,
11655 parent_reg);
5327ed3d 11656 if (err < 0)
3f8cafa4 11657 return err;
dc503a8a
EC
11658 }
11659 }
5327ed3d 11660 return 0;
dc503a8a
EC
11661}
11662
a3ce685d
AS
11663/* find precise scalars in the previous equivalent state and
11664 * propagate them into the current state
11665 */
11666static int propagate_precision(struct bpf_verifier_env *env,
11667 const struct bpf_verifier_state *old)
11668{
11669 struct bpf_reg_state *state_reg;
11670 struct bpf_func_state *state;
11671 int i, err = 0;
11672
11673 state = old->frame[old->curframe];
11674 state_reg = state->regs;
11675 for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
11676 if (state_reg->type != SCALAR_VALUE ||
11677 !state_reg->precise)
11678 continue;
11679 if (env->log.level & BPF_LOG_LEVEL2)
11680 verbose(env, "propagating r%d\n", i);
11681 err = mark_chain_precision(env, i);
11682 if (err < 0)
11683 return err;
11684 }
11685
11686 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
27113c59 11687 if (!is_spilled_reg(&state->stack[i]))
a3ce685d
AS
11688 continue;
11689 state_reg = &state->stack[i].spilled_ptr;
11690 if (state_reg->type != SCALAR_VALUE ||
11691 !state_reg->precise)
11692 continue;
11693 if (env->log.level & BPF_LOG_LEVEL2)
11694 verbose(env, "propagating fp%d\n",
11695 (-i - 1) * BPF_REG_SIZE);
11696 err = mark_chain_precision_stack(env, i);
11697 if (err < 0)
11698 return err;
11699 }
11700 return 0;
11701}
11702
2589726d
AS
11703static bool states_maybe_looping(struct bpf_verifier_state *old,
11704 struct bpf_verifier_state *cur)
11705{
11706 struct bpf_func_state *fold, *fcur;
11707 int i, fr = cur->curframe;
11708
11709 if (old->curframe != fr)
11710 return false;
11711
11712 fold = old->frame[fr];
11713 fcur = cur->frame[fr];
11714 for (i = 0; i < MAX_BPF_REG; i++)
11715 if (memcmp(&fold->regs[i], &fcur->regs[i],
11716 offsetof(struct bpf_reg_state, parent)))
11717 return false;
11718 return true;
11719}
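/* Illustrative BPF asm (hand-written) of the pattern this catches: on a
 * second visit of insn 1 all registers compare equal up to 'parent', so
 * states_maybe_looping() returns true and is_state_visited() below runs
 * the full states_equal() check to report the infinite loop.
 *
 *   0: r0 = 0
 *   1: r0 += 0
 *   2: goto pc-2
 */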
11720
11721
58e2af8b 11722static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
f1bca824 11723{
58e2af8b 11724 struct bpf_verifier_state_list *new_sl;
9f4686c4 11725 struct bpf_verifier_state_list *sl, **pprev;
679c782d 11726 struct bpf_verifier_state *cur = env->cur_state, *new;
ceefbc96 11727 int i, j, err, states_cnt = 0;
10d274e8 11728 bool add_new_state = env->test_state_freq ? true : false;
f1bca824 11729
b5dc0163 11730 cur->last_insn_idx = env->prev_insn_idx;
a8f500af 11731 if (!env->insn_aux_data[insn_idx].prune_point)
f1bca824
AS
11732 /* this 'insn_idx' instruction wasn't marked, so we will not
11733 * be doing state search here
11734 */
11735 return 0;
11736
2589726d
AS
11737 /* bpf progs typically have a pruning point every 4 instructions
11738 * http://vger.kernel.org/bpfconf2019.html#session-1
11739 * Do not add a new state for future pruning if the verifier hasn't seen
11740 * at least 2 jumps and at least 8 instructions.
11741 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
11742 * In tests that amounts to up to a 50% reduction in total verifier
11743 * memory consumption and a 20% verifier time speedup.
11744 */
11745 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
11746 env->insn_processed - env->prev_insn_processed >= 8)
11747 add_new_state = true;
11748
a8f500af
AS
11749 pprev = explored_state(env, insn_idx);
11750 sl = *pprev;
11751
9242b5f5
AS
11752 clean_live_states(env, insn_idx, cur);
11753
a8f500af 11754 while (sl) {
dc2a4ebc
AS
11755 states_cnt++;
11756 if (sl->state.insn_idx != insn_idx)
11757 goto next;
bfc6bb74 11758
2589726d 11759 if (sl->state.branches) {
bfc6bb74
AS
11760 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe];
11761
11762 if (frame->in_async_callback_fn &&
11763 frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) {
11764 /* Different async_entry_cnt means that the verifier is
11765 * processing another entry into an async callback.
11766 * Seeing the same state is not an indication of an infinite
11767 * loop or infinite recursion.
11768 * But finding the same state doesn't mean that it's safe
11769 * to stop processing the current state. The previous state
11770 * hasn't yet reached bpf_exit, since state.branches > 0.
11771 * Checking in_async_callback_fn alone is not enough either,
11772 * since the verifier still needs to catch infinite loops
11773 * inside async callbacks.
11774 */
11775 } else if (states_maybe_looping(&sl->state, cur) &&
11776 states_equal(env, &sl->state, cur)) {
2589726d
AS
11777 verbose_linfo(env, insn_idx, "; ");
11778 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
11779 return -EINVAL;
11780 }
11781 /* if the verifier is processing a loop, avoid adding new state
11782 * too often, since different loop iterations have distinct
11783 * states and may not help future pruning.
11784 * This threshold shouldn't be too low, to make sure that
11785 * a loop with a large bound is rejected quickly.
11786 * The most abusive loop will be:
11787 * r1 += 1
11788 * if r1 < 1000000 goto pc-2
11789 * 1M insn_processed limit / 100 == 10k peak states.
11790 * This threshold shouldn't be too high either, since states
11791 * at the end of the loop are likely to be useful in pruning.
11792 */
11793 if (env->jmps_processed - env->prev_jmps_processed < 20 &&
11794 env->insn_processed - env->prev_insn_processed < 100)
11795 add_new_state = false;
11796 goto miss;
11797 }
638f5b90 11798 if (states_equal(env, &sl->state, cur)) {
9f4686c4 11799 sl->hit_cnt++;
f1bca824 11800 /* reached equivalent register/stack state,
dc503a8a
EC
11801 * prune the search.
11802 * Registers read by the continuation are read by us.
8e9cd9ce
EC
11803 * If we have any write marks in env->cur_state, they
11804 * will prevent corresponding reads in the continuation
11805 * from reaching our parent (an explored_state). Our
11806 * own state will get the read marks recorded, but
11807 * they'll be immediately forgotten as we're pruning
11808 * this state and will pop a new one.
f1bca824 11809 */
f4d7e40a 11810 err = propagate_liveness(env, &sl->state, cur);
a3ce685d
AS
11811
11812 /* if previous state reached the exit with precision and
11813 * current state is equivalent to it (except precision marks)
11814 * the precision needs to be propagated back in
11815 * the current state.
11816 */
11817 err = err ? : push_jmp_history(env, cur);
11818 err = err ? : propagate_precision(env, &sl->state);
f4d7e40a
AS
11819 if (err)
11820 return err;
f1bca824 11821 return 1;
dc503a8a 11822 }
2589726d
AS
11823miss:
11824 /* when a new state is not going to be added, do not increase the miss count.
11825 * Otherwise several loop iterations will remove the state
11826 * recorded earlier. The goal of these heuristics is to have
11827 * states from some iterations of the loop (some in the beginning
11828 * and some at the end) to help pruning.
11829 */
11830 if (add_new_state)
11831 sl->miss_cnt++;
9f4686c4
AS
11832 /* heuristic to determine whether this state is beneficial
11833 * to keep checking from a state-equivalence point of view.
11834 * Higher numbers increase max_states_per_insn and verification time,
11835 * but do not meaningfully decrease insn_processed.
11836 */
11837 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
11838 /* the state is unlikely to be useful. Remove it to
11839 * speed up verification
11840 */
11841 *pprev = sl->next;
11842 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
2589726d
AS
11843 u32 br = sl->state.branches;
11844
11845 WARN_ONCE(br,
11846 "BUG live_done but branches_to_explore %d\n",
11847 br);
9f4686c4
AS
11848 free_verifier_state(&sl->state, false);
11849 kfree(sl);
11850 env->peak_states--;
11851 } else {
11852 /* cannot free this state, since parentage chain may
11853 * walk it later. Add it for free_list instead to
11854 * be freed at the end of verification
11855 */
11856 sl->next = env->free_list;
11857 env->free_list = sl;
11858 }
11859 sl = *pprev;
11860 continue;
11861 }
dc2a4ebc 11862next:
9f4686c4
AS
11863 pprev = &sl->next;
11864 sl = *pprev;
f1bca824
AS
11865 }
11866
06ee7115
AS
11867 if (env->max_states_per_insn < states_cnt)
11868 env->max_states_per_insn = states_cnt;
11869
2c78ee89 11870 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
b5dc0163 11871 return push_jmp_history(env, cur);
ceefbc96 11872
2589726d 11873 if (!add_new_state)
b5dc0163 11874 return push_jmp_history(env, cur);
ceefbc96 11875
2589726d
AS
11876 /* There were no equivalent states, remember the current one.
11877 * Technically the current state is not proven to be safe yet,
f4d7e40a 11878 * but it will either reach the outermost bpf_exit (which means it's safe)
2589726d 11879 * or it will be rejected. When there are no loops the verifier won't be
f4d7e40a 11880 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
2589726d
AS
11881 * again on the way to bpf_exit.
11882 * When looping the sl->state.branches will be > 0 and this state
11883 * will not be considered for equivalence until branches == 0.
f1bca824 11884 */
638f5b90 11885 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
f1bca824
AS
11886 if (!new_sl)
11887 return -ENOMEM;
06ee7115
AS
11888 env->total_states++;
11889 env->peak_states++;
2589726d
AS
11890 env->prev_jmps_processed = env->jmps_processed;
11891 env->prev_insn_processed = env->insn_processed;
f1bca824
AS
11892
11893 /* add new state to the head of linked list */
679c782d
EC
11894 new = &new_sl->state;
11895 err = copy_verifier_state(new, cur);
1969db47 11896 if (err) {
679c782d 11897 free_verifier_state(new, false);
1969db47
AS
11898 kfree(new_sl);
11899 return err;
11900 }
dc2a4ebc 11901 new->insn_idx = insn_idx;
2589726d
AS
11902 WARN_ONCE(new->branches != 1,
11903 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
b5dc0163 11904
2589726d 11905 cur->parent = new;
b5dc0163
AS
11906 cur->first_insn_idx = insn_idx;
11907 clear_jmp_history(cur);
5d839021
AS
11908 new_sl->next = *explored_state(env, insn_idx);
11909 *explored_state(env, insn_idx) = new_sl;
7640ead9
JK
11910 /* connect new state to parentage chain. Current frame needs all
11911 * registers connected. Only r6 - r9 of the callers are alive (pushed
11912 * to the stack implicitly by JITs) so in callers' frames connect just
11913 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
11914 * the state of the call instruction (with WRITTEN set), and r0 comes
11915 * from callee with its full parentage chain, anyway.
11916 */
8e9cd9ce
EC
11917 /* clear write marks in current state: the writes we did are not writes
11918 * our child did, so they don't screen off its reads from us.
11919 * (There are no read marks in current state, because reads always mark
11920 * their parent and current state never has children yet. Only
11921 * explored_states can get read marks.)
11922 */
eea1c227
AS
11923 for (j = 0; j <= cur->curframe; j++) {
11924 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
11925 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
11926 for (i = 0; i < BPF_REG_FP; i++)
11927 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
11928 }
f4d7e40a
AS
11929
11930 /* all stack frames are accessible from callee, clear them all */
11931 for (j = 0; j <= cur->curframe; j++) {
11932 struct bpf_func_state *frame = cur->frame[j];
679c782d 11933 struct bpf_func_state *newframe = new->frame[j];
f4d7e40a 11934
679c782d 11935 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
cc2b14d5 11936 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
679c782d
EC
11937 frame->stack[i].spilled_ptr.parent =
11938 &newframe->stack[i].spilled_ptr;
11939 }
f4d7e40a 11940 }
f1bca824
AS
11941 return 0;
11942}
11943
c64b7983
JS
11944/* Return true if it's OK to have the same insn return a different type. */
11945static bool reg_type_mismatch_ok(enum bpf_reg_type type)
11946{
c25b2ae1 11947 switch (base_type(type)) {
c64b7983
JS
11948 case PTR_TO_CTX:
11949 case PTR_TO_SOCKET:
46f8bc92 11950 case PTR_TO_SOCK_COMMON:
655a51e5 11951 case PTR_TO_TCP_SOCK:
fada7fdc 11952 case PTR_TO_XDP_SOCK:
2a02759e 11953 case PTR_TO_BTF_ID:
c64b7983
JS
11954 return false;
11955 default:
11956 return true;
11957 }
11958}
11959
11960/* If an instruction was previously used with particular pointer types, then we
11961 * need to be careful to avoid cases such as the below, where it may be ok
11962 * for one branch accessing the pointer, but not ok for the other branch:
11963 *
11964 * R1 = sock_ptr
11965 * goto X;
11966 * ...
11967 * R1 = some_other_valid_ptr;
11968 * goto X;
11969 * ...
11970 * R2 = *(u32 *)(R1 + 0);
11971 */
11972static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
11973{
11974 return src != prev && (!reg_type_mismatch_ok(src) ||
11975 !reg_type_mismatch_ok(prev));
11976}
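/* The example from the comment above, spelled out with the kernel's
 * insn macros (hand-assembled; registers and offsets are illustrative):
 *
 *	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),		// R1 = sock_ptr
 *	BPF_JMP_A(2),					// goto X
 *	BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),		// R1 = other ptr
 *	BPF_JMP_A(0),					// goto X
 *	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),	// X: R2 = *(u32 *)(R1 + 0)
 *
 * The load at X is reached with two different pointer types, and
 * reg_type_mismatch() rejects the program when neither type is
 * mismatch-ok.
 */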
11977
58e2af8b 11978static int do_check(struct bpf_verifier_env *env)
17a52670 11979{
6f8a57cc 11980 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
51c39bb1 11981 struct bpf_verifier_state *state = env->cur_state;
17a52670 11982 struct bpf_insn *insns = env->prog->insnsi;
638f5b90 11983 struct bpf_reg_state *regs;
06ee7115 11984 int insn_cnt = env->prog->len;
17a52670 11985 bool do_print_state = false;
b5dc0163 11986 int prev_insn_idx = -1;
17a52670 11987
17a52670
AS
11988 for (;;) {
11989 struct bpf_insn *insn;
11990 u8 class;
11991 int err;
11992
b5dc0163 11993 env->prev_insn_idx = prev_insn_idx;
c08435ec 11994 if (env->insn_idx >= insn_cnt) {
61bd5218 11995 verbose(env, "invalid insn idx %d insn_cnt %d\n",
c08435ec 11996 env->insn_idx, insn_cnt);
17a52670
AS
11997 return -EFAULT;
11998 }
11999
c08435ec 12000 insn = &insns[env->insn_idx];
17a52670
AS
12001 class = BPF_CLASS(insn->code);
12002
06ee7115 12003 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
61bd5218
JK
12004 verbose(env,
12005 "BPF program is too large. Processed %d insn\n",
06ee7115 12006 env->insn_processed);
17a52670
AS
12007 return -E2BIG;
12008 }
12009
c08435ec 12010 err = is_state_visited(env, env->insn_idx);
f1bca824
AS
12011 if (err < 0)
12012 return err;
12013 if (err == 1) {
12014 /* found equivalent state, can prune the search */
06ee7115 12015 if (env->log.level & BPF_LOG_LEVEL) {
f1bca824 12016 if (do_print_state)
979d63d5
DB
12017 verbose(env, "\nfrom %d to %d%s: safe\n",
12018 env->prev_insn_idx, env->insn_idx,
12019 env->cur_state->speculative ?
12020 " (speculative execution)" : "");
f1bca824 12021 else
c08435ec 12022 verbose(env, "%d: safe\n", env->insn_idx);
f1bca824
AS
12023 }
12024 goto process_bpf_exit;
12025 }
12026
c3494801
AS
12027 if (signal_pending(current))
12028 return -EAGAIN;
12029
3c2ce60b
DB
12030 if (need_resched())
12031 cond_resched();
12032
2e576648
CL
12033 if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) {
12034 verbose(env, "\nfrom %d to %d%s:",
12035 env->prev_insn_idx, env->insn_idx,
12036 env->cur_state->speculative ?
12037 " (speculative execution)" : "");
12038 print_verifier_state(env, state->frame[state->curframe], true);
17a52670
AS
12039 do_print_state = false;
12040 }
12041
06ee7115 12042 if (env->log.level & BPF_LOG_LEVEL) {
7105e828 12043 const struct bpf_insn_cbs cbs = {
e6ac2450 12044 .cb_call = disasm_kfunc_name,
7105e828 12045 .cb_print = verbose,
abe08840 12046 .private_data = env,
7105e828
DB
12047 };
12048
2e576648
CL
12049 if (verifier_state_scratched(env))
12050 print_insn_state(env, state->frame[state->curframe]);
12051
c08435ec 12052 verbose_linfo(env, env->insn_idx, "; ");
2e576648 12053 env->prev_log_len = env->log.len_used;
c08435ec 12054 verbose(env, "%d: ", env->insn_idx);
abe08840 12055 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
2e576648
CL
12056 env->prev_insn_print_len = env->log.len_used - env->prev_log_len;
12057 env->prev_log_len = env->log.len_used;
17a52670
AS
12058 }
12059
cae1927c 12060 if (bpf_prog_is_dev_bound(env->prog->aux)) {
c08435ec
DB
12061 err = bpf_prog_offload_verify_insn(env, env->insn_idx,
12062 env->prev_insn_idx);
cae1927c
JK
12063 if (err)
12064 return err;
12065 }
13a27dfc 12066
638f5b90 12067 regs = cur_regs(env);
fe9a5ca7 12068 sanitize_mark_insn_seen(env);
b5dc0163 12069 prev_insn_idx = env->insn_idx;
fd978bf7 12070
17a52670 12071 if (class == BPF_ALU || class == BPF_ALU64) {
1be7f75d 12072 err = check_alu_op(env, insn);
17a52670
AS
12073 if (err)
12074 return err;
12075
12076 } else if (class == BPF_LDX) {
3df126f3 12077 enum bpf_reg_type *prev_src_type, src_reg_type;
9bac3d6d
AS
12078
12079 /* check for reserved fields is already done */
12080
17a52670 12081 /* check src operand */
dc503a8a 12082 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
12083 if (err)
12084 return err;
12085
dc503a8a 12086 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
12087 if (err)
12088 return err;
12089
725f9dcd
AS
12090 src_reg_type = regs[insn->src_reg].type;
12091
17a52670
AS
12092 /* check that memory (src_reg + off) is readable,
12093 * the state of dst_reg will be updated by this func
12094 */
c08435ec
DB
12095 err = check_mem_access(env, env->insn_idx, insn->src_reg,
12096 insn->off, BPF_SIZE(insn->code),
12097 BPF_READ, insn->dst_reg, false);
17a52670
AS
12098 if (err)
12099 return err;
12100
c08435ec 12101 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3df126f3
JK
12102
12103 if (*prev_src_type == NOT_INIT) {
9bac3d6d
AS
12104 /* saw a valid insn
12105 * dst_reg = *(u32 *)(src_reg + off)
3df126f3 12106 * save type to validate intersecting paths
9bac3d6d 12107 */
3df126f3 12108 *prev_src_type = src_reg_type;
9bac3d6d 12109
c64b7983 12110 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
9bac3d6d
AS
12111 /* An abuser program is trying to use the same insn
12112 * dst_reg = *(u32 *)(src_reg + off)
12113 * with different pointer types:
12114 * src_reg == ctx in one branch and
12115 * src_reg == stack|map in some other branch.
12116 * Reject it.
12117 */
61bd5218 12118 verbose(env, "same insn cannot be used with different pointers\n");
9bac3d6d
AS
12119 return -EINVAL;
12120 }
12121
17a52670 12122 } else if (class == BPF_STX) {
3df126f3 12123 enum bpf_reg_type *prev_dst_type, dst_reg_type;
d691f9e8 12124
91c960b0
BJ
12125 if (BPF_MODE(insn->code) == BPF_ATOMIC) {
12126 err = check_atomic(env, env->insn_idx, insn);
17a52670
AS
12127 if (err)
12128 return err;
c08435ec 12129 env->insn_idx++;
17a52670
AS
12130 continue;
12131 }
12132
5ca419f2
BJ
12133 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) {
12134 verbose(env, "BPF_STX uses reserved fields\n");
12135 return -EINVAL;
12136 }
12137
17a52670 12138 /* check src1 operand */
dc503a8a 12139 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
12140 if (err)
12141 return err;
12142 /* check src2 operand */
dc503a8a 12143 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
12144 if (err)
12145 return err;
12146
d691f9e8
AS
12147 dst_reg_type = regs[insn->dst_reg].type;
12148
17a52670 12149 /* check that memory (dst_reg + off) is writeable */
c08435ec
DB
12150 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
12151 insn->off, BPF_SIZE(insn->code),
12152 BPF_WRITE, insn->src_reg, false);
17a52670
AS
12153 if (err)
12154 return err;
12155
c08435ec 12156 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3df126f3
JK
12157
12158 if (*prev_dst_type == NOT_INIT) {
12159 *prev_dst_type = dst_reg_type;
c64b7983 12160 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
61bd5218 12161 verbose(env, "same insn cannot be used with different pointers\n");
d691f9e8
AS
12162 return -EINVAL;
12163 }
12164
17a52670
AS
12165 } else if (class == BPF_ST) {
12166 if (BPF_MODE(insn->code) != BPF_MEM ||
12167 insn->src_reg != BPF_REG_0) {
61bd5218 12168 verbose(env, "BPF_ST uses reserved fields\n");
17a52670
AS
12169 return -EINVAL;
12170 }
12171 /* check src operand */
dc503a8a 12172 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
12173 if (err)
12174 return err;
12175
f37a8cb8 12176 if (is_ctx_reg(env, insn->dst_reg)) {
9d2be44a 12177 verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
2a159c6f 12178 insn->dst_reg,
c25b2ae1 12179 reg_type_str(env, reg_state(env, insn->dst_reg)->type));
f37a8cb8
DB
12180 return -EACCES;
12181 }
12182
17a52670 12183 /* check that memory (dst_reg + off) is writeable */
c08435ec
DB
12184 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
12185 insn->off, BPF_SIZE(insn->code),
12186 BPF_WRITE, -1, false);
17a52670
AS
12187 if (err)
12188 return err;
12189
092ed096 12190 } else if (class == BPF_JMP || class == BPF_JMP32) {
17a52670
AS
12191 u8 opcode = BPF_OP(insn->code);
12192
2589726d 12193 env->jmps_processed++;
17a52670
AS
12194 if (opcode == BPF_CALL) {
12195 if (BPF_SRC(insn->code) != BPF_K ||
2357672c
KKD
12196 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
12197 && insn->off != 0) ||
f4d7e40a 12198 (insn->src_reg != BPF_REG_0 &&
e6ac2450
MKL
12199 insn->src_reg != BPF_PSEUDO_CALL &&
12200 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
092ed096
JW
12201 insn->dst_reg != BPF_REG_0 ||
12202 class == BPF_JMP32) {
61bd5218 12203 verbose(env, "BPF_CALL uses reserved fields\n");
17a52670
AS
12204 return -EINVAL;
12205 }
12206
d83525ca
AS
12207 if (env->cur_state->active_spin_lock &&
12208 (insn->src_reg == BPF_PSEUDO_CALL ||
12209 insn->imm != BPF_FUNC_spin_unlock)) {
12210 verbose(env, "function calls are not allowed while holding a lock\n");
12211 return -EINVAL;
12212 }
f4d7e40a 12213 if (insn->src_reg == BPF_PSEUDO_CALL)
c08435ec 12214 err = check_func_call(env, insn, &env->insn_idx);
e6ac2450 12215 else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL)
5c073f26 12216 err = check_kfunc_call(env, insn, &env->insn_idx);
f4d7e40a 12217 else
69c087ba 12218 err = check_helper_call(env, insn, &env->insn_idx);
17a52670
AS
12219 if (err)
12220 return err;
17a52670
AS
12221 } else if (opcode == BPF_JA) {
12222 if (BPF_SRC(insn->code) != BPF_K ||
12223 insn->imm != 0 ||
12224 insn->src_reg != BPF_REG_0 ||
092ed096
JW
12225 insn->dst_reg != BPF_REG_0 ||
12226 class == BPF_JMP32) {
61bd5218 12227 verbose(env, "BPF_JA uses reserved fields\n");
17a52670
AS
12228 return -EINVAL;
12229 }
12230
c08435ec 12231 env->insn_idx += insn->off + 1;
17a52670
AS
12232 continue;
12233
12234 } else if (opcode == BPF_EXIT) {
12235 if (BPF_SRC(insn->code) != BPF_K ||
12236 insn->imm != 0 ||
12237 insn->src_reg != BPF_REG_0 ||
092ed096
JW
12238 insn->dst_reg != BPF_REG_0 ||
12239 class == BPF_JMP32) {
61bd5218 12240 verbose(env, "BPF_EXIT uses reserved fields\n");
17a52670
AS
12241 return -EINVAL;
12242 }
12243
d83525ca
AS
12244 if (env->cur_state->active_spin_lock) {
12245 verbose(env, "bpf_spin_unlock is missing\n");
12246 return -EINVAL;
12247 }
12248
f4d7e40a
AS
12249 if (state->curframe) {
12250 /* exit from nested function */
c08435ec 12251 err = prepare_func_exit(env, &env->insn_idx);
f4d7e40a
AS
12252 if (err)
12253 return err;
12254 do_print_state = true;
12255 continue;
12256 }
12257
fd978bf7
JS
12258 err = check_reference_leak(env);
12259 if (err)
12260 return err;
12261
390ee7e2
AS
12262 err = check_return_code(env);
12263 if (err)
12264 return err;
f1bca824 12265process_bpf_exit:
0f55f9ed 12266 mark_verifier_state_scratched(env);
2589726d 12267 update_branch_counts(env, env->cur_state);
b5dc0163 12268 err = pop_stack(env, &prev_insn_idx,
6f8a57cc 12269 &env->insn_idx, pop_log);
638f5b90
AS
12270 if (err < 0) {
12271 if (err != -ENOENT)
12272 return err;
17a52670
AS
12273 break;
12274 } else {
12275 do_print_state = true;
12276 continue;
12277 }
12278 } else {
c08435ec 12279 err = check_cond_jmp_op(env, insn, &env->insn_idx);
17a52670
AS
12280 if (err)
12281 return err;
12282 }
12283 } else if (class == BPF_LD) {
12284 u8 mode = BPF_MODE(insn->code);
12285
12286 if (mode == BPF_ABS || mode == BPF_IND) {
ddd872bc
AS
12287 err = check_ld_abs(env, insn);
12288 if (err)
12289 return err;
12290
17a52670
AS
12291 } else if (mode == BPF_IMM) {
12292 err = check_ld_imm(env, insn);
12293 if (err)
12294 return err;
12295
c08435ec 12296 env->insn_idx++;
fe9a5ca7 12297 sanitize_mark_insn_seen(env);
17a52670 12298 } else {
61bd5218 12299 verbose(env, "invalid BPF_LD mode\n");
17a52670
AS
12300 return -EINVAL;
12301 }
12302 } else {
61bd5218 12303 verbose(env, "unknown insn class %d\n", class);
17a52670
AS
12304 return -EINVAL;
12305 }
12306
c08435ec 12307 env->insn_idx++;
17a52670
AS
12308 }
12309
12310 return 0;
12311}
12312
541c3bad
AN
12313static int find_btf_percpu_datasec(struct btf *btf)
12314{
12315 const struct btf_type *t;
12316 const char *tname;
12317 int i, n;
12318
12319 /*
12320 * Both vmlinux and module each have their own ".data..percpu"
12321 * DATASECs in BTF. So for the module case, we need to skip vmlinux BTF
12322 * types and look only at the module's own BTF types.
12323 */
12324 n = btf_nr_types(btf);
12325 if (btf_is_module(btf))
12326 i = btf_nr_types(btf_vmlinux);
12327 else
12328 i = 1;
12329
12330 for (; i < n; i++) {
12331 t = btf_type_by_id(btf, i);
12332 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
12333 continue;
12334
12335 tname = btf_name_by_offset(btf, t->name_off);
12336 if (!strcmp(tname, ".data..percpu"))
12337 return i;
12338 }
12339
12340 return -ENOENT;
12341}
12342
4976b718
HL
12343/* replace pseudo btf_id with kernel symbol address */
12344static int check_pseudo_btf_id(struct bpf_verifier_env *env,
12345 struct bpf_insn *insn,
12346 struct bpf_insn_aux_data *aux)
12347{
eaa6bcb7
HL
12348 const struct btf_var_secinfo *vsi;
12349 const struct btf_type *datasec;
541c3bad 12350 struct btf_mod_pair *btf_mod;
4976b718
HL
12351 const struct btf_type *t;
12352 const char *sym_name;
eaa6bcb7 12353 bool percpu = false;
f16e6313 12354 u32 type, id = insn->imm;
541c3bad 12355 struct btf *btf;
f16e6313 12356 s32 datasec_id;
4976b718 12357 u64 addr;
541c3bad 12358 int i, btf_fd, err;
4976b718 12359
541c3bad
AN
12360 btf_fd = insn[1].imm;
12361 if (btf_fd) {
12362 btf = btf_get_by_fd(btf_fd);
12363 if (IS_ERR(btf)) {
12364 verbose(env, "invalid module BTF object FD specified.\n");
12365 return -EINVAL;
12366 }
12367 } else {
12368 if (!btf_vmlinux) {
12369 verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
12370 return -EINVAL;
12371 }
12372 btf = btf_vmlinux;
12373 btf_get(btf);
4976b718
HL
12374 }
12375
541c3bad 12376 t = btf_type_by_id(btf, id);
4976b718
HL
12377 if (!t) {
12378 verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
541c3bad
AN
12379 err = -ENOENT;
12380 goto err_put;
4976b718
HL
12381 }
12382
12383 if (!btf_type_is_var(t)) {
541c3bad
AN
12384 verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n", id);
12385 err = -EINVAL;
12386 goto err_put;
4976b718
HL
12387 }
12388
541c3bad 12389 sym_name = btf_name_by_offset(btf, t->name_off);
4976b718
HL
12390 addr = kallsyms_lookup_name(sym_name);
12391 if (!addr) {
12392 verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
12393 sym_name);
541c3bad
AN
12394 err = -ENOENT;
12395 goto err_put;
4976b718
HL
12396 }
12397
541c3bad 12398 datasec_id = find_btf_percpu_datasec(btf);
eaa6bcb7 12399 if (datasec_id > 0) {
541c3bad 12400 datasec = btf_type_by_id(btf, datasec_id);
eaa6bcb7
HL
12401 for_each_vsi(i, datasec, vsi) {
12402 if (vsi->type == id) {
12403 percpu = true;
12404 break;
12405 }
12406 }
12407 }
12408
4976b718
HL
12409 insn[0].imm = (u32)addr;
12410 insn[1].imm = addr >> 32;
12411
12412 type = t->type;
541c3bad 12413 t = btf_type_skip_modifiers(btf, type, NULL);
eaa6bcb7 12414 if (percpu) {
5844101a 12415 aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU;
541c3bad 12416 aux->btf_var.btf = btf;
eaa6bcb7
HL
12417 aux->btf_var.btf_id = type;
12418 } else if (!btf_type_is_struct(t)) {
4976b718
HL
12419 const struct btf_type *ret;
12420 const char *tname;
12421 u32 tsize;
12422
12423 /* resolve the type size of ksym. */
541c3bad 12424 ret = btf_resolve_size(btf, t, &tsize);
4976b718 12425 if (IS_ERR(ret)) {
541c3bad 12426 tname = btf_name_by_offset(btf, t->name_off);
4976b718
HL
12427 verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
12428 tname, PTR_ERR(ret));
541c3bad
AN
12429 err = -EINVAL;
12430 goto err_put;
4976b718 12431 }
34d3a78c 12432 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY;
4976b718
HL
12433 aux->btf_var.mem_size = tsize;
12434 } else {
12435 aux->btf_var.reg_type = PTR_TO_BTF_ID;
541c3bad 12436 aux->btf_var.btf = btf;
4976b718
HL
12437 aux->btf_var.btf_id = type;
12438 }
541c3bad
AN
12439
12440 /* check whether we recorded this BTF (and maybe module) already */
12441 for (i = 0; i < env->used_btf_cnt; i++) {
12442 if (env->used_btfs[i].btf == btf) {
12443 btf_put(btf);
12444 return 0;
12445 }
12446 }
12447
12448 if (env->used_btf_cnt >= MAX_USED_BTFS) {
12449 err = -E2BIG;
12450 goto err_put;
12451 }
12452
12453 btf_mod = &env->used_btfs[env->used_btf_cnt];
12454 btf_mod->btf = btf;
12455 btf_mod->module = NULL;
12456
12457 /* if we reference variables from a kernel module, bump its refcount */
12458 if (btf_is_module(btf)) {
12459 btf_mod->module = btf_try_get_module(btf);
12460 if (!btf_mod->module) {
12461 err = -ENXIO;
12462 goto err_put;
12463 }
12464 }
12465
12466 env->used_btf_cnt++;
12467
4976b718 12468 return 0;
541c3bad
AN
12469err_put:
12470 btf_put(btf);
12471 return err;
4976b718
HL
12472}
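/* Illustration (hand-assembled, for a vmlinux ksym) of the ld_imm64
 * pair this function rewrites. Before the rewrite, insn[0].imm holds
 * the BTF id of the VAR and insn[1].imm the BTF object fd (0 selects
 * btf_vmlinux); afterwards the pair carries the resolved 64-bit kernel
 * address:
 *
 *	before:	BPF_LD_IMM64_RAW(BPF_REG_2, BPF_PSEUDO_BTF_ID, btf_id)
 *	after:	insn[0].imm = (u32)addr; insn[1].imm = addr >> 32;
 */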
12473
56f668df
MKL
12474static int check_map_prealloc(struct bpf_map *map)
12475{
12476 return (map->map_type != BPF_MAP_TYPE_HASH &&
bcc6b1b7
MKL
12477 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
12478 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
56f668df
MKL
12479 !(map->map_flags & BPF_F_NO_PREALLOC);
12480}
12481
d83525ca
AS
12482static bool is_tracing_prog_type(enum bpf_prog_type type)
12483{
12484 switch (type) {
12485 case BPF_PROG_TYPE_KPROBE:
12486 case BPF_PROG_TYPE_TRACEPOINT:
12487 case BPF_PROG_TYPE_PERF_EVENT:
12488 case BPF_PROG_TYPE_RAW_TRACEPOINT:
12489 return true;
12490 default:
12491 return false;
12492 }
12493}
12494
94dacdbd
TG
12495static bool is_preallocated_map(struct bpf_map *map)
12496{
12497 if (!check_map_prealloc(map))
12498 return false;
12499 if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta))
12500 return false;
12501 return true;
12502}
12503
61bd5218
JK
12504static int check_map_prog_compatibility(struct bpf_verifier_env *env,
12505 struct bpf_map *map,
fdc15d38
AS
12506 struct bpf_prog *prog)
12507
12508{
7e40781c 12509 enum bpf_prog_type prog_type = resolve_prog_type(prog);
94dacdbd
TG
12510 /*
12511 * Validate that trace type programs use preallocated hash maps.
12512 *
12513 * For programs attached to PERF events this is mandatory as the
12514 * perf NMI can hit any arbitrary code sequence.
12515 *
12516 * All other trace types using non-preallocated hash maps are unsafe as
12517 * well because tracepoints or kprobes can be inside locked regions
12518 * of the memory allocator or at a place where a recursion into the
12519 * memory allocator would see inconsistent state.
12520 *
2ed905c5
TG
12521 * On RT enabled kernels run-time allocation of all trace type
12522 * programs is strictly prohibited due to lock type constraints. On
12523 * !RT kernels it is allowed for backwards compatibility reasons for
12524 * now, but warnings are emitted so developers are made aware of
12525 * the unsafety and can fix their programs before this is enforced.
56f668df 12526 */
7e40781c
UP
12527 if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) {
12528 if (prog_type == BPF_PROG_TYPE_PERF_EVENT) {
61bd5218 12529 verbose(env, "perf_event programs can only use preallocated hash map\n");
56f668df
MKL
12530 return -EINVAL;
12531 }
2ed905c5
TG
12532 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
12533 verbose(env, "trace type programs can only use preallocated hash map\n");
12534 return -EINVAL;
12535 }
94dacdbd
TG
12536 WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
12537 verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
fdc15d38 12538 }
a3884572 12539
9e7a4d98
KS
12540 if (map_value_has_spin_lock(map)) {
12541 if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
12542 verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
12543 return -EINVAL;
12544 }
12545
12546 if (is_tracing_prog_type(prog_type)) {
12547 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
12548 return -EINVAL;
12549 }
12550
12551 if (prog->aux->sleepable) {
12552 verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
12553 return -EINVAL;
12554 }
d83525ca
AS
12555 }
12556
5e0bc308
DB
12557 if (map_value_has_timer(map)) {
12558 if (is_tracing_prog_type(prog_type)) {
12559 verbose(env, "tracing progs cannot use bpf_timer yet\n");
12560 return -EINVAL;
12561 }
12562 }
12563
a3884572 12564 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
09728266 12565 !bpf_offload_prog_map_match(prog, map)) {
a3884572
JK
12566 verbose(env, "offload device mismatch between prog and map\n");
12567 return -EINVAL;
12568 }
12569
85d33df3
MKL
12570 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
12571 verbose(env, "bpf_struct_ops map cannot be used in prog\n");
12572 return -EINVAL;
12573 }
12574
1e6c62a8
AS
12575 if (prog->aux->sleepable)
12576 switch (map->map_type) {
12577 case BPF_MAP_TYPE_HASH:
12578 case BPF_MAP_TYPE_LRU_HASH:
12579 case BPF_MAP_TYPE_ARRAY:
638e4b82
AS
12580 case BPF_MAP_TYPE_PERCPU_HASH:
12581 case BPF_MAP_TYPE_PERCPU_ARRAY:
12582 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
12583 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
12584 case BPF_MAP_TYPE_HASH_OF_MAPS:
1e6c62a8
AS
12585 if (!is_preallocated_map(map)) {
12586 verbose(env,
638e4b82 12587 "Sleepable programs can only use preallocated maps\n");
1e6c62a8
AS
12588 return -EINVAL;
12589 }
12590 break;
ba90c2cc 12591 case BPF_MAP_TYPE_RINGBUF:
0fe4b381
KS
12592 case BPF_MAP_TYPE_INODE_STORAGE:
12593 case BPF_MAP_TYPE_SK_STORAGE:
12594 case BPF_MAP_TYPE_TASK_STORAGE:
ba90c2cc 12595 break;
1e6c62a8
AS
12596 default:
12597 verbose(env,
ba90c2cc 12598 "Sleepable programs can only use array, hash, and ringbuf maps\n");
1e6c62a8
AS
12599 return -EINVAL;
12600 }
12601
fdc15d38
AS
12602 return 0;
12603}
12604
b741f163
RG
12605static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
12606{
12607 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
12608 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
12609}
12610
4976b718
HL
12611/* find and rewrite pseudo imm in ld_imm64 instructions:
12612 *
12613 * 1. if it accesses map FD, replace it with actual map pointer.
12614 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var.
12615 *
12616 * NOTE: btf_vmlinux is required for converting pseudo btf_id.
0246e64d 12617 */
4976b718 12618static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
0246e64d
AS
12619{
12620 struct bpf_insn *insn = env->prog->insnsi;
12621 int insn_cnt = env->prog->len;
fdc15d38 12622 int i, j, err;
0246e64d 12623
f1f7714e 12624 err = bpf_prog_calc_tag(env->prog);
aafe6ae9
DB
12625 if (err)
12626 return err;
12627
0246e64d 12628 for (i = 0; i < insn_cnt; i++, insn++) {
9bac3d6d 12629 if (BPF_CLASS(insn->code) == BPF_LDX &&
d691f9e8 12630 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
61bd5218 12631 verbose(env, "BPF_LDX uses reserved fields\n");
d691f9e8
AS
12632 return -EINVAL;
12633 }
12634
0246e64d 12635 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
d8eca5bb 12636 struct bpf_insn_aux_data *aux;
0246e64d
AS
12637 struct bpf_map *map;
12638 struct fd f;
d8eca5bb 12639 u64 addr;
387544bf 12640 u32 fd;
0246e64d
AS
12641
12642 if (i == insn_cnt - 1 || insn[1].code != 0 ||
12643 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
12644 insn[1].off != 0) {
61bd5218 12645 verbose(env, "invalid bpf_ld_imm64 insn\n");
0246e64d
AS
12646 return -EINVAL;
12647 }
12648
d8eca5bb 12649 if (insn[0].src_reg == 0)
0246e64d
AS
12650 /* valid generic load 64-bit imm */
12651 goto next_insn;
12652
4976b718
HL
12653 if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
12654 aux = &env->insn_aux_data[i];
12655 err = check_pseudo_btf_id(env, insn, aux);
12656 if (err)
12657 return err;
12658 goto next_insn;
12659 }
12660
69c087ba
YS
12661 if (insn[0].src_reg == BPF_PSEUDO_FUNC) {
12662 aux = &env->insn_aux_data[i];
12663 aux->ptr_type = PTR_TO_FUNC;
12664 goto next_insn;
12665 }
12666
d8eca5bb
DB
12667 /* In final convert_pseudo_ld_imm64() step, this is
12668 * converted into regular 64-bit imm load insn.
12669 */
387544bf
AS
12670 switch (insn[0].src_reg) {
12671 case BPF_PSEUDO_MAP_VALUE:
12672 case BPF_PSEUDO_MAP_IDX_VALUE:
12673 break;
12674 case BPF_PSEUDO_MAP_FD:
12675 case BPF_PSEUDO_MAP_IDX:
12676 if (insn[1].imm == 0)
12677 break;
12678 fallthrough;
12679 default:
12680 verbose(env, "unrecognized bpf_ld_imm64 insn\n");
0246e64d
AS
12681 return -EINVAL;
12682 }
12683
387544bf
AS
12684 switch (insn[0].src_reg) {
12685 case BPF_PSEUDO_MAP_IDX_VALUE:
12686 case BPF_PSEUDO_MAP_IDX:
12687 if (bpfptr_is_null(env->fd_array)) {
12688 verbose(env, "fd_idx without fd_array is invalid\n");
12689 return -EPROTO;
12690 }
12691 if (copy_from_bpfptr_offset(&fd, env->fd_array,
12692 insn[0].imm * sizeof(fd),
12693 sizeof(fd)))
12694 return -EFAULT;
12695 break;
12696 default:
12697 fd = insn[0].imm;
12698 break;
12699 }
12700
12701 f = fdget(fd);
c2101297 12702 map = __bpf_map_get(f);
0246e64d 12703 if (IS_ERR(map)) {
61bd5218 12704 verbose(env, "fd %d is not pointing to valid bpf_map\n",
20182390 12705 insn[0].imm);
0246e64d
AS
12706 return PTR_ERR(map);
12707 }
12708
61bd5218 12709 err = check_map_prog_compatibility(env, map, env->prog);
fdc15d38
AS
12710 if (err) {
12711 fdput(f);
12712 return err;
12713 }
12714
d8eca5bb 12715 aux = &env->insn_aux_data[i];
387544bf
AS
12716 if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
12717 insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
d8eca5bb
DB
12718 addr = (unsigned long)map;
12719 } else {
12720 u32 off = insn[1].imm;
12721
12722 if (off >= BPF_MAX_VAR_OFF) {
12723 verbose(env, "direct value offset of %u is not allowed\n", off);
12724 fdput(f);
12725 return -EINVAL;
12726 }
12727
12728 if (!map->ops->map_direct_value_addr) {
12729 verbose(env, "no direct value access support for this map type\n");
12730 fdput(f);
12731 return -EINVAL;
12732 }
12733
12734 err = map->ops->map_direct_value_addr(map, &addr, off);
12735 if (err) {
12736 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
12737 map->value_size, off);
12738 fdput(f);
12739 return err;
12740 }
12741
12742 aux->map_off = off;
12743 addr += off;
12744 }
12745
12746 insn[0].imm = (u32)addr;
12747 insn[1].imm = addr >> 32;
0246e64d
AS
12748
12749 /* check whether we recorded this map already */
d8eca5bb 12750 for (j = 0; j < env->used_map_cnt; j++) {
0246e64d 12751 if (env->used_maps[j] == map) {
d8eca5bb 12752 aux->map_index = j;
0246e64d
AS
12753 fdput(f);
12754 goto next_insn;
12755 }
d8eca5bb 12756 }
0246e64d
AS
12757
12758 if (env->used_map_cnt >= MAX_USED_MAPS) {
12759 fdput(f);
12760 return -E2BIG;
12761 }
12762
0246e64d
AS
12763 /* hold the map. If the program is rejected by verifier,
12764 * the map will be released by release_maps() or it
12765 * will be used by the valid program until it's unloaded
ab7f5bf0 12766 * and all maps are released in free_used_maps()
0246e64d 12767 */
1e0bd5a0 12768 bpf_map_inc(map);
d8eca5bb
DB
12769
12770 aux->map_index = env->used_map_cnt;
92117d84
AS
12771 env->used_maps[env->used_map_cnt++] = map;
12772
b741f163 12773 if (bpf_map_is_cgroup_storage(map) &&
e4730423 12774 bpf_cgroup_storage_assign(env->prog->aux, map)) {
b741f163 12775 verbose(env, "only one cgroup storage of each type is allowed\n");
de9cbbaa
RG
12776 fdput(f);
12777 return -EBUSY;
12778 }
12779
0246e64d
AS
12780 fdput(f);
12781next_insn:
12782 insn++;
12783 i++;
5e581dad
DB
12784 continue;
12785 }
12786
12787 /* Basic sanity check before we invest more work here. */
12788 if (!bpf_opcode_in_insntable(insn->code)) {
12789 verbose(env, "unknown opcode %02x\n", insn->code);
12790 return -EINVAL;
0246e64d
AS
12791 }
12792 }
12793
12794 /* now all pseudo BPF_LD_IMM64 instructions load valid
12795 * 'struct bpf_map *' into a register instead of user map_fd.
12796 * These pointers will be used later by verifier to validate map access.
12797 */
12798 return 0;
12799}
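/* Example (hand-written) of the most common form resolved above: the
 * BPF_LD_MAP_FD(BPF_REG_1, map_fd) insn macro emits a ld_imm64 with
 * src_reg == BPF_PSEUDO_MAP_FD and the map fd in insn[0].imm; the loop
 * above replaces the fd with the actual 'struct bpf_map *' address and
 * takes a reference on the map for the lifetime of the program.
 */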
12800
12801/* drop refcnt of maps used by the rejected program */
58e2af8b 12802static void release_maps(struct bpf_verifier_env *env)
0246e64d 12803{
a2ea0746
DB
12804 __bpf_free_used_maps(env->prog->aux, env->used_maps,
12805 env->used_map_cnt);
0246e64d
AS
12806}
12807
541c3bad
AN
12808/* drop refcnt of maps used by the rejected program */
12809static void release_btfs(struct bpf_verifier_env *env)
12810{
12811 __bpf_free_used_btfs(env->prog->aux, env->used_btfs,
12812 env->used_btf_cnt);
12813}
12814
0246e64d 12815/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
58e2af8b 12816static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
0246e64d
AS
12817{
12818 struct bpf_insn *insn = env->prog->insnsi;
12819 int insn_cnt = env->prog->len;
12820 int i;
12821
69c087ba
YS
12822 for (i = 0; i < insn_cnt; i++, insn++) {
12823 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW))
12824 continue;
12825 if (insn->src_reg == BPF_PSEUDO_FUNC)
12826 continue;
12827 insn->src_reg = 0;
12828 }
0246e64d
AS
12829}
12830
8041902d
AS
12831/* single env->prog->insni[off] instruction was replaced with the range
12832 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
12833 * [0, off) and [off, end) to new locations, so the patched range stays zero
12834 */
75f0fc7b
HF
12835static void adjust_insn_aux_data(struct bpf_verifier_env *env,
12836 struct bpf_insn_aux_data *new_data,
12837 struct bpf_prog *new_prog, u32 off, u32 cnt)
8041902d 12838{
75f0fc7b 12839 struct bpf_insn_aux_data *old_data = env->insn_aux_data;
b325fbca 12840 struct bpf_insn *insn = new_prog->insnsi;
d203b0fd 12841 u32 old_seen = old_data[off].seen;
b325fbca 12842 u32 prog_len;
c131187d 12843 int i;
8041902d 12844
b325fbca
JW
12845 /* aux info at OFF always needs adjustment, no matter whether the fast path
12846 * (cnt == 1) is taken or not. There is no guarantee the INSN at OFF is the
12847 * original insn in the old prog.
12848 */
12849 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
12850
8041902d 12851 if (cnt == 1)
75f0fc7b 12852 return;
b325fbca 12853 prog_len = new_prog->len;
75f0fc7b 12854
8041902d
AS
12855 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
12856 memcpy(new_data + off + cnt - 1, old_data + off,
12857 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
b325fbca 12858 for (i = off; i < off + cnt - 1; i++) {
d203b0fd
DB
12859 /* Expand insni[off]'s seen count to the patched range. */
12860 new_data[i].seen = old_seen;
b325fbca
JW
12861 new_data[i].zext_dst = insn_has_def32(env, insn + i);
12862 }
8041902d
AS
12863 env->insn_aux_data = new_data;
12864 vfree(old_data);
8041902d
AS
12865}
12866
cc8b0b92
AS
12867static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
12868{
12869 int i;
12870
12871 if (len == 1)
12872 return;
4cb3d99c
JW
12873 /* NOTE: fake 'exit' subprog should be updated as well. */
12874 for (i = 0; i <= env->subprog_cnt; i++) {
afd59424 12875 if (env->subprog_info[i].start <= off)
cc8b0b92 12876 continue;
9c8105bd 12877 env->subprog_info[i].start += len - 1;
cc8b0b92
AS
12878 }
12879}
12880
7506d211 12881static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
a748c697
MF
12882{
12883 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
12884 int i, sz = prog->aux->size_poke_tab;
12885 struct bpf_jit_poke_descriptor *desc;
12886
12887 for (i = 0; i < sz; i++) {
12888 desc = &tab[i];
7506d211
JF
12889 if (desc->insn_idx <= off)
12890 continue;
a748c697
MF
12891 desc->insn_idx += len - 1;
12892 }
12893}
12894
8041902d
AS
12895static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
12896 const struct bpf_insn *patch, u32 len)
12897{
12898 struct bpf_prog *new_prog;
75f0fc7b
HF
12899 struct bpf_insn_aux_data *new_data = NULL;
12900
12901 if (len > 1) {
12902 new_data = vzalloc(array_size(env->prog->len + len - 1,
12903 sizeof(struct bpf_insn_aux_data)));
12904 if (!new_data)
12905 return NULL;
12906 }
8041902d
AS
12907
12908 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4f73379e
AS
12909 if (IS_ERR(new_prog)) {
12910 if (PTR_ERR(new_prog) == -ERANGE)
12911 verbose(env,
12912 "insn %d cannot be patched due to 16-bit range\n",
12913 env->insn_aux_data[off].orig_idx);
75f0fc7b 12914 vfree(new_data);
8041902d 12915 return NULL;
4f73379e 12916 }
75f0fc7b 12917 adjust_insn_aux_data(env, new_data, new_prog, off, len);
cc8b0b92 12918 adjust_subprog_starts(env, off, len);
7506d211 12919 adjust_poke_descs(new_prog, off, len);
8041902d
AS
12920 return new_prog;
12921}
12922
52875a04
JK
12923static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
12924 u32 off, u32 cnt)
12925{
12926 int i, j;
12927
12928 /* find first prog starting at or after off (first to remove) */
12929 for (i = 0; i < env->subprog_cnt; i++)
12930 if (env->subprog_info[i].start >= off)
12931 break;
12932 /* find first prog starting at or after off + cnt (first to stay) */
12933 for (j = i; j < env->subprog_cnt; j++)
12934 if (env->subprog_info[j].start >= off + cnt)
12935 break;
12936 /* if j doesn't start exactly at off + cnt, we are just removing
 12937 * the front of the previous prog
12938 */
12939 if (env->subprog_info[j].start != off + cnt)
12940 j--;
12941
12942 if (j > i) {
12943 struct bpf_prog_aux *aux = env->prog->aux;
12944 int move;
12945
12946 /* move fake 'exit' subprog as well */
12947 move = env->subprog_cnt + 1 - j;
12948
12949 memmove(env->subprog_info + i,
12950 env->subprog_info + j,
12951 sizeof(*env->subprog_info) * move);
12952 env->subprog_cnt -= j - i;
12953
12954 /* remove func_info */
12955 if (aux->func_info) {
12956 move = aux->func_info_cnt - j;
12957
12958 memmove(aux->func_info + i,
12959 aux->func_info + j,
12960 sizeof(*aux->func_info) * move);
12961 aux->func_info_cnt -= j - i;
12962 /* func_info->insn_off is set after all code rewrites,
12963 * in adjust_btf_func() - no need to adjust
12964 */
12965 }
12966 } else {
12967 /* convert i from "first prog to remove" to "first to adjust" */
12968 if (env->subprog_info[i].start == off)
12969 i++;
12970 }
12971
12972 /* update fake 'exit' subprog as well */
12973 for (; i <= env->subprog_cnt; i++)
12974 env->subprog_info[i].start -= cnt;
12975
12976 return 0;
12977}
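
/* Worked example, assuming subprog starts {0, 5, 9, 12(fake exit)} and
 * removal of insns [5, 9) (off = 5, cnt = 4): i = 1 (first to remove)
 * and j = 2 (first to stay, starting exactly at off + cnt). The entry
 * for the removed subprog is dropped and the survivors are pulled in
 * by cnt, leaving starts {0, 5, 8(fake exit)}.
 */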
12978
12979static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
12980 u32 cnt)
12981{
12982 struct bpf_prog *prog = env->prog;
12983 u32 i, l_off, l_cnt, nr_linfo;
12984 struct bpf_line_info *linfo;
12985
12986 nr_linfo = prog->aux->nr_linfo;
12987 if (!nr_linfo)
12988 return 0;
12989
12990 linfo = prog->aux->linfo;
12991
12992 /* find first line info to remove, count lines to be removed */
12993 for (i = 0; i < nr_linfo; i++)
12994 if (linfo[i].insn_off >= off)
12995 break;
12996
12997 l_off = i;
12998 l_cnt = 0;
12999 for (; i < nr_linfo; i++)
13000 if (linfo[i].insn_off < off + cnt)
13001 l_cnt++;
13002 else
13003 break;
13004
 13005 /* If the first live insn doesn't match the first live linfo, it must
 13006 * "inherit" the last removed linfo. prog is already modified, so prog->len
 13007 * == off means no live instructions remain (the tail was removed).
13008 */
13009 if (prog->len != off && l_cnt &&
13010 (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
13011 l_cnt--;
13012 linfo[--i].insn_off = off + cnt;
13013 }
13014
 13015 /* remove the line info entries which refer to the removed instructions */
13016 if (l_cnt) {
13017 memmove(linfo + l_off, linfo + i,
13018 sizeof(*linfo) * (nr_linfo - i));
13019
13020 prog->aux->nr_linfo -= l_cnt;
13021 nr_linfo = prog->aux->nr_linfo;
13022 }
13023
13024 /* pull all linfo[i].insn_off >= off + cnt in by cnt */
13025 for (i = l_off; i < nr_linfo; i++)
13026 linfo[i].insn_off -= cnt;
13027
13028 /* fix up all subprogs (incl. 'exit') which start >= off */
13029 for (i = 0; i <= env->subprog_cnt; i++)
13030 if (env->subprog_info[i].linfo_idx > l_off) {
13031 /* program may have started in the removed region but
13032 * may not be fully removed
13033 */
13034 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
13035 env->subprog_info[i].linfo_idx -= l_cnt;
13036 else
13037 env->subprog_info[i].linfo_idx = l_off;
13038 }
13039
13040 return 0;
13041}
13042
13043static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
13044{
13045 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
13046 unsigned int orig_prog_len = env->prog->len;
13047 int err;
13048
08ca90af
JK
13049 if (bpf_prog_is_dev_bound(env->prog->aux))
13050 bpf_prog_offload_remove_insns(env, off, cnt);
13051
52875a04
JK
13052 err = bpf_remove_insns(env->prog, off, cnt);
13053 if (err)
13054 return err;
13055
13056 err = adjust_subprog_starts_after_remove(env, off, cnt);
13057 if (err)
13058 return err;
13059
13060 err = bpf_adj_linfo_after_remove(env, off, cnt);
13061 if (err)
13062 return err;
13063
13064 memmove(aux_data + off, aux_data + off + cnt,
13065 sizeof(*aux_data) * (orig_prog_len - off - cnt));
13066
13067 return 0;
13068}
13069
2a5418a1
DB
 13070/* The verifier does more data flow analysis than llvm and will not
 13071 * explore branches that are dead at run time. Malicious programs can
 13072 * have dead code too. Therefore replace all dead at-run-time code
 13073 * with 'ja -1'.
 13074 *
 13075 * Plain nops would not be optimal: if they sat at the end of the
 13076 * program and, through another bug, we managed to jump there, we
 13077 * would execute beyond program memory. Returning an exception code
 13078 * also wouldn't work, since the dead code could be located in a
 13079 * subprog.
c131187d
AS
13080 */
13081static void sanitize_dead_code(struct bpf_verifier_env *env)
13082{
13083 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
2a5418a1 13084 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
c131187d
AS
13085 struct bpf_insn *insn = env->prog->insnsi;
13086 const int insn_cnt = env->prog->len;
13087 int i;
13088
13089 for (i = 0; i < insn_cnt; i++) {
13090 if (aux_data[i].seen)
13091 continue;
2a5418a1 13092 memcpy(insn + i, &trap, sizeof(trap));
45c709f8 13093 aux_data[i].zext_dst = false;
c131187d
AS
13094 }
13095}
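
/* A minimal sketch of the trap installed above (the helper name is
 * illustrative, not from this file). 'ja -1' is an unconditional jump
 * whose target is the insn itself, so even a stray branch into a dead
 * slot spins in place instead of running off the end of the image:
 */
static void __maybe_unused example_install_trap(struct bpf_insn *dead)
{
	/* off is relative to the next insn, hence -1 == self */
	*dead = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
}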
13096
e2ae4ca2
JK
13097static bool insn_is_cond_jump(u8 code)
13098{
13099 u8 op;
13100
092ed096
JW
13101 if (BPF_CLASS(code) == BPF_JMP32)
13102 return true;
13103
e2ae4ca2
JK
13104 if (BPF_CLASS(code) != BPF_JMP)
13105 return false;
13106
13107 op = BPF_OP(code);
13108 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
13109}
13110
13111static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
13112{
13113 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
13114 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
13115 struct bpf_insn *insn = env->prog->insnsi;
13116 const int insn_cnt = env->prog->len;
13117 int i;
13118
13119 for (i = 0; i < insn_cnt; i++, insn++) {
13120 if (!insn_is_cond_jump(insn->code))
13121 continue;
13122
13123 if (!aux_data[i + 1].seen)
13124 ja.off = insn->off;
13125 else if (!aux_data[i + 1 + insn->off].seen)
13126 ja.off = 0;
13127 else
13128 continue;
13129
08ca90af
JK
13130 if (bpf_prog_is_dev_bound(env->prog->aux))
13131 bpf_prog_offload_replace_insn(env, i, &ja);
13132
e2ae4ca2
JK
13133 memcpy(insn, &ja, sizeof(ja));
13134 }
13135}
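
/* Worked example for the pass above, using liveness from insn_aux_data:
 *
 *	i: if r1 == 0 goto +2     and insn i + 1 was never seen
 *	=> goto +2                branch always taken: reuse insn->off
 *
 *	i: if r1 == 0 goto +2     and insn i + 3 (the target) never seen
 *	=> goto +0                branch never taken: plain fall-through
 *
 * The resulting 'goto +0' nops are then collected by opt_remove_nops().
 */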
13136
52875a04
JK
13137static int opt_remove_dead_code(struct bpf_verifier_env *env)
13138{
13139 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
13140 int insn_cnt = env->prog->len;
13141 int i, err;
13142
13143 for (i = 0; i < insn_cnt; i++) {
13144 int j;
13145
13146 j = 0;
13147 while (i + j < insn_cnt && !aux_data[i + j].seen)
13148 j++;
13149 if (!j)
13150 continue;
13151
13152 err = verifier_remove_insns(env, i, j);
13153 if (err)
13154 return err;
13155 insn_cnt = env->prog->len;
13156 }
13157
13158 return 0;
13159}
13160
a1b14abc
JK
13161static int opt_remove_nops(struct bpf_verifier_env *env)
13162{
13163 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
13164 struct bpf_insn *insn = env->prog->insnsi;
13165 int insn_cnt = env->prog->len;
13166 int i, err;
13167
13168 for (i = 0; i < insn_cnt; i++) {
13169 if (memcmp(&insn[i], &ja, sizeof(ja)))
13170 continue;
13171
13172 err = verifier_remove_insns(env, i, 1);
13173 if (err)
13174 return err;
13175 insn_cnt--;
13176 i--;
13177 }
13178
13179 return 0;
13180}
13181
d6c2308c
JW
13182static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
13183 const union bpf_attr *attr)
a4b1d3c1 13184{
d6c2308c 13185 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
a4b1d3c1 13186 struct bpf_insn_aux_data *aux = env->insn_aux_data;
d6c2308c 13187 int i, patch_len, delta = 0, len = env->prog->len;
a4b1d3c1 13188 struct bpf_insn *insns = env->prog->insnsi;
a4b1d3c1 13189 struct bpf_prog *new_prog;
d6c2308c 13190 bool rnd_hi32;
a4b1d3c1 13191
d6c2308c 13192 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
a4b1d3c1 13193 zext_patch[1] = BPF_ZEXT_REG(0);
d6c2308c
JW
13194 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
13195 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
13196 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
a4b1d3c1
JW
13197 for (i = 0; i < len; i++) {
13198 int adj_idx = i + delta;
13199 struct bpf_insn insn;
83a28819 13200 int load_reg;
a4b1d3c1 13201
d6c2308c 13202 insn = insns[adj_idx];
83a28819 13203 load_reg = insn_def_regno(&insn);
d6c2308c
JW
13204 if (!aux[adj_idx].zext_dst) {
13205 u8 code, class;
13206 u32 imm_rnd;
13207
13208 if (!rnd_hi32)
13209 continue;
13210
13211 code = insn.code;
13212 class = BPF_CLASS(code);
83a28819 13213 if (load_reg == -1)
d6c2308c
JW
13214 continue;
13215
13216 /* NOTE: arg "reg" (the fourth one) is only used for
83a28819
IL
13217 * BPF_STX + SRC_OP, so it is safe to pass NULL
13218 * here.
d6c2308c 13219 */
83a28819 13220 if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) {
d6c2308c
JW
13221 if (class == BPF_LD &&
13222 BPF_MODE(code) == BPF_IMM)
13223 i++;
13224 continue;
13225 }
13226
 13227 /* a ctx load could be transformed into a wider load. */
13228 if (class == BPF_LDX &&
13229 aux[adj_idx].ptr_type == PTR_TO_CTX)
13230 continue;
13231
13232 imm_rnd = get_random_int();
13233 rnd_hi32_patch[0] = insn;
13234 rnd_hi32_patch[1].imm = imm_rnd;
83a28819 13235 rnd_hi32_patch[3].dst_reg = load_reg;
d6c2308c
JW
13236 patch = rnd_hi32_patch;
13237 patch_len = 4;
13238 goto apply_patch_buffer;
13239 }
13240
39491867
BJ
 13241 /* Add in a zero-extend instruction if a) the JIT has requested
13242 * it or b) it's a CMPXCHG.
13243 *
13244 * The latter is because: BPF_CMPXCHG always loads a value into
13245 * R0, therefore always zero-extends. However some archs'
13246 * equivalent instruction only does this load when the
13247 * comparison is successful. This detail of CMPXCHG is
13248 * orthogonal to the general zero-extension behaviour of the
13249 * CPU, so it's treated independently of bpf_jit_needs_zext.
13250 */
13251 if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn))
a4b1d3c1
JW
13252 continue;
13253
83a28819
IL
13254 if (WARN_ON(load_reg == -1)) {
13255 verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n");
13256 return -EFAULT;
b2e37a71
IL
13257 }
13258
a4b1d3c1 13259 zext_patch[0] = insn;
b2e37a71
IL
13260 zext_patch[1].dst_reg = load_reg;
13261 zext_patch[1].src_reg = load_reg;
d6c2308c
JW
13262 patch = zext_patch;
13263 patch_len = 2;
13264apply_patch_buffer:
13265 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
a4b1d3c1
JW
13266 if (!new_prog)
13267 return -ENOMEM;
13268 env->prog = new_prog;
13269 insns = new_prog->insnsi;
13270 aux = env->insn_aux_data;
d6c2308c 13271 delta += patch_len - 1;
a4b1d3c1
JW
13272 }
13273
13274 return 0;
13275}
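
/* Sketch of the two patch shapes built above, with w2 as an assumed
 * 32-bit destination (illustrative only). The zext patch makes the
 * zero-extension explicit for JITs that requested it:
 *
 *	<original 32-bit def of w2>
 *	w2 = w2			BPF_ZEXT_REG: special mov32 whose
 *				imm = 1 marks it as a zero-extend
 *
 * Under BPF_F_TEST_RND_HI32 the high half is poisoned instead, so a
 * missed zero-extension shows up as a test failure:
 *
 *	<original 32-bit def of w2>
 *	AX = imm_rnd		BPF_REG_AX is the hidden scratch reg
 *	AX <<= 32
 *	r2 |= AX
 */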
13276
c64b7983
JS
13277/* convert load instructions that access fields of a context type into a
13278 * sequence of instructions that access fields of the underlying structure:
13279 * struct __sk_buff -> struct sk_buff
13280 * struct bpf_sock_ops -> struct sock
9bac3d6d 13281 */
58e2af8b 13282static int convert_ctx_accesses(struct bpf_verifier_env *env)
9bac3d6d 13283{
00176a34 13284 const struct bpf_verifier_ops *ops = env->ops;
f96da094 13285 int i, cnt, size, ctx_field_size, delta = 0;
3df126f3 13286 const int insn_cnt = env->prog->len;
36bbef52 13287 struct bpf_insn insn_buf[16], *insn;
46f53a65 13288 u32 target_size, size_default, off;
9bac3d6d 13289 struct bpf_prog *new_prog;
d691f9e8 13290 enum bpf_access_type type;
f96da094 13291 bool is_narrower_load;
9bac3d6d 13292
b09928b9
DB
13293 if (ops->gen_prologue || env->seen_direct_write) {
13294 if (!ops->gen_prologue) {
13295 verbose(env, "bpf verifier is misconfigured\n");
13296 return -EINVAL;
13297 }
36bbef52
DB
13298 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
13299 env->prog);
13300 if (cnt >= ARRAY_SIZE(insn_buf)) {
61bd5218 13301 verbose(env, "bpf verifier is misconfigured\n");
36bbef52
DB
13302 return -EINVAL;
13303 } else if (cnt) {
8041902d 13304 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
36bbef52
DB
13305 if (!new_prog)
13306 return -ENOMEM;
8041902d 13307
36bbef52 13308 env->prog = new_prog;
3df126f3 13309 delta += cnt - 1;
36bbef52
DB
13310 }
13311 }
13312
c64b7983 13313 if (bpf_prog_is_dev_bound(env->prog->aux))
9bac3d6d
AS
13314 return 0;
13315
3df126f3 13316 insn = env->prog->insnsi + delta;
36bbef52 13317
9bac3d6d 13318 for (i = 0; i < insn_cnt; i++, insn++) {
c64b7983 13319 bpf_convert_ctx_access_t convert_ctx_access;
2039f26f 13320 bool ctx_access;
c64b7983 13321
62c7989b
DB
13322 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
13323 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
13324 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
2039f26f 13325 insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) {
d691f9e8 13326 type = BPF_READ;
2039f26f
DB
13327 ctx_access = true;
13328 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
13329 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
13330 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
13331 insn->code == (BPF_STX | BPF_MEM | BPF_DW) ||
13332 insn->code == (BPF_ST | BPF_MEM | BPF_B) ||
13333 insn->code == (BPF_ST | BPF_MEM | BPF_H) ||
13334 insn->code == (BPF_ST | BPF_MEM | BPF_W) ||
13335 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) {
d691f9e8 13336 type = BPF_WRITE;
2039f26f
DB
13337 ctx_access = BPF_CLASS(insn->code) == BPF_STX;
13338 } else {
9bac3d6d 13339 continue;
2039f26f 13340 }
9bac3d6d 13341
af86ca4e 13342 if (type == BPF_WRITE &&
2039f26f 13343 env->insn_aux_data[i + delta].sanitize_stack_spill) {
af86ca4e 13344 struct bpf_insn patch[] = {
af86ca4e 13345 *insn,
2039f26f 13346 BPF_ST_NOSPEC(),
af86ca4e
AS
13347 };
13348
13349 cnt = ARRAY_SIZE(patch);
13350 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
13351 if (!new_prog)
13352 return -ENOMEM;
13353
13354 delta += cnt - 1;
13355 env->prog = new_prog;
13356 insn = new_prog->insnsi + i + delta;
13357 continue;
13358 }
13359
2039f26f
DB
13360 if (!ctx_access)
13361 continue;
13362
6efe152d 13363 switch ((int)env->insn_aux_data[i + delta].ptr_type) {
c64b7983
JS
13364 case PTR_TO_CTX:
13365 if (!ops->convert_ctx_access)
13366 continue;
13367 convert_ctx_access = ops->convert_ctx_access;
13368 break;
13369 case PTR_TO_SOCKET:
46f8bc92 13370 case PTR_TO_SOCK_COMMON:
c64b7983
JS
13371 convert_ctx_access = bpf_sock_convert_ctx_access;
13372 break;
655a51e5
MKL
13373 case PTR_TO_TCP_SOCK:
13374 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
13375 break;
fada7fdc
JL
13376 case PTR_TO_XDP_SOCK:
13377 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
13378 break;
2a02759e 13379 case PTR_TO_BTF_ID:
6efe152d 13380 case PTR_TO_BTF_ID | PTR_UNTRUSTED:
27ae7997
MKL
13381 if (type == BPF_READ) {
13382 insn->code = BPF_LDX | BPF_PROBE_MEM |
13383 BPF_SIZE((insn)->code);
13384 env->prog->aux->num_exentries++;
7e40781c 13385 } else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) {
2a02759e
AS
13386 verbose(env, "Writes through BTF pointers are not allowed\n");
13387 return -EINVAL;
13388 }
2a02759e 13389 continue;
c64b7983 13390 default:
9bac3d6d 13391 continue;
c64b7983 13392 }
9bac3d6d 13393
31fd8581 13394 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
f96da094 13395 size = BPF_LDST_BYTES(insn);
31fd8581
YS
13396
13397 /* If the read access is a narrower load of the field,
 13398 * convert it to a 4/8-byte load, to minimize program type specific
 13399 * convert_ctx_access changes. If the conversion is successful,
 13400 * we will apply the proper mask to the result.
13401 */
f96da094 13402 is_narrower_load = size < ctx_field_size;
46f53a65
AI
13403 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
13404 off = insn->off;
31fd8581 13405 if (is_narrower_load) {
f96da094
DB
13406 u8 size_code;
13407
13408 if (type == BPF_WRITE) {
61bd5218 13409 verbose(env, "bpf verifier narrow ctx access misconfigured\n");
f96da094
DB
13410 return -EINVAL;
13411 }
31fd8581 13412
f96da094 13413 size_code = BPF_H;
31fd8581
YS
13414 if (ctx_field_size == 4)
13415 size_code = BPF_W;
13416 else if (ctx_field_size == 8)
13417 size_code = BPF_DW;
f96da094 13418
bc23105c 13419 insn->off = off & ~(size_default - 1);
31fd8581
YS
13420 insn->code = BPF_LDX | BPF_MEM | size_code;
13421 }
f96da094
DB
13422
13423 target_size = 0;
c64b7983
JS
13424 cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
13425 &target_size);
f96da094
DB
13426 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
13427 (ctx_field_size && !target_size)) {
61bd5218 13428 verbose(env, "bpf verifier is misconfigured\n");
9bac3d6d
AS
13429 return -EINVAL;
13430 }
f96da094
DB
13431
13432 if (is_narrower_load && size < target_size) {
d895a0f1
IL
13433 u8 shift = bpf_ctx_narrow_access_offset(
13434 off, size, size_default) * 8;
d7af7e49
AI
13435 if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
13436 verbose(env, "bpf verifier narrow ctx load misconfigured\n");
13437 return -EINVAL;
13438 }
46f53a65
AI
13439 if (ctx_field_size <= 4) {
13440 if (shift)
13441 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
13442 insn->dst_reg,
13443 shift);
31fd8581 13444 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
f96da094 13445 (1 << size * 8) - 1);
46f53a65
AI
13446 } else {
13447 if (shift)
13448 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
13449 insn->dst_reg,
13450 shift);
31fd8581 13451 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
e2f7fc0a 13452 (1ULL << size * 8) - 1);
46f53a65 13453 }
31fd8581 13454 }
9bac3d6d 13455
8041902d 13456 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9bac3d6d
AS
13457 if (!new_prog)
13458 return -ENOMEM;
13459
3df126f3 13460 delta += cnt - 1;
9bac3d6d
AS
13461
13462 /* keep walking new program and skip insns we just inserted */
13463 env->prog = new_prog;
3df126f3 13464 insn = new_prog->insnsi + i + delta;
9bac3d6d
AS
13465 }
13466
13467 return 0;
13468}
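
/* Worked example of the narrow-load rewrite above, assuming a 1-byte
 * read at offset off within a 4-byte ctx field, off % 4 == 1, on a
 * little-endian 64-bit host:
 *
 *	r0 = *(u8 *)(r1 + off)		original narrow load
 * becomes
 *	r0 = *(u32 *)(r1 + (off & ~3))	full-field load
 *	w0 >>= 8			shift = (off % 4) * 8
 *	w0 &= 0xff			mask back down to one byte
 *
 * so convert_ctx_access() callbacks only ever see full-field accesses.
 */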
13469
1c2a088a
AS
13470static int jit_subprogs(struct bpf_verifier_env *env)
13471{
13472 struct bpf_prog *prog = env->prog, **func, *tmp;
13473 int i, j, subprog_start, subprog_end = 0, len, subprog;
a748c697 13474 struct bpf_map *map_ptr;
7105e828 13475 struct bpf_insn *insn;
1c2a088a 13476 void *old_bpf_func;
c4c0bdc0 13477 int err, num_exentries;
1c2a088a 13478
f910cefa 13479 if (env->subprog_cnt <= 1)
1c2a088a
AS
13480 return 0;
13481
7105e828 13482 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
3990ed4c 13483 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn))
69c087ba 13484 continue;
69c087ba 13485
c7a89784
DB
13486 /* Upon error here we cannot fall back to interpreter but
13487 * need a hard reject of the program. Thus -EFAULT is
13488 * propagated in any case.
13489 */
1c2a088a
AS
13490 subprog = find_subprog(env, i + insn->imm + 1);
13491 if (subprog < 0) {
13492 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
13493 i + insn->imm + 1);
13494 return -EFAULT;
13495 }
13496 /* temporarily remember subprog id inside insn instead of
13497 * aux_data, since next loop will split up all insns into funcs
13498 */
f910cefa 13499 insn->off = subprog;
1c2a088a
AS
 13500 /* remember the original imm in case the JIT fails and a fallback
 13501 * to the interpreter is needed
13502 */
13503 env->insn_aux_data[i].call_imm = insn->imm;
 13504 /* point imm to __bpf_call_base+1 from the JIT's point of view */
13505 insn->imm = 1;
3990ed4c
MKL
13506 if (bpf_pseudo_func(insn))
13507 /* jit (e.g. x86_64) may emit fewer instructions
13508 * if it learns a u32 imm is the same as a u64 imm.
 13509 * Force a non-zero value here.
13510 */
13511 insn[1].imm = 1;
1c2a088a
AS
13512 }
13513
c454a46b
MKL
13514 err = bpf_prog_alloc_jited_linfo(prog);
13515 if (err)
13516 goto out_undo_insn;
13517
13518 err = -ENOMEM;
6396bb22 13519 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
1c2a088a 13520 if (!func)
c7a89784 13521 goto out_undo_insn;
1c2a088a 13522
f910cefa 13523 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a 13524 subprog_start = subprog_end;
4cb3d99c 13525 subprog_end = env->subprog_info[i + 1].start;
1c2a088a
AS
13526
13527 len = subprog_end - subprog_start;
fb7dd8bc 13528 /* bpf_prog_run() doesn't call subprogs directly,
492ecee8
AS
13529 * hence main prog stats include the runtime of subprogs.
 13530 * subprogs don't have IDs and are not reachable via prog_get_next_id;
700d4796 13531 * func[i]->stats will never be accessed and stays NULL
492ecee8
AS
13532 */
13533 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
1c2a088a
AS
13534 if (!func[i])
13535 goto out_free;
13536 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
13537 len * sizeof(struct bpf_insn));
4f74d809 13538 func[i]->type = prog->type;
1c2a088a 13539 func[i]->len = len;
4f74d809
DB
13540 if (bpf_prog_calc_tag(func[i]))
13541 goto out_free;
1c2a088a 13542 func[i]->is_func = 1;
ba64e7d8 13543 func[i]->aux->func_idx = i;
f263a814 13544 /* Below members will be freed only at prog->aux */
ba64e7d8
YS
13545 func[i]->aux->btf = prog->aux->btf;
13546 func[i]->aux->func_info = prog->aux->func_info;
f263a814
JF
13547 func[i]->aux->poke_tab = prog->aux->poke_tab;
13548 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
ba64e7d8 13549
a748c697 13550 for (j = 0; j < prog->aux->size_poke_tab; j++) {
f263a814 13551 struct bpf_jit_poke_descriptor *poke;
a748c697 13552
f263a814
JF
13553 poke = &prog->aux->poke_tab[j];
13554 if (poke->insn_idx < subprog_end &&
13555 poke->insn_idx >= subprog_start)
13556 poke->aux = func[i]->aux;
a748c697
MF
13557 }
13558
1c2a088a
AS
13559 /* Use bpf_prog_F_tag to indicate functions in stack traces.
13560 * Long term would need debug info to populate names
13561 */
13562 func[i]->aux->name[0] = 'F';
9c8105bd 13563 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
1c2a088a 13564 func[i]->jit_requested = 1;
d2a3b7c5 13565 func[i]->blinding_requested = prog->blinding_requested;
e6ac2450 13566 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
2357672c 13567 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
c454a46b
MKL
13568 func[i]->aux->linfo = prog->aux->linfo;
13569 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
13570 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
13571 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
c4c0bdc0
YS
13572 num_exentries = 0;
13573 insn = func[i]->insnsi;
13574 for (j = 0; j < func[i]->len; j++, insn++) {
13575 if (BPF_CLASS(insn->code) == BPF_LDX &&
13576 BPF_MODE(insn->code) == BPF_PROBE_MEM)
13577 num_exentries++;
13578 }
13579 func[i]->aux->num_exentries = num_exentries;
ebf7d1f5 13580 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
1c2a088a
AS
13581 func[i] = bpf_int_jit_compile(func[i]);
13582 if (!func[i]->jited) {
13583 err = -ENOTSUPP;
13584 goto out_free;
13585 }
13586 cond_resched();
13587 }
a748c697 13588
1c2a088a
AS
13589 /* at this point all bpf functions were successfully JITed
13590 * now populate all bpf_calls with correct addresses and
13591 * run last pass of JIT
13592 */
f910cefa 13593 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
13594 insn = func[i]->insnsi;
13595 for (j = 0; j < func[i]->len; j++, insn++) {
69c087ba 13596 if (bpf_pseudo_func(insn)) {
3990ed4c 13597 subprog = insn->off;
69c087ba
YS
13598 insn[0].imm = (u32)(long)func[subprog]->bpf_func;
13599 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32;
13600 continue;
13601 }
23a2d70c 13602 if (!bpf_pseudo_call(insn))
1c2a088a
AS
13603 continue;
13604 subprog = insn->off;
3d717fad 13605 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func);
1c2a088a 13606 }
2162fed4
SD
13607
13608 /* we use the aux data to keep a list of the start addresses
13609 * of the JITed images for each function in the program
13610 *
13611 * for some architectures, such as powerpc64, the imm field
13612 * might not be large enough to hold the offset of the start
13613 * address of the callee's JITed image from __bpf_call_base
13614 *
13615 * in such cases, we can lookup the start address of a callee
13616 * by using its subprog id, available from the off field of
13617 * the call instruction, as an index for this list
13618 */
13619 func[i]->aux->func = func;
13620 func[i]->aux->func_cnt = env->subprog_cnt;
1c2a088a 13621 }
f910cefa 13622 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
13623 old_bpf_func = func[i]->bpf_func;
13624 tmp = bpf_int_jit_compile(func[i]);
13625 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
13626 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
c7a89784 13627 err = -ENOTSUPP;
1c2a088a
AS
13628 goto out_free;
13629 }
13630 cond_resched();
13631 }
13632
13633 /* finally lock prog and jit images for all functions and
 13634 * populate kallsyms
13635 */
f910cefa 13636 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
13637 bpf_prog_lock_ro(func[i]);
13638 bpf_prog_kallsyms_add(func[i]);
13639 }
7105e828
DB
13640
13641 /* Last step: make now unused interpreter insns from main
 13642 * prog consistent for later dump requests, so they
 13643 * look the same as if they had only been interpreted.
13644 */
13645 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
69c087ba
YS
13646 if (bpf_pseudo_func(insn)) {
13647 insn[0].imm = env->insn_aux_data[i].call_imm;
3990ed4c
MKL
13648 insn[1].imm = insn->off;
13649 insn->off = 0;
69c087ba
YS
13650 continue;
13651 }
23a2d70c 13652 if (!bpf_pseudo_call(insn))
7105e828
DB
13653 continue;
13654 insn->off = env->insn_aux_data[i].call_imm;
13655 subprog = find_subprog(env, i + insn->off + 1);
dbecd738 13656 insn->imm = subprog;
7105e828
DB
13657 }
13658
1c2a088a
AS
13659 prog->jited = 1;
13660 prog->bpf_func = func[0]->bpf_func;
d00c6473 13661 prog->jited_len = func[0]->jited_len;
1c2a088a 13662 prog->aux->func = func;
f910cefa 13663 prog->aux->func_cnt = env->subprog_cnt;
e16301fb 13664 bpf_prog_jit_attempt_done(prog);
1c2a088a
AS
13665 return 0;
13666out_free:
f263a814
JF
13667 /* We failed JIT'ing, so at this point we need to unregister poke
13668 * descriptors from subprogs, so that kernel is not attempting to
13669 * patch it anymore as we're freeing the subprog JIT memory.
13670 */
13671 for (i = 0; i < prog->aux->size_poke_tab; i++) {
13672 map_ptr = prog->aux->poke_tab[i].tail_call.map;
13673 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
13674 }
13675 /* At this point we're guaranteed that poke descriptors are not
13676 * live anymore. We can just unlink its descriptor table as it's
13677 * released with the main prog.
13678 */
a748c697
MF
13679 for (i = 0; i < env->subprog_cnt; i++) {
13680 if (!func[i])
13681 continue;
f263a814 13682 func[i]->aux->poke_tab = NULL;
a748c697
MF
13683 bpf_jit_free(func[i]);
13684 }
1c2a088a 13685 kfree(func);
c7a89784 13686out_undo_insn:
1c2a088a
AS
13687 /* cleanup main prog to be interpreted */
13688 prog->jit_requested = 0;
d2a3b7c5 13689 prog->blinding_requested = 0;
1c2a088a 13690 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
23a2d70c 13691 if (!bpf_pseudo_call(insn))
1c2a088a
AS
13692 continue;
13693 insn->off = 0;
13694 insn->imm = env->insn_aux_data[i].call_imm;
13695 }
e16301fb 13696 bpf_prog_jit_attempt_done(prog);
1c2a088a
AS
13697 return err;
13698}
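
/* Sketch of the call-insn bookkeeping above for a bpf2bpf 'call pc+X',
 * with illustrative values:
 *
 *	after verification:	insn->imm = X (relative insn offset)
 *	first loop:		insn->off = subprog id, insn->imm = 1,
 *				X saved in insn_aux_data[i].call_imm
 *	address fixup:		insn->imm = JITed address of the callee,
 *				expressed relative to __bpf_call_base
 *	on failure:		off/imm restored from call_imm so the
 *				prog can still fall back to the
 *				interpreter where the config allows it
 */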
13699
1ea47e01
AS
13700static int fixup_call_args(struct bpf_verifier_env *env)
13701{
19d28fbd 13702#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
13703 struct bpf_prog *prog = env->prog;
13704 struct bpf_insn *insn = prog->insnsi;
e6ac2450 13705 bool has_kfunc_call = bpf_prog_has_kfunc_call(prog);
1ea47e01 13706 int i, depth;
19d28fbd 13707#endif
e4052d06 13708 int err = 0;
1ea47e01 13709
e4052d06
QM
13710 if (env->prog->jit_requested &&
13711 !bpf_prog_is_dev_bound(env->prog->aux)) {
19d28fbd
DM
13712 err = jit_subprogs(env);
13713 if (err == 0)
1c2a088a 13714 return 0;
c7a89784
DB
13715 if (err == -EFAULT)
13716 return err;
19d28fbd
DM
13717 }
13718#ifndef CONFIG_BPF_JIT_ALWAYS_ON
e6ac2450
MKL
13719 if (has_kfunc_call) {
13720 verbose(env, "calling kernel functions are not allowed in non-JITed programs\n");
13721 return -EINVAL;
13722 }
e411901c
MF
13723 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
13724 /* When JIT fails the progs with bpf2bpf calls and tail_calls
13725 * have to be rejected, since interpreter doesn't support them yet.
13726 */
13727 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
13728 return -EINVAL;
13729 }
1ea47e01 13730 for (i = 0; i < prog->len; i++, insn++) {
69c087ba
YS
13731 if (bpf_pseudo_func(insn)) {
13732 /* When JIT fails the progs with callback calls
13733 * have to be rejected, since interpreter doesn't support them yet.
13734 */
13735 verbose(env, "callbacks are not allowed in non-JITed programs\n");
13736 return -EINVAL;
13737 }
13738
23a2d70c 13739 if (!bpf_pseudo_call(insn))
1ea47e01
AS
13740 continue;
13741 depth = get_callee_stack_depth(env, insn, i);
13742 if (depth < 0)
13743 return depth;
13744 bpf_patch_call_args(insn, depth);
13745 }
19d28fbd
DM
13746 err = 0;
13747#endif
13748 return err;
1ea47e01
AS
13749}
13750
e6ac2450
MKL
13751static int fixup_kfunc_call(struct bpf_verifier_env *env,
13752 struct bpf_insn *insn)
13753{
13754 const struct bpf_kfunc_desc *desc;
13755
a5d82727
KKD
13756 if (!insn->imm) {
13757 verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
13758 return -EINVAL;
13759 }
13760
e6ac2450
MKL
13761 /* insn->imm has the btf func_id. Replace it with
 13762 * an address (relative to __bpf_call_base).
13763 */
2357672c 13764 desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
e6ac2450
MKL
13765 if (!desc) {
13766 verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
13767 insn->imm);
13768 return -EFAULT;
13769 }
13770
13771 insn->imm = desc->imm;
13772
13773 return 0;
13774}
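
/* After this fixup a kfunc call looks like an ordinary helper call to
 * the JIT: the target is __bpf_call_base + insn->imm, mirroring the
 * 'fn->func - __bpf_call_base' stored for helpers in do_misc_fixups().
 */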
13775
e6ac5933
BJ
13776/* Do various post-verification rewrites in a single program pass.
13777 * These rewrites simplify JIT and interpreter implementations.
e245c5c6 13778 */
e6ac5933 13779static int do_misc_fixups(struct bpf_verifier_env *env)
e245c5c6 13780{
79741b3b 13781 struct bpf_prog *prog = env->prog;
f92c1e18 13782 enum bpf_attach_type eatype = prog->expected_attach_type;
9b99edca 13783 enum bpf_prog_type prog_type = resolve_prog_type(prog);
79741b3b 13784 struct bpf_insn *insn = prog->insnsi;
e245c5c6 13785 const struct bpf_func_proto *fn;
79741b3b 13786 const int insn_cnt = prog->len;
09772d92 13787 const struct bpf_map_ops *ops;
c93552c4 13788 struct bpf_insn_aux_data *aux;
81ed18ab
AS
13789 struct bpf_insn insn_buf[16];
13790 struct bpf_prog *new_prog;
13791 struct bpf_map *map_ptr;
d2e4c1e6 13792 int i, ret, cnt, delta = 0;
e245c5c6 13793
79741b3b 13794 for (i = 0; i < insn_cnt; i++, insn++) {
e6ac5933 13795 /* Make divide-by-zero exceptions impossible. */
f6b1b3bf
DB
13796 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
13797 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
13798 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
68fda450 13799 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
f6b1b3bf 13800 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
e88b2c6e
DB
13801 bool isdiv = BPF_OP(insn->code) == BPF_DIV;
13802 struct bpf_insn *patchlet;
13803 struct bpf_insn chk_and_div[] = {
9b00f1b7 13804 /* [R,W]x div 0 -> 0 */
e88b2c6e
DB
13805 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
13806 BPF_JNE | BPF_K, insn->src_reg,
13807 0, 2, 0),
f6b1b3bf
DB
13808 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
13809 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13810 *insn,
13811 };
e88b2c6e 13812 struct bpf_insn chk_and_mod[] = {
9b00f1b7 13813 /* [R,W]x mod 0 -> [R,W]x */
e88b2c6e
DB
13814 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
13815 BPF_JEQ | BPF_K, insn->src_reg,
9b00f1b7 13816 0, 1 + (is64 ? 0 : 1), 0),
f6b1b3bf 13817 *insn,
9b00f1b7
DB
13818 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
13819 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg),
f6b1b3bf 13820 };
f6b1b3bf 13821
e88b2c6e
DB
13822 patchlet = isdiv ? chk_and_div : chk_and_mod;
13823 cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
9b00f1b7 13824 ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0);
f6b1b3bf
DB
13825
13826 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
68fda450
AS
13827 if (!new_prog)
13828 return -ENOMEM;
13829
13830 delta += cnt - 1;
13831 env->prog = prog = new_prog;
13832 insn = new_prog->insnsi + i + delta;
13833 continue;
13834 }
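
		/* Worked example of the patch above for 'r0 /= r1' (64-bit):
		 *
		 *	if r1 != 0 goto +2
		 *	w0 ^= w0		div by zero yields 0
		 *	goto +1
		 *	r0 /= r1		original insn, now guarded
		 *
		 * For 'r0 %= r1' the dst is instead left as-is when the
		 * divisor is zero, matching BPF's 'x mod 0 == x' semantics.
		 */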
13835
e6ac5933 13836 /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
e0cea7ce
DB
13837 if (BPF_CLASS(insn->code) == BPF_LD &&
13838 (BPF_MODE(insn->code) == BPF_ABS ||
13839 BPF_MODE(insn->code) == BPF_IND)) {
13840 cnt = env->ops->gen_ld_abs(insn, insn_buf);
13841 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
13842 verbose(env, "bpf verifier is misconfigured\n");
13843 return -EINVAL;
13844 }
13845
13846 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13847 if (!new_prog)
13848 return -ENOMEM;
13849
13850 delta += cnt - 1;
13851 env->prog = prog = new_prog;
13852 insn = new_prog->insnsi + i + delta;
13853 continue;
13854 }
13855
e6ac5933 13856 /* Rewrite pointer arithmetic to mitigate speculation attacks. */
979d63d5
DB
13857 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
13858 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
13859 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
13860 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
979d63d5 13861 struct bpf_insn *patch = &insn_buf[0];
801c6058 13862 bool issrc, isneg, isimm;
979d63d5
DB
13863 u32 off_reg;
13864
13865 aux = &env->insn_aux_data[i + delta];
3612af78
DB
13866 if (!aux->alu_state ||
13867 aux->alu_state == BPF_ALU_NON_POINTER)
979d63d5
DB
13868 continue;
13869
13870 isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
13871 issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
13872 BPF_ALU_SANITIZE_SRC;
801c6058 13873 isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
979d63d5
DB
13874
13875 off_reg = issrc ? insn->src_reg : insn->dst_reg;
801c6058
DB
13876 if (isimm) {
13877 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
13878 } else {
13879 if (isneg)
13880 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
13881 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
13882 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
13883 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
13884 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
13885 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
13886 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
13887 }
b9b34ddb
DB
13888 if (!issrc)
13889 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
13890 insn->src_reg = BPF_REG_AX;
979d63d5
DB
13891 if (isneg)
13892 insn->code = insn->code == code_add ?
13893 code_sub : code_add;
13894 *patch++ = *insn;
801c6058 13895 if (issrc && isneg && !isimm)
979d63d5
DB
13896 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
13897 cnt = patch - insn_buf;
13898
13899 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13900 if (!new_prog)
13901 return -ENOMEM;
13902
13903 delta += cnt - 1;
13904 env->prog = prog = new_prog;
13905 insn = new_prog->insnsi + i + delta;
13906 continue;
13907 }
13908
79741b3b
AS
13909 if (insn->code != (BPF_JMP | BPF_CALL))
13910 continue;
cc8b0b92
AS
13911 if (insn->src_reg == BPF_PSEUDO_CALL)
13912 continue;
e6ac2450
MKL
13913 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
13914 ret = fixup_kfunc_call(env, insn);
13915 if (ret)
13916 return ret;
13917 continue;
13918 }
e245c5c6 13919
79741b3b
AS
13920 if (insn->imm == BPF_FUNC_get_route_realm)
13921 prog->dst_needed = 1;
13922 if (insn->imm == BPF_FUNC_get_prandom_u32)
13923 bpf_user_rnd_init_once();
9802d865
JB
13924 if (insn->imm == BPF_FUNC_override_return)
13925 prog->kprobe_override = 1;
79741b3b 13926 if (insn->imm == BPF_FUNC_tail_call) {
7b9f6da1
DM
13927 /* If we tail call into other programs, we
13928 * cannot make any assumptions since they can
13929 * be replaced dynamically during runtime in
13930 * the program array.
13931 */
13932 prog->cb_access = 1;
e411901c
MF
13933 if (!allow_tail_call_in_subprogs(env))
13934 prog->aux->stack_depth = MAX_BPF_STACK;
13935 prog->aux->max_pkt_offset = MAX_PACKET_OFF;
7b9f6da1 13936
79741b3b 13937 /* mark bpf_tail_call as a different opcode to avoid a
8fb33b60 13938 * conditional branch in the interpreter for every normal
79741b3b
AS
 13939 * call and to prevent accidental JITing by a JIT compiler
 13940 * that doesn't support bpf_tail_call yet
e245c5c6 13941 */
79741b3b 13942 insn->imm = 0;
71189fa9 13943 insn->code = BPF_JMP | BPF_TAIL_CALL;
b2157399 13944
c93552c4 13945 aux = &env->insn_aux_data[i + delta];
d2a3b7c5 13946 if (env->bpf_capable && !prog->blinding_requested &&
cc52d914 13947 prog->jit_requested &&
d2e4c1e6
DB
13948 !bpf_map_key_poisoned(aux) &&
13949 !bpf_map_ptr_poisoned(aux) &&
13950 !bpf_map_ptr_unpriv(aux)) {
13951 struct bpf_jit_poke_descriptor desc = {
13952 .reason = BPF_POKE_REASON_TAIL_CALL,
13953 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
13954 .tail_call.key = bpf_map_key_immediate(aux),
a748c697 13955 .insn_idx = i + delta,
d2e4c1e6
DB
13956 };
13957
13958 ret = bpf_jit_add_poke_descriptor(prog, &desc);
13959 if (ret < 0) {
13960 verbose(env, "adding tail call poke descriptor failed\n");
13961 return ret;
13962 }
13963
13964 insn->imm = ret + 1;
13965 continue;
13966 }
13967
c93552c4
DB
13968 if (!bpf_map_ptr_unpriv(aux))
13969 continue;
13970
b2157399
AS
13971 /* instead of changing every JIT dealing with tail_call
13972 * emit two extra insns:
13973 * if (index >= max_entries) goto out;
13974 * index &= array->index_mask;
13975 * to avoid out-of-bounds cpu speculation
13976 */
c93552c4 13977 if (bpf_map_ptr_poisoned(aux)) {
40950343 13978 verbose(env, "tail_call abusing map_ptr\n");
b2157399
AS
13979 return -EINVAL;
13980 }
c93552c4 13981
d2e4c1e6 13982 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
b2157399
AS
13983 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
13984 map_ptr->max_entries, 2);
13985 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
13986 container_of(map_ptr,
13987 struct bpf_array,
13988 map)->index_mask);
13989 insn_buf[2] = *insn;
13990 cnt = 3;
13991 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
13992 if (!new_prog)
13993 return -ENOMEM;
13994
13995 delta += cnt - 1;
13996 env->prog = prog = new_prog;
13997 insn = new_prog->insnsi + i + delta;
79741b3b
AS
13998 continue;
13999 }
e245c5c6 14000
b00628b1
AS
14001 if (insn->imm == BPF_FUNC_timer_set_callback) {
14002 /* The verifier will process callback_fn as many times as necessary
14003 * with different maps and the register states prepared by
14004 * set_timer_callback_state will be accurate.
14005 *
14006 * The following use case is valid:
14007 * map1 is shared by prog1, prog2, prog3.
14008 * prog1 calls bpf_timer_init for some map1 elements
14009 * prog2 calls bpf_timer_set_callback for some map1 elements.
14010 * Those that were not bpf_timer_init-ed will return -EINVAL.
14011 * prog3 calls bpf_timer_start for some map1 elements.
14012 * Those that were not both bpf_timer_init-ed and
14013 * bpf_timer_set_callback-ed will return -EINVAL.
14014 */
14015 struct bpf_insn ld_addrs[2] = {
14016 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux),
14017 };
14018
14019 insn_buf[0] = ld_addrs[0];
14020 insn_buf[1] = ld_addrs[1];
14021 insn_buf[2] = *insn;
14022 cnt = 3;
14023
14024 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14025 if (!new_prog)
14026 return -ENOMEM;
14027
14028 delta += cnt - 1;
14029 env->prog = prog = new_prog;
14030 insn = new_prog->insnsi + i + delta;
14031 goto patch_call_imm;
14032 }
14033
b00fa38a
JK
14034 if (insn->imm == BPF_FUNC_task_storage_get ||
14035 insn->imm == BPF_FUNC_sk_storage_get ||
14036 insn->imm == BPF_FUNC_inode_storage_get) {
14037 if (env->prog->aux->sleepable)
d56c9fe6 14038 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL);
b00fa38a 14039 else
d56c9fe6 14040 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
b00fa38a
JK
14041 insn_buf[1] = *insn;
14042 cnt = 2;
14043
14044 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14045 if (!new_prog)
14046 return -ENOMEM;
14047
14048 delta += cnt - 1;
14049 env->prog = prog = new_prog;
14050 insn = new_prog->insnsi + i + delta;
14051 goto patch_call_imm;
14052 }
14053
89c63074 14054 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
09772d92
DB
14055 * and other inlining handlers are currently limited to 64 bit
14056 * only.
89c63074 14057 */
60b58afc 14058 if (prog->jit_requested && BITS_PER_LONG == 64 &&
09772d92
DB
14059 (insn->imm == BPF_FUNC_map_lookup_elem ||
14060 insn->imm == BPF_FUNC_map_update_elem ||
84430d42
DB
14061 insn->imm == BPF_FUNC_map_delete_elem ||
14062 insn->imm == BPF_FUNC_map_push_elem ||
14063 insn->imm == BPF_FUNC_map_pop_elem ||
e6a4750f 14064 insn->imm == BPF_FUNC_map_peek_elem ||
0640c77c 14065 insn->imm == BPF_FUNC_redirect_map ||
07343110
FZ
14066 insn->imm == BPF_FUNC_for_each_map_elem ||
14067 insn->imm == BPF_FUNC_map_lookup_percpu_elem)) {
c93552c4
DB
14068 aux = &env->insn_aux_data[i + delta];
14069 if (bpf_map_ptr_poisoned(aux))
14070 goto patch_call_imm;
14071
d2e4c1e6 14072 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
09772d92
DB
14073 ops = map_ptr->ops;
14074 if (insn->imm == BPF_FUNC_map_lookup_elem &&
14075 ops->map_gen_lookup) {
14076 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
4a8f87e6
DB
14077 if (cnt == -EOPNOTSUPP)
14078 goto patch_map_ops_generic;
14079 if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
09772d92
DB
14080 verbose(env, "bpf verifier is misconfigured\n");
14081 return -EINVAL;
14082 }
81ed18ab 14083
09772d92
DB
14084 new_prog = bpf_patch_insn_data(env, i + delta,
14085 insn_buf, cnt);
14086 if (!new_prog)
14087 return -ENOMEM;
81ed18ab 14088
09772d92
DB
14089 delta += cnt - 1;
14090 env->prog = prog = new_prog;
14091 insn = new_prog->insnsi + i + delta;
14092 continue;
14093 }
81ed18ab 14094
09772d92
DB
14095 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
14096 (void *(*)(struct bpf_map *map, void *key))NULL));
14097 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
14098 (int (*)(struct bpf_map *map, void *key))NULL));
14099 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
14100 (int (*)(struct bpf_map *map, void *key, void *value,
14101 u64 flags))NULL));
84430d42
DB
14102 BUILD_BUG_ON(!__same_type(ops->map_push_elem,
14103 (int (*)(struct bpf_map *map, void *value,
14104 u64 flags))NULL));
14105 BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
14106 (int (*)(struct bpf_map *map, void *value))NULL));
14107 BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
14108 (int (*)(struct bpf_map *map, void *value))NULL));
e6a4750f
BT
14109 BUILD_BUG_ON(!__same_type(ops->map_redirect,
14110 (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL));
0640c77c
AI
14111 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback,
14112 (int (*)(struct bpf_map *map,
14113 bpf_callback_t callback_fn,
14114 void *callback_ctx,
14115 u64 flags))NULL));
07343110
FZ
14116 BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem,
14117 (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL));
e6a4750f 14118
4a8f87e6 14119patch_map_ops_generic:
09772d92
DB
14120 switch (insn->imm) {
14121 case BPF_FUNC_map_lookup_elem:
3d717fad 14122 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem);
09772d92
DB
14123 continue;
14124 case BPF_FUNC_map_update_elem:
3d717fad 14125 insn->imm = BPF_CALL_IMM(ops->map_update_elem);
09772d92
DB
14126 continue;
14127 case BPF_FUNC_map_delete_elem:
3d717fad 14128 insn->imm = BPF_CALL_IMM(ops->map_delete_elem);
09772d92 14129 continue;
84430d42 14130 case BPF_FUNC_map_push_elem:
3d717fad 14131 insn->imm = BPF_CALL_IMM(ops->map_push_elem);
84430d42
DB
14132 continue;
14133 case BPF_FUNC_map_pop_elem:
3d717fad 14134 insn->imm = BPF_CALL_IMM(ops->map_pop_elem);
84430d42
DB
14135 continue;
14136 case BPF_FUNC_map_peek_elem:
3d717fad 14137 insn->imm = BPF_CALL_IMM(ops->map_peek_elem);
84430d42 14138 continue;
e6a4750f 14139 case BPF_FUNC_redirect_map:
3d717fad 14140 insn->imm = BPF_CALL_IMM(ops->map_redirect);
e6a4750f 14141 continue;
0640c77c
AI
14142 case BPF_FUNC_for_each_map_elem:
14143 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback);
e6a4750f 14144 continue;
07343110
FZ
14145 case BPF_FUNC_map_lookup_percpu_elem:
14146 insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem);
14147 continue;
09772d92 14148 }
81ed18ab 14149
09772d92 14150 goto patch_call_imm;
81ed18ab
AS
14151 }
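
		/* A rough sketch (shape only, assuming an array map) of
		 * what ops->map_gen_lookup() emits in place of the call:
		 *
		 *	r0 = *(u32 *)(r2 + 0)		load the index
		 *	if r0 >= max_entries goto out	bounds check
		 *	r0 = r1 + elems_off + r0 * elem_size
		 *
		 * i.e. the helper call vanishes from the hot path; the
		 * exact insns are per map type and not spelled out here.
		 */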
14152
e6ac5933 14153 /* Implement bpf_jiffies64 inline. */
5576b991
MKL
14154 if (prog->jit_requested && BITS_PER_LONG == 64 &&
14155 insn->imm == BPF_FUNC_jiffies64) {
14156 struct bpf_insn ld_jiffies_addr[2] = {
14157 BPF_LD_IMM64(BPF_REG_0,
14158 (unsigned long)&jiffies),
14159 };
14160
14161 insn_buf[0] = ld_jiffies_addr[0];
14162 insn_buf[1] = ld_jiffies_addr[1];
14163 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0,
14164 BPF_REG_0, 0);
14165 cnt = 3;
14166
14167 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
14168 cnt);
14169 if (!new_prog)
14170 return -ENOMEM;
14171
14172 delta += cnt - 1;
14173 env->prog = prog = new_prog;
14174 insn = new_prog->insnsi + i + delta;
14175 continue;
14176 }
14177
f92c1e18
JO
14178 /* Implement bpf_get_func_arg inline. */
14179 if (prog_type == BPF_PROG_TYPE_TRACING &&
14180 insn->imm == BPF_FUNC_get_func_arg) {
14181 /* Load nr_args from ctx - 8 */
14182 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
14183 insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
14184 insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
14185 insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
14186 insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
14187 insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
14188 insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
14189 insn_buf[7] = BPF_JMP_A(1);
14190 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
14191 cnt = 9;
14192
14193 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14194 if (!new_prog)
14195 return -ENOMEM;
14196
14197 delta += cnt - 1;
14198 env->prog = prog = new_prog;
14199 insn = new_prog->insnsi + i + delta;
14200 continue;
14201 }
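
		/* The sequence above is equivalent to this C, given the
		 * tracing ctx layout where nr_args lives at ctx - 8 and
		 * the args follow from ctx[0] on:
		 *
		 *	if (arg_n >= nr_args)
		 *		return -EINVAL;
		 *	*value = ((u64 *)ctx)[arg_n];
		 *	return 0;
		 */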
14202
14203 /* Implement bpf_get_func_ret inline. */
14204 if (prog_type == BPF_PROG_TYPE_TRACING &&
14205 insn->imm == BPF_FUNC_get_func_ret) {
14206 if (eatype == BPF_TRACE_FEXIT ||
14207 eatype == BPF_MODIFY_RETURN) {
14208 /* Load nr_args from ctx - 8 */
14209 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
14210 insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
14211 insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
14212 insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
14213 insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
14214 insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
14215 cnt = 6;
14216 } else {
14217 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
14218 cnt = 1;
14219 }
14220
14221 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
14222 if (!new_prog)
14223 return -ENOMEM;
14224
14225 delta += cnt - 1;
14226 env->prog = prog = new_prog;
14227 insn = new_prog->insnsi + i + delta;
14228 continue;
14229 }
14230
14231 /* Implement get_func_arg_cnt inline. */
14232 if (prog_type == BPF_PROG_TYPE_TRACING &&
14233 insn->imm == BPF_FUNC_get_func_arg_cnt) {
14234 /* Load nr_args from ctx - 8 */
14235 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
14236
14237 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
14238 if (!new_prog)
14239 return -ENOMEM;
14240
14241 env->prog = prog = new_prog;
14242 insn = new_prog->insnsi + i + delta;
14243 continue;
14244 }
14245
f705ec76 14246 /* Implement bpf_get_func_ip inline. */
9b99edca
JO
14247 if (prog_type == BPF_PROG_TYPE_TRACING &&
14248 insn->imm == BPF_FUNC_get_func_ip) {
f92c1e18
JO
14249 /* Load IP address from ctx - 16 */
14250 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
9b99edca
JO
14251
14252 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
14253 if (!new_prog)
14254 return -ENOMEM;
14255
14256 env->prog = prog = new_prog;
14257 insn = new_prog->insnsi + i + delta;
14258 continue;
14259 }
14260
81ed18ab 14261patch_call_imm:
5e43f899 14262 fn = env->ops->get_func_proto(insn->imm, env->prog);
79741b3b
AS
 14263 /* all functions that have a prototype and that the verifier allowed
 14264 * programs to call must be real in-kernel functions
14265 */
14266 if (!fn->func) {
61bd5218
JK
14267 verbose(env,
14268 "kernel subsystem misconfigured func %s#%d\n",
79741b3b
AS
14269 func_id_name(insn->imm), insn->imm);
14270 return -EFAULT;
e245c5c6 14271 }
79741b3b 14272 insn->imm = fn->func - __bpf_call_base;
e245c5c6 14273 }
e245c5c6 14274
d2e4c1e6
DB
14275 /* Since poke tab is now finalized, publish aux to tracker. */
14276 for (i = 0; i < prog->aux->size_poke_tab; i++) {
14277 map_ptr = prog->aux->poke_tab[i].tail_call.map;
14278 if (!map_ptr->ops->map_poke_track ||
14279 !map_ptr->ops->map_poke_untrack ||
14280 !map_ptr->ops->map_poke_run) {
14281 verbose(env, "bpf verifier is misconfigured\n");
14282 return -EINVAL;
14283 }
14284
14285 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
14286 if (ret < 0) {
14287 verbose(env, "tracking tail call prog failed\n");
14288 return ret;
14289 }
14290 }
14291
e6ac2450
MKL
14292 sort_kfunc_descs_by_imm(env->prog);
14293
79741b3b
AS
14294 return 0;
14295}
e245c5c6 14296
58e2af8b 14297static void free_states(struct bpf_verifier_env *env)
f1bca824 14298{
58e2af8b 14299 struct bpf_verifier_state_list *sl, *sln;
f1bca824
AS
14300 int i;
14301
9f4686c4
AS
14302 sl = env->free_list;
14303 while (sl) {
14304 sln = sl->next;
14305 free_verifier_state(&sl->state, false);
14306 kfree(sl);
14307 sl = sln;
14308 }
51c39bb1 14309 env->free_list = NULL;
9f4686c4 14310
f1bca824
AS
14311 if (!env->explored_states)
14312 return;
14313
dc2a4ebc 14314 for (i = 0; i < state_htab_size(env); i++) {
f1bca824
AS
14315 sl = env->explored_states[i];
14316
a8f500af
AS
14317 while (sl) {
14318 sln = sl->next;
14319 free_verifier_state(&sl->state, false);
14320 kfree(sl);
14321 sl = sln;
14322 }
51c39bb1 14323 env->explored_states[i] = NULL;
f1bca824 14324 }
51c39bb1 14325}
f1bca824 14326
51c39bb1
AS
14327static int do_check_common(struct bpf_verifier_env *env, int subprog)
14328{
6f8a57cc 14329 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
51c39bb1
AS
14330 struct bpf_verifier_state *state;
14331 struct bpf_reg_state *regs;
14332 int ret, i;
14333
14334 env->prev_linfo = NULL;
14335 env->pass_cnt++;
14336
14337 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
14338 if (!state)
14339 return -ENOMEM;
14340 state->curframe = 0;
14341 state->speculative = false;
14342 state->branches = 1;
14343 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
14344 if (!state->frame[0]) {
14345 kfree(state);
14346 return -ENOMEM;
14347 }
14348 env->cur_state = state;
14349 init_func_state(env, state->frame[0],
14350 BPF_MAIN_FUNC /* callsite */,
14351 0 /* frameno */,
14352 subprog);
14353
14354 regs = state->frame[state->curframe]->regs;
be8704ff 14355 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
51c39bb1
AS
14356 ret = btf_prepare_func_args(env, subprog, regs);
14357 if (ret)
14358 goto out;
14359 for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
14360 if (regs[i].type == PTR_TO_CTX)
14361 mark_reg_known_zero(env, regs, i);
14362 else if (regs[i].type == SCALAR_VALUE)
14363 mark_reg_unknown(env, regs, i);
cf9f2f8d 14364 else if (base_type(regs[i].type) == PTR_TO_MEM) {
e5069b9c
DB
14365 const u32 mem_size = regs[i].mem_size;
14366
14367 mark_reg_known_zero(env, regs, i);
14368 regs[i].mem_size = mem_size;
14369 regs[i].id = ++env->id_gen;
14370 }
51c39bb1
AS
14371 }
14372 } else {
14373 /* 1st arg to a function */
14374 regs[BPF_REG_1].type = PTR_TO_CTX;
14375 mark_reg_known_zero(env, regs, BPF_REG_1);
34747c41 14376 ret = btf_check_subprog_arg_match(env, subprog, regs);
51c39bb1
AS
14377 if (ret == -EFAULT)
14378 /* unlikely verifier bug. abort.
14379 * ret == 0 and ret < 0 are sadly acceptable for
14380 * main() function due to backward compatibility.
 14381 * E.g. a socket filter program may be written as:
14382 * int bpf_prog(struct pt_regs *ctx)
14383 * and never dereference that ctx in the program.
14384 * 'struct pt_regs' is a type mismatch for socket
14385 * filter that should be using 'struct __sk_buff'.
14386 */
14387 goto out;
14388 }
14389
14390 ret = do_check(env);
14391out:
f59bbfc2
AS
14392 /* check for NULL is necessary, since cur_state can be freed inside
14393 * do_check() under memory pressure.
14394 */
14395 if (env->cur_state) {
14396 free_verifier_state(env->cur_state, true);
14397 env->cur_state = NULL;
14398 }
6f8a57cc
AN
14399 while (!pop_stack(env, NULL, NULL, false));
14400 if (!ret && pop_log)
14401 bpf_vlog_reset(&env->log, 0);
51c39bb1 14402 free_states(env);
51c39bb1
AS
14403 return ret;
14404}
14405
14406/* Verify all global functions in a BPF program one by one based on their BTF.
14407 * All global functions must pass verification. Otherwise the whole program is rejected.
14408 * Consider:
14409 * int bar(int);
14410 * int foo(int f)
14411 * {
14412 * return bar(f);
14413 * }
14414 * int bar(int b)
14415 * {
14416 * ...
14417 * }
14418 * foo() will be verified first for R1=any_scalar_value. During verification it
14419 * will be assumed that bar() already verified successfully and call to bar()
14420 * from foo() will be checked for type match only. Later bar() will be verified
14421 * independently to check that it's safe for R1=any_scalar_value.
14422 */
14423static int do_check_subprogs(struct bpf_verifier_env *env)
14424{
14425 struct bpf_prog_aux *aux = env->prog->aux;
14426 int i, ret;
14427
14428 if (!aux->func_info)
14429 return 0;
14430
14431 for (i = 1; i < env->subprog_cnt; i++) {
14432 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL)
14433 continue;
14434 env->insn_idx = env->subprog_info[i].start;
14435 WARN_ON_ONCE(env->insn_idx == 0);
14436 ret = do_check_common(env, i);
14437 if (ret) {
14438 return ret;
14439 } else if (env->log.level & BPF_LOG_LEVEL) {
14440 verbose(env,
14441 "Func#%d is safe for any args that match its prototype\n",
14442 i);
14443 }
14444 }
14445 return 0;
14446}
14447
14448static int do_check_main(struct bpf_verifier_env *env)
14449{
14450 int ret;
14451
14452 env->insn_idx = 0;
14453 ret = do_check_common(env, 0);
14454 if (!ret)
14455 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
14456 return ret;
14457}
14458
14459
06ee7115
AS
14460static void print_verification_stats(struct bpf_verifier_env *env)
14461{
14462 int i;
14463
14464 if (env->log.level & BPF_LOG_STATS) {
14465 verbose(env, "verification time %lld usec\n",
14466 div_u64(env->verification_time, 1000));
14467 verbose(env, "stack depth ");
14468 for (i = 0; i < env->subprog_cnt; i++) {
14469 u32 depth = env->subprog_info[i].stack_depth;
14470
14471 verbose(env, "%d", depth);
14472 if (i + 1 < env->subprog_cnt)
14473 verbose(env, "+");
14474 }
14475 verbose(env, "\n");
14476 }
14477 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
14478 "total_states %d peak_states %d mark_read %d\n",
14479 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
14480 env->max_states_per_insn, env->total_states,
14481 env->peak_states, env->longest_mark_read_walk);
f1bca824
AS
14482}
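/* A minimal sketch of the loader side that requests these stats; the raw
 * syscall is used for clarity and the buffer size is arbitrary.
 * BPF_LOG_STATS is the 4 bit of attr.log_level, so log_level = 1 | 4
 * asks for the normal verification trace plus the stats printed above:
 *
 *	#include <linux/bpf.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static char vlog[1 << 20];
 *
 *	int load_with_stats(const struct bpf_insn *insns, __u32 insn_cnt)
 *	{
 *		union bpf_attr attr = {};
 *
 *		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *		attr.insns = (unsigned long)insns;
 *		attr.insn_cnt = insn_cnt;
 *		attr.license = (unsigned long)"GPL";
 *		attr.log_level = 1 | 4;	// BPF_LOG_LEVEL1 | BPF_LOG_STATS
 *		attr.log_buf = (unsigned long)vlog;
 *		attr.log_size = sizeof(vlog);
 *		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *	}
 */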
14483
27ae7997
MKL
14484static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
14485{
14486 const struct btf_type *t, *func_proto;
14487 const struct bpf_struct_ops *st_ops;
14488 const struct btf_member *member;
14489 struct bpf_prog *prog = env->prog;
14490 u32 btf_id, member_idx;
14491 const char *mname;
14492
12aa8a94
THJ
14493 if (!prog->gpl_compatible) {
14494 verbose(env, "struct ops programs must have a GPL compatible license\n");
14495 return -EINVAL;
14496 }
14497
27ae7997
MKL
14498 btf_id = prog->aux->attach_btf_id;
14499 st_ops = bpf_struct_ops_find(btf_id);
14500 if (!st_ops) {
14501 verbose(env, "attach_btf_id %u is not a supported struct\n",
14502 btf_id);
14503 return -ENOTSUPP;
14504 }
14505
14506 t = st_ops->type;
14507 member_idx = prog->expected_attach_type;
14508 if (member_idx >= btf_type_vlen(t)) {
14509 verbose(env, "attach to invalid member idx %u of struct %s\n",
14510 member_idx, st_ops->name);
14511 return -EINVAL;
14512 }
14513
14514 member = &btf_type_member(t)[member_idx];
14515 mname = btf_name_by_offset(btf_vmlinux, member->name_off);
14516 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type,
14517 NULL);
14518 if (!func_proto) {
14519 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n",
14520 mname, member_idx, st_ops->name);
14521 return -EINVAL;
14522 }
14523
14524 if (st_ops->check_member) {
14525 int err = st_ops->check_member(t, member);
14526
14527 if (err) {
14528 verbose(env, "attach to unsupported member %s of struct %s\n",
14529 mname, st_ops->name);
14530 return err;
14531 }
14532 }
14533
14534 prog->aux->attach_func_proto = func_proto;
14535 prog->aux->attach_func_name = mname;
14536 env->ops = st_ops->verifier_ops;
14537
14538 return 0;
14539}
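/* A sketch of the userspace-visible side of the checks above, assuming
 * vmlinux.h plus libbpf's struct_ops conventions; all names are
 * hypothetical and the mandatory tcp_congestion_ops callbacks other than
 * .ssthresh are elided. libbpf fills attach_btf_id with the BTF id of
 * struct tcp_congestion_ops and expected_attach_type with the member
 * index of .ssthresh, which is what member_idx above indexes:
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	SEC("struct_ops/my_ssthresh")
 *	__u32 BPF_PROG(my_ssthresh, struct sock *sk)
 *	{
 *		return 2;	// constant for brevity
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops my_cc = {
 *		.ssthresh = (void *)my_ssthresh,
 *		.name = "my_cc",
 *	};
 *
 *	char _license[] SEC("license") = "GPL";	// enforced above
 */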
6ba43b76
KS
14540#define SECURITY_PREFIX "security_"
14541
f7b12b6f 14542static int check_attach_modify_return(unsigned long addr, const char *func_name)
6ba43b76 14543{
69191754 14544 if (within_error_injection_list(addr) ||
f7b12b6f 14545 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
6ba43b76 14546 return 0;
6ba43b76 14547
6ba43b76
KS
14548 return -EINVAL;
14549}
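/* A hedged example of a target that passes this filter: security_*() LSM
 * entry points are modifiable by prefix match even when they are not on
 * the error-injection list. Sketch assuming vmlinux.h and libbpf's
 * BPF_PROG(); for fmod_ret the last argument carries the return value,
 * and a non-zero return overrides the call:
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	SEC("fmod_ret/security_socket_connect")
 *	int BPF_PROG(block_connect, struct socket *sock,
 *		     struct sockaddr *address, int addrlen, int ret)
 *	{
 *		return -EPERM;	// deny every connect (sketch only)
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */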
27ae7997 14550
1e6c62a8
AS
14551/* List of non-sleepable functions that are otherwise on the
14552 * ALLOW_ERROR_INJECTION list.
14553 */
14554BTF_SET_START(btf_non_sleepable_error_inject)
14555/* The three functions below can be called from both sleepable and non-sleepable
14556 * context. Assume non-sleepable from the BPF safety point of view.
14557 */
9dd3d069 14558BTF_ID(func, __filemap_add_folio)
1e6c62a8
AS
14559BTF_ID(func, should_fail_alloc_page)
14560BTF_ID(func, should_failslab)
14561BTF_SET_END(btf_non_sleepable_error_inject)
14562
14563static int check_non_sleepable_error_inject(u32 btf_id)
14564{
14565 return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
14566}
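/* Both sets feed the sleepable gate in bpf_check_attach_target() below:
 * a sleepable tracing prog must target an error-injectable function that
 * is not in the set above, and a sleepable LSM prog must target one of
 * the sleepable bpf_lsm_*() hooks. A sketch of the LSM case, assuming
 * vmlinux.h, libbpf's "lsm.s/" section convention, and that file_open is
 * on the sleepable hook list:
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	SEC("lsm.s/file_open")
 *	int BPF_PROG(check_open, struct file *file)
 *	{
 *		// may use sleepable helpers such as bpf_copy_from_user()
 *		return 0;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */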
14567
f7b12b6f
THJ
14568int bpf_check_attach_target(struct bpf_verifier_log *log,
14569 const struct bpf_prog *prog,
14570 const struct bpf_prog *tgt_prog,
14571 u32 btf_id,
14572 struct bpf_attach_target_info *tgt_info)
38207291 14573{
be8704ff 14574 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
f1b9509c 14575 const char prefix[] = "btf_trace_";
5b92a28a 14576 int ret = 0, subprog = -1, i;
38207291 14577 const struct btf_type *t;
5b92a28a 14578 bool conservative = true;
38207291 14579 const char *tname;
5b92a28a 14580 struct btf *btf;
f7b12b6f 14581 long addr = 0;
38207291 14582
f1b9509c 14583 if (!btf_id) {
efc68158 14584 bpf_log(log, "Tracing programs must provide btf_id\n");
f1b9509c
AS
14585 return -EINVAL;
14586 }
22dc4a0f 14587 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
5b92a28a 14588 if (!btf) {
efc68158 14589 bpf_log(log,
5b92a28a
AS
14590 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
14591 return -EINVAL;
14592 }
14593 t = btf_type_by_id(btf, btf_id);
f1b9509c 14594 if (!t) {
efc68158 14595 bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
f1b9509c
AS
14596 return -EINVAL;
14597 }
5b92a28a 14598 tname = btf_name_by_offset(btf, t->name_off);
f1b9509c 14599 if (!tname) {
efc68158 14600 bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
f1b9509c
AS
14601 return -EINVAL;
14602 }
5b92a28a
AS
14603 if (tgt_prog) {
14604 struct bpf_prog_aux *aux = tgt_prog->aux;
14605
14606 for (i = 0; i < aux->func_info_cnt; i++)
14607 if (aux->func_info[i].type_id == btf_id) {
14608 subprog = i;
14609 break;
14610 }
14611 if (subprog == -1) {
efc68158 14612 bpf_log(log, "Subprog %s doesn't exist\n", tname);
5b92a28a
AS
14613 return -EINVAL;
14614 }
14615 conservative = aux->func_info_aux[subprog].unreliable;
be8704ff
AS
14616 if (prog_extension) {
14617 if (conservative) {
efc68158 14618 bpf_log(log,
be8704ff
AS
14619 "Cannot replace static functions\n");
14620 return -EINVAL;
14621 }
14622 if (!prog->jit_requested) {
efc68158 14623 bpf_log(log,
be8704ff
AS
14624 "Extension programs should be JITed\n");
14625 return -EINVAL;
14626 }
be8704ff
AS
14627 }
14628 if (!tgt_prog->jited) {
efc68158 14629 bpf_log(log, "Can only attach to JITed progs\n");
be8704ff
AS
14630 return -EINVAL;
14631 }
14632 if (tgt_prog->type == prog->type) {
 14633 /* Cannot fentry/fexit another fentry/fexit program.
 14634 * Cannot attach a program extension to another extension.
 14635 * It's ok to attach fentry/fexit to an extension program.
 14636 */
efc68158 14637 bpf_log(log, "Cannot recursively attach\n");
be8704ff
AS
14638 return -EINVAL;
14639 }
14640 if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
14641 prog_extension &&
14642 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY ||
14643 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) {
 14644 /* Program extensions can extend all program types
 14645 * except fentry/fexit, for the following reason.
 14646 * fentry/fexit programs are used for performance
 14647 * analysis and stats, and can be attached to any
 14648 * program type except themselves. When an extension
 14649 * program replaces, say, an XDP function, it must
 14650 * remain possible to do performance analysis of all
 14651 * functions: both the original XDP program and its
 14652 * extension. Hence attaching fentry/fexit to
 14653 * BPF_PROG_TYPE_EXT is allowed. If extending
 14654 * fentry/fexit were allowed, it would be possible to
 14655 * create a long call chain
 14656 * fentry->extension->fentry->extension beyond a
 14657 * reasonable stack size. Hence extending fentry is not allowed.
 14658 */
efc68158 14659 bpf_log(log, "Cannot extend fentry/fexit\n");
be8704ff
AS
14660 return -EINVAL;
14661 }
5b92a28a 14662 } else {
be8704ff 14663 if (prog_extension) {
efc68158 14664 bpf_log(log, "Cannot replace kernel functions\n");
be8704ff
AS
14665 return -EINVAL;
14666 }
5b92a28a 14667 }
f1b9509c
AS
14668
14669 switch (prog->expected_attach_type) {
14670 case BPF_TRACE_RAW_TP:
5b92a28a 14671 if (tgt_prog) {
efc68158 14672 bpf_log(log,
5b92a28a
AS
14673 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
14674 return -EINVAL;
14675 }
38207291 14676 if (!btf_type_is_typedef(t)) {
efc68158 14677 bpf_log(log, "attach_btf_id %u is not a typedef\n",
38207291
MKL
14678 btf_id);
14679 return -EINVAL;
14680 }
f1b9509c 14681 if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
efc68158 14682 bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
38207291
MKL
14683 btf_id, tname);
14684 return -EINVAL;
14685 }
14686 tname += sizeof(prefix) - 1;
5b92a28a 14687 t = btf_type_by_id(btf, t->type);
38207291
MKL
14688 if (!btf_type_is_ptr(t))
14689 /* should never happen in valid vmlinux build */
14690 return -EINVAL;
5b92a28a 14691 t = btf_type_by_id(btf, t->type);
38207291
MKL
14692 if (!btf_type_is_func_proto(t))
14693 /* should never happen in valid vmlinux build */
14694 return -EINVAL;
14695
f7b12b6f 14696 break;
15d83c4d
YS
14697 case BPF_TRACE_ITER:
14698 if (!btf_type_is_func(t)) {
efc68158 14699 bpf_log(log, "attach_btf_id %u is not a function\n",
15d83c4d
YS
14700 btf_id);
14701 return -EINVAL;
14702 }
14703 t = btf_type_by_id(btf, t->type);
14704 if (!btf_type_is_func_proto(t))
14705 return -EINVAL;
f7b12b6f
THJ
14706 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
14707 if (ret)
14708 return ret;
14709 break;
be8704ff
AS
14710 default:
14711 if (!prog_extension)
14712 return -EINVAL;
df561f66 14713 fallthrough;
ae240823 14714 case BPF_MODIFY_RETURN:
9e4e01df 14715 case BPF_LSM_MAC:
fec56f58
AS
14716 case BPF_TRACE_FENTRY:
14717 case BPF_TRACE_FEXIT:
14718 if (!btf_type_is_func(t)) {
efc68158 14719 bpf_log(log, "attach_btf_id %u is not a function\n",
fec56f58
AS
14720 btf_id);
14721 return -EINVAL;
14722 }
be8704ff 14723 if (prog_extension &&
efc68158 14724 btf_check_type_match(log, prog, btf, t))
be8704ff 14725 return -EINVAL;
5b92a28a 14726 t = btf_type_by_id(btf, t->type);
fec56f58
AS
14727 if (!btf_type_is_func_proto(t))
14728 return -EINVAL;
f7b12b6f 14729
4a1e7c0c
THJ
14730 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
14731 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
14732 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
14733 return -EINVAL;
14734
f7b12b6f 14735 if (tgt_prog && conservative)
5b92a28a 14736 t = NULL;
f7b12b6f
THJ
14737
14738 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
fec56f58 14739 if (ret < 0)
f7b12b6f
THJ
14740 return ret;
14741
5b92a28a 14742 if (tgt_prog) {
e9eeec58
YS
14743 if (subprog == 0)
14744 addr = (long) tgt_prog->bpf_func;
14745 else
14746 addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
5b92a28a
AS
14747 } else {
14748 addr = kallsyms_lookup_name(tname);
14749 if (!addr) {
efc68158 14750 bpf_log(log,
5b92a28a
AS
14751 "The address of function %s cannot be found\n",
14752 tname);
f7b12b6f 14753 return -ENOENT;
5b92a28a 14754 }
fec56f58 14755 }
18644cec 14756
1e6c62a8
AS
14757 if (prog->aux->sleepable) {
14758 ret = -EINVAL;
14759 switch (prog->type) {
14760 case BPF_PROG_TYPE_TRACING:
 14761 /* fentry/fexit/fmod_ret progs can be sleepable only if they are
 14762 * attached to functions on the ALLOW_ERROR_INJECTION list and are
 14763 * not in the denylist.
 14764 */
14764 if (!check_non_sleepable_error_inject(btf_id) &&
14765 within_error_injection_list(addr))
14766 ret = 0;
14767 break;
14768 case BPF_PROG_TYPE_LSM:
 14769 /* LSM progs are verified to attach only to bpf_lsm_*() funcs.
 14770 * Only some of those hooks are sleepable.
 14771 */
423f1610 14772 if (bpf_lsm_is_sleepable_hook(btf_id))
1e6c62a8
AS
14773 ret = 0;
14774 break;
14775 default:
14776 break;
14777 }
f7b12b6f
THJ
14778 if (ret) {
14779 bpf_log(log, "%s is not sleepable\n", tname);
14780 return ret;
14781 }
1e6c62a8 14782 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
1af9270e 14783 if (tgt_prog) {
efc68158 14784 bpf_log(log, "can't modify return codes of BPF programs\n");
f7b12b6f
THJ
14785 return -EINVAL;
14786 }
14787 ret = check_attach_modify_return(addr, tname);
14788 if (ret) {
14789 bpf_log(log, "%s() is not modifiable\n", tname);
14790 return ret;
1af9270e 14791 }
18644cec 14792 }
f7b12b6f
THJ
14793
14794 break;
14795 }
14796 tgt_info->tgt_addr = addr;
14797 tgt_info->tgt_name = tname;
14798 tgt_info->tgt_type = t;
14799 return 0;
14800}
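/* A sketch of the most common caller-visible case of the lookup above:
 * an fentry prog with no tgt_prog, where the target address comes from
 * kallsyms_lookup_name(). Assumes vmlinux.h and libbpf's BPF_PROG();
 * tcp_v4_connect() is a real kernel symbol, the prog name is made up:
 *
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	SEC("fentry/tcp_v4_connect")
 *	int BPF_PROG(trace_connect, struct sock *sk, struct sockaddr *uaddr,
 *		     int addr_len)
 *	{
 *		bpf_printk("connect on sk %p", sk);
 *		return 0;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */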
14801
35e3815f
JO
14802BTF_SET_START(btf_id_deny)
14803BTF_ID_UNUSED
14804#ifdef CONFIG_SMP
14805BTF_ID(func, migrate_disable)
14806BTF_ID(func, migrate_enable)
14807#endif
14808#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
14809BTF_ID(func, rcu_read_unlock_strict)
14810#endif
14811BTF_SET_END(btf_id_deny)
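/* The deny set above covers functions that the trampoline's own
 * enter/exit path relies on (e.g. migrate_disable()/migrate_enable()
 * around program invocation); attaching a tracing program to them could
 * recurse through the program's own trampoline.
 */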
14812
f7b12b6f
THJ
14813static int check_attach_btf_id(struct bpf_verifier_env *env)
14814{
14815 struct bpf_prog *prog = env->prog;
3aac1ead 14816 struct bpf_prog *tgt_prog = prog->aux->dst_prog;
f7b12b6f
THJ
14817 struct bpf_attach_target_info tgt_info = {};
14818 u32 btf_id = prog->aux->attach_btf_id;
14819 struct bpf_trampoline *tr;
14820 int ret;
14821 u64 key;
14822
79a7f8bd
AS
14823 if (prog->type == BPF_PROG_TYPE_SYSCALL) {
14824 if (prog->aux->sleepable)
14825 /* attach_btf_id checked to be zero already */
14826 return 0;
14827 verbose(env, "Syscall programs can only be sleepable\n");
14828 return -EINVAL;
14829 }
14830
f7b12b6f
THJ
14831 if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
14832 prog->type != BPF_PROG_TYPE_LSM) {
14833 verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
14834 return -EINVAL;
14835 }
14836
14837 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
14838 return check_struct_ops_btf_id(env);
14839
14840 if (prog->type != BPF_PROG_TYPE_TRACING &&
14841 prog->type != BPF_PROG_TYPE_LSM &&
14842 prog->type != BPF_PROG_TYPE_EXT)
14843 return 0;
14844
14845 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
14846 if (ret)
fec56f58 14847 return ret;
f7b12b6f
THJ
14848
14849 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
3aac1ead
THJ
 14850 /* To make freplace programs equivalent to their targets, they
 14851 * need to inherit env->ops and expected_attach_type for the
 14852 * rest of verification.
 14853 */
f7b12b6f
THJ
14854 env->ops = bpf_verifier_ops[tgt_prog->type];
14855 prog->expected_attach_type = tgt_prog->expected_attach_type;
14856 }
14857
14858 /* store info about the attachment target that will be used later */
14859 prog->aux->attach_func_proto = tgt_info.tgt_type;
14860 prog->aux->attach_func_name = tgt_info.tgt_name;
14861
4a1e7c0c
THJ
14862 if (tgt_prog) {
14863 prog->aux->saved_dst_prog_type = tgt_prog->type;
14864 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
14865 }
14866
f7b12b6f
THJ
14867 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
14868 prog->aux->attach_btf_trace = true;
14869 return 0;
14870 } else if (prog->expected_attach_type == BPF_TRACE_ITER) {
14871 if (!bpf_iter_prog_supported(prog))
14872 return -EINVAL;
14873 return 0;
14874 }
14875
14876 if (prog->type == BPF_PROG_TYPE_LSM) {
14877 ret = bpf_lsm_verify_prog(&env->log, prog);
14878 if (ret < 0)
14879 return ret;
35e3815f
JO
14880 } else if (prog->type == BPF_PROG_TYPE_TRACING &&
14881 btf_id_set_contains(&btf_id_deny, btf_id)) {
14882 return -EINVAL;
38207291 14883 }
f7b12b6f 14884
22dc4a0f 14885 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
f7b12b6f
THJ
14886 tr = bpf_trampoline_get(key, &tgt_info);
14887 if (!tr)
14888 return -ENOMEM;
14889
3aac1ead 14890 prog->aux->dst_trampoline = tr;
f7b12b6f 14891 return 0;
38207291
MKL
14892}
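/* A sketch of the freplace flow this function sets up, with made-up
 * names; xdp_pass_func must be a global function in the target prog's
 * BTF. Assuming libbpf, the target prog and function are picked via
 * bpf_program__set_attach_target(prog, tgt_fd, "xdp_pass_func") before
 * load, or via the section name:
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("freplace/xdp_pass_func")
 *	int new_xdp_pass(struct xdp_md *ctx)
 *	{
 *		return XDP_DROP;	// replace the target's behavior
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */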
14893
76654e67
AM
14894struct btf *bpf_get_btf_vmlinux(void)
14895{
14896 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
14897 mutex_lock(&bpf_verifier_lock);
14898 if (!btf_vmlinux)
14899 btf_vmlinux = btf_parse_vmlinux();
14900 mutex_unlock(&bpf_verifier_lock);
14901 }
14902 return btf_vmlinux;
14903}
14904
af2ac3e1 14905int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
51580e79 14906{
06ee7115 14907 u64 start_time = ktime_get_ns();
58e2af8b 14908 struct bpf_verifier_env *env;
b9193c1b 14909 struct bpf_verifier_log *log;
9e4c24e7 14910 int i, len, ret = -EINVAL;
e2ae4ca2 14911 bool is_priv;
51580e79 14912
eba0c929
AB
14913 /* no program is valid */
14914 if (ARRAY_SIZE(bpf_verifier_ops) == 0)
14915 return -EINVAL;
14916
58e2af8b 14917 /* 'struct bpf_verifier_env' can be global, but since it's not small,
cbd35700
AS
14918 * allocate/free it every time bpf_check() is called
14919 */
58e2af8b 14920 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
cbd35700
AS
14921 if (!env)
14922 return -ENOMEM;
61bd5218 14923 log = &env->log;
cbd35700 14924
9e4c24e7 14925 len = (*prog)->len;
fad953ce 14926 env->insn_aux_data =
9e4c24e7 14927 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
3df126f3
JK
14928 ret = -ENOMEM;
14929 if (!env->insn_aux_data)
14930 goto err_free_env;
9e4c24e7
JK
14931 for (i = 0; i < len; i++)
14932 env->insn_aux_data[i].orig_idx = i;
9bac3d6d 14933 env->prog = *prog;
00176a34 14934 env->ops = bpf_verifier_ops[env->prog->type];
387544bf 14935 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
2c78ee89 14936 is_priv = bpf_capable();
0246e64d 14937
76654e67 14938 bpf_get_btf_vmlinux();
8580ac94 14939
cbd35700 14940 /* grab the mutex to protect few globals used by verifier */
45a73c17
AS
14941 if (!is_priv)
14942 mutex_lock(&bpf_verifier_lock);
cbd35700
AS
14943
14944 if (attr->log_level || attr->log_buf || attr->log_size) {
14945 /* user requested verbose verifier output
14946 * and supplied buffer to store the verification trace
14947 */
e7bf8249
JK
14948 log->level = attr->log_level;
14949 log->ubuf = (char __user *) (unsigned long) attr->log_buf;
14950 log->len_total = attr->log_size;
cbd35700 14951
e7bf8249 14952 /* log attributes have to be sane */
866de407
HT
14953 if (!bpf_verifier_log_attr_valid(log)) {
14954 ret = -EINVAL;
3df126f3 14955 goto err_unlock;
866de407 14956 }
cbd35700 14957 }
1ad2f583 14958
0f55f9ed
CL
14959 mark_verifier_state_clean(env);
14960
8580ac94
AS
14961 if (IS_ERR(btf_vmlinux)) {
 14962 /* Either gcc, pahole, or the kernel is broken. */
14963 verbose(env, "in-kernel BTF is malformed\n");
14964 ret = PTR_ERR(btf_vmlinux);
38207291 14965 goto skip_full_check;
8580ac94
AS
14966 }
14967
1ad2f583
DB
14968 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
14969 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
e07b98d9 14970 env->strict_alignment = true;
e9ee9efc
DM
14971 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
14972 env->strict_alignment = false;
cbd35700 14973
2c78ee89 14974 env->allow_ptr_leaks = bpf_allow_ptr_leaks();
01f810ac 14975 env->allow_uninit_stack = bpf_allow_uninit_stack();
41c48f3a 14976 env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
2c78ee89
AS
14977 env->bypass_spec_v1 = bpf_bypass_spec_v1();
14978 env->bypass_spec_v4 = bpf_bypass_spec_v4();
14979 env->bpf_capable = bpf_capable();
e2ae4ca2 14980
10d274e8
AS
14981 if (is_priv)
14982 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
14983
dc2a4ebc 14984 env->explored_states = kvcalloc(state_htab_size(env),
58e2af8b 14985 sizeof(struct bpf_verifier_state_list *),
f1bca824
AS
14986 GFP_USER);
14987 ret = -ENOMEM;
14988 if (!env->explored_states)
14989 goto skip_full_check;
14990
e6ac2450
MKL
14991 ret = add_subprog_and_kfunc(env);
14992 if (ret < 0)
14993 goto skip_full_check;
14994
d9762e84 14995 ret = check_subprogs(env);
475fb78f
AS
14996 if (ret < 0)
14997 goto skip_full_check;
14998
c454a46b 14999 ret = check_btf_info(env, attr, uattr);
838e9690
YS
15000 if (ret < 0)
15001 goto skip_full_check;
15002
be8704ff
AS
15003 ret = check_attach_btf_id(env);
15004 if (ret)
15005 goto skip_full_check;
15006
4976b718
HL
15007 ret = resolve_pseudo_ldimm64(env);
15008 if (ret < 0)
15009 goto skip_full_check;
15010
ceb11679
YZ
15011 if (bpf_prog_is_dev_bound(env->prog->aux)) {
15012 ret = bpf_prog_offload_verifier_prep(env->prog);
15013 if (ret)
15014 goto skip_full_check;
15015 }
15016
d9762e84
MKL
15017 ret = check_cfg(env);
15018 if (ret < 0)
15019 goto skip_full_check;
15020
51c39bb1
AS
15021 ret = do_check_subprogs(env);
15022 ret = ret ?: do_check_main(env);
cbd35700 15023
c941ce9c
QM
15024 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
15025 ret = bpf_prog_offload_finalize(env);
15026
0246e64d 15027skip_full_check:
51c39bb1 15028 kvfree(env->explored_states);
0246e64d 15029
c131187d 15030 if (ret == 0)
9b38c405 15031 ret = check_max_stack_depth(env);
c131187d 15032
9b38c405 15033 /* instruction rewrites happen after this point */
e2ae4ca2
JK
15034 if (is_priv) {
15035 if (ret == 0)
15036 opt_hard_wire_dead_code_branches(env);
52875a04
JK
15037 if (ret == 0)
15038 ret = opt_remove_dead_code(env);
a1b14abc
JK
15039 if (ret == 0)
15040 ret = opt_remove_nops(env);
52875a04
JK
15041 } else {
15042 if (ret == 0)
15043 sanitize_dead_code(env);
e2ae4ca2
JK
15044 }
15045
9bac3d6d
AS
15046 if (ret == 0)
15047 /* program is valid, convert *(u32*)(ctx + off) accesses */
15048 ret = convert_ctx_accesses(env);
15049
e245c5c6 15050 if (ret == 0)
e6ac5933 15051 ret = do_misc_fixups(env);
e245c5c6 15052
a4b1d3c1
JW
 15053 /* Do the 32-bit optimization after insn patching is done, so that the
 15054 * patched insns can be handled correctly.
 15055 */
d6c2308c
JW
15056 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
15057 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
15058 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
15059 : false;
a4b1d3c1
JW
15060 }
15061
1ea47e01
AS
15062 if (ret == 0)
15063 ret = fixup_call_args(env);
15064
06ee7115
AS
15065 env->verification_time = ktime_get_ns() - start_time;
15066 print_verification_stats(env);
aba64c7d 15067 env->prog->aux->verified_insns = env->insn_processed;
06ee7115 15068
a2a7d570 15069 if (log->level && bpf_verifier_log_full(log))
cbd35700 15070 ret = -ENOSPC;
a2a7d570 15071 if (log->level && !log->ubuf) {
cbd35700 15072 ret = -EFAULT;
a2a7d570 15073 goto err_release_maps;
cbd35700
AS
15074 }
15075
541c3bad
AN
15076 if (ret)
15077 goto err_release_maps;
15078
15079 if (env->used_map_cnt) {
0246e64d 15080 /* if program passed verifier, update used_maps in bpf_prog_info */
9bac3d6d
AS
15081 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
15082 sizeof(env->used_maps[0]),
15083 GFP_KERNEL);
0246e64d 15084
9bac3d6d 15085 if (!env->prog->aux->used_maps) {
0246e64d 15086 ret = -ENOMEM;
a2a7d570 15087 goto err_release_maps;
0246e64d
AS
15088 }
15089
9bac3d6d 15090 memcpy(env->prog->aux->used_maps, env->used_maps,
0246e64d 15091 sizeof(env->used_maps[0]) * env->used_map_cnt);
9bac3d6d 15092 env->prog->aux->used_map_cnt = env->used_map_cnt;
541c3bad
AN
15093 }
15094 if (env->used_btf_cnt) {
15095 /* if program passed verifier, update used_btfs in bpf_prog_aux */
15096 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
15097 sizeof(env->used_btfs[0]),
15098 GFP_KERNEL);
15099 if (!env->prog->aux->used_btfs) {
15100 ret = -ENOMEM;
15101 goto err_release_maps;
15102 }
0246e64d 15103
541c3bad
AN
15104 memcpy(env->prog->aux->used_btfs, env->used_btfs,
15105 sizeof(env->used_btfs[0]) * env->used_btf_cnt);
15106 env->prog->aux->used_btf_cnt = env->used_btf_cnt;
15107 }
15108 if (env->used_map_cnt || env->used_btf_cnt) {
0246e64d
AS
15109 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
15110 * bpf_ld_imm64 instructions
15111 */
15112 convert_pseudo_ld_imm64(env);
15113 }
cbd35700 15114
541c3bad 15115 adjust_btf_func(env);
ba64e7d8 15116
a2a7d570 15117err_release_maps:
9bac3d6d 15118 if (!env->prog->aux->used_maps)
0246e64d 15119 /* if we didn't copy map pointers into bpf_prog_info, release
ab7f5bf0 15120 * them now. Otherwise free_used_maps() will release them.
0246e64d
AS
15121 */
15122 release_maps(env);
541c3bad
AN
15123 if (!env->prog->aux->used_btfs)
15124 release_btfs(env);
03f87c0b
THJ
15125
15126 /* extension progs temporarily inherit the attach_type of their targets
 15127 * for verification purposes, so set it back to zero before returning
15128 */
15129 if (env->prog->type == BPF_PROG_TYPE_EXT)
15130 env->prog->expected_attach_type = 0;
15131
9bac3d6d 15132 *prog = env->prog;
3df126f3 15133err_unlock:
45a73c17
AS
15134 if (!is_priv)
15135 mutex_unlock(&bpf_verifier_lock);
3df126f3
JK
15136 vfree(env->insn_aux_data);
15137err_free_env:
15138 kfree(env);
51580e79
AS
15139 return ret;
15140}
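/* A sketch of how a loader can cope with the log contract above:
 * -ENOSPC means the supplied log buffer was too small, so retry with a
 * bigger one. Raw syscall for clarity; the helper name and sizes are
 * hypothetical:
 *
 *	#include <errno.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <linux/bpf.h>
 *
 *	int prog_load_verbose(union bpf_attr *attr)
 *	{
 *		size_t sz = 64 * 1024;
 *		int fd;
 *
 *		for (;;) {
 *			char *buf = malloc(sz);
 *
 *			if (!buf)
 *				return -1;
 *			attr->log_level = 1;
 *			attr->log_buf = (unsigned long)buf;
 *			attr->log_size = sz;
 *			fd = syscall(__NR_bpf, BPF_PROG_LOAD, attr, sizeof(*attr));
 *			if (fd < 0 && errno == ENOSPC) {
 *				free(buf);
 *				sz *= 2;
 *				continue;
 *			}
 *			if (fd < 0)
 *				fprintf(stderr, "verifier log:\n%s\n", buf);
 *			free(buf);
 *			return fd;
 *		}
 *	}
 */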