Merge tag 'xfs-5.5-fixes-2' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
[linux-block.git] / kernel / bpf / verifier.c
CommitLineData
5b497af4 1// SPDX-License-Identifier: GPL-2.0-only
51580e79 2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
969bf05e 3 * Copyright (c) 2016 Facebook
fd978bf7 4 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
51580e79 5 */
838e9690 6#include <uapi/linux/btf.h>
51580e79
AS
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/slab.h>
10#include <linux/bpf.h>
838e9690 11#include <linux/btf.h>
58e2af8b 12#include <linux/bpf_verifier.h>
51580e79
AS
13#include <linux/filter.h>
14#include <net/netlink.h>
15#include <linux/file.h>
16#include <linux/vmalloc.h>
ebb676da 17#include <linux/stringify.h>
cc8b0b92
AS
18#include <linux/bsearch.h>
19#include <linux/sort.h>
c195651e 20#include <linux/perf_event.h>
d9762e84 21#include <linux/ctype.h>
51580e79 22
f4ac7e0b
JK
23#include "disasm.h"
24
00176a34 25static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
91cc1a99 26#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
00176a34
JK
27 [_id] = & _name ## _verifier_ops,
28#define BPF_MAP_TYPE(_id, _ops)
29#include <linux/bpf_types.h>
30#undef BPF_PROG_TYPE
31#undef BPF_MAP_TYPE
32};
33
51580e79
AS
34/* bpf_check() is a static code analyzer that walks eBPF program
35 * instruction by instruction and updates register/stack state.
36 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
37 *
38 * The first pass is depth-first-search to check that the program is a DAG.
39 * It rejects the following programs:
40 * - larger than BPF_MAXINSNS insns
41 * - if loop is present (detected via back-edge)
42 * - unreachable insns exist (shouldn't be a forest. program = one function)
43 * - out of bounds or malformed jumps
44 * The second pass is all possible path descent from the 1st insn.
45 * Since it's analyzing all pathes through the program, the length of the
eba38a96 46 * analysis is limited to 64k insn, which may be hit even if total number of
51580e79
AS
47 * insn is less then 4K, but there are too many branches that change stack/regs.
48 * Number of 'branches to be analyzed' is limited to 1k
49 *
50 * On entry to each instruction, each register has a type, and the instruction
51 * changes the types of the registers depending on instruction semantics.
52 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
53 * copied to R1.
54 *
55 * All registers are 64-bit.
56 * R0 - return register
57 * R1-R5 argument passing registers
58 * R6-R9 callee saved registers
59 * R10 - frame pointer read-only
60 *
61 * At the start of BPF program the register R1 contains a pointer to bpf_context
62 * and has type PTR_TO_CTX.
63 *
64 * Verifier tracks arithmetic operations on pointers in case:
65 * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
66 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
67 * 1st insn copies R10 (which has FRAME_PTR) type into R1
68 * and 2nd arithmetic instruction is pattern matched to recognize
69 * that it wants to construct a pointer to some element within stack.
70 * So after 2nd insn, the register R1 has type PTR_TO_STACK
71 * (and -20 constant is saved for further stack bounds checking).
72 * Meaning that this reg is a pointer to stack plus known immediate constant.
73 *
f1174f77 74 * Most of the time the registers have SCALAR_VALUE type, which
51580e79 75 * means the register has some value, but it's not a valid pointer.
f1174f77 76 * (like pointer plus pointer becomes SCALAR_VALUE type)
51580e79
AS
77 *
78 * When verifier sees load or store instructions the type of base register
c64b7983
JS
79 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
80 * four pointer types recognized by check_mem_access() function.
51580e79
AS
81 *
82 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
83 * and the range of [ptr, ptr + map's value_size) is accessible.
84 *
85 * registers used to pass values to function calls are checked against
86 * function argument constraints.
87 *
88 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
89 * It means that the register type passed to this function must be
90 * PTR_TO_STACK and it will be used inside the function as
91 * 'pointer to map element key'
92 *
93 * For example the argument constraints for bpf_map_lookup_elem():
94 * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
95 * .arg1_type = ARG_CONST_MAP_PTR,
96 * .arg2_type = ARG_PTR_TO_MAP_KEY,
97 *
98 * ret_type says that this function returns 'pointer to map elem value or null'
99 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
100 * 2nd argument should be a pointer to stack, which will be used inside
101 * the helper function as a pointer to map element key.
102 *
103 * On the kernel side the helper function looks like:
104 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
105 * {
106 * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
107 * void *key = (void *) (unsigned long) r2;
108 * void *value;
109 *
110 * here kernel can access 'key' and 'map' pointers safely, knowing that
111 * [key, key + map->key_size) bytes are valid and were initialized on
112 * the stack of eBPF program.
113 * }
114 *
115 * Corresponding eBPF program may look like:
116 * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR
117 * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
118 * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP
119 * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
120 * here verifier looks at prototype of map_lookup_elem() and sees:
121 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
122 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
123 *
124 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
125 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
126 * and were initialized prior to this call.
127 * If it's ok, then verifier allows this BPF_CALL insn and looks at
128 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
129 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
130 * returns ether pointer to map value or NULL.
131 *
132 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
133 * insn, the register holding that pointer in the true branch changes state to
134 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
135 * branch. See check_cond_jmp_op().
136 *
137 * After the call R0 is set to return type of the function and registers R1-R5
138 * are set to NOT_INIT to indicate that they are no longer readable.
fd978bf7
JS
139 *
140 * The following reference types represent a potential reference to a kernel
141 * resource which, after first being allocated, must be checked and freed by
142 * the BPF program:
143 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
144 *
145 * When the verifier sees a helper call return a reference type, it allocates a
146 * pointer id for the reference and stores it in the current function state.
147 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
148 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
149 * passes through a NULL-check conditional. For the branch wherein the state is
150 * changed to CONST_IMM, the verifier releases the reference.
6acc9b43
JS
151 *
152 * For each helper function that allocates a reference, such as
153 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
154 * bpf_sk_release(). When a reference type passes into the release function,
155 * the verifier also releases the reference. If any unchecked or unreleased
156 * reference remains at the end of the program, the verifier rejects it.
51580e79
AS
157 */
158
17a52670 159/* verifier_state + insn_idx are pushed to stack when branch is encountered */
58e2af8b 160struct bpf_verifier_stack_elem {
17a52670
AS
161 /* verifer state is 'st'
162 * before processing instruction 'insn_idx'
163 * and after processing instruction 'prev_insn_idx'
164 */
58e2af8b 165 struct bpf_verifier_state st;
17a52670
AS
166 int insn_idx;
167 int prev_insn_idx;
58e2af8b 168 struct bpf_verifier_stack_elem *next;
cbd35700
AS
169};
170
b285fcb7 171#define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192
ceefbc96 172#define BPF_COMPLEXITY_LIMIT_STATES 64
07016151 173
d2e4c1e6
DB
174#define BPF_MAP_KEY_POISON (1ULL << 63)
175#define BPF_MAP_KEY_SEEN (1ULL << 62)
176
c93552c4
DB
177#define BPF_MAP_PTR_UNPRIV 1UL
178#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
179 POISON_POINTER_DELTA))
180#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
181
182static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
183{
d2e4c1e6 184 return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
c93552c4
DB
185}
186
187static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
188{
d2e4c1e6 189 return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
c93552c4
DB
190}
191
192static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
193 const struct bpf_map *map, bool unpriv)
194{
195 BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
196 unpriv |= bpf_map_ptr_unpriv(aux);
d2e4c1e6
DB
197 aux->map_ptr_state = (unsigned long)map |
198 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
199}
200
201static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
202{
203 return aux->map_key_state & BPF_MAP_KEY_POISON;
204}
205
206static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
207{
208 return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
209}
210
211static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
212{
213 return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
214}
215
216static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
217{
218 bool poisoned = bpf_map_key_poisoned(aux);
219
220 aux->map_key_state = state | BPF_MAP_KEY_SEEN |
221 (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
c93552c4 222}
fad73a1a 223
33ff9823
DB
224struct bpf_call_arg_meta {
225 struct bpf_map *map_ptr;
435faee1 226 bool raw_mode;
36bbef52 227 bool pkt_access;
435faee1
DB
228 int regno;
229 int access_size;
849fa506
YS
230 s64 msize_smax_value;
231 u64 msize_umax_value;
1b986589 232 int ref_obj_id;
d83525ca 233 int func_id;
a7658e1a 234 u32 btf_id;
33ff9823
DB
235};
236
8580ac94
AS
237struct btf *btf_vmlinux;
238
cbd35700
AS
239static DEFINE_MUTEX(bpf_verifier_lock);
240
d9762e84
MKL
241static const struct bpf_line_info *
242find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
243{
244 const struct bpf_line_info *linfo;
245 const struct bpf_prog *prog;
246 u32 i, nr_linfo;
247
248 prog = env->prog;
249 nr_linfo = prog->aux->nr_linfo;
250
251 if (!nr_linfo || insn_off >= prog->len)
252 return NULL;
253
254 linfo = prog->aux->linfo;
255 for (i = 1; i < nr_linfo; i++)
256 if (insn_off < linfo[i].insn_off)
257 break;
258
259 return &linfo[i - 1];
260}
261
77d2e05a
MKL
262void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
263 va_list args)
cbd35700 264{
a2a7d570 265 unsigned int n;
cbd35700 266
a2a7d570 267 n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
a2a7d570
JK
268
269 WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
270 "verifier log line truncated - local buffer too short\n");
271
272 n = min(log->len_total - log->len_used - 1, n);
273 log->kbuf[n] = '\0';
274
8580ac94
AS
275 if (log->level == BPF_LOG_KERNEL) {
276 pr_err("BPF:%s\n", log->kbuf);
277 return;
278 }
a2a7d570
JK
279 if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
280 log->len_used += n;
281 else
282 log->ubuf = NULL;
cbd35700 283}
abe08840
JO
284
285/* log_level controls verbosity level of eBPF verifier.
286 * bpf_verifier_log_write() is used to dump the verification trace to the log,
287 * so the user can figure out what's wrong with the program
430e68d1 288 */
abe08840
JO
289__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
290 const char *fmt, ...)
291{
292 va_list args;
293
77d2e05a
MKL
294 if (!bpf_verifier_log_needed(&env->log))
295 return;
296
abe08840 297 va_start(args, fmt);
77d2e05a 298 bpf_verifier_vlog(&env->log, fmt, args);
abe08840
JO
299 va_end(args);
300}
301EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
302
303__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
304{
77d2e05a 305 struct bpf_verifier_env *env = private_data;
abe08840
JO
306 va_list args;
307
77d2e05a
MKL
308 if (!bpf_verifier_log_needed(&env->log))
309 return;
310
abe08840 311 va_start(args, fmt);
77d2e05a 312 bpf_verifier_vlog(&env->log, fmt, args);
abe08840
JO
313 va_end(args);
314}
cbd35700 315
9e15db66
AS
316__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
317 const char *fmt, ...)
318{
319 va_list args;
320
321 if (!bpf_verifier_log_needed(log))
322 return;
323
324 va_start(args, fmt);
325 bpf_verifier_vlog(log, fmt, args);
326 va_end(args);
327}
328
d9762e84
MKL
329static const char *ltrim(const char *s)
330{
331 while (isspace(*s))
332 s++;
333
334 return s;
335}
336
337__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
338 u32 insn_off,
339 const char *prefix_fmt, ...)
340{
341 const struct bpf_line_info *linfo;
342
343 if (!bpf_verifier_log_needed(&env->log))
344 return;
345
346 linfo = find_linfo(env, insn_off);
347 if (!linfo || linfo == env->prev_linfo)
348 return;
349
350 if (prefix_fmt) {
351 va_list args;
352
353 va_start(args, prefix_fmt);
354 bpf_verifier_vlog(&env->log, prefix_fmt, args);
355 va_end(args);
356 }
357
358 verbose(env, "%s\n",
359 ltrim(btf_name_by_offset(env->prog->aux->btf,
360 linfo->line_off)));
361
362 env->prev_linfo = linfo;
363}
364
de8f3a83
DB
365static bool type_is_pkt_pointer(enum bpf_reg_type type)
366{
367 return type == PTR_TO_PACKET ||
368 type == PTR_TO_PACKET_META;
369}
370
46f8bc92
MKL
371static bool type_is_sk_pointer(enum bpf_reg_type type)
372{
373 return type == PTR_TO_SOCKET ||
655a51e5 374 type == PTR_TO_SOCK_COMMON ||
fada7fdc
JL
375 type == PTR_TO_TCP_SOCK ||
376 type == PTR_TO_XDP_SOCK;
46f8bc92
MKL
377}
378
840b9615
JS
379static bool reg_type_may_be_null(enum bpf_reg_type type)
380{
fd978bf7 381 return type == PTR_TO_MAP_VALUE_OR_NULL ||
46f8bc92 382 type == PTR_TO_SOCKET_OR_NULL ||
655a51e5
MKL
383 type == PTR_TO_SOCK_COMMON_OR_NULL ||
384 type == PTR_TO_TCP_SOCK_OR_NULL;
fd978bf7
JS
385}
386
d83525ca
AS
387static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
388{
389 return reg->type == PTR_TO_MAP_VALUE &&
390 map_value_has_spin_lock(reg->map_ptr);
391}
392
cba368c1
MKL
393static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
394{
395 return type == PTR_TO_SOCKET ||
396 type == PTR_TO_SOCKET_OR_NULL ||
397 type == PTR_TO_TCP_SOCK ||
398 type == PTR_TO_TCP_SOCK_OR_NULL;
399}
400
1b986589 401static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
fd978bf7 402{
1b986589 403 return type == ARG_PTR_TO_SOCK_COMMON;
fd978bf7
JS
404}
405
406/* Determine whether the function releases some resources allocated by another
407 * function call. The first reference type argument will be assumed to be
408 * released by release_reference().
409 */
410static bool is_release_function(enum bpf_func_id func_id)
411{
6acc9b43 412 return func_id == BPF_FUNC_sk_release;
840b9615
JS
413}
414
46f8bc92
MKL
415static bool is_acquire_function(enum bpf_func_id func_id)
416{
417 return func_id == BPF_FUNC_sk_lookup_tcp ||
edbf8c01
LB
418 func_id == BPF_FUNC_sk_lookup_udp ||
419 func_id == BPF_FUNC_skc_lookup_tcp;
46f8bc92
MKL
420}
421
1b986589
MKL
422static bool is_ptr_cast_function(enum bpf_func_id func_id)
423{
424 return func_id == BPF_FUNC_tcp_sock ||
425 func_id == BPF_FUNC_sk_fullsock;
426}
427
17a52670
AS
428/* string representation of 'enum bpf_reg_type' */
429static const char * const reg_type_str[] = {
430 [NOT_INIT] = "?",
f1174f77 431 [SCALAR_VALUE] = "inv",
17a52670
AS
432 [PTR_TO_CTX] = "ctx",
433 [CONST_PTR_TO_MAP] = "map_ptr",
434 [PTR_TO_MAP_VALUE] = "map_value",
435 [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null",
17a52670 436 [PTR_TO_STACK] = "fp",
969bf05e 437 [PTR_TO_PACKET] = "pkt",
de8f3a83 438 [PTR_TO_PACKET_META] = "pkt_meta",
969bf05e 439 [PTR_TO_PACKET_END] = "pkt_end",
d58e468b 440 [PTR_TO_FLOW_KEYS] = "flow_keys",
c64b7983
JS
441 [PTR_TO_SOCKET] = "sock",
442 [PTR_TO_SOCKET_OR_NULL] = "sock_or_null",
46f8bc92
MKL
443 [PTR_TO_SOCK_COMMON] = "sock_common",
444 [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null",
655a51e5
MKL
445 [PTR_TO_TCP_SOCK] = "tcp_sock",
446 [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null",
9df1c28b 447 [PTR_TO_TP_BUFFER] = "tp_buffer",
fada7fdc 448 [PTR_TO_XDP_SOCK] = "xdp_sock",
9e15db66 449 [PTR_TO_BTF_ID] = "ptr_",
17a52670
AS
450};
451
8efea21d
EC
452static char slot_type_char[] = {
453 [STACK_INVALID] = '?',
454 [STACK_SPILL] = 'r',
455 [STACK_MISC] = 'm',
456 [STACK_ZERO] = '0',
457};
458
4e92024a
AS
459static void print_liveness(struct bpf_verifier_env *env,
460 enum bpf_reg_liveness live)
461{
9242b5f5 462 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
4e92024a
AS
463 verbose(env, "_");
464 if (live & REG_LIVE_READ)
465 verbose(env, "r");
466 if (live & REG_LIVE_WRITTEN)
467 verbose(env, "w");
9242b5f5
AS
468 if (live & REG_LIVE_DONE)
469 verbose(env, "D");
4e92024a
AS
470}
471
f4d7e40a
AS
472static struct bpf_func_state *func(struct bpf_verifier_env *env,
473 const struct bpf_reg_state *reg)
474{
475 struct bpf_verifier_state *cur = env->cur_state;
476
477 return cur->frame[reg->frameno];
478}
479
9e15db66
AS
480const char *kernel_type_name(u32 id)
481{
482 return btf_name_by_offset(btf_vmlinux,
483 btf_type_by_id(btf_vmlinux, id)->name_off);
484}
485
61bd5218 486static void print_verifier_state(struct bpf_verifier_env *env,
f4d7e40a 487 const struct bpf_func_state *state)
17a52670 488{
f4d7e40a 489 const struct bpf_reg_state *reg;
17a52670
AS
490 enum bpf_reg_type t;
491 int i;
492
f4d7e40a
AS
493 if (state->frameno)
494 verbose(env, " frame%d:", state->frameno);
17a52670 495 for (i = 0; i < MAX_BPF_REG; i++) {
1a0dc1ac
AS
496 reg = &state->regs[i];
497 t = reg->type;
17a52670
AS
498 if (t == NOT_INIT)
499 continue;
4e92024a
AS
500 verbose(env, " R%d", i);
501 print_liveness(env, reg->live);
502 verbose(env, "=%s", reg_type_str[t]);
b5dc0163
AS
503 if (t == SCALAR_VALUE && reg->precise)
504 verbose(env, "P");
f1174f77
EC
505 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
506 tnum_is_const(reg->var_off)) {
507 /* reg->off should be 0 for SCALAR_VALUE */
61bd5218 508 verbose(env, "%lld", reg->var_off.value + reg->off);
f1174f77 509 } else {
9e15db66
AS
510 if (t == PTR_TO_BTF_ID)
511 verbose(env, "%s", kernel_type_name(reg->btf_id));
cba368c1
MKL
512 verbose(env, "(id=%d", reg->id);
513 if (reg_type_may_be_refcounted_or_null(t))
514 verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
f1174f77 515 if (t != SCALAR_VALUE)
61bd5218 516 verbose(env, ",off=%d", reg->off);
de8f3a83 517 if (type_is_pkt_pointer(t))
61bd5218 518 verbose(env, ",r=%d", reg->range);
f1174f77
EC
519 else if (t == CONST_PTR_TO_MAP ||
520 t == PTR_TO_MAP_VALUE ||
521 t == PTR_TO_MAP_VALUE_OR_NULL)
61bd5218 522 verbose(env, ",ks=%d,vs=%d",
f1174f77
EC
523 reg->map_ptr->key_size,
524 reg->map_ptr->value_size);
7d1238f2
EC
525 if (tnum_is_const(reg->var_off)) {
526 /* Typically an immediate SCALAR_VALUE, but
527 * could be a pointer whose offset is too big
528 * for reg->off
529 */
61bd5218 530 verbose(env, ",imm=%llx", reg->var_off.value);
7d1238f2
EC
531 } else {
532 if (reg->smin_value != reg->umin_value &&
533 reg->smin_value != S64_MIN)
61bd5218 534 verbose(env, ",smin_value=%lld",
7d1238f2
EC
535 (long long)reg->smin_value);
536 if (reg->smax_value != reg->umax_value &&
537 reg->smax_value != S64_MAX)
61bd5218 538 verbose(env, ",smax_value=%lld",
7d1238f2
EC
539 (long long)reg->smax_value);
540 if (reg->umin_value != 0)
61bd5218 541 verbose(env, ",umin_value=%llu",
7d1238f2
EC
542 (unsigned long long)reg->umin_value);
543 if (reg->umax_value != U64_MAX)
61bd5218 544 verbose(env, ",umax_value=%llu",
7d1238f2
EC
545 (unsigned long long)reg->umax_value);
546 if (!tnum_is_unknown(reg->var_off)) {
547 char tn_buf[48];
f1174f77 548
7d1238f2 549 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 550 verbose(env, ",var_off=%s", tn_buf);
7d1238f2 551 }
f1174f77 552 }
61bd5218 553 verbose(env, ")");
f1174f77 554 }
17a52670 555 }
638f5b90 556 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
8efea21d
EC
557 char types_buf[BPF_REG_SIZE + 1];
558 bool valid = false;
559 int j;
560
561 for (j = 0; j < BPF_REG_SIZE; j++) {
562 if (state->stack[i].slot_type[j] != STACK_INVALID)
563 valid = true;
564 types_buf[j] = slot_type_char[
565 state->stack[i].slot_type[j]];
566 }
567 types_buf[BPF_REG_SIZE] = 0;
568 if (!valid)
569 continue;
570 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
571 print_liveness(env, state->stack[i].spilled_ptr.live);
b5dc0163
AS
572 if (state->stack[i].slot_type[0] == STACK_SPILL) {
573 reg = &state->stack[i].spilled_ptr;
574 t = reg->type;
575 verbose(env, "=%s", reg_type_str[t]);
576 if (t == SCALAR_VALUE && reg->precise)
577 verbose(env, "P");
578 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
579 verbose(env, "%lld", reg->var_off.value + reg->off);
580 } else {
8efea21d 581 verbose(env, "=%s", types_buf);
b5dc0163 582 }
17a52670 583 }
fd978bf7
JS
584 if (state->acquired_refs && state->refs[0].id) {
585 verbose(env, " refs=%d", state->refs[0].id);
586 for (i = 1; i < state->acquired_refs; i++)
587 if (state->refs[i].id)
588 verbose(env, ",%d", state->refs[i].id);
589 }
61bd5218 590 verbose(env, "\n");
17a52670
AS
591}
592
84dbf350
JS
593#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \
594static int copy_##NAME##_state(struct bpf_func_state *dst, \
595 const struct bpf_func_state *src) \
596{ \
597 if (!src->FIELD) \
598 return 0; \
599 if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \
600 /* internal bug, make state invalid to reject the program */ \
601 memset(dst, 0, sizeof(*dst)); \
602 return -EFAULT; \
603 } \
604 memcpy(dst->FIELD, src->FIELD, \
605 sizeof(*src->FIELD) * (src->COUNT / SIZE)); \
606 return 0; \
638f5b90 607}
fd978bf7
JS
608/* copy_reference_state() */
609COPY_STATE_FN(reference, acquired_refs, refs, 1)
84dbf350
JS
610/* copy_stack_state() */
611COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
612#undef COPY_STATE_FN
613
614#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \
615static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
616 bool copy_old) \
617{ \
618 u32 old_size = state->COUNT; \
619 struct bpf_##NAME##_state *new_##FIELD; \
620 int slot = size / SIZE; \
621 \
622 if (size <= old_size || !size) { \
623 if (copy_old) \
624 return 0; \
625 state->COUNT = slot * SIZE; \
626 if (!size && old_size) { \
627 kfree(state->FIELD); \
628 state->FIELD = NULL; \
629 } \
630 return 0; \
631 } \
632 new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
633 GFP_KERNEL); \
634 if (!new_##FIELD) \
635 return -ENOMEM; \
636 if (copy_old) { \
637 if (state->FIELD) \
638 memcpy(new_##FIELD, state->FIELD, \
639 sizeof(*new_##FIELD) * (old_size / SIZE)); \
640 memset(new_##FIELD + old_size / SIZE, 0, \
641 sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
642 } \
643 state->COUNT = slot * SIZE; \
644 kfree(state->FIELD); \
645 state->FIELD = new_##FIELD; \
646 return 0; \
647}
fd978bf7
JS
648/* realloc_reference_state() */
649REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
84dbf350
JS
650/* realloc_stack_state() */
651REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
652#undef REALLOC_STATE_FN
638f5b90
AS
653
654/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
655 * make it consume minimal amount of memory. check_stack_write() access from
f4d7e40a 656 * the program calls into realloc_func_state() to grow the stack size.
84dbf350
JS
657 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
658 * which realloc_stack_state() copies over. It points to previous
659 * bpf_verifier_state which is never reallocated.
638f5b90 660 */
fd978bf7
JS
661static int realloc_func_state(struct bpf_func_state *state, int stack_size,
662 int refs_size, bool copy_old)
638f5b90 663{
fd978bf7
JS
664 int err = realloc_reference_state(state, refs_size, copy_old);
665 if (err)
666 return err;
667 return realloc_stack_state(state, stack_size, copy_old);
668}
669
670/* Acquire a pointer id from the env and update the state->refs to include
671 * this new pointer reference.
672 * On success, returns a valid pointer id to associate with the register
673 * On failure, returns a negative errno.
638f5b90 674 */
fd978bf7 675static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
638f5b90 676{
fd978bf7
JS
677 struct bpf_func_state *state = cur_func(env);
678 int new_ofs = state->acquired_refs;
679 int id, err;
680
681 err = realloc_reference_state(state, state->acquired_refs + 1, true);
682 if (err)
683 return err;
684 id = ++env->id_gen;
685 state->refs[new_ofs].id = id;
686 state->refs[new_ofs].insn_idx = insn_idx;
638f5b90 687
fd978bf7
JS
688 return id;
689}
690
691/* release function corresponding to acquire_reference_state(). Idempotent. */
46f8bc92 692static int release_reference_state(struct bpf_func_state *state, int ptr_id)
fd978bf7
JS
693{
694 int i, last_idx;
695
fd978bf7
JS
696 last_idx = state->acquired_refs - 1;
697 for (i = 0; i < state->acquired_refs; i++) {
698 if (state->refs[i].id == ptr_id) {
699 if (last_idx && i != last_idx)
700 memcpy(&state->refs[i], &state->refs[last_idx],
701 sizeof(*state->refs));
702 memset(&state->refs[last_idx], 0, sizeof(*state->refs));
703 state->acquired_refs--;
638f5b90 704 return 0;
638f5b90 705 }
638f5b90 706 }
46f8bc92 707 return -EINVAL;
fd978bf7
JS
708}
709
710static int transfer_reference_state(struct bpf_func_state *dst,
711 struct bpf_func_state *src)
712{
713 int err = realloc_reference_state(dst, src->acquired_refs, false);
714 if (err)
715 return err;
716 err = copy_reference_state(dst, src);
717 if (err)
718 return err;
638f5b90
AS
719 return 0;
720}
721
f4d7e40a
AS
722static void free_func_state(struct bpf_func_state *state)
723{
5896351e
AS
724 if (!state)
725 return;
fd978bf7 726 kfree(state->refs);
f4d7e40a
AS
727 kfree(state->stack);
728 kfree(state);
729}
730
b5dc0163
AS
731static void clear_jmp_history(struct bpf_verifier_state *state)
732{
733 kfree(state->jmp_history);
734 state->jmp_history = NULL;
735 state->jmp_history_cnt = 0;
736}
737
1969db47
AS
738static void free_verifier_state(struct bpf_verifier_state *state,
739 bool free_self)
638f5b90 740{
f4d7e40a
AS
741 int i;
742
743 for (i = 0; i <= state->curframe; i++) {
744 free_func_state(state->frame[i]);
745 state->frame[i] = NULL;
746 }
b5dc0163 747 clear_jmp_history(state);
1969db47
AS
748 if (free_self)
749 kfree(state);
638f5b90
AS
750}
751
752/* copy verifier state from src to dst growing dst stack space
753 * when necessary to accommodate larger src stack
754 */
f4d7e40a
AS
755static int copy_func_state(struct bpf_func_state *dst,
756 const struct bpf_func_state *src)
638f5b90
AS
757{
758 int err;
759
fd978bf7
JS
760 err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
761 false);
762 if (err)
763 return err;
764 memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
765 err = copy_reference_state(dst, src);
638f5b90
AS
766 if (err)
767 return err;
638f5b90
AS
768 return copy_stack_state(dst, src);
769}
770
f4d7e40a
AS
771static int copy_verifier_state(struct bpf_verifier_state *dst_state,
772 const struct bpf_verifier_state *src)
773{
774 struct bpf_func_state *dst;
b5dc0163 775 u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
f4d7e40a
AS
776 int i, err;
777
b5dc0163
AS
778 if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
779 kfree(dst_state->jmp_history);
780 dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
781 if (!dst_state->jmp_history)
782 return -ENOMEM;
783 }
784 memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
785 dst_state->jmp_history_cnt = src->jmp_history_cnt;
786
f4d7e40a
AS
787 /* if dst has more stack frames then src frame, free them */
788 for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
789 free_func_state(dst_state->frame[i]);
790 dst_state->frame[i] = NULL;
791 }
979d63d5 792 dst_state->speculative = src->speculative;
f4d7e40a 793 dst_state->curframe = src->curframe;
d83525ca 794 dst_state->active_spin_lock = src->active_spin_lock;
2589726d
AS
795 dst_state->branches = src->branches;
796 dst_state->parent = src->parent;
b5dc0163
AS
797 dst_state->first_insn_idx = src->first_insn_idx;
798 dst_state->last_insn_idx = src->last_insn_idx;
f4d7e40a
AS
799 for (i = 0; i <= src->curframe; i++) {
800 dst = dst_state->frame[i];
801 if (!dst) {
802 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
803 if (!dst)
804 return -ENOMEM;
805 dst_state->frame[i] = dst;
806 }
807 err = copy_func_state(dst, src->frame[i]);
808 if (err)
809 return err;
810 }
811 return 0;
812}
813
2589726d
AS
814static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
815{
816 while (st) {
817 u32 br = --st->branches;
818
819 /* WARN_ON(br > 1) technically makes sense here,
820 * but see comment in push_stack(), hence:
821 */
822 WARN_ONCE((int)br < 0,
823 "BUG update_branch_counts:branches_to_explore=%d\n",
824 br);
825 if (br)
826 break;
827 st = st->parent;
828 }
829}
830
638f5b90
AS
831static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
832 int *insn_idx)
833{
834 struct bpf_verifier_state *cur = env->cur_state;
835 struct bpf_verifier_stack_elem *elem, *head = env->head;
836 int err;
17a52670
AS
837
838 if (env->head == NULL)
638f5b90 839 return -ENOENT;
17a52670 840
638f5b90
AS
841 if (cur) {
842 err = copy_verifier_state(cur, &head->st);
843 if (err)
844 return err;
845 }
846 if (insn_idx)
847 *insn_idx = head->insn_idx;
17a52670 848 if (prev_insn_idx)
638f5b90
AS
849 *prev_insn_idx = head->prev_insn_idx;
850 elem = head->next;
1969db47 851 free_verifier_state(&head->st, false);
638f5b90 852 kfree(head);
17a52670
AS
853 env->head = elem;
854 env->stack_size--;
638f5b90 855 return 0;
17a52670
AS
856}
857
58e2af8b 858static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
979d63d5
DB
859 int insn_idx, int prev_insn_idx,
860 bool speculative)
17a52670 861{
638f5b90 862 struct bpf_verifier_state *cur = env->cur_state;
58e2af8b 863 struct bpf_verifier_stack_elem *elem;
638f5b90 864 int err;
17a52670 865
638f5b90 866 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
17a52670
AS
867 if (!elem)
868 goto err;
869
17a52670
AS
870 elem->insn_idx = insn_idx;
871 elem->prev_insn_idx = prev_insn_idx;
872 elem->next = env->head;
873 env->head = elem;
874 env->stack_size++;
1969db47
AS
875 err = copy_verifier_state(&elem->st, cur);
876 if (err)
877 goto err;
979d63d5 878 elem->st.speculative |= speculative;
b285fcb7
AS
879 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) {
880 verbose(env, "The sequence of %d jumps is too complex.\n",
881 env->stack_size);
17a52670
AS
882 goto err;
883 }
2589726d
AS
884 if (elem->st.parent) {
885 ++elem->st.parent->branches;
886 /* WARN_ON(branches > 2) technically makes sense here,
887 * but
888 * 1. speculative states will bump 'branches' for non-branch
889 * instructions
890 * 2. is_state_visited() heuristics may decide not to create
891 * a new state for a sequence of branches and all such current
892 * and cloned states will be pointing to a single parent state
893 * which might have large 'branches' count.
894 */
895 }
17a52670
AS
896 return &elem->st;
897err:
5896351e
AS
898 free_verifier_state(env->cur_state, true);
899 env->cur_state = NULL;
17a52670 900 /* pop all elements and return */
638f5b90 901 while (!pop_stack(env, NULL, NULL));
17a52670
AS
902 return NULL;
903}
904
905#define CALLER_SAVED_REGS 6
906static const int caller_saved[CALLER_SAVED_REGS] = {
907 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
908};
909
f1174f77
EC
910static void __mark_reg_not_init(struct bpf_reg_state *reg);
911
b03c9f9f
EC
912/* Mark the unknown part of a register (variable offset or scalar value) as
913 * known to have the value @imm.
914 */
915static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
916{
a9c676bc
AS
917 /* Clear id, off, and union(map_ptr, range) */
918 memset(((u8 *)reg) + sizeof(reg->type), 0,
919 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
b03c9f9f
EC
920 reg->var_off = tnum_const(imm);
921 reg->smin_value = (s64)imm;
922 reg->smax_value = (s64)imm;
923 reg->umin_value = imm;
924 reg->umax_value = imm;
925}
926
f1174f77
EC
927/* Mark the 'variable offset' part of a register as zero. This should be
928 * used only on registers holding a pointer type.
929 */
930static void __mark_reg_known_zero(struct bpf_reg_state *reg)
a9789ef9 931{
b03c9f9f 932 __mark_reg_known(reg, 0);
f1174f77 933}
a9789ef9 934
cc2b14d5
AS
935static void __mark_reg_const_zero(struct bpf_reg_state *reg)
936{
937 __mark_reg_known(reg, 0);
cc2b14d5
AS
938 reg->type = SCALAR_VALUE;
939}
940
61bd5218
JK
941static void mark_reg_known_zero(struct bpf_verifier_env *env,
942 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
943{
944 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 945 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno);
f1174f77
EC
946 /* Something bad happened, let's kill all regs */
947 for (regno = 0; regno < MAX_BPF_REG; regno++)
948 __mark_reg_not_init(regs + regno);
949 return;
950 }
951 __mark_reg_known_zero(regs + regno);
952}
953
de8f3a83
DB
954static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
955{
956 return type_is_pkt_pointer(reg->type);
957}
958
959static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
960{
961 return reg_is_pkt_pointer(reg) ||
962 reg->type == PTR_TO_PACKET_END;
963}
964
965/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
966static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
967 enum bpf_reg_type which)
968{
969 /* The register can already have a range from prior markings.
970 * This is fine as long as it hasn't been advanced from its
971 * origin.
972 */
973 return reg->type == which &&
974 reg->id == 0 &&
975 reg->off == 0 &&
976 tnum_equals_const(reg->var_off, 0);
977}
978
b03c9f9f
EC
979/* Attempts to improve min/max values based on var_off information */
980static void __update_reg_bounds(struct bpf_reg_state *reg)
981{
982 /* min signed is max(sign bit) | min(other bits) */
983 reg->smin_value = max_t(s64, reg->smin_value,
984 reg->var_off.value | (reg->var_off.mask & S64_MIN));
985 /* max signed is min(sign bit) | max(other bits) */
986 reg->smax_value = min_t(s64, reg->smax_value,
987 reg->var_off.value | (reg->var_off.mask & S64_MAX));
988 reg->umin_value = max(reg->umin_value, reg->var_off.value);
989 reg->umax_value = min(reg->umax_value,
990 reg->var_off.value | reg->var_off.mask);
991}
992
993/* Uses signed min/max values to inform unsigned, and vice-versa */
994static void __reg_deduce_bounds(struct bpf_reg_state *reg)
995{
996 /* Learn sign from signed bounds.
997 * If we cannot cross the sign boundary, then signed and unsigned bounds
998 * are the same, so combine. This works even in the negative case, e.g.
999 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff.
1000 */
1001 if (reg->smin_value >= 0 || reg->smax_value < 0) {
1002 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1003 reg->umin_value);
1004 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1005 reg->umax_value);
1006 return;
1007 }
1008 /* Learn sign from unsigned bounds. Signed bounds cross the sign
1009 * boundary, so we must be careful.
1010 */
1011 if ((s64)reg->umax_value >= 0) {
1012 /* Positive. We can't learn anything from the smin, but smax
1013 * is positive, hence safe.
1014 */
1015 reg->smin_value = reg->umin_value;
1016 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value,
1017 reg->umax_value);
1018 } else if ((s64)reg->umin_value < 0) {
1019 /* Negative. We can't learn anything from the smax, but smin
1020 * is negative, hence safe.
1021 */
1022 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value,
1023 reg->umin_value);
1024 reg->smax_value = reg->umax_value;
1025 }
1026}
1027
1028/* Attempts to improve var_off based on unsigned min/max information */
1029static void __reg_bound_offset(struct bpf_reg_state *reg)
1030{
1031 reg->var_off = tnum_intersect(reg->var_off,
1032 tnum_range(reg->umin_value,
1033 reg->umax_value));
1034}
1035
581738a6
YS
1036static void __reg_bound_offset32(struct bpf_reg_state *reg)
1037{
1038 u64 mask = 0xffffFFFF;
1039 struct tnum range = tnum_range(reg->umin_value & mask,
1040 reg->umax_value & mask);
1041 struct tnum lo32 = tnum_cast(reg->var_off, 4);
1042 struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);
1043
1044 reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
1045}
1046
b03c9f9f
EC
1047/* Reset the min/max bounds of a register */
1048static void __mark_reg_unbounded(struct bpf_reg_state *reg)
1049{
1050 reg->smin_value = S64_MIN;
1051 reg->smax_value = S64_MAX;
1052 reg->umin_value = 0;
1053 reg->umax_value = U64_MAX;
1054}
1055
f1174f77
EC
1056/* Mark a register as having a completely unknown (scalar) value. */
1057static void __mark_reg_unknown(struct bpf_reg_state *reg)
1058{
a9c676bc
AS
1059 /*
1060 * Clear type, id, off, and union(map_ptr, range) and
1061 * padding between 'type' and union
1062 */
1063 memset(reg, 0, offsetof(struct bpf_reg_state, var_off));
f1174f77 1064 reg->type = SCALAR_VALUE;
f1174f77 1065 reg->var_off = tnum_unknown;
f4d7e40a 1066 reg->frameno = 0;
b03c9f9f 1067 __mark_reg_unbounded(reg);
f1174f77
EC
1068}
1069
61bd5218
JK
1070static void mark_reg_unknown(struct bpf_verifier_env *env,
1071 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
1072{
1073 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 1074 verbose(env, "mark_reg_unknown(regs, %u)\n", regno);
19ceb417
AS
1075 /* Something bad happened, let's kill all regs except FP */
1076 for (regno = 0; regno < BPF_REG_FP; regno++)
f1174f77
EC
1077 __mark_reg_not_init(regs + regno);
1078 return;
1079 }
6754172c
AS
1080 regs += regno;
1081 __mark_reg_unknown(regs);
1082 /* constant backtracking is enabled for root without bpf2bpf calls */
1083 regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
1084 true : false;
f1174f77
EC
1085}
1086
1087static void __mark_reg_not_init(struct bpf_reg_state *reg)
1088{
1089 __mark_reg_unknown(reg);
1090 reg->type = NOT_INIT;
1091}
1092
61bd5218
JK
1093static void mark_reg_not_init(struct bpf_verifier_env *env,
1094 struct bpf_reg_state *regs, u32 regno)
f1174f77
EC
1095{
1096 if (WARN_ON(regno >= MAX_BPF_REG)) {
61bd5218 1097 verbose(env, "mark_reg_not_init(regs, %u)\n", regno);
19ceb417
AS
1098 /* Something bad happened, let's kill all regs except FP */
1099 for (regno = 0; regno < BPF_REG_FP; regno++)
f1174f77
EC
1100 __mark_reg_not_init(regs + regno);
1101 return;
1102 }
1103 __mark_reg_not_init(regs + regno);
a9789ef9
DB
1104}
1105
5327ed3d 1106#define DEF_NOT_SUBREG (0)
61bd5218 1107static void init_reg_state(struct bpf_verifier_env *env,
f4d7e40a 1108 struct bpf_func_state *state)
17a52670 1109{
f4d7e40a 1110 struct bpf_reg_state *regs = state->regs;
17a52670
AS
1111 int i;
1112
dc503a8a 1113 for (i = 0; i < MAX_BPF_REG; i++) {
61bd5218 1114 mark_reg_not_init(env, regs, i);
dc503a8a 1115 regs[i].live = REG_LIVE_NONE;
679c782d 1116 regs[i].parent = NULL;
5327ed3d 1117 regs[i].subreg_def = DEF_NOT_SUBREG;
dc503a8a 1118 }
17a52670
AS
1119
1120 /* frame pointer */
f1174f77 1121 regs[BPF_REG_FP].type = PTR_TO_STACK;
61bd5218 1122 mark_reg_known_zero(env, regs, BPF_REG_FP);
f4d7e40a 1123 regs[BPF_REG_FP].frameno = state->frameno;
17a52670
AS
1124
1125 /* 1st arg to a function */
1126 regs[BPF_REG_1].type = PTR_TO_CTX;
61bd5218 1127 mark_reg_known_zero(env, regs, BPF_REG_1);
6760bf2d
DB
1128}
1129
f4d7e40a
AS
1130#define BPF_MAIN_FUNC (-1)
1131static void init_func_state(struct bpf_verifier_env *env,
1132 struct bpf_func_state *state,
1133 int callsite, int frameno, int subprogno)
1134{
1135 state->callsite = callsite;
1136 state->frameno = frameno;
1137 state->subprogno = subprogno;
1138 init_reg_state(env, state);
1139}
1140
17a52670
AS
1141enum reg_arg_type {
1142 SRC_OP, /* register is used as source operand */
1143 DST_OP, /* register is used as destination operand */
1144 DST_OP_NO_MARK /* same as above, check only, don't mark */
1145};
1146
cc8b0b92
AS
1147static int cmp_subprogs(const void *a, const void *b)
1148{
9c8105bd
JW
1149 return ((struct bpf_subprog_info *)a)->start -
1150 ((struct bpf_subprog_info *)b)->start;
cc8b0b92
AS
1151}
1152
1153static int find_subprog(struct bpf_verifier_env *env, int off)
1154{
9c8105bd 1155 struct bpf_subprog_info *p;
cc8b0b92 1156
9c8105bd
JW
1157 p = bsearch(&off, env->subprog_info, env->subprog_cnt,
1158 sizeof(env->subprog_info[0]), cmp_subprogs);
cc8b0b92
AS
1159 if (!p)
1160 return -ENOENT;
9c8105bd 1161 return p - env->subprog_info;
cc8b0b92
AS
1162
1163}
1164
1165static int add_subprog(struct bpf_verifier_env *env, int off)
1166{
1167 int insn_cnt = env->prog->len;
1168 int ret;
1169
1170 if (off >= insn_cnt || off < 0) {
1171 verbose(env, "call to invalid destination\n");
1172 return -EINVAL;
1173 }
1174 ret = find_subprog(env, off);
1175 if (ret >= 0)
1176 return 0;
4cb3d99c 1177 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
cc8b0b92
AS
1178 verbose(env, "too many subprograms\n");
1179 return -E2BIG;
1180 }
9c8105bd
JW
1181 env->subprog_info[env->subprog_cnt++].start = off;
1182 sort(env->subprog_info, env->subprog_cnt,
1183 sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
cc8b0b92
AS
1184 return 0;
1185}
1186
1187static int check_subprogs(struct bpf_verifier_env *env)
1188{
1189 int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
9c8105bd 1190 struct bpf_subprog_info *subprog = env->subprog_info;
cc8b0b92
AS
1191 struct bpf_insn *insn = env->prog->insnsi;
1192 int insn_cnt = env->prog->len;
1193
f910cefa
JW
1194 /* Add entry function. */
1195 ret = add_subprog(env, 0);
1196 if (ret < 0)
1197 return ret;
1198
cc8b0b92
AS
1199 /* determine subprog starts. The end is one before the next starts */
1200 for (i = 0; i < insn_cnt; i++) {
1201 if (insn[i].code != (BPF_JMP | BPF_CALL))
1202 continue;
1203 if (insn[i].src_reg != BPF_PSEUDO_CALL)
1204 continue;
1205 if (!env->allow_ptr_leaks) {
1206 verbose(env, "function calls to other bpf functions are allowed for root only\n");
1207 return -EPERM;
1208 }
cc8b0b92
AS
1209 ret = add_subprog(env, i + insn[i].imm + 1);
1210 if (ret < 0)
1211 return ret;
1212 }
1213
4cb3d99c
JW
1214 /* Add a fake 'exit' subprog which could simplify subprog iteration
1215 * logic. 'subprog_cnt' should not be increased.
1216 */
1217 subprog[env->subprog_cnt].start = insn_cnt;
1218
06ee7115 1219 if (env->log.level & BPF_LOG_LEVEL2)
cc8b0b92 1220 for (i = 0; i < env->subprog_cnt; i++)
9c8105bd 1221 verbose(env, "func#%d @%d\n", i, subprog[i].start);
cc8b0b92
AS
1222
1223 /* now check that all jumps are within the same subprog */
4cb3d99c
JW
1224 subprog_start = subprog[cur_subprog].start;
1225 subprog_end = subprog[cur_subprog + 1].start;
cc8b0b92
AS
1226 for (i = 0; i < insn_cnt; i++) {
1227 u8 code = insn[i].code;
1228
092ed096 1229 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
cc8b0b92
AS
1230 goto next;
1231 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
1232 goto next;
1233 off = i + insn[i].off + 1;
1234 if (off < subprog_start || off >= subprog_end) {
1235 verbose(env, "jump out of range from insn %d to %d\n", i, off);
1236 return -EINVAL;
1237 }
1238next:
1239 if (i == subprog_end - 1) {
1240 /* to avoid fall-through from one subprog into another
1241 * the last insn of the subprog should be either exit
1242 * or unconditional jump back
1243 */
1244 if (code != (BPF_JMP | BPF_EXIT) &&
1245 code != (BPF_JMP | BPF_JA)) {
1246 verbose(env, "last insn is not an exit or jmp\n");
1247 return -EINVAL;
1248 }
1249 subprog_start = subprog_end;
4cb3d99c
JW
1250 cur_subprog++;
1251 if (cur_subprog < env->subprog_cnt)
9c8105bd 1252 subprog_end = subprog[cur_subprog + 1].start;
cc8b0b92
AS
1253 }
1254 }
1255 return 0;
1256}
1257
679c782d
EC
1258/* Parentage chain of this register (or stack slot) should take care of all
1259 * issues like callee-saved registers, stack slot allocation time, etc.
1260 */
f4d7e40a 1261static int mark_reg_read(struct bpf_verifier_env *env,
679c782d 1262 const struct bpf_reg_state *state,
5327ed3d 1263 struct bpf_reg_state *parent, u8 flag)
f4d7e40a
AS
1264{
1265 bool writes = parent == state->parent; /* Observe write marks */
06ee7115 1266 int cnt = 0;
dc503a8a
EC
1267
1268 while (parent) {
1269 /* if read wasn't screened by an earlier write ... */
679c782d 1270 if (writes && state->live & REG_LIVE_WRITTEN)
dc503a8a 1271 break;
9242b5f5
AS
1272 if (parent->live & REG_LIVE_DONE) {
1273 verbose(env, "verifier BUG type %s var_off %lld off %d\n",
1274 reg_type_str[parent->type],
1275 parent->var_off.value, parent->off);
1276 return -EFAULT;
1277 }
5327ed3d
JW
1278 /* The first condition is more likely to be true than the
1279 * second, checked it first.
1280 */
1281 if ((parent->live & REG_LIVE_READ) == flag ||
1282 parent->live & REG_LIVE_READ64)
25af32da
AS
1283 /* The parentage chain never changes and
1284 * this parent was already marked as LIVE_READ.
1285 * There is no need to keep walking the chain again and
1286 * keep re-marking all parents as LIVE_READ.
1287 * This case happens when the same register is read
1288 * multiple times without writes into it in-between.
5327ed3d
JW
1289 * Also, if parent has the stronger REG_LIVE_READ64 set,
1290 * then no need to set the weak REG_LIVE_READ32.
25af32da
AS
1291 */
1292 break;
dc503a8a 1293 /* ... then we depend on parent's value */
5327ed3d
JW
1294 parent->live |= flag;
1295 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */
1296 if (flag == REG_LIVE_READ64)
1297 parent->live &= ~REG_LIVE_READ32;
dc503a8a
EC
1298 state = parent;
1299 parent = state->parent;
f4d7e40a 1300 writes = true;
06ee7115 1301 cnt++;
dc503a8a 1302 }
06ee7115
AS
1303
1304 if (env->longest_mark_read_walk < cnt)
1305 env->longest_mark_read_walk = cnt;
f4d7e40a 1306 return 0;
dc503a8a
EC
1307}
1308
5327ed3d
JW
1309/* This function is supposed to be used by the following 32-bit optimization
1310 * code only. It returns TRUE if the source or destination register operates
1311 * on 64-bit, otherwise return FALSE.
1312 */
1313static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
1314 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t)
1315{
1316 u8 code, class, op;
1317
1318 code = insn->code;
1319 class = BPF_CLASS(code);
1320 op = BPF_OP(code);
1321 if (class == BPF_JMP) {
1322 /* BPF_EXIT for "main" will reach here. Return TRUE
1323 * conservatively.
1324 */
1325 if (op == BPF_EXIT)
1326 return true;
1327 if (op == BPF_CALL) {
1328 /* BPF to BPF call will reach here because of marking
1329 * caller saved clobber with DST_OP_NO_MARK for which we
1330 * don't care the register def because they are anyway
1331 * marked as NOT_INIT already.
1332 */
1333 if (insn->src_reg == BPF_PSEUDO_CALL)
1334 return false;
1335 /* Helper call will reach here because of arg type
1336 * check, conservatively return TRUE.
1337 */
1338 if (t == SRC_OP)
1339 return true;
1340
1341 return false;
1342 }
1343 }
1344
1345 if (class == BPF_ALU64 || class == BPF_JMP ||
1346 /* BPF_END always use BPF_ALU class. */
1347 (class == BPF_ALU && op == BPF_END && insn->imm == 64))
1348 return true;
1349
1350 if (class == BPF_ALU || class == BPF_JMP32)
1351 return false;
1352
1353 if (class == BPF_LDX) {
1354 if (t != SRC_OP)
1355 return BPF_SIZE(code) == BPF_DW;
1356 /* LDX source must be ptr. */
1357 return true;
1358 }
1359
1360 if (class == BPF_STX) {
1361 if (reg->type != SCALAR_VALUE)
1362 return true;
1363 return BPF_SIZE(code) == BPF_DW;
1364 }
1365
1366 if (class == BPF_LD) {
1367 u8 mode = BPF_MODE(code);
1368
1369 /* LD_IMM64 */
1370 if (mode == BPF_IMM)
1371 return true;
1372
1373 /* Both LD_IND and LD_ABS return 32-bit data. */
1374 if (t != SRC_OP)
1375 return false;
1376
1377 /* Implicit ctx ptr. */
1378 if (regno == BPF_REG_6)
1379 return true;
1380
1381 /* Explicit source could be any width. */
1382 return true;
1383 }
1384
1385 if (class == BPF_ST)
1386 /* The only source register for BPF_ST is a ptr. */
1387 return true;
1388
1389 /* Conservatively return true at default. */
1390 return true;
1391}
1392
b325fbca
JW
1393/* Return TRUE if INSN doesn't have explicit value define. */
1394static bool insn_no_def(struct bpf_insn *insn)
1395{
1396 u8 class = BPF_CLASS(insn->code);
1397
1398 return (class == BPF_JMP || class == BPF_JMP32 ||
1399 class == BPF_STX || class == BPF_ST);
1400}
1401
1402/* Return TRUE if INSN has defined any 32-bit value explicitly. */
1403static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
1404{
1405 if (insn_no_def(insn))
1406 return false;
1407
1408 return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP);
1409}
1410
5327ed3d
JW
1411static void mark_insn_zext(struct bpf_verifier_env *env,
1412 struct bpf_reg_state *reg)
1413{
1414 s32 def_idx = reg->subreg_def;
1415
1416 if (def_idx == DEF_NOT_SUBREG)
1417 return;
1418
1419 env->insn_aux_data[def_idx - 1].zext_dst = true;
1420 /* The dst will be zero extended, so won't be sub-register anymore. */
1421 reg->subreg_def = DEF_NOT_SUBREG;
1422}
1423
dc503a8a 1424static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
17a52670
AS
1425 enum reg_arg_type t)
1426{
f4d7e40a
AS
1427 struct bpf_verifier_state *vstate = env->cur_state;
1428 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5327ed3d 1429 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
c342dc10 1430 struct bpf_reg_state *reg, *regs = state->regs;
5327ed3d 1431 bool rw64;
dc503a8a 1432
17a52670 1433 if (regno >= MAX_BPF_REG) {
61bd5218 1434 verbose(env, "R%d is invalid\n", regno);
17a52670
AS
1435 return -EINVAL;
1436 }
1437
c342dc10 1438 reg = &regs[regno];
5327ed3d 1439 rw64 = is_reg64(env, insn, regno, reg, t);
17a52670
AS
1440 if (t == SRC_OP) {
1441 /* check whether register used as source operand can be read */
c342dc10 1442 if (reg->type == NOT_INIT) {
61bd5218 1443 verbose(env, "R%d !read_ok\n", regno);
17a52670
AS
1444 return -EACCES;
1445 }
679c782d 1446 /* We don't need to worry about FP liveness because it's read-only */
c342dc10
JW
1447 if (regno == BPF_REG_FP)
1448 return 0;
1449
5327ed3d
JW
1450 if (rw64)
1451 mark_insn_zext(env, reg);
1452
1453 return mark_reg_read(env, reg, reg->parent,
1454 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
17a52670
AS
1455 } else {
1456 /* check whether register used as dest operand can be written to */
1457 if (regno == BPF_REG_FP) {
61bd5218 1458 verbose(env, "frame pointer is read only\n");
17a52670
AS
1459 return -EACCES;
1460 }
c342dc10 1461 reg->live |= REG_LIVE_WRITTEN;
5327ed3d 1462 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
17a52670 1463 if (t == DST_OP)
61bd5218 1464 mark_reg_unknown(env, regs, regno);
17a52670
AS
1465 }
1466 return 0;
1467}
1468
b5dc0163
AS
1469/* for any branch, call, exit record the history of jmps in the given state */
1470static int push_jmp_history(struct bpf_verifier_env *env,
1471 struct bpf_verifier_state *cur)
1472{
1473 u32 cnt = cur->jmp_history_cnt;
1474 struct bpf_idx_pair *p;
1475
1476 cnt++;
1477 p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER);
1478 if (!p)
1479 return -ENOMEM;
1480 p[cnt - 1].idx = env->insn_idx;
1481 p[cnt - 1].prev_idx = env->prev_insn_idx;
1482 cur->jmp_history = p;
1483 cur->jmp_history_cnt = cnt;
1484 return 0;
1485}
1486
1487/* Backtrack one insn at a time. If idx is not at the top of recorded
1488 * history then previous instruction came from straight line execution.
1489 */
1490static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
1491 u32 *history)
1492{
1493 u32 cnt = *history;
1494
1495 if (cnt && st->jmp_history[cnt - 1].idx == i) {
1496 i = st->jmp_history[cnt - 1].prev_idx;
1497 (*history)--;
1498 } else {
1499 i--;
1500 }
1501 return i;
1502}
1503
1504/* For given verifier state backtrack_insn() is called from the last insn to
1505 * the first insn. Its purpose is to compute a bitmask of registers and
1506 * stack slots that needs precision in the parent verifier state.
1507 */
1508static int backtrack_insn(struct bpf_verifier_env *env, int idx,
1509 u32 *reg_mask, u64 *stack_mask)
1510{
1511 const struct bpf_insn_cbs cbs = {
1512 .cb_print = verbose,
1513 .private_data = env,
1514 };
1515 struct bpf_insn *insn = env->prog->insnsi + idx;
1516 u8 class = BPF_CLASS(insn->code);
1517 u8 opcode = BPF_OP(insn->code);
1518 u8 mode = BPF_MODE(insn->code);
1519 u32 dreg = 1u << insn->dst_reg;
1520 u32 sreg = 1u << insn->src_reg;
1521 u32 spi;
1522
1523 if (insn->code == 0)
1524 return 0;
1525 if (env->log.level & BPF_LOG_LEVEL) {
1526 verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask);
1527 verbose(env, "%d: ", idx);
1528 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
1529 }
1530
1531 if (class == BPF_ALU || class == BPF_ALU64) {
1532 if (!(*reg_mask & dreg))
1533 return 0;
1534 if (opcode == BPF_MOV) {
1535 if (BPF_SRC(insn->code) == BPF_X) {
1536 /* dreg = sreg
1537 * dreg needs precision after this insn
1538 * sreg needs precision before this insn
1539 */
1540 *reg_mask &= ~dreg;
1541 *reg_mask |= sreg;
1542 } else {
1543 /* dreg = K
1544 * dreg needs precision after this insn.
1545 * Corresponding register is already marked
1546 * as precise=true in this verifier state.
1547 * No further markings in parent are necessary
1548 */
1549 *reg_mask &= ~dreg;
1550 }
1551 } else {
1552 if (BPF_SRC(insn->code) == BPF_X) {
1553 /* dreg += sreg
1554 * both dreg and sreg need precision
1555 * before this insn
1556 */
1557 *reg_mask |= sreg;
1558 } /* else dreg += K
1559 * dreg still needs precision before this insn
1560 */
1561 }
1562 } else if (class == BPF_LDX) {
1563 if (!(*reg_mask & dreg))
1564 return 0;
1565 *reg_mask &= ~dreg;
1566
1567 /* scalars can only be spilled into stack w/o losing precision.
1568 * Load from any other memory can be zero extended.
1569 * The desire to keep that precision is already indicated
1570 * by 'precise' mark in corresponding register of this state.
1571 * No further tracking necessary.
1572 */
1573 if (insn->src_reg != BPF_REG_FP)
1574 return 0;
1575 if (BPF_SIZE(insn->code) != BPF_DW)
1576 return 0;
1577
1578 /* dreg = *(u64 *)[fp - off] was a fill from the stack.
1579 * that [fp - off] slot contains scalar that needs to be
1580 * tracked with precision
1581 */
1582 spi = (-insn->off - 1) / BPF_REG_SIZE;
1583 if (spi >= 64) {
1584 verbose(env, "BUG spi %d\n", spi);
1585 WARN_ONCE(1, "verifier backtracking bug");
1586 return -EFAULT;
1587 }
1588 *stack_mask |= 1ull << spi;
b3b50f05 1589 } else if (class == BPF_STX || class == BPF_ST) {
b5dc0163 1590 if (*reg_mask & dreg)
b3b50f05 1591 /* stx & st shouldn't be using _scalar_ dst_reg
b5dc0163
AS
1592 * to access memory. It means backtracking
1593 * encountered a case of pointer subtraction.
1594 */
1595 return -ENOTSUPP;
1596 /* scalars can only be spilled into stack */
1597 if (insn->dst_reg != BPF_REG_FP)
1598 return 0;
1599 if (BPF_SIZE(insn->code) != BPF_DW)
1600 return 0;
1601 spi = (-insn->off - 1) / BPF_REG_SIZE;
1602 if (spi >= 64) {
1603 verbose(env, "BUG spi %d\n", spi);
1604 WARN_ONCE(1, "verifier backtracking bug");
1605 return -EFAULT;
1606 }
1607 if (!(*stack_mask & (1ull << spi)))
1608 return 0;
1609 *stack_mask &= ~(1ull << spi);
b3b50f05
AN
1610 if (class == BPF_STX)
1611 *reg_mask |= sreg;
b5dc0163
AS
1612 } else if (class == BPF_JMP || class == BPF_JMP32) {
1613 if (opcode == BPF_CALL) {
1614 if (insn->src_reg == BPF_PSEUDO_CALL)
1615 return -ENOTSUPP;
1616 /* regular helper call sets R0 */
1617 *reg_mask &= ~1;
1618 if (*reg_mask & 0x3f) {
1619 /* if backtracing was looking for registers R1-R5
1620 * they should have been found already.
1621 */
1622 verbose(env, "BUG regs %x\n", *reg_mask);
1623 WARN_ONCE(1, "verifier backtracking bug");
1624 return -EFAULT;
1625 }
1626 } else if (opcode == BPF_EXIT) {
1627 return -ENOTSUPP;
1628 }
1629 } else if (class == BPF_LD) {
1630 if (!(*reg_mask & dreg))
1631 return 0;
1632 *reg_mask &= ~dreg;
1633 /* It's ld_imm64 or ld_abs or ld_ind.
1634 * For ld_imm64 no further tracking of precision
1635 * into parent is necessary
1636 */
1637 if (mode == BPF_IND || mode == BPF_ABS)
1638 /* to be analyzed */
1639 return -ENOTSUPP;
b5dc0163
AS
1640 }
1641 return 0;
1642}
1643
1644/* the scalar precision tracking algorithm:
1645 * . at the start all registers have precise=false.
1646 * . scalar ranges are tracked as normal through alu and jmp insns.
1647 * . once precise value of the scalar register is used in:
1648 * . ptr + scalar alu
1649 * . if (scalar cond K|scalar)
1650 * . helper_call(.., scalar, ...) where ARG_CONST is expected
1651 * backtrack through the verifier states and mark all registers and
1652 * stack slots with spilled constants that these scalar regisers
1653 * should be precise.
1654 * . during state pruning two registers (or spilled stack slots)
1655 * are equivalent if both are not precise.
1656 *
1657 * Note the verifier cannot simply walk register parentage chain,
1658 * since many different registers and stack slots could have been
1659 * used to compute single precise scalar.
1660 *
1661 * The approach of starting with precise=true for all registers and then
1662 * backtrack to mark a register as not precise when the verifier detects
1663 * that program doesn't care about specific value (e.g., when helper
1664 * takes register as ARG_ANYTHING parameter) is not safe.
1665 *
1666 * It's ok to walk single parentage chain of the verifier states.
1667 * It's possible that this backtracking will go all the way till 1st insn.
1668 * All other branches will be explored for needing precision later.
1669 *
1670 * The backtracking needs to deal with cases like:
1671 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0)
1672 * r9 -= r8
1673 * r5 = r9
1674 * if r5 > 0x79f goto pc+7
1675 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff))
1676 * r5 += 1
1677 * ...
1678 * call bpf_perf_event_output#25
1679 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO
1680 *
1681 * and this case:
1682 * r6 = 1
1683 * call foo // uses callee's r6 inside to compute r0
1684 * r0 += r6
1685 * if r0 == 0 goto
1686 *
1687 * to track above reg_mask/stack_mask needs to be independent for each frame.
1688 *
1689 * Also if parent's curframe > frame where backtracking started,
1690 * the verifier need to mark registers in both frames, otherwise callees
1691 * may incorrectly prune callers. This is similar to
1692 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences")
1693 *
1694 * For now backtracking falls back into conservative marking.
1695 */
1696static void mark_all_scalars_precise(struct bpf_verifier_env *env,
1697 struct bpf_verifier_state *st)
1698{
1699 struct bpf_func_state *func;
1700 struct bpf_reg_state *reg;
1701 int i, j;
1702
1703 /* big hammer: mark all scalars precise in this path.
1704 * pop_stack may still get !precise scalars.
1705 */
1706 for (; st; st = st->parent)
1707 for (i = 0; i <= st->curframe; i++) {
1708 func = st->frame[i];
1709 for (j = 0; j < BPF_REG_FP; j++) {
1710 reg = &func->regs[j];
1711 if (reg->type != SCALAR_VALUE)
1712 continue;
1713 reg->precise = true;
1714 }
1715 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
1716 if (func->stack[j].slot_type[0] != STACK_SPILL)
1717 continue;
1718 reg = &func->stack[j].spilled_ptr;
1719 if (reg->type != SCALAR_VALUE)
1720 continue;
1721 reg->precise = true;
1722 }
1723 }
1724}
1725
a3ce685d
AS
1726static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
1727 int spi)
b5dc0163
AS
1728{
1729 struct bpf_verifier_state *st = env->cur_state;
1730 int first_idx = st->first_insn_idx;
1731 int last_idx = env->insn_idx;
1732 struct bpf_func_state *func;
1733 struct bpf_reg_state *reg;
a3ce685d
AS
1734 u32 reg_mask = regno >= 0 ? 1u << regno : 0;
1735 u64 stack_mask = spi >= 0 ? 1ull << spi : 0;
b5dc0163 1736 bool skip_first = true;
a3ce685d 1737 bool new_marks = false;
b5dc0163
AS
1738 int i, err;
1739
1740 if (!env->allow_ptr_leaks)
1741 /* backtracking is root only for now */
1742 return 0;
1743
1744 func = st->frame[st->curframe];
a3ce685d
AS
1745 if (regno >= 0) {
1746 reg = &func->regs[regno];
1747 if (reg->type != SCALAR_VALUE) {
1748 WARN_ONCE(1, "backtracing misuse");
1749 return -EFAULT;
1750 }
1751 if (!reg->precise)
1752 new_marks = true;
1753 else
1754 reg_mask = 0;
1755 reg->precise = true;
b5dc0163 1756 }
b5dc0163 1757
a3ce685d
AS
1758 while (spi >= 0) {
1759 if (func->stack[spi].slot_type[0] != STACK_SPILL) {
1760 stack_mask = 0;
1761 break;
1762 }
1763 reg = &func->stack[spi].spilled_ptr;
1764 if (reg->type != SCALAR_VALUE) {
1765 stack_mask = 0;
1766 break;
1767 }
1768 if (!reg->precise)
1769 new_marks = true;
1770 else
1771 stack_mask = 0;
1772 reg->precise = true;
1773 break;
1774 }
1775
1776 if (!new_marks)
1777 return 0;
1778 if (!reg_mask && !stack_mask)
1779 return 0;
b5dc0163
AS
1780 for (;;) {
1781 DECLARE_BITMAP(mask, 64);
b5dc0163
AS
1782 u32 history = st->jmp_history_cnt;
1783
1784 if (env->log.level & BPF_LOG_LEVEL)
1785 verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
1786 for (i = last_idx;;) {
1787 if (skip_first) {
1788 err = 0;
1789 skip_first = false;
1790 } else {
1791 err = backtrack_insn(env, i, &reg_mask, &stack_mask);
1792 }
1793 if (err == -ENOTSUPP) {
1794 mark_all_scalars_precise(env, st);
1795 return 0;
1796 } else if (err) {
1797 return err;
1798 }
1799 if (!reg_mask && !stack_mask)
1800 /* Found assignment(s) into tracked register in this state.
1801 * Since this state is already marked, just return.
1802 * Nothing to be tracked further in the parent state.
1803 */
1804 return 0;
1805 if (i == first_idx)
1806 break;
1807 i = get_prev_insn_idx(st, i, &history);
1808 if (i >= env->prog->len) {
1809 /* This can happen if backtracking reached insn 0
1810 * and there are still reg_mask or stack_mask
1811 * to backtrack.
1812 * It means the backtracking missed the spot where
1813 * particular register was initialized with a constant.
1814 */
1815 verbose(env, "BUG backtracking idx %d\n", i);
1816 WARN_ONCE(1, "verifier backtracking bug");
1817 return -EFAULT;
1818 }
1819 }
1820 st = st->parent;
1821 if (!st)
1822 break;
1823
a3ce685d 1824 new_marks = false;
b5dc0163
AS
1825 func = st->frame[st->curframe];
1826 bitmap_from_u64(mask, reg_mask);
1827 for_each_set_bit(i, mask, 32) {
1828 reg = &func->regs[i];
a3ce685d
AS
1829 if (reg->type != SCALAR_VALUE) {
1830 reg_mask &= ~(1u << i);
b5dc0163 1831 continue;
a3ce685d 1832 }
b5dc0163
AS
1833 if (!reg->precise)
1834 new_marks = true;
1835 reg->precise = true;
1836 }
1837
1838 bitmap_from_u64(mask, stack_mask);
1839 for_each_set_bit(i, mask, 64) {
1840 if (i >= func->allocated_stack / BPF_REG_SIZE) {
2339cd6c
AS
1841 /* the sequence of instructions:
1842 * 2: (bf) r3 = r10
1843 * 3: (7b) *(u64 *)(r3 -8) = r0
1844 * 4: (79) r4 = *(u64 *)(r10 -8)
1845 * doesn't contain jmps. It's backtracked
1846 * as a single block.
1847 * During backtracking insn 3 is not recognized as
1848 * stack access, so at the end of backtracking
1849 * stack slot fp-8 is still marked in stack_mask.
1850 * However the parent state may not have accessed
1851 * fp-8 and it's "unallocated" stack space.
1852 * In such case fallback to conservative.
b5dc0163 1853 */
2339cd6c
AS
1854 mark_all_scalars_precise(env, st);
1855 return 0;
b5dc0163
AS
1856 }
1857
a3ce685d
AS
1858 if (func->stack[i].slot_type[0] != STACK_SPILL) {
1859 stack_mask &= ~(1ull << i);
b5dc0163 1860 continue;
a3ce685d 1861 }
b5dc0163 1862 reg = &func->stack[i].spilled_ptr;
a3ce685d
AS
1863 if (reg->type != SCALAR_VALUE) {
1864 stack_mask &= ~(1ull << i);
b5dc0163 1865 continue;
a3ce685d 1866 }
b5dc0163
AS
1867 if (!reg->precise)
1868 new_marks = true;
1869 reg->precise = true;
1870 }
1871 if (env->log.level & BPF_LOG_LEVEL) {
1872 print_verifier_state(env, func);
1873 verbose(env, "parent %s regs=%x stack=%llx marks\n",
1874 new_marks ? "didn't have" : "already had",
1875 reg_mask, stack_mask);
1876 }
1877
a3ce685d
AS
1878 if (!reg_mask && !stack_mask)
1879 break;
b5dc0163
AS
1880 if (!new_marks)
1881 break;
1882
1883 last_idx = st->last_insn_idx;
1884 first_idx = st->first_insn_idx;
1885 }
1886 return 0;
1887}
1888
a3ce685d
AS
1889static int mark_chain_precision(struct bpf_verifier_env *env, int regno)
1890{
1891 return __mark_chain_precision(env, regno, -1);
1892}
1893
1894static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi)
1895{
1896 return __mark_chain_precision(env, -1, spi);
1897}
b5dc0163 1898
1be7f75d
AS
1899static bool is_spillable_regtype(enum bpf_reg_type type)
1900{
1901 switch (type) {
1902 case PTR_TO_MAP_VALUE:
1903 case PTR_TO_MAP_VALUE_OR_NULL:
1904 case PTR_TO_STACK:
1905 case PTR_TO_CTX:
969bf05e 1906 case PTR_TO_PACKET:
de8f3a83 1907 case PTR_TO_PACKET_META:
969bf05e 1908 case PTR_TO_PACKET_END:
d58e468b 1909 case PTR_TO_FLOW_KEYS:
1be7f75d 1910 case CONST_PTR_TO_MAP:
c64b7983
JS
1911 case PTR_TO_SOCKET:
1912 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
1913 case PTR_TO_SOCK_COMMON:
1914 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
1915 case PTR_TO_TCP_SOCK:
1916 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 1917 case PTR_TO_XDP_SOCK:
1be7f75d
AS
1918 return true;
1919 default:
1920 return false;
1921 }
1922}
1923
cc2b14d5
AS
1924/* Does this register contain a constant zero? */
1925static bool register_is_null(struct bpf_reg_state *reg)
1926{
1927 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0);
1928}
1929
f7cf25b2
AS
1930static bool register_is_const(struct bpf_reg_state *reg)
1931{
1932 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
1933}
1934
1935static void save_register_state(struct bpf_func_state *state,
1936 int spi, struct bpf_reg_state *reg)
1937{
1938 int i;
1939
1940 state->stack[spi].spilled_ptr = *reg;
1941 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
1942
1943 for (i = 0; i < BPF_REG_SIZE; i++)
1944 state->stack[spi].slot_type[i] = STACK_SPILL;
1945}
1946
17a52670
AS
1947/* check_stack_read/write functions track spill/fill of registers,
1948 * stack boundary and alignment are checked in check_mem_access()
1949 */
61bd5218 1950static int check_stack_write(struct bpf_verifier_env *env,
f4d7e40a 1951 struct bpf_func_state *state, /* func where register points to */
af86ca4e 1952 int off, int size, int value_regno, int insn_idx)
17a52670 1953{
f4d7e40a 1954 struct bpf_func_state *cur; /* state of the current function */
638f5b90 1955 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
b5dc0163 1956 u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
f7cf25b2 1957 struct bpf_reg_state *reg = NULL;
638f5b90 1958
f4d7e40a 1959 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
fd978bf7 1960 state->acquired_refs, true);
638f5b90
AS
1961 if (err)
1962 return err;
9c399760
AS
1963 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
1964 * so it's aligned access and [off, off + size) are within stack limits
1965 */
638f5b90
AS
1966 if (!env->allow_ptr_leaks &&
1967 state->stack[spi].slot_type[0] == STACK_SPILL &&
1968 size != BPF_REG_SIZE) {
1969 verbose(env, "attempt to corrupt spilled pointer on stack\n");
1970 return -EACCES;
1971 }
17a52670 1972
f4d7e40a 1973 cur = env->cur_state->frame[env->cur_state->curframe];
f7cf25b2
AS
1974 if (value_regno >= 0)
1975 reg = &cur->regs[value_regno];
17a52670 1976
f7cf25b2
AS
1977 if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
1978 !register_is_null(reg) && env->allow_ptr_leaks) {
b5dc0163
AS
1979 if (dst_reg != BPF_REG_FP) {
1980 /* The backtracking logic can only recognize explicit
1981 * stack slot address like [fp - 8]. Other spill of
1982 * scalar via different register has to be conervative.
1983 * Backtrack from here and mark all registers as precise
1984 * that contributed into 'reg' being a constant.
1985 */
1986 err = mark_chain_precision(env, value_regno);
1987 if (err)
1988 return err;
1989 }
f7cf25b2
AS
1990 save_register_state(state, spi, reg);
1991 } else if (reg && is_spillable_regtype(reg->type)) {
17a52670 1992 /* register containing pointer is being spilled into stack */
9c399760 1993 if (size != BPF_REG_SIZE) {
f7cf25b2 1994 verbose_linfo(env, insn_idx, "; ");
61bd5218 1995 verbose(env, "invalid size of register spill\n");
17a52670
AS
1996 return -EACCES;
1997 }
1998
f7cf25b2 1999 if (state != cur && reg->type == PTR_TO_STACK) {
f4d7e40a
AS
2000 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n");
2001 return -EINVAL;
2002 }
2003
f7cf25b2
AS
2004 if (!env->allow_ptr_leaks) {
2005 bool sanitize = false;
17a52670 2006
f7cf25b2
AS
2007 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
2008 register_is_const(&state->stack[spi].spilled_ptr))
2009 sanitize = true;
2010 for (i = 0; i < BPF_REG_SIZE; i++)
2011 if (state->stack[spi].slot_type[i] == STACK_MISC) {
2012 sanitize = true;
2013 break;
2014 }
2015 if (sanitize) {
af86ca4e
AS
2016 int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
2017 int soff = (-spi - 1) * BPF_REG_SIZE;
2018
2019 /* detected reuse of integer stack slot with a pointer
2020 * which means either llvm is reusing stack slot or
2021 * an attacker is trying to exploit CVE-2018-3639
2022 * (speculative store bypass)
2023 * Have to sanitize that slot with preemptive
2024 * store of zero.
2025 */
2026 if (*poff && *poff != soff) {
2027 /* disallow programs where single insn stores
2028 * into two different stack slots, since verifier
2029 * cannot sanitize them
2030 */
2031 verbose(env,
2032 "insn %d cannot access two stack slots fp%d and fp%d",
2033 insn_idx, *poff, soff);
2034 return -EINVAL;
2035 }
2036 *poff = soff;
2037 }
af86ca4e 2038 }
f7cf25b2 2039 save_register_state(state, spi, reg);
9c399760 2040 } else {
cc2b14d5
AS
2041 u8 type = STACK_MISC;
2042
679c782d
EC
2043 /* regular write of data into stack destroys any spilled ptr */
2044 state->stack[spi].spilled_ptr.type = NOT_INIT;
0bae2d4d
JW
2045 /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
2046 if (state->stack[spi].slot_type[0] == STACK_SPILL)
2047 for (i = 0; i < BPF_REG_SIZE; i++)
2048 state->stack[spi].slot_type[i] = STACK_MISC;
9c399760 2049
cc2b14d5
AS
2050 /* only mark the slot as written if all 8 bytes were written
2051 * otherwise read propagation may incorrectly stop too soon
2052 * when stack slots are partially written.
2053 * This heuristic means that read propagation will be
2054 * conservative, since it will add reg_live_read marks
2055 * to stack slots all the way to first state when programs
2056 * writes+reads less than 8 bytes
2057 */
2058 if (size == BPF_REG_SIZE)
2059 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
2060
2061 /* when we zero initialize stack slots mark them as such */
b5dc0163
AS
2062 if (reg && register_is_null(reg)) {
2063 /* backtracking doesn't work for STACK_ZERO yet. */
2064 err = mark_chain_precision(env, value_regno);
2065 if (err)
2066 return err;
cc2b14d5 2067 type = STACK_ZERO;
b5dc0163 2068 }
cc2b14d5 2069
0bae2d4d 2070 /* Mark slots affected by this stack write. */
9c399760 2071 for (i = 0; i < size; i++)
638f5b90 2072 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
cc2b14d5 2073 type;
17a52670
AS
2074 }
2075 return 0;
2076}
2077
61bd5218 2078static int check_stack_read(struct bpf_verifier_env *env,
f4d7e40a
AS
2079 struct bpf_func_state *reg_state /* func where register points to */,
2080 int off, int size, int value_regno)
17a52670 2081{
f4d7e40a
AS
2082 struct bpf_verifier_state *vstate = env->cur_state;
2083 struct bpf_func_state *state = vstate->frame[vstate->curframe];
638f5b90 2084 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
f7cf25b2 2085 struct bpf_reg_state *reg;
638f5b90 2086 u8 *stype;
17a52670 2087
f4d7e40a 2088 if (reg_state->allocated_stack <= slot) {
638f5b90
AS
2089 verbose(env, "invalid read from stack off %d+0 size %d\n",
2090 off, size);
2091 return -EACCES;
2092 }
f4d7e40a 2093 stype = reg_state->stack[spi].slot_type;
f7cf25b2 2094 reg = &reg_state->stack[spi].spilled_ptr;
17a52670 2095
638f5b90 2096 if (stype[0] == STACK_SPILL) {
9c399760 2097 if (size != BPF_REG_SIZE) {
f7cf25b2
AS
2098 if (reg->type != SCALAR_VALUE) {
2099 verbose_linfo(env, env->insn_idx, "; ");
2100 verbose(env, "invalid size of register fill\n");
2101 return -EACCES;
2102 }
2103 if (value_regno >= 0) {
2104 mark_reg_unknown(env, state->regs, value_regno);
2105 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
2106 }
2107 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
2108 return 0;
17a52670 2109 }
9c399760 2110 for (i = 1; i < BPF_REG_SIZE; i++) {
638f5b90 2111 if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
61bd5218 2112 verbose(env, "corrupted spill memory\n");
17a52670
AS
2113 return -EACCES;
2114 }
2115 }
2116
dc503a8a 2117 if (value_regno >= 0) {
17a52670 2118 /* restore register state from stack */
f7cf25b2 2119 state->regs[value_regno] = *reg;
2f18f62e
AS
2120 /* mark reg as written since spilled pointer state likely
2121 * has its liveness marks cleared by is_state_visited()
2122 * which resets stack/reg liveness for state transitions
2123 */
2124 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
dc503a8a 2125 }
f7cf25b2 2126 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
17a52670 2127 } else {
cc2b14d5
AS
2128 int zeros = 0;
2129
17a52670 2130 for (i = 0; i < size; i++) {
cc2b14d5
AS
2131 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
2132 continue;
2133 if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
2134 zeros++;
2135 continue;
17a52670 2136 }
cc2b14d5
AS
2137 verbose(env, "invalid read from stack off %d+%d size %d\n",
2138 off, i, size);
2139 return -EACCES;
2140 }
f7cf25b2 2141 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
cc2b14d5
AS
2142 if (value_regno >= 0) {
2143 if (zeros == size) {
2144 /* any size read into register is zero extended,
2145 * so the whole register == const_zero
2146 */
2147 __mark_reg_const_zero(&state->regs[value_regno]);
b5dc0163
AS
2148 /* backtracking doesn't support STACK_ZERO yet,
2149 * so mark it precise here, so that later
2150 * backtracking can stop here.
2151 * Backtracking may not need this if this register
2152 * doesn't participate in pointer adjustment.
2153 * Forward propagation of precise flag is not
2154 * necessary either. This mark is only to stop
2155 * backtracking. Any register that contributed
2156 * to const 0 was marked precise before spill.
2157 */
2158 state->regs[value_regno].precise = true;
cc2b14d5
AS
2159 } else {
2160 /* have read misc data from the stack */
2161 mark_reg_unknown(env, state->regs, value_regno);
2162 }
2163 state->regs[value_regno].live |= REG_LIVE_WRITTEN;
17a52670 2164 }
17a52670 2165 }
f7cf25b2 2166 return 0;
17a52670
AS
2167}
2168
e4298d25
DB
2169static int check_stack_access(struct bpf_verifier_env *env,
2170 const struct bpf_reg_state *reg,
2171 int off, int size)
2172{
2173 /* Stack accesses must be at a fixed offset, so that we
2174 * can determine what type of data were returned. See
2175 * check_stack_read().
2176 */
2177 if (!tnum_is_const(reg->var_off)) {
2178 char tn_buf[48];
2179
2180 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
1fbd20f8 2181 verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
e4298d25
DB
2182 tn_buf, off, size);
2183 return -EACCES;
2184 }
2185
2186 if (off >= 0 || off < -MAX_BPF_STACK) {
2187 verbose(env, "invalid stack off=%d size=%d\n", off, size);
2188 return -EACCES;
2189 }
2190
2191 return 0;
2192}
2193
591fe988
DB
2194static int check_map_access_type(struct bpf_verifier_env *env, u32 regno,
2195 int off, int size, enum bpf_access_type type)
2196{
2197 struct bpf_reg_state *regs = cur_regs(env);
2198 struct bpf_map *map = regs[regno].map_ptr;
2199 u32 cap = bpf_map_flags_to_cap(map);
2200
2201 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) {
2202 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n",
2203 map->value_size, off, size);
2204 return -EACCES;
2205 }
2206
2207 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) {
2208 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n",
2209 map->value_size, off, size);
2210 return -EACCES;
2211 }
2212
2213 return 0;
2214}
2215
17a52670 2216/* check read/write into map element returned by bpf_map_lookup_elem() */
f1174f77 2217static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 2218 int size, bool zero_size_allowed)
17a52670 2219{
638f5b90
AS
2220 struct bpf_reg_state *regs = cur_regs(env);
2221 struct bpf_map *map = regs[regno].map_ptr;
17a52670 2222
9fd29c08
YS
2223 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
2224 off + size > map->value_size) {
61bd5218 2225 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
17a52670
AS
2226 map->value_size, off, size);
2227 return -EACCES;
2228 }
2229 return 0;
2230}
2231
f1174f77
EC
2232/* check read/write into a map element with possible variable offset */
2233static int check_map_access(struct bpf_verifier_env *env, u32 regno,
9fd29c08 2234 int off, int size, bool zero_size_allowed)
dbcfe5f7 2235{
f4d7e40a
AS
2236 struct bpf_verifier_state *vstate = env->cur_state;
2237 struct bpf_func_state *state = vstate->frame[vstate->curframe];
dbcfe5f7
GB
2238 struct bpf_reg_state *reg = &state->regs[regno];
2239 int err;
2240
f1174f77
EC
2241 /* We may have adjusted the register to this map value, so we
2242 * need to try adding each of min_value and max_value to off
2243 * to make sure our theoretical access will be safe.
dbcfe5f7 2244 */
06ee7115 2245 if (env->log.level & BPF_LOG_LEVEL)
61bd5218 2246 print_verifier_state(env, state);
b7137c4e 2247
dbcfe5f7
GB
2248 /* The minimum value is only important with signed
2249 * comparisons where we can't assume the floor of a
2250 * value is 0. If we are using signed variables for our
2251 * index'es we need to make sure that whatever we use
2252 * will have a set floor within our range.
2253 */
b7137c4e
DB
2254 if (reg->smin_value < 0 &&
2255 (reg->smin_value == S64_MIN ||
2256 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
2257 reg->smin_value + off < 0)) {
61bd5218 2258 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
dbcfe5f7
GB
2259 regno);
2260 return -EACCES;
2261 }
9fd29c08
YS
2262 err = __check_map_access(env, regno, reg->smin_value + off, size,
2263 zero_size_allowed);
dbcfe5f7 2264 if (err) {
61bd5218
JK
2265 verbose(env, "R%d min value is outside of the array range\n",
2266 regno);
dbcfe5f7
GB
2267 return err;
2268 }
2269
b03c9f9f
EC
2270 /* If we haven't set a max value then we need to bail since we can't be
2271 * sure we won't do bad things.
2272 * If reg->umax_value + off could overflow, treat that as unbounded too.
dbcfe5f7 2273 */
b03c9f9f 2274 if (reg->umax_value >= BPF_MAX_VAR_OFF) {
61bd5218 2275 verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n",
dbcfe5f7
GB
2276 regno);
2277 return -EACCES;
2278 }
9fd29c08
YS
2279 err = __check_map_access(env, regno, reg->umax_value + off, size,
2280 zero_size_allowed);
f1174f77 2281 if (err)
61bd5218
JK
2282 verbose(env, "R%d max value is outside of the array range\n",
2283 regno);
d83525ca
AS
2284
2285 if (map_value_has_spin_lock(reg->map_ptr)) {
2286 u32 lock = reg->map_ptr->spin_lock_off;
2287
2288 /* if any part of struct bpf_spin_lock can be touched by
2289 * load/store reject this program.
2290 * To check that [x1, x2) overlaps with [y1, y2)
2291 * it is sufficient to check x1 < y2 && y1 < x2.
2292 */
2293 if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) &&
2294 lock < reg->umax_value + off + size) {
2295 verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n");
2296 return -EACCES;
2297 }
2298 }
f1174f77 2299 return err;
dbcfe5f7
GB
2300}
2301
969bf05e
AS
2302#define MAX_PACKET_OFF 0xffff
2303
58e2af8b 2304static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
3a0af8fd
TG
2305 const struct bpf_call_arg_meta *meta,
2306 enum bpf_access_type t)
4acf6c0b 2307{
36bbef52 2308 switch (env->prog->type) {
5d66fa7d 2309 /* Program types only with direct read access go here! */
3a0af8fd
TG
2310 case BPF_PROG_TYPE_LWT_IN:
2311 case BPF_PROG_TYPE_LWT_OUT:
004d4b27 2312 case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2dbb9b9e 2313 case BPF_PROG_TYPE_SK_REUSEPORT:
5d66fa7d 2314 case BPF_PROG_TYPE_FLOW_DISSECTOR:
d5563d36 2315 case BPF_PROG_TYPE_CGROUP_SKB:
3a0af8fd
TG
2316 if (t == BPF_WRITE)
2317 return false;
7e57fbb2 2318 /* fallthrough */
5d66fa7d
DB
2319
2320 /* Program types with direct read + write access go here! */
36bbef52
DB
2321 case BPF_PROG_TYPE_SCHED_CLS:
2322 case BPF_PROG_TYPE_SCHED_ACT:
4acf6c0b 2323 case BPF_PROG_TYPE_XDP:
3a0af8fd 2324 case BPF_PROG_TYPE_LWT_XMIT:
8a31db56 2325 case BPF_PROG_TYPE_SK_SKB:
4f738adb 2326 case BPF_PROG_TYPE_SK_MSG:
36bbef52
DB
2327 if (meta)
2328 return meta->pkt_access;
2329
2330 env->seen_direct_write = true;
4acf6c0b 2331 return true;
0d01da6a
SF
2332
2333 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2334 if (t == BPF_WRITE)
2335 env->seen_direct_write = true;
2336
2337 return true;
2338
4acf6c0b
BB
2339 default:
2340 return false;
2341 }
2342}
2343
f1174f77 2344static int __check_packet_access(struct bpf_verifier_env *env, u32 regno,
9fd29c08 2345 int off, int size, bool zero_size_allowed)
969bf05e 2346{
638f5b90 2347 struct bpf_reg_state *regs = cur_regs(env);
58e2af8b 2348 struct bpf_reg_state *reg = &regs[regno];
969bf05e 2349
9fd29c08
YS
2350 if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
2351 (u64)off + size > reg->range) {
61bd5218 2352 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n",
d91b28ed 2353 off, size, regno, reg->id, reg->off, reg->range);
969bf05e
AS
2354 return -EACCES;
2355 }
2356 return 0;
2357}
2358
f1174f77 2359static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
9fd29c08 2360 int size, bool zero_size_allowed)
f1174f77 2361{
638f5b90 2362 struct bpf_reg_state *regs = cur_regs(env);
f1174f77
EC
2363 struct bpf_reg_state *reg = &regs[regno];
2364 int err;
2365
2366 /* We may have added a variable offset to the packet pointer; but any
2367 * reg->range we have comes after that. We are only checking the fixed
2368 * offset.
2369 */
2370
2371 /* We don't allow negative numbers, because we aren't tracking enough
2372 * detail to prove they're safe.
2373 */
b03c9f9f 2374 if (reg->smin_value < 0) {
61bd5218 2375 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
f1174f77
EC
2376 regno);
2377 return -EACCES;
2378 }
9fd29c08 2379 err = __check_packet_access(env, regno, off, size, zero_size_allowed);
f1174f77 2380 if (err) {
61bd5218 2381 verbose(env, "R%d offset is outside of the packet\n", regno);
f1174f77
EC
2382 return err;
2383 }
e647815a
JW
2384
2385 /* __check_packet_access has made sure "off + size - 1" is within u16.
2386 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
2387 * otherwise find_good_pkt_pointers would have refused to set range info
2388 * that __check_packet_access would have rejected this pkt access.
2389 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
2390 */
2391 env->prog->aux->max_pkt_offset =
2392 max_t(u32, env->prog->aux->max_pkt_offset,
2393 off + reg->umax_value + size - 1);
2394
f1174f77
EC
2395 return err;
2396}
2397
2398/* check access to 'struct bpf_context' fields. Supports fixed offsets only */
31fd8581 2399static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
9e15db66
AS
2400 enum bpf_access_type t, enum bpf_reg_type *reg_type,
2401 u32 *btf_id)
17a52670 2402{
f96da094
DB
2403 struct bpf_insn_access_aux info = {
2404 .reg_type = *reg_type,
9e15db66 2405 .log = &env->log,
f96da094 2406 };
31fd8581 2407
4f9218aa 2408 if (env->ops->is_valid_access &&
5e43f899 2409 env->ops->is_valid_access(off, size, t, env->prog, &info)) {
f96da094
DB
2410 /* A non zero info.ctx_field_size indicates that this field is a
2411 * candidate for later verifier transformation to load the whole
2412 * field and then apply a mask when accessed with a narrower
2413 * access than actual ctx access size. A zero info.ctx_field_size
2414 * will only allow for whole field access and rejects any other
2415 * type of narrower access.
31fd8581 2416 */
23994631 2417 *reg_type = info.reg_type;
31fd8581 2418
9e15db66
AS
2419 if (*reg_type == PTR_TO_BTF_ID)
2420 *btf_id = info.btf_id;
2421 else
2422 env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
32bbe007
AS
2423 /* remember the offset of last byte accessed in ctx */
2424 if (env->prog->aux->max_ctx_offset < off + size)
2425 env->prog->aux->max_ctx_offset = off + size;
17a52670 2426 return 0;
32bbe007 2427 }
17a52670 2428
61bd5218 2429 verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
17a52670
AS
2430 return -EACCES;
2431}
2432
d58e468b
PP
2433static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
2434 int size)
2435{
2436 if (size < 0 || off < 0 ||
2437 (u64)off + size > sizeof(struct bpf_flow_keys)) {
2438 verbose(env, "invalid access to flow keys off=%d size=%d\n",
2439 off, size);
2440 return -EACCES;
2441 }
2442 return 0;
2443}
2444
5f456649
MKL
2445static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
2446 u32 regno, int off, int size,
2447 enum bpf_access_type t)
c64b7983
JS
2448{
2449 struct bpf_reg_state *regs = cur_regs(env);
2450 struct bpf_reg_state *reg = &regs[regno];
5f456649 2451 struct bpf_insn_access_aux info = {};
46f8bc92 2452 bool valid;
c64b7983
JS
2453
2454 if (reg->smin_value < 0) {
2455 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
2456 regno);
2457 return -EACCES;
2458 }
2459
46f8bc92
MKL
2460 switch (reg->type) {
2461 case PTR_TO_SOCK_COMMON:
2462 valid = bpf_sock_common_is_valid_access(off, size, t, &info);
2463 break;
2464 case PTR_TO_SOCKET:
2465 valid = bpf_sock_is_valid_access(off, size, t, &info);
2466 break;
655a51e5
MKL
2467 case PTR_TO_TCP_SOCK:
2468 valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
2469 break;
fada7fdc
JL
2470 case PTR_TO_XDP_SOCK:
2471 valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
2472 break;
46f8bc92
MKL
2473 default:
2474 valid = false;
c64b7983
JS
2475 }
2476
5f456649 2477
46f8bc92
MKL
2478 if (valid) {
2479 env->insn_aux_data[insn_idx].ctx_field_size =
2480 info.ctx_field_size;
2481 return 0;
2482 }
2483
2484 verbose(env, "R%d invalid %s access off=%d size=%d\n",
2485 regno, reg_type_str[reg->type], off, size);
2486
2487 return -EACCES;
c64b7983
JS
2488}
2489
4cabc5b1
DB
2490static bool __is_pointer_value(bool allow_ptr_leaks,
2491 const struct bpf_reg_state *reg)
1be7f75d 2492{
4cabc5b1 2493 if (allow_ptr_leaks)
1be7f75d
AS
2494 return false;
2495
f1174f77 2496 return reg->type != SCALAR_VALUE;
1be7f75d
AS
2497}
2498
2a159c6f
DB
2499static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
2500{
2501 return cur_regs(env) + regno;
2502}
2503
4cabc5b1
DB
2504static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
2505{
2a159c6f 2506 return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
4cabc5b1
DB
2507}
2508
f37a8cb8
DB
2509static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
2510{
2a159c6f 2511 const struct bpf_reg_state *reg = reg_state(env, regno);
f37a8cb8 2512
46f8bc92
MKL
2513 return reg->type == PTR_TO_CTX;
2514}
2515
2516static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
2517{
2518 const struct bpf_reg_state *reg = reg_state(env, regno);
2519
2520 return type_is_sk_pointer(reg->type);
f37a8cb8
DB
2521}
2522
ca369602
DB
2523static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
2524{
2a159c6f 2525 const struct bpf_reg_state *reg = reg_state(env, regno);
ca369602
DB
2526
2527 return type_is_pkt_pointer(reg->type);
2528}
2529
4b5defde
DB
2530static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
2531{
2532 const struct bpf_reg_state *reg = reg_state(env, regno);
2533
2534 /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */
2535 return reg->type == PTR_TO_FLOW_KEYS;
2536}
2537
61bd5218
JK
2538static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
2539 const struct bpf_reg_state *reg,
d1174416 2540 int off, int size, bool strict)
969bf05e 2541{
f1174f77 2542 struct tnum reg_off;
e07b98d9 2543 int ip_align;
d1174416
DM
2544
2545 /* Byte size accesses are always allowed. */
2546 if (!strict || size == 1)
2547 return 0;
2548
e4eda884
DM
2549 /* For platforms that do not have a Kconfig enabling
2550 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of
2551 * NET_IP_ALIGN is universally set to '2'. And on platforms
2552 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get
2553 * to this code only in strict mode where we want to emulate
2554 * the NET_IP_ALIGN==2 checking. Therefore use an
2555 * unconditional IP align value of '2'.
e07b98d9 2556 */
e4eda884 2557 ip_align = 2;
f1174f77
EC
2558
2559 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off));
2560 if (!tnum_is_aligned(reg_off, size)) {
2561 char tn_buf[48];
2562
2563 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218
JK
2564 verbose(env,
2565 "misaligned packet access off %d+%s+%d+%d size %d\n",
f1174f77 2566 ip_align, tn_buf, reg->off, off, size);
969bf05e
AS
2567 return -EACCES;
2568 }
79adffcd 2569
969bf05e
AS
2570 return 0;
2571}
2572
61bd5218
JK
2573static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
2574 const struct bpf_reg_state *reg,
f1174f77
EC
2575 const char *pointer_desc,
2576 int off, int size, bool strict)
79adffcd 2577{
f1174f77
EC
2578 struct tnum reg_off;
2579
2580 /* Byte size accesses are always allowed. */
2581 if (!strict || size == 1)
2582 return 0;
2583
2584 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off));
2585 if (!tnum_is_aligned(reg_off, size)) {
2586 char tn_buf[48];
2587
2588 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 2589 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n",
f1174f77 2590 pointer_desc, tn_buf, reg->off, off, size);
79adffcd
DB
2591 return -EACCES;
2592 }
2593
969bf05e
AS
2594 return 0;
2595}
2596
e07b98d9 2597static int check_ptr_alignment(struct bpf_verifier_env *env,
ca369602
DB
2598 const struct bpf_reg_state *reg, int off,
2599 int size, bool strict_alignment_once)
79adffcd 2600{
ca369602 2601 bool strict = env->strict_alignment || strict_alignment_once;
f1174f77 2602 const char *pointer_desc = "";
d1174416 2603
79adffcd
DB
2604 switch (reg->type) {
2605 case PTR_TO_PACKET:
de8f3a83
DB
2606 case PTR_TO_PACKET_META:
2607 /* Special case, because of NET_IP_ALIGN. Given metadata sits
2608 * right in front, treat it the very same way.
2609 */
61bd5218 2610 return check_pkt_ptr_alignment(env, reg, off, size, strict);
d58e468b
PP
2611 case PTR_TO_FLOW_KEYS:
2612 pointer_desc = "flow keys ";
2613 break;
f1174f77
EC
2614 case PTR_TO_MAP_VALUE:
2615 pointer_desc = "value ";
2616 break;
2617 case PTR_TO_CTX:
2618 pointer_desc = "context ";
2619 break;
2620 case PTR_TO_STACK:
2621 pointer_desc = "stack ";
a5ec6ae1
JH
2622 /* The stack spill tracking logic in check_stack_write()
2623 * and check_stack_read() relies on stack accesses being
2624 * aligned.
2625 */
2626 strict = true;
f1174f77 2627 break;
c64b7983
JS
2628 case PTR_TO_SOCKET:
2629 pointer_desc = "sock ";
2630 break;
46f8bc92
MKL
2631 case PTR_TO_SOCK_COMMON:
2632 pointer_desc = "sock_common ";
2633 break;
655a51e5
MKL
2634 case PTR_TO_TCP_SOCK:
2635 pointer_desc = "tcp_sock ";
2636 break;
fada7fdc
JL
2637 case PTR_TO_XDP_SOCK:
2638 pointer_desc = "xdp_sock ";
2639 break;
79adffcd 2640 default:
f1174f77 2641 break;
79adffcd 2642 }
61bd5218
JK
2643 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
2644 strict);
79adffcd
DB
2645}
2646
f4d7e40a
AS
2647static int update_stack_depth(struct bpf_verifier_env *env,
2648 const struct bpf_func_state *func,
2649 int off)
2650{
9c8105bd 2651 u16 stack = env->subprog_info[func->subprogno].stack_depth;
f4d7e40a
AS
2652
2653 if (stack >= -off)
2654 return 0;
2655
2656 /* update known max for given subprogram */
9c8105bd 2657 env->subprog_info[func->subprogno].stack_depth = -off;
70a87ffe
AS
2658 return 0;
2659}
f4d7e40a 2660
70a87ffe
AS
2661/* starting from main bpf function walk all instructions of the function
2662 * and recursively walk all callees that given function can call.
2663 * Ignore jump and exit insns.
2664 * Since recursion is prevented by check_cfg() this algorithm
2665 * only needs a local stack of MAX_CALL_FRAMES to remember callsites
2666 */
2667static int check_max_stack_depth(struct bpf_verifier_env *env)
2668{
9c8105bd
JW
2669 int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
2670 struct bpf_subprog_info *subprog = env->subprog_info;
70a87ffe 2671 struct bpf_insn *insn = env->prog->insnsi;
70a87ffe
AS
2672 int ret_insn[MAX_CALL_FRAMES];
2673 int ret_prog[MAX_CALL_FRAMES];
f4d7e40a 2674
70a87ffe
AS
2675process_func:
2676 /* round up to 32-bytes, since this is granularity
2677 * of interpreter stack size
2678 */
9c8105bd 2679 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe 2680 if (depth > MAX_BPF_STACK) {
f4d7e40a 2681 verbose(env, "combined stack size of %d calls is %d. Too large\n",
70a87ffe 2682 frame + 1, depth);
f4d7e40a
AS
2683 return -EACCES;
2684 }
70a87ffe 2685continue_func:
4cb3d99c 2686 subprog_end = subprog[idx + 1].start;
70a87ffe
AS
2687 for (; i < subprog_end; i++) {
2688 if (insn[i].code != (BPF_JMP | BPF_CALL))
2689 continue;
2690 if (insn[i].src_reg != BPF_PSEUDO_CALL)
2691 continue;
2692 /* remember insn and function to return to */
2693 ret_insn[frame] = i + 1;
9c8105bd 2694 ret_prog[frame] = idx;
70a87ffe
AS
2695
2696 /* find the callee */
2697 i = i + insn[i].imm + 1;
9c8105bd
JW
2698 idx = find_subprog(env, i);
2699 if (idx < 0) {
70a87ffe
AS
2700 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2701 i);
2702 return -EFAULT;
2703 }
70a87ffe
AS
2704 frame++;
2705 if (frame >= MAX_CALL_FRAMES) {
927cb781
PC
2706 verbose(env, "the call stack of %d frames is too deep !\n",
2707 frame);
2708 return -E2BIG;
70a87ffe
AS
2709 }
2710 goto process_func;
2711 }
2712 /* end of for() loop means the last insn of the 'subprog'
2713 * was reached. Doesn't matter whether it was JA or EXIT
2714 */
2715 if (frame == 0)
2716 return 0;
9c8105bd 2717 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
70a87ffe
AS
2718 frame--;
2719 i = ret_insn[frame];
9c8105bd 2720 idx = ret_prog[frame];
70a87ffe 2721 goto continue_func;
f4d7e40a
AS
2722}
2723
19d28fbd 2724#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
2725static int get_callee_stack_depth(struct bpf_verifier_env *env,
2726 const struct bpf_insn *insn, int idx)
2727{
2728 int start = idx + insn->imm + 1, subprog;
2729
2730 subprog = find_subprog(env, start);
2731 if (subprog < 0) {
2732 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
2733 start);
2734 return -EFAULT;
2735 }
9c8105bd 2736 return env->subprog_info[subprog].stack_depth;
1ea47e01 2737}
19d28fbd 2738#endif
1ea47e01 2739
58990d1f
DB
2740static int check_ctx_reg(struct bpf_verifier_env *env,
2741 const struct bpf_reg_state *reg, int regno)
2742{
2743 /* Access to ctx or passing it to a helper is only allowed in
2744 * its original, unmodified form.
2745 */
2746
2747 if (reg->off) {
2748 verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
2749 regno, reg->off);
2750 return -EACCES;
2751 }
2752
2753 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2754 char tn_buf[48];
2755
2756 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2757 verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
2758 return -EACCES;
2759 }
2760
2761 return 0;
2762}
2763
9df1c28b
MM
2764static int check_tp_buffer_access(struct bpf_verifier_env *env,
2765 const struct bpf_reg_state *reg,
2766 int regno, int off, int size)
2767{
2768 if (off < 0) {
2769 verbose(env,
2770 "R%d invalid tracepoint buffer access: off=%d, size=%d",
2771 regno, off, size);
2772 return -EACCES;
2773 }
2774 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2775 char tn_buf[48];
2776
2777 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2778 verbose(env,
2779 "R%d invalid variable buffer offset: off=%d, var_off=%s",
2780 regno, off, tn_buf);
2781 return -EACCES;
2782 }
2783 if (off + size > env->prog->aux->max_tp_access)
2784 env->prog->aux->max_tp_access = off + size;
2785
2786 return 0;
2787}
2788
2789
0c17d1d2
JH
2790/* truncate register to smaller size (in bytes)
2791 * must be called with size < BPF_REG_SIZE
2792 */
2793static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
2794{
2795 u64 mask;
2796
2797 /* clear high bits in bit representation */
2798 reg->var_off = tnum_cast(reg->var_off, size);
2799
2800 /* fix arithmetic bounds */
2801 mask = ((u64)1 << (size * 8)) - 1;
2802 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
2803 reg->umin_value &= mask;
2804 reg->umax_value &= mask;
2805 } else {
2806 reg->umin_value = 0;
2807 reg->umax_value = mask;
2808 }
2809 reg->smin_value = reg->umin_value;
2810 reg->smax_value = reg->umax_value;
2811}
2812
a23740ec
AN
2813static bool bpf_map_is_rdonly(const struct bpf_map *map)
2814{
2815 return (map->map_flags & BPF_F_RDONLY_PROG) && map->frozen;
2816}
2817
2818static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
2819{
2820 void *ptr;
2821 u64 addr;
2822 int err;
2823
2824 err = map->ops->map_direct_value_addr(map, &addr, off);
2825 if (err)
2826 return err;
2dedd7d2 2827 ptr = (void *)(long)addr + off;
a23740ec
AN
2828
2829 switch (size) {
2830 case sizeof(u8):
2831 *val = (u64)*(u8 *)ptr;
2832 break;
2833 case sizeof(u16):
2834 *val = (u64)*(u16 *)ptr;
2835 break;
2836 case sizeof(u32):
2837 *val = (u64)*(u32 *)ptr;
2838 break;
2839 case sizeof(u64):
2840 *val = *(u64 *)ptr;
2841 break;
2842 default:
2843 return -EINVAL;
2844 }
2845 return 0;
2846}
2847
9e15db66
AS
2848static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
2849 struct bpf_reg_state *regs,
2850 int regno, int off, int size,
2851 enum bpf_access_type atype,
2852 int value_regno)
2853{
2854 struct bpf_reg_state *reg = regs + regno;
2855 const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
2856 const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off);
2857 u32 btf_id;
2858 int ret;
2859
2860 if (atype != BPF_READ) {
2861 verbose(env, "only read is supported\n");
2862 return -EACCES;
2863 }
2864
2865 if (off < 0) {
2866 verbose(env,
2867 "R%d is ptr_%s invalid negative access: off=%d\n",
2868 regno, tname, off);
2869 return -EACCES;
2870 }
2871 if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
2872 char tn_buf[48];
2873
2874 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
2875 verbose(env,
2876 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n",
2877 regno, tname, off, tn_buf);
2878 return -EACCES;
2879 }
2880
2881 ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
2882 if (ret < 0)
2883 return ret;
2884
2885 if (ret == SCALAR_VALUE) {
2886 mark_reg_unknown(env, regs, value_regno);
2887 return 0;
2888 }
2889 mark_reg_known_zero(env, regs, value_regno);
2890 regs[value_regno].type = PTR_TO_BTF_ID;
2891 regs[value_regno].btf_id = btf_id;
2892 return 0;
2893}
2894
17a52670
AS
2895/* check whether memory at (regno + off) is accessible for t = (read | write)
2896 * if t==write, value_regno is a register which value is stored into memory
2897 * if t==read, value_regno is a register which will receive the value from memory
2898 * if t==write && value_regno==-1, some unknown value is stored into memory
2899 * if t==read && value_regno==-1, don't care what we read from memory
2900 */
ca369602
DB
2901static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
2902 int off, int bpf_size, enum bpf_access_type t,
2903 int value_regno, bool strict_alignment_once)
17a52670 2904{
638f5b90
AS
2905 struct bpf_reg_state *regs = cur_regs(env);
2906 struct bpf_reg_state *reg = regs + regno;
f4d7e40a 2907 struct bpf_func_state *state;
17a52670
AS
2908 int size, err = 0;
2909
2910 size = bpf_size_to_bytes(bpf_size);
2911 if (size < 0)
2912 return size;
2913
f1174f77 2914 /* alignment checks will add in reg->off themselves */
ca369602 2915 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
969bf05e
AS
2916 if (err)
2917 return err;
17a52670 2918
f1174f77
EC
2919 /* for access checks, reg->off is just part of off */
2920 off += reg->off;
2921
2922 if (reg->type == PTR_TO_MAP_VALUE) {
1be7f75d
AS
2923 if (t == BPF_WRITE && value_regno >= 0 &&
2924 is_pointer_value(env, value_regno)) {
61bd5218 2925 verbose(env, "R%d leaks addr into map\n", value_regno);
1be7f75d
AS
2926 return -EACCES;
2927 }
591fe988
DB
2928 err = check_map_access_type(env, regno, off, size, t);
2929 if (err)
2930 return err;
9fd29c08 2931 err = check_map_access(env, regno, off, size, false);
a23740ec
AN
2932 if (!err && t == BPF_READ && value_regno >= 0) {
2933 struct bpf_map *map = reg->map_ptr;
2934
2935 /* if map is read-only, track its contents as scalars */
2936 if (tnum_is_const(reg->var_off) &&
2937 bpf_map_is_rdonly(map) &&
2938 map->ops->map_direct_value_addr) {
2939 int map_off = off + reg->var_off.value;
2940 u64 val = 0;
2941
2942 err = bpf_map_direct_read(map, map_off, size,
2943 &val);
2944 if (err)
2945 return err;
2946
2947 regs[value_regno].type = SCALAR_VALUE;
2948 __mark_reg_known(&regs[value_regno], val);
2949 } else {
2950 mark_reg_unknown(env, regs, value_regno);
2951 }
2952 }
1a0dc1ac 2953 } else if (reg->type == PTR_TO_CTX) {
f1174f77 2954 enum bpf_reg_type reg_type = SCALAR_VALUE;
9e15db66 2955 u32 btf_id = 0;
19de99f7 2956
1be7f75d
AS
2957 if (t == BPF_WRITE && value_regno >= 0 &&
2958 is_pointer_value(env, value_regno)) {
61bd5218 2959 verbose(env, "R%d leaks addr into ctx\n", value_regno);
1be7f75d
AS
2960 return -EACCES;
2961 }
f1174f77 2962
58990d1f
DB
2963 err = check_ctx_reg(env, reg, regno);
2964 if (err < 0)
2965 return err;
2966
9e15db66
AS
2967 err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
2968 if (err)
2969 verbose_linfo(env, insn_idx, "; ");
969bf05e 2970 if (!err && t == BPF_READ && value_regno >= 0) {
f1174f77 2971 /* ctx access returns either a scalar, or a
de8f3a83
DB
2972 * PTR_TO_PACKET[_META,_END]. In the latter
2973 * case, we know the offset is zero.
f1174f77 2974 */
46f8bc92 2975 if (reg_type == SCALAR_VALUE) {
638f5b90 2976 mark_reg_unknown(env, regs, value_regno);
46f8bc92 2977 } else {
638f5b90 2978 mark_reg_known_zero(env, regs,
61bd5218 2979 value_regno);
46f8bc92
MKL
2980 if (reg_type_may_be_null(reg_type))
2981 regs[value_regno].id = ++env->id_gen;
5327ed3d
JW
2982 /* A load of ctx field could have different
2983 * actual load size with the one encoded in the
2984 * insn. When the dst is PTR, it is for sure not
2985 * a sub-register.
2986 */
2987 regs[value_regno].subreg_def = DEF_NOT_SUBREG;
9e15db66
AS
2988 if (reg_type == PTR_TO_BTF_ID)
2989 regs[value_regno].btf_id = btf_id;
46f8bc92 2990 }
638f5b90 2991 regs[value_regno].type = reg_type;
969bf05e 2992 }
17a52670 2993
f1174f77 2994 } else if (reg->type == PTR_TO_STACK) {
f1174f77 2995 off += reg->var_off.value;
e4298d25
DB
2996 err = check_stack_access(env, reg, off, size);
2997 if (err)
2998 return err;
8726679a 2999
f4d7e40a
AS
3000 state = func(env, reg);
3001 err = update_stack_depth(env, state, off);
3002 if (err)
3003 return err;
8726679a 3004
638f5b90 3005 if (t == BPF_WRITE)
61bd5218 3006 err = check_stack_write(env, state, off, size,
af86ca4e 3007 value_regno, insn_idx);
638f5b90 3008 else
61bd5218
JK
3009 err = check_stack_read(env, state, off, size,
3010 value_regno);
de8f3a83 3011 } else if (reg_is_pkt_pointer(reg)) {
3a0af8fd 3012 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) {
61bd5218 3013 verbose(env, "cannot write into packet\n");
969bf05e
AS
3014 return -EACCES;
3015 }
4acf6c0b
BB
3016 if (t == BPF_WRITE && value_regno >= 0 &&
3017 is_pointer_value(env, value_regno)) {
61bd5218
JK
3018 verbose(env, "R%d leaks addr into packet\n",
3019 value_regno);
4acf6c0b
BB
3020 return -EACCES;
3021 }
9fd29c08 3022 err = check_packet_access(env, regno, off, size, false);
969bf05e 3023 if (!err && t == BPF_READ && value_regno >= 0)
638f5b90 3024 mark_reg_unknown(env, regs, value_regno);
d58e468b
PP
3025 } else if (reg->type == PTR_TO_FLOW_KEYS) {
3026 if (t == BPF_WRITE && value_regno >= 0 &&
3027 is_pointer_value(env, value_regno)) {
3028 verbose(env, "R%d leaks addr into flow keys\n",
3029 value_regno);
3030 return -EACCES;
3031 }
3032
3033 err = check_flow_keys_access(env, off, size);
3034 if (!err && t == BPF_READ && value_regno >= 0)
3035 mark_reg_unknown(env, regs, value_regno);
46f8bc92 3036 } else if (type_is_sk_pointer(reg->type)) {
c64b7983 3037 if (t == BPF_WRITE) {
46f8bc92
MKL
3038 verbose(env, "R%d cannot write into %s\n",
3039 regno, reg_type_str[reg->type]);
c64b7983
JS
3040 return -EACCES;
3041 }
5f456649 3042 err = check_sock_access(env, insn_idx, regno, off, size, t);
c64b7983
JS
3043 if (!err && value_regno >= 0)
3044 mark_reg_unknown(env, regs, value_regno);
9df1c28b
MM
3045 } else if (reg->type == PTR_TO_TP_BUFFER) {
3046 err = check_tp_buffer_access(env, reg, regno, off, size);
3047 if (!err && t == BPF_READ && value_regno >= 0)
3048 mark_reg_unknown(env, regs, value_regno);
9e15db66
AS
3049 } else if (reg->type == PTR_TO_BTF_ID) {
3050 err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
3051 value_regno);
17a52670 3052 } else {
61bd5218
JK
3053 verbose(env, "R%d invalid mem access '%s'\n", regno,
3054 reg_type_str[reg->type]);
17a52670
AS
3055 return -EACCES;
3056 }
969bf05e 3057
f1174f77 3058 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
638f5b90 3059 regs[value_regno].type == SCALAR_VALUE) {
f1174f77 3060 /* b/h/w load zero-extends, mark upper bits as known 0 */
0c17d1d2 3061 coerce_reg_to_size(&regs[value_regno], size);
969bf05e 3062 }
17a52670
AS
3063 return err;
3064}
3065
31fd8581 3066static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn)
17a52670 3067{
17a52670
AS
3068 int err;
3069
3070 if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) ||
3071 insn->imm != 0) {
61bd5218 3072 verbose(env, "BPF_XADD uses reserved fields\n");
17a52670
AS
3073 return -EINVAL;
3074 }
3075
3076 /* check src1 operand */
dc503a8a 3077 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
3078 if (err)
3079 return err;
3080
3081 /* check src2 operand */
dc503a8a 3082 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
3083 if (err)
3084 return err;
3085
6bdf6abc 3086 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 3087 verbose(env, "R%d leaks addr into mem\n", insn->src_reg);
6bdf6abc
DB
3088 return -EACCES;
3089 }
3090
ca369602 3091 if (is_ctx_reg(env, insn->dst_reg) ||
4b5defde 3092 is_pkt_reg(env, insn->dst_reg) ||
46f8bc92
MKL
3093 is_flow_key_reg(env, insn->dst_reg) ||
3094 is_sk_reg(env, insn->dst_reg)) {
ca369602 3095 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
2a159c6f
DB
3096 insn->dst_reg,
3097 reg_type_str[reg_state(env, insn->dst_reg)->type]);
f37a8cb8
DB
3098 return -EACCES;
3099 }
3100
17a52670 3101 /* check whether atomic_add can read the memory */
31fd8581 3102 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
ca369602 3103 BPF_SIZE(insn->code), BPF_READ, -1, true);
17a52670
AS
3104 if (err)
3105 return err;
3106
3107 /* check whether atomic_add can write into the same memory */
31fd8581 3108 return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
ca369602 3109 BPF_SIZE(insn->code), BPF_WRITE, -1, true);
17a52670
AS
3110}
3111
2011fccf
AI
3112static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno,
3113 int off, int access_size,
3114 bool zero_size_allowed)
3115{
3116 struct bpf_reg_state *reg = reg_state(env, regno);
3117
3118 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
3119 access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
3120 if (tnum_is_const(reg->var_off)) {
3121 verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
3122 regno, off, access_size);
3123 } else {
3124 char tn_buf[48];
3125
3126 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3127 verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n",
3128 regno, tn_buf, access_size);
3129 }
3130 return -EACCES;
3131 }
3132 return 0;
3133}
3134
17a52670
AS
3135/* when register 'regno' is passed into function that will read 'access_size'
3136 * bytes from that pointer, make sure that it's within stack boundary
f1174f77
EC
3137 * and all elements of stack are initialized.
3138 * Unlike most pointer bounds-checking functions, this one doesn't take an
3139 * 'off' argument, so it has to add in reg->off itself.
17a52670 3140 */
58e2af8b 3141static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
435faee1
DB
3142 int access_size, bool zero_size_allowed,
3143 struct bpf_call_arg_meta *meta)
17a52670 3144{
2a159c6f 3145 struct bpf_reg_state *reg = reg_state(env, regno);
f4d7e40a 3146 struct bpf_func_state *state = func(env, reg);
f7cf25b2 3147 int err, min_off, max_off, i, j, slot, spi;
17a52670 3148
914cb781 3149 if (reg->type != PTR_TO_STACK) {
f1174f77 3150 /* Allow zero-byte read from NULL, regardless of pointer type */
8e2fe1d9 3151 if (zero_size_allowed && access_size == 0 &&
914cb781 3152 register_is_null(reg))
8e2fe1d9
DB
3153 return 0;
3154
61bd5218 3155 verbose(env, "R%d type=%s expected=%s\n", regno,
914cb781 3156 reg_type_str[reg->type],
8e2fe1d9 3157 reg_type_str[PTR_TO_STACK]);
17a52670 3158 return -EACCES;
8e2fe1d9 3159 }
17a52670 3160
2011fccf
AI
3161 if (tnum_is_const(reg->var_off)) {
3162 min_off = max_off = reg->var_off.value + reg->off;
3163 err = __check_stack_boundary(env, regno, min_off, access_size,
3164 zero_size_allowed);
3165 if (err)
3166 return err;
3167 } else {
088ec26d
AI
3168 /* Variable offset is prohibited for unprivileged mode for
3169 * simplicity since it requires corresponding support in
3170 * Spectre masking for stack ALU.
3171 * See also retrieve_ptr_limit().
3172 */
3173 if (!env->allow_ptr_leaks) {
3174 char tn_buf[48];
f1174f77 3175
088ec26d
AI
3176 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3177 verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n",
3178 regno, tn_buf);
3179 return -EACCES;
3180 }
f2bcd05e
AI
3181 /* Only initialized buffer on stack is allowed to be accessed
3182 * with variable offset. With uninitialized buffer it's hard to
3183 * guarantee that whole memory is marked as initialized on
3184 * helper return since specific bounds are unknown what may
3185 * cause uninitialized stack leaking.
3186 */
3187 if (meta && meta->raw_mode)
3188 meta = NULL;
3189
107c26a7
AI
3190 if (reg->smax_value >= BPF_MAX_VAR_OFF ||
3191 reg->smax_value <= -BPF_MAX_VAR_OFF) {
3192 verbose(env, "R%d unbounded indirect variable offset stack access\n",
3193 regno);
3194 return -EACCES;
3195 }
2011fccf 3196 min_off = reg->smin_value + reg->off;
107c26a7 3197 max_off = reg->smax_value + reg->off;
2011fccf
AI
3198 err = __check_stack_boundary(env, regno, min_off, access_size,
3199 zero_size_allowed);
107c26a7
AI
3200 if (err) {
3201 verbose(env, "R%d min value is outside of stack bound\n",
3202 regno);
2011fccf 3203 return err;
107c26a7 3204 }
2011fccf
AI
3205 err = __check_stack_boundary(env, regno, max_off, access_size,
3206 zero_size_allowed);
107c26a7
AI
3207 if (err) {
3208 verbose(env, "R%d max value is outside of stack bound\n",
3209 regno);
2011fccf 3210 return err;
107c26a7 3211 }
17a52670
AS
3212 }
3213
435faee1
DB
3214 if (meta && meta->raw_mode) {
3215 meta->access_size = access_size;
3216 meta->regno = regno;
3217 return 0;
3218 }
3219
2011fccf 3220 for (i = min_off; i < max_off + access_size; i++) {
cc2b14d5
AS
3221 u8 *stype;
3222
2011fccf 3223 slot = -i - 1;
638f5b90 3224 spi = slot / BPF_REG_SIZE;
cc2b14d5
AS
3225 if (state->allocated_stack <= slot)
3226 goto err;
3227 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
3228 if (*stype == STACK_MISC)
3229 goto mark;
3230 if (*stype == STACK_ZERO) {
3231 /* helper can write anything into the stack */
3232 *stype = STACK_MISC;
3233 goto mark;
17a52670 3234 }
f7cf25b2
AS
3235 if (state->stack[spi].slot_type[0] == STACK_SPILL &&
3236 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
3237 __mark_reg_unknown(&state->stack[spi].spilled_ptr);
3238 for (j = 0; j < BPF_REG_SIZE; j++)
3239 state->stack[spi].slot_type[j] = STACK_MISC;
3240 goto mark;
3241 }
3242
cc2b14d5 3243err:
2011fccf
AI
3244 if (tnum_is_const(reg->var_off)) {
3245 verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
3246 min_off, i - min_off, access_size);
3247 } else {
3248 char tn_buf[48];
3249
3250 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
3251 verbose(env, "invalid indirect read from stack var_off %s+%d size %d\n",
3252 tn_buf, i - min_off, access_size);
3253 }
cc2b14d5
AS
3254 return -EACCES;
3255mark:
3256 /* reading any byte out of 8-byte 'spill_slot' will cause
3257 * the whole slot to be marked as 'read'
3258 */
679c782d 3259 mark_reg_read(env, &state->stack[spi].spilled_ptr,
5327ed3d
JW
3260 state->stack[spi].spilled_ptr.parent,
3261 REG_LIVE_READ64);
17a52670 3262 }
2011fccf 3263 return update_stack_depth(env, state, min_off);
17a52670
AS
3264}
3265
06c1c049
GB
3266static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
3267 int access_size, bool zero_size_allowed,
3268 struct bpf_call_arg_meta *meta)
3269{
638f5b90 3270 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
06c1c049 3271
f1174f77 3272 switch (reg->type) {
06c1c049 3273 case PTR_TO_PACKET:
de8f3a83 3274 case PTR_TO_PACKET_META:
9fd29c08
YS
3275 return check_packet_access(env, regno, reg->off, access_size,
3276 zero_size_allowed);
06c1c049 3277 case PTR_TO_MAP_VALUE:
591fe988
DB
3278 if (check_map_access_type(env, regno, reg->off, access_size,
3279 meta && meta->raw_mode ? BPF_WRITE :
3280 BPF_READ))
3281 return -EACCES;
9fd29c08
YS
3282 return check_map_access(env, regno, reg->off, access_size,
3283 zero_size_allowed);
f1174f77 3284 default: /* scalar_value|ptr_to_stack or invalid ptr */
06c1c049
GB
3285 return check_stack_boundary(env, regno, access_size,
3286 zero_size_allowed, meta);
3287 }
3288}
3289
d83525ca
AS
3290/* Implementation details:
3291 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
3292 * Two bpf_map_lookups (even with the same key) will have different reg->id.
3293 * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
3294 * value_or_null->value transition, since the verifier only cares about
3295 * the range of access to valid map value pointer and doesn't care about actual
3296 * address of the map element.
3297 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
3298 * reg->id > 0 after value_or_null->value transition. By doing so
3299 * two bpf_map_lookups will be considered two different pointers that
3300 * point to different bpf_spin_locks.
3301 * The verifier allows taking only one bpf_spin_lock at a time to avoid
3302 * dead-locks.
3303 * Since only one bpf_spin_lock is allowed the checks are simpler than
3304 * reg_is_refcounted() logic. The verifier needs to remember only
3305 * one spin_lock instead of array of acquired_refs.
3306 * cur_state->active_spin_lock remembers which map value element got locked
3307 * and clears it after bpf_spin_unlock.
3308 */
3309static int process_spin_lock(struct bpf_verifier_env *env, int regno,
3310 bool is_lock)
3311{
3312 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
3313 struct bpf_verifier_state *cur = env->cur_state;
3314 bool is_const = tnum_is_const(reg->var_off);
3315 struct bpf_map *map = reg->map_ptr;
3316 u64 val = reg->var_off.value;
3317
3318 if (reg->type != PTR_TO_MAP_VALUE) {
3319 verbose(env, "R%d is not a pointer to map_value\n", regno);
3320 return -EINVAL;
3321 }
3322 if (!is_const) {
3323 verbose(env,
3324 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
3325 regno);
3326 return -EINVAL;
3327 }
3328 if (!map->btf) {
3329 verbose(env,
3330 "map '%s' has to have BTF in order to use bpf_spin_lock\n",
3331 map->name);
3332 return -EINVAL;
3333 }
3334 if (!map_value_has_spin_lock(map)) {
3335 if (map->spin_lock_off == -E2BIG)
3336 verbose(env,
3337 "map '%s' has more than one 'struct bpf_spin_lock'\n",
3338 map->name);
3339 else if (map->spin_lock_off == -ENOENT)
3340 verbose(env,
3341 "map '%s' doesn't have 'struct bpf_spin_lock'\n",
3342 map->name);
3343 else
3344 verbose(env,
3345 "map '%s' is not a struct type or bpf_spin_lock is mangled\n",
3346 map->name);
3347 return -EINVAL;
3348 }
3349 if (map->spin_lock_off != val + reg->off) {
3350 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n",
3351 val + reg->off);
3352 return -EINVAL;
3353 }
3354 if (is_lock) {
3355 if (cur->active_spin_lock) {
3356 verbose(env,
3357 "Locking two bpf_spin_locks are not allowed\n");
3358 return -EINVAL;
3359 }
3360 cur->active_spin_lock = reg->id;
3361 } else {
3362 if (!cur->active_spin_lock) {
3363 verbose(env, "bpf_spin_unlock without taking a lock\n");
3364 return -EINVAL;
3365 }
3366 if (cur->active_spin_lock != reg->id) {
3367 verbose(env, "bpf_spin_unlock of different lock\n");
3368 return -EINVAL;
3369 }
3370 cur->active_spin_lock = 0;
3371 }
3372 return 0;
3373}
3374
90133415
DB
3375static bool arg_type_is_mem_ptr(enum bpf_arg_type type)
3376{
3377 return type == ARG_PTR_TO_MEM ||
3378 type == ARG_PTR_TO_MEM_OR_NULL ||
3379 type == ARG_PTR_TO_UNINIT_MEM;
3380}
3381
3382static bool arg_type_is_mem_size(enum bpf_arg_type type)
3383{
3384 return type == ARG_CONST_SIZE ||
3385 type == ARG_CONST_SIZE_OR_ZERO;
3386}
3387
57c3bb72
AI
3388static bool arg_type_is_int_ptr(enum bpf_arg_type type)
3389{
3390 return type == ARG_PTR_TO_INT ||
3391 type == ARG_PTR_TO_LONG;
3392}
3393
3394static int int_ptr_type_to_size(enum bpf_arg_type type)
3395{
3396 if (type == ARG_PTR_TO_INT)
3397 return sizeof(u32);
3398 else if (type == ARG_PTR_TO_LONG)
3399 return sizeof(u64);
3400
3401 return -EINVAL;
3402}
3403
58e2af8b 3404static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
33ff9823
DB
3405 enum bpf_arg_type arg_type,
3406 struct bpf_call_arg_meta *meta)
17a52670 3407{
638f5b90 3408 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
6841de8b 3409 enum bpf_reg_type expected_type, type = reg->type;
17a52670
AS
3410 int err = 0;
3411
80f1d68c 3412 if (arg_type == ARG_DONTCARE)
17a52670
AS
3413 return 0;
3414
dc503a8a
EC
3415 err = check_reg_arg(env, regno, SRC_OP);
3416 if (err)
3417 return err;
17a52670 3418
1be7f75d
AS
3419 if (arg_type == ARG_ANYTHING) {
3420 if (is_pointer_value(env, regno)) {
61bd5218
JK
3421 verbose(env, "R%d leaks addr into helper function\n",
3422 regno);
1be7f75d
AS
3423 return -EACCES;
3424 }
80f1d68c 3425 return 0;
1be7f75d 3426 }
80f1d68c 3427
de8f3a83 3428 if (type_is_pkt_pointer(type) &&
3a0af8fd 3429 !may_access_direct_pkt_data(env, meta, BPF_READ)) {
61bd5218 3430 verbose(env, "helper access to the packet is not allowed\n");
6841de8b
AS
3431 return -EACCES;
3432 }
3433
8e2fe1d9 3434 if (arg_type == ARG_PTR_TO_MAP_KEY ||
2ea864c5 3435 arg_type == ARG_PTR_TO_MAP_VALUE ||
6ac99e8f
MKL
3436 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
3437 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
17a52670 3438 expected_type = PTR_TO_STACK;
6ac99e8f
MKL
3439 if (register_is_null(reg) &&
3440 arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL)
3441 /* final test in check_stack_boundary() */;
3442 else if (!type_is_pkt_pointer(type) &&
3443 type != PTR_TO_MAP_VALUE &&
3444 type != expected_type)
6841de8b 3445 goto err_type;
39f19ebb
AS
3446 } else if (arg_type == ARG_CONST_SIZE ||
3447 arg_type == ARG_CONST_SIZE_OR_ZERO) {
f1174f77
EC
3448 expected_type = SCALAR_VALUE;
3449 if (type != expected_type)
6841de8b 3450 goto err_type;
17a52670
AS
3451 } else if (arg_type == ARG_CONST_MAP_PTR) {
3452 expected_type = CONST_PTR_TO_MAP;
6841de8b
AS
3453 if (type != expected_type)
3454 goto err_type;
608cd71a
AS
3455 } else if (arg_type == ARG_PTR_TO_CTX) {
3456 expected_type = PTR_TO_CTX;
6841de8b
AS
3457 if (type != expected_type)
3458 goto err_type;
58990d1f
DB
3459 err = check_ctx_reg(env, reg, regno);
3460 if (err < 0)
3461 return err;
46f8bc92
MKL
3462 } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
3463 expected_type = PTR_TO_SOCK_COMMON;
3464 /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
3465 if (!type_is_sk_pointer(type))
3466 goto err_type;
1b986589
MKL
3467 if (reg->ref_obj_id) {
3468 if (meta->ref_obj_id) {
3469 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
3470 regno, reg->ref_obj_id,
3471 meta->ref_obj_id);
3472 return -EFAULT;
3473 }
3474 meta->ref_obj_id = reg->ref_obj_id;
fd978bf7 3475 }
6ac99e8f
MKL
3476 } else if (arg_type == ARG_PTR_TO_SOCKET) {
3477 expected_type = PTR_TO_SOCKET;
3478 if (type != expected_type)
3479 goto err_type;
a7658e1a
AS
3480 } else if (arg_type == ARG_PTR_TO_BTF_ID) {
3481 expected_type = PTR_TO_BTF_ID;
3482 if (type != expected_type)
3483 goto err_type;
3484 if (reg->btf_id != meta->btf_id) {
3485 verbose(env, "Helper has type %s got %s in R%d\n",
3486 kernel_type_name(meta->btf_id),
3487 kernel_type_name(reg->btf_id), regno);
3488
3489 return -EACCES;
3490 }
3491 if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) {
3492 verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
3493 regno);
3494 return -EACCES;
3495 }
d83525ca
AS
3496 } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
3497 if (meta->func_id == BPF_FUNC_spin_lock) {
3498 if (process_spin_lock(env, regno, true))
3499 return -EACCES;
3500 } else if (meta->func_id == BPF_FUNC_spin_unlock) {
3501 if (process_spin_lock(env, regno, false))
3502 return -EACCES;
3503 } else {
3504 verbose(env, "verifier internal error\n");
3505 return -EFAULT;
3506 }
90133415 3507 } else if (arg_type_is_mem_ptr(arg_type)) {
8e2fe1d9
DB
3508 expected_type = PTR_TO_STACK;
3509 /* One exception here. In case function allows for NULL to be
f1174f77 3510 * passed in as argument, it's a SCALAR_VALUE type. Final test
8e2fe1d9
DB
3511 * happens during stack boundary checking.
3512 */
914cb781 3513 if (register_is_null(reg) &&
db1ac496 3514 arg_type == ARG_PTR_TO_MEM_OR_NULL)
6841de8b 3515 /* final test in check_stack_boundary() */;
de8f3a83
DB
3516 else if (!type_is_pkt_pointer(type) &&
3517 type != PTR_TO_MAP_VALUE &&
f1174f77 3518 type != expected_type)
6841de8b 3519 goto err_type;
39f19ebb 3520 meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
57c3bb72
AI
3521 } else if (arg_type_is_int_ptr(arg_type)) {
3522 expected_type = PTR_TO_STACK;
3523 if (!type_is_pkt_pointer(type) &&
3524 type != PTR_TO_MAP_VALUE &&
3525 type != expected_type)
3526 goto err_type;
17a52670 3527 } else {
61bd5218 3528 verbose(env, "unsupported arg_type %d\n", arg_type);
17a52670
AS
3529 return -EFAULT;
3530 }
3531
17a52670
AS
3532 if (arg_type == ARG_CONST_MAP_PTR) {
3533 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
33ff9823 3534 meta->map_ptr = reg->map_ptr;
17a52670
AS
3535 } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
3536 /* bpf_map_xxx(..., map_ptr, ..., key) call:
3537 * check that [key, key + map->key_size) are within
3538 * stack limits and initialized
3539 */
33ff9823 3540 if (!meta->map_ptr) {
17a52670
AS
3541 /* in function declaration map_ptr must come before
3542 * map_key, so that it's verified and known before
3543 * we have to check map_key here. Otherwise it means
3544 * that kernel subsystem misconfigured verifier
3545 */
61bd5218 3546 verbose(env, "invalid map_ptr to access map->key\n");
17a52670
AS
3547 return -EACCES;
3548 }
d71962f3
PC
3549 err = check_helper_mem_access(env, regno,
3550 meta->map_ptr->key_size, false,
3551 NULL);
2ea864c5 3552 } else if (arg_type == ARG_PTR_TO_MAP_VALUE ||
6ac99e8f
MKL
3553 (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL &&
3554 !register_is_null(reg)) ||
2ea864c5 3555 arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) {
17a52670
AS
3556 /* bpf_map_xxx(..., map_ptr, ..., value) call:
3557 * check [value, value + map->value_size) validity
3558 */
33ff9823 3559 if (!meta->map_ptr) {
17a52670 3560 /* kernel subsystem misconfigured verifier */
61bd5218 3561 verbose(env, "invalid map_ptr to access map->value\n");
17a52670
AS
3562 return -EACCES;
3563 }
2ea864c5 3564 meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE);
d71962f3
PC
3565 err = check_helper_mem_access(env, regno,
3566 meta->map_ptr->value_size, false,
2ea864c5 3567 meta);
90133415 3568 } else if (arg_type_is_mem_size(arg_type)) {
39f19ebb 3569 bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
17a52670 3570
849fa506
YS
3571 /* remember the mem_size which may be used later
3572 * to refine return values.
3573 */
3574 meta->msize_smax_value = reg->smax_value;
3575 meta->msize_umax_value = reg->umax_value;
3576
f1174f77
EC
3577 /* The register is SCALAR_VALUE; the access check
3578 * happens using its boundaries.
06c1c049 3579 */
f1174f77 3580 if (!tnum_is_const(reg->var_off))
06c1c049
GB
3581 /* For unprivileged variable accesses, disable raw
3582 * mode so that the program is required to
3583 * initialize all the memory that the helper could
3584 * just partially fill up.
3585 */
3586 meta = NULL;
3587
b03c9f9f 3588 if (reg->smin_value < 0) {
61bd5218 3589 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n",
f1174f77
EC
3590 regno);
3591 return -EACCES;
3592 }
06c1c049 3593
b03c9f9f 3594 if (reg->umin_value == 0) {
f1174f77
EC
3595 err = check_helper_mem_access(env, regno - 1, 0,
3596 zero_size_allowed,
3597 meta);
06c1c049
GB
3598 if (err)
3599 return err;
06c1c049 3600 }
f1174f77 3601
b03c9f9f 3602 if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
61bd5218 3603 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
f1174f77
EC
3604 regno);
3605 return -EACCES;
3606 }
3607 err = check_helper_mem_access(env, regno - 1,
b03c9f9f 3608 reg->umax_value,
f1174f77 3609 zero_size_allowed, meta);
b5dc0163
AS
3610 if (!err)
3611 err = mark_chain_precision(env, regno);
57c3bb72
AI
3612 } else if (arg_type_is_int_ptr(arg_type)) {
3613 int size = int_ptr_type_to_size(arg_type);
3614
3615 err = check_helper_mem_access(env, regno, size, false, meta);
3616 if (err)
3617 return err;
3618 err = check_ptr_alignment(env, reg, 0, size, true);
17a52670
AS
3619 }
3620
3621 return err;
6841de8b 3622err_type:
61bd5218 3623 verbose(env, "R%d type=%s expected=%s\n", regno,
6841de8b
AS
3624 reg_type_str[type], reg_type_str[expected_type]);
3625 return -EACCES;
17a52670
AS
3626}
3627
61bd5218
JK
3628static int check_map_func_compatibility(struct bpf_verifier_env *env,
3629 struct bpf_map *map, int func_id)
35578d79 3630{
35578d79
KX
3631 if (!map)
3632 return 0;
3633
6aff67c8
AS
3634 /* We need a two way check, first is from map perspective ... */
3635 switch (map->map_type) {
3636 case BPF_MAP_TYPE_PROG_ARRAY:
3637 if (func_id != BPF_FUNC_tail_call)
3638 goto error;
3639 break;
3640 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
3641 if (func_id != BPF_FUNC_perf_event_read &&
908432ca 3642 func_id != BPF_FUNC_perf_event_output &&
a7658e1a 3643 func_id != BPF_FUNC_skb_output &&
908432ca 3644 func_id != BPF_FUNC_perf_event_read_value)
6aff67c8
AS
3645 goto error;
3646 break;
3647 case BPF_MAP_TYPE_STACK_TRACE:
3648 if (func_id != BPF_FUNC_get_stackid)
3649 goto error;
3650 break;
4ed8ec52 3651 case BPF_MAP_TYPE_CGROUP_ARRAY:
60747ef4 3652 if (func_id != BPF_FUNC_skb_under_cgroup &&
60d20f91 3653 func_id != BPF_FUNC_current_task_under_cgroup)
4a482f34
MKL
3654 goto error;
3655 break;
cd339431 3656 case BPF_MAP_TYPE_CGROUP_STORAGE:
b741f163 3657 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
cd339431
RG
3658 if (func_id != BPF_FUNC_get_local_storage)
3659 goto error;
3660 break;
546ac1ff 3661 case BPF_MAP_TYPE_DEVMAP:
6f9d451a 3662 case BPF_MAP_TYPE_DEVMAP_HASH:
0cdbb4b0
THJ
3663 if (func_id != BPF_FUNC_redirect_map &&
3664 func_id != BPF_FUNC_map_lookup_elem)
546ac1ff
JF
3665 goto error;
3666 break;
fbfc504a
BT
3667 /* Restrict bpf side of cpumap and xskmap, open when use-cases
3668 * appear.
3669 */
6710e112
JDB
3670 case BPF_MAP_TYPE_CPUMAP:
3671 if (func_id != BPF_FUNC_redirect_map)
3672 goto error;
3673 break;
fada7fdc
JL
3674 case BPF_MAP_TYPE_XSKMAP:
3675 if (func_id != BPF_FUNC_redirect_map &&
3676 func_id != BPF_FUNC_map_lookup_elem)
3677 goto error;
3678 break;
56f668df 3679 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
bcc6b1b7 3680 case BPF_MAP_TYPE_HASH_OF_MAPS:
56f668df
MKL
3681 if (func_id != BPF_FUNC_map_lookup_elem)
3682 goto error;
16a43625 3683 break;
174a79ff
JF
3684 case BPF_MAP_TYPE_SOCKMAP:
3685 if (func_id != BPF_FUNC_sk_redirect_map &&
3686 func_id != BPF_FUNC_sock_map_update &&
4f738adb
JF
3687 func_id != BPF_FUNC_map_delete_elem &&
3688 func_id != BPF_FUNC_msg_redirect_map)
174a79ff
JF
3689 goto error;
3690 break;
81110384
JF
3691 case BPF_MAP_TYPE_SOCKHASH:
3692 if (func_id != BPF_FUNC_sk_redirect_hash &&
3693 func_id != BPF_FUNC_sock_hash_update &&
3694 func_id != BPF_FUNC_map_delete_elem &&
3695 func_id != BPF_FUNC_msg_redirect_hash)
3696 goto error;
3697 break;
2dbb9b9e
MKL
3698 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
3699 if (func_id != BPF_FUNC_sk_select_reuseport)
3700 goto error;
3701 break;
f1a2e44a
MV
3702 case BPF_MAP_TYPE_QUEUE:
3703 case BPF_MAP_TYPE_STACK:
3704 if (func_id != BPF_FUNC_map_peek_elem &&
3705 func_id != BPF_FUNC_map_pop_elem &&
3706 func_id != BPF_FUNC_map_push_elem)
3707 goto error;
3708 break;
6ac99e8f
MKL
3709 case BPF_MAP_TYPE_SK_STORAGE:
3710 if (func_id != BPF_FUNC_sk_storage_get &&
3711 func_id != BPF_FUNC_sk_storage_delete)
3712 goto error;
3713 break;
6aff67c8
AS
3714 default:
3715 break;
3716 }
3717
3718 /* ... and second from the function itself. */
3719 switch (func_id) {
3720 case BPF_FUNC_tail_call:
3721 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
3722 goto error;
f910cefa 3723 if (env->subprog_cnt > 1) {
f4d7e40a
AS
3724 verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
3725 return -EINVAL;
3726 }
6aff67c8
AS
3727 break;
3728 case BPF_FUNC_perf_event_read:
3729 case BPF_FUNC_perf_event_output:
908432ca 3730 case BPF_FUNC_perf_event_read_value:
a7658e1a 3731 case BPF_FUNC_skb_output:
6aff67c8
AS
3732 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
3733 goto error;
3734 break;
3735 case BPF_FUNC_get_stackid:
3736 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
3737 goto error;
3738 break;
60d20f91 3739 case BPF_FUNC_current_task_under_cgroup:
747ea55e 3740 case BPF_FUNC_skb_under_cgroup:
4a482f34
MKL
3741 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY)
3742 goto error;
3743 break;
97f91a7c 3744 case BPF_FUNC_redirect_map:
9c270af3 3745 if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
6f9d451a 3746 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH &&
fbfc504a
BT
3747 map->map_type != BPF_MAP_TYPE_CPUMAP &&
3748 map->map_type != BPF_MAP_TYPE_XSKMAP)
97f91a7c
JF
3749 goto error;
3750 break;
174a79ff 3751 case BPF_FUNC_sk_redirect_map:
4f738adb 3752 case BPF_FUNC_msg_redirect_map:
81110384 3753 case BPF_FUNC_sock_map_update:
174a79ff
JF
3754 if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
3755 goto error;
3756 break;
81110384
JF
3757 case BPF_FUNC_sk_redirect_hash:
3758 case BPF_FUNC_msg_redirect_hash:
3759 case BPF_FUNC_sock_hash_update:
3760 if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
174a79ff
JF
3761 goto error;
3762 break;
cd339431 3763 case BPF_FUNC_get_local_storage:
b741f163
RG
3764 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
3765 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
cd339431
RG
3766 goto error;
3767 break;
2dbb9b9e
MKL
3768 case BPF_FUNC_sk_select_reuseport:
3769 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
3770 goto error;
3771 break;
f1a2e44a
MV
3772 case BPF_FUNC_map_peek_elem:
3773 case BPF_FUNC_map_pop_elem:
3774 case BPF_FUNC_map_push_elem:
3775 if (map->map_type != BPF_MAP_TYPE_QUEUE &&
3776 map->map_type != BPF_MAP_TYPE_STACK)
3777 goto error;
3778 break;
6ac99e8f
MKL
3779 case BPF_FUNC_sk_storage_get:
3780 case BPF_FUNC_sk_storage_delete:
3781 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
3782 goto error;
3783 break;
6aff67c8
AS
3784 default:
3785 break;
35578d79
KX
3786 }
3787
3788 return 0;
6aff67c8 3789error:
61bd5218 3790 verbose(env, "cannot pass map_type %d into func %s#%d\n",
ebb676da 3791 map->map_type, func_id_name(func_id), func_id);
6aff67c8 3792 return -EINVAL;
35578d79
KX
3793}
3794
90133415 3795static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
435faee1
DB
3796{
3797 int count = 0;
3798
39f19ebb 3799 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3800 count++;
39f19ebb 3801 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3802 count++;
39f19ebb 3803 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3804 count++;
39f19ebb 3805 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
435faee1 3806 count++;
39f19ebb 3807 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
435faee1
DB
3808 count++;
3809
90133415
DB
3810 /* We only support one arg being in raw mode at the moment,
3811 * which is sufficient for the helper functions we have
3812 * right now.
3813 */
3814 return count <= 1;
3815}
3816
3817static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
3818 enum bpf_arg_type arg_next)
3819{
3820 return (arg_type_is_mem_ptr(arg_curr) &&
3821 !arg_type_is_mem_size(arg_next)) ||
3822 (!arg_type_is_mem_ptr(arg_curr) &&
3823 arg_type_is_mem_size(arg_next));
3824}
3825
3826static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
3827{
3828 /* bpf_xxx(..., buf, len) call will access 'len'
3829 * bytes from memory 'buf'. Both arg types need
3830 * to be paired, so make sure there's no buggy
3831 * helper function specification.
3832 */
3833 if (arg_type_is_mem_size(fn->arg1_type) ||
3834 arg_type_is_mem_ptr(fn->arg5_type) ||
3835 check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
3836 check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
3837 check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
3838 check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
3839 return false;
3840
3841 return true;
3842}
3843
1b986589 3844static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
fd978bf7
JS
3845{
3846 int count = 0;
3847
1b986589 3848 if (arg_type_may_be_refcounted(fn->arg1_type))
fd978bf7 3849 count++;
1b986589 3850 if (arg_type_may_be_refcounted(fn->arg2_type))
fd978bf7 3851 count++;
1b986589 3852 if (arg_type_may_be_refcounted(fn->arg3_type))
fd978bf7 3853 count++;
1b986589 3854 if (arg_type_may_be_refcounted(fn->arg4_type))
fd978bf7 3855 count++;
1b986589 3856 if (arg_type_may_be_refcounted(fn->arg5_type))
fd978bf7
JS
3857 count++;
3858
1b986589
MKL
3859 /* A reference acquiring function cannot acquire
3860 * another refcounted ptr.
3861 */
3862 if (is_acquire_function(func_id) && count)
3863 return false;
3864
fd978bf7
JS
3865 /* We only support one arg being unreferenced at the moment,
3866 * which is sufficient for the helper functions we have right now.
3867 */
3868 return count <= 1;
3869}
3870
1b986589 3871static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
90133415
DB
3872{
3873 return check_raw_mode_ok(fn) &&
fd978bf7 3874 check_arg_pair_ok(fn) &&
1b986589 3875 check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
435faee1
DB
3876}
3877
de8f3a83
DB
3878/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
3879 * are now invalid, so turn them into unknown SCALAR_VALUE.
f1174f77 3880 */
f4d7e40a
AS
3881static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
3882 struct bpf_func_state *state)
969bf05e 3883{
58e2af8b 3884 struct bpf_reg_state *regs = state->regs, *reg;
969bf05e
AS
3885 int i;
3886
3887 for (i = 0; i < MAX_BPF_REG; i++)
de8f3a83 3888 if (reg_is_pkt_pointer_any(&regs[i]))
61bd5218 3889 mark_reg_unknown(env, regs, i);
969bf05e 3890
f3709f69
JS
3891 bpf_for_each_spilled_reg(i, state, reg) {
3892 if (!reg)
969bf05e 3893 continue;
de8f3a83
DB
3894 if (reg_is_pkt_pointer_any(reg))
3895 __mark_reg_unknown(reg);
969bf05e
AS
3896 }
3897}
3898
f4d7e40a
AS
3899static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
3900{
3901 struct bpf_verifier_state *vstate = env->cur_state;
3902 int i;
3903
3904 for (i = 0; i <= vstate->curframe; i++)
3905 __clear_all_pkt_pointers(env, vstate->frame[i]);
3906}
3907
fd978bf7 3908static void release_reg_references(struct bpf_verifier_env *env,
1b986589
MKL
3909 struct bpf_func_state *state,
3910 int ref_obj_id)
fd978bf7
JS
3911{
3912 struct bpf_reg_state *regs = state->regs, *reg;
3913 int i;
3914
3915 for (i = 0; i < MAX_BPF_REG; i++)
1b986589 3916 if (regs[i].ref_obj_id == ref_obj_id)
fd978bf7
JS
3917 mark_reg_unknown(env, regs, i);
3918
3919 bpf_for_each_spilled_reg(i, state, reg) {
3920 if (!reg)
3921 continue;
1b986589 3922 if (reg->ref_obj_id == ref_obj_id)
fd978bf7
JS
3923 __mark_reg_unknown(reg);
3924 }
3925}
3926
3927/* The pointer with the specified id has released its reference to kernel
3928 * resources. Identify all copies of the same pointer and clear the reference.
3929 */
3930static int release_reference(struct bpf_verifier_env *env,
1b986589 3931 int ref_obj_id)
fd978bf7
JS
3932{
3933 struct bpf_verifier_state *vstate = env->cur_state;
1b986589 3934 int err;
fd978bf7
JS
3935 int i;
3936
1b986589
MKL
3937 err = release_reference_state(cur_func(env), ref_obj_id);
3938 if (err)
3939 return err;
3940
fd978bf7 3941 for (i = 0; i <= vstate->curframe; i++)
1b986589 3942 release_reg_references(env, vstate->frame[i], ref_obj_id);
fd978bf7 3943
1b986589 3944 return 0;
fd978bf7
JS
3945}
3946
f4d7e40a
AS
3947static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
3948 int *insn_idx)
3949{
3950 struct bpf_verifier_state *state = env->cur_state;
3951 struct bpf_func_state *caller, *callee;
fd978bf7 3952 int i, err, subprog, target_insn;
f4d7e40a 3953
aada9ce6 3954 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
f4d7e40a 3955 verbose(env, "the call stack of %d frames is too deep\n",
aada9ce6 3956 state->curframe + 2);
f4d7e40a
AS
3957 return -E2BIG;
3958 }
3959
3960 target_insn = *insn_idx + insn->imm;
3961 subprog = find_subprog(env, target_insn + 1);
3962 if (subprog < 0) {
3963 verbose(env, "verifier bug. No program starts at insn %d\n",
3964 target_insn + 1);
3965 return -EFAULT;
3966 }
3967
3968 caller = state->frame[state->curframe];
3969 if (state->frame[state->curframe + 1]) {
3970 verbose(env, "verifier bug. Frame %d already allocated\n",
3971 state->curframe + 1);
3972 return -EFAULT;
3973 }
3974
3975 callee = kzalloc(sizeof(*callee), GFP_KERNEL);
3976 if (!callee)
3977 return -ENOMEM;
3978 state->frame[state->curframe + 1] = callee;
3979
3980 /* callee cannot access r0, r6 - r9 for reading and has to write
3981 * into its own stack before reading from it.
3982 * callee can read/write into caller's stack
3983 */
3984 init_func_state(env, callee,
3985 /* remember the callsite, it will be used by bpf_exit */
3986 *insn_idx /* callsite */,
3987 state->curframe + 1 /* frameno within this callchain */,
f910cefa 3988 subprog /* subprog number within this prog */);
f4d7e40a 3989
fd978bf7
JS
3990 /* Transfer references to the callee */
3991 err = transfer_reference_state(callee, caller);
3992 if (err)
3993 return err;
3994
679c782d
EC
3995 /* copy r1 - r5 args that callee can access. The copy includes parent
3996 * pointers, which connects us up to the liveness chain
3997 */
f4d7e40a
AS
3998 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
3999 callee->regs[i] = caller->regs[i];
4000
679c782d 4001 /* after the call registers r0 - r5 were scratched */
f4d7e40a
AS
4002 for (i = 0; i < CALLER_SAVED_REGS; i++) {
4003 mark_reg_not_init(env, caller->regs, caller_saved[i]);
4004 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4005 }
4006
4007 /* only increment it after check_reg_arg() finished */
4008 state->curframe++;
4009
8c1b6e69
AS
4010 if (btf_check_func_arg_match(env, subprog))
4011 return -EINVAL;
4012
f4d7e40a
AS
4013 /* and go analyze first insn of the callee */
4014 *insn_idx = target_insn;
4015
06ee7115 4016 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a
AS
4017 verbose(env, "caller:\n");
4018 print_verifier_state(env, caller);
4019 verbose(env, "callee:\n");
4020 print_verifier_state(env, callee);
4021 }
4022 return 0;
4023}
4024
4025static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
4026{
4027 struct bpf_verifier_state *state = env->cur_state;
4028 struct bpf_func_state *caller, *callee;
4029 struct bpf_reg_state *r0;
fd978bf7 4030 int err;
f4d7e40a
AS
4031
4032 callee = state->frame[state->curframe];
4033 r0 = &callee->regs[BPF_REG_0];
4034 if (r0->type == PTR_TO_STACK) {
4035 /* technically it's ok to return caller's stack pointer
4036 * (or caller's caller's pointer) back to the caller,
4037 * since these pointers are valid. Only current stack
4038 * pointer will be invalid as soon as function exits,
4039 * but let's be conservative
4040 */
4041 verbose(env, "cannot return stack pointer to the caller\n");
4042 return -EINVAL;
4043 }
4044
4045 state->curframe--;
4046 caller = state->frame[state->curframe];
4047 /* return to the caller whatever r0 had in the callee */
4048 caller->regs[BPF_REG_0] = *r0;
4049
fd978bf7
JS
4050 /* Transfer references to the caller */
4051 err = transfer_reference_state(caller, callee);
4052 if (err)
4053 return err;
4054
f4d7e40a 4055 *insn_idx = callee->callsite + 1;
06ee7115 4056 if (env->log.level & BPF_LOG_LEVEL) {
f4d7e40a
AS
4057 verbose(env, "returning from callee:\n");
4058 print_verifier_state(env, callee);
4059 verbose(env, "to caller at %d:\n", *insn_idx);
4060 print_verifier_state(env, caller);
4061 }
4062 /* clear everything in the callee */
4063 free_func_state(callee);
4064 state->frame[state->curframe + 1] = NULL;
4065 return 0;
4066}
4067
849fa506
YS
4068static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
4069 int func_id,
4070 struct bpf_call_arg_meta *meta)
4071{
4072 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
4073
4074 if (ret_type != RET_INTEGER ||
4075 (func_id != BPF_FUNC_get_stack &&
4076 func_id != BPF_FUNC_probe_read_str))
4077 return;
4078
4079 ret_reg->smax_value = meta->msize_smax_value;
4080 ret_reg->umax_value = meta->msize_umax_value;
4081 __reg_deduce_bounds(ret_reg);
4082 __reg_bound_offset(ret_reg);
4083}
4084
c93552c4
DB
4085static int
4086record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4087 int func_id, int insn_idx)
4088{
4089 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
591fe988 4090 struct bpf_map *map = meta->map_ptr;
c93552c4
DB
4091
4092 if (func_id != BPF_FUNC_tail_call &&
09772d92
DB
4093 func_id != BPF_FUNC_map_lookup_elem &&
4094 func_id != BPF_FUNC_map_update_elem &&
f1a2e44a
MV
4095 func_id != BPF_FUNC_map_delete_elem &&
4096 func_id != BPF_FUNC_map_push_elem &&
4097 func_id != BPF_FUNC_map_pop_elem &&
4098 func_id != BPF_FUNC_map_peek_elem)
c93552c4 4099 return 0;
09772d92 4100
591fe988 4101 if (map == NULL) {
c93552c4
DB
4102 verbose(env, "kernel subsystem misconfigured verifier\n");
4103 return -EINVAL;
4104 }
4105
591fe988
DB
4106 /* In case of read-only, some additional restrictions
4107 * need to be applied in order to prevent altering the
4108 * state of the map from program side.
4109 */
4110 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
4111 (func_id == BPF_FUNC_map_delete_elem ||
4112 func_id == BPF_FUNC_map_update_elem ||
4113 func_id == BPF_FUNC_map_push_elem ||
4114 func_id == BPF_FUNC_map_pop_elem)) {
4115 verbose(env, "write into map forbidden\n");
4116 return -EACCES;
4117 }
4118
d2e4c1e6 4119 if (!BPF_MAP_PTR(aux->map_ptr_state))
c93552c4
DB
4120 bpf_map_ptr_store(aux, meta->map_ptr,
4121 meta->map_ptr->unpriv_array);
d2e4c1e6 4122 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
c93552c4
DB
4123 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
4124 meta->map_ptr->unpriv_array);
4125 return 0;
4126}
4127
d2e4c1e6
DB
4128static int
4129record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
4130 int func_id, int insn_idx)
4131{
4132 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
4133 struct bpf_reg_state *regs = cur_regs(env), *reg;
4134 struct bpf_map *map = meta->map_ptr;
4135 struct tnum range;
4136 u64 val;
cc52d914 4137 int err;
d2e4c1e6
DB
4138
4139 if (func_id != BPF_FUNC_tail_call)
4140 return 0;
4141 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
4142 verbose(env, "kernel subsystem misconfigured verifier\n");
4143 return -EINVAL;
4144 }
4145
4146 range = tnum_range(0, map->max_entries - 1);
4147 reg = &regs[BPF_REG_3];
4148
4149 if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
4150 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4151 return 0;
4152 }
4153
cc52d914
DB
4154 err = mark_chain_precision(env, BPF_REG_3);
4155 if (err)
4156 return err;
4157
d2e4c1e6
DB
4158 val = reg->var_off.value;
4159 if (bpf_map_key_unseen(aux))
4160 bpf_map_key_store(aux, val);
4161 else if (!bpf_map_key_poisoned(aux) &&
4162 bpf_map_key_immediate(aux) != val)
4163 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
4164 return 0;
4165}
4166
fd978bf7
JS
4167static int check_reference_leak(struct bpf_verifier_env *env)
4168{
4169 struct bpf_func_state *state = cur_func(env);
4170 int i;
4171
4172 for (i = 0; i < state->acquired_refs; i++) {
4173 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
4174 state->refs[i].id, state->refs[i].insn_idx);
4175 }
4176 return state->acquired_refs ? -EINVAL : 0;
4177}
4178
f4d7e40a 4179static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
17a52670 4180{
17a52670 4181 const struct bpf_func_proto *fn = NULL;
638f5b90 4182 struct bpf_reg_state *regs;
33ff9823 4183 struct bpf_call_arg_meta meta;
969bf05e 4184 bool changes_data;
17a52670
AS
4185 int i, err;
4186
4187 /* find function prototype */
4188 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) {
61bd5218
JK
4189 verbose(env, "invalid func %s#%d\n", func_id_name(func_id),
4190 func_id);
17a52670
AS
4191 return -EINVAL;
4192 }
4193
00176a34 4194 if (env->ops->get_func_proto)
5e43f899 4195 fn = env->ops->get_func_proto(func_id, env->prog);
17a52670 4196 if (!fn) {
61bd5218
JK
4197 verbose(env, "unknown func %s#%d\n", func_id_name(func_id),
4198 func_id);
17a52670
AS
4199 return -EINVAL;
4200 }
4201
4202 /* eBPF programs must be GPL compatible to use GPL-ed functions */
24701ece 4203 if (!env->prog->gpl_compatible && fn->gpl_only) {
3fe2867c 4204 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n");
17a52670
AS
4205 return -EINVAL;
4206 }
4207
04514d13 4208 /* With LD_ABS/IND some JITs save/restore skb from r1. */
17bedab2 4209 changes_data = bpf_helper_changes_pkt_data(fn->func);
04514d13
DB
4210 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
4211 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
4212 func_id_name(func_id), func_id);
4213 return -EINVAL;
4214 }
969bf05e 4215
33ff9823 4216 memset(&meta, 0, sizeof(meta));
36bbef52 4217 meta.pkt_access = fn->pkt_access;
33ff9823 4218
1b986589 4219 err = check_func_proto(fn, func_id);
435faee1 4220 if (err) {
61bd5218 4221 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
ebb676da 4222 func_id_name(func_id), func_id);
435faee1
DB
4223 return err;
4224 }
4225
d83525ca 4226 meta.func_id = func_id;
17a52670 4227 /* check args */
a7658e1a 4228 for (i = 0; i < 5; i++) {
9cc31b3a
AS
4229 err = btf_resolve_helper_id(&env->log, fn, i);
4230 if (err > 0)
4231 meta.btf_id = err;
a7658e1a
AS
4232 err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta);
4233 if (err)
4234 return err;
4235 }
17a52670 4236
c93552c4
DB
4237 err = record_func_map(env, &meta, func_id, insn_idx);
4238 if (err)
4239 return err;
4240
d2e4c1e6
DB
4241 err = record_func_key(env, &meta, func_id, insn_idx);
4242 if (err)
4243 return err;
4244
435faee1
DB
4245 /* Mark slots with STACK_MISC in case of raw mode, stack offset
4246 * is inferred from register state.
4247 */
4248 for (i = 0; i < meta.access_size; i++) {
ca369602
DB
4249 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
4250 BPF_WRITE, -1, false);
435faee1
DB
4251 if (err)
4252 return err;
4253 }
4254
fd978bf7
JS
4255 if (func_id == BPF_FUNC_tail_call) {
4256 err = check_reference_leak(env);
4257 if (err) {
4258 verbose(env, "tail_call would lead to reference leak\n");
4259 return err;
4260 }
4261 } else if (is_release_function(func_id)) {
1b986589 4262 err = release_reference(env, meta.ref_obj_id);
46f8bc92
MKL
4263 if (err) {
4264 verbose(env, "func %s#%d reference has not been acquired before\n",
4265 func_id_name(func_id), func_id);
fd978bf7 4266 return err;
46f8bc92 4267 }
fd978bf7
JS
4268 }
4269
638f5b90 4270 regs = cur_regs(env);
cd339431
RG
4271
4272 /* check that flags argument in get_local_storage(map, flags) is 0,
4273 * this is required because get_local_storage() can't return an error.
4274 */
4275 if (func_id == BPF_FUNC_get_local_storage &&
4276 !register_is_null(&regs[BPF_REG_2])) {
4277 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
4278 return -EINVAL;
4279 }
4280
17a52670 4281 /* reset caller saved regs */
dc503a8a 4282 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 4283 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
4284 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
4285 }
17a52670 4286
5327ed3d
JW
4287 /* helper call returns 64-bit value. */
4288 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG;
4289
dc503a8a 4290 /* update return register (already marked as written above) */
17a52670 4291 if (fn->ret_type == RET_INTEGER) {
f1174f77 4292 /* sets type to SCALAR_VALUE */
61bd5218 4293 mark_reg_unknown(env, regs, BPF_REG_0);
17a52670
AS
4294 } else if (fn->ret_type == RET_VOID) {
4295 regs[BPF_REG_0].type = NOT_INIT;
3e6a4b3e
RG
4296 } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
4297 fn->ret_type == RET_PTR_TO_MAP_VALUE) {
f1174f77 4298 /* There is no offset yet applied, variable or fixed */
61bd5218 4299 mark_reg_known_zero(env, regs, BPF_REG_0);
17a52670
AS
4300 /* remember map_ptr, so that check_map_access()
4301 * can check 'value_size' boundary of memory access
4302 * to map element returned from bpf_map_lookup_elem()
4303 */
33ff9823 4304 if (meta.map_ptr == NULL) {
61bd5218
JK
4305 verbose(env,
4306 "kernel subsystem misconfigured verifier\n");
17a52670
AS
4307 return -EINVAL;
4308 }
33ff9823 4309 regs[BPF_REG_0].map_ptr = meta.map_ptr;
4d31f301
DB
4310 if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
4311 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
e16d2f1a
AS
4312 if (map_value_has_spin_lock(meta.map_ptr))
4313 regs[BPF_REG_0].id = ++env->id_gen;
4d31f301
DB
4314 } else {
4315 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
4316 regs[BPF_REG_0].id = ++env->id_gen;
4317 }
c64b7983
JS
4318 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
4319 mark_reg_known_zero(env, regs, BPF_REG_0);
4320 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
0f3adc28 4321 regs[BPF_REG_0].id = ++env->id_gen;
85a51f8c
LB
4322 } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
4323 mark_reg_known_zero(env, regs, BPF_REG_0);
4324 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
4325 regs[BPF_REG_0].id = ++env->id_gen;
655a51e5
MKL
4326 } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
4327 mark_reg_known_zero(env, regs, BPF_REG_0);
4328 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
4329 regs[BPF_REG_0].id = ++env->id_gen;
17a52670 4330 } else {
61bd5218 4331 verbose(env, "unknown return type %d of func %s#%d\n",
ebb676da 4332 fn->ret_type, func_id_name(func_id), func_id);
17a52670
AS
4333 return -EINVAL;
4334 }
04fd61ab 4335
0f3adc28 4336 if (is_ptr_cast_function(func_id)) {
1b986589
MKL
4337 /* For release_reference() */
4338 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
0f3adc28
LB
4339 } else if (is_acquire_function(func_id)) {
4340 int id = acquire_reference_state(env, insn_idx);
4341
4342 if (id < 0)
4343 return id;
4344 /* For mark_ptr_or_null_reg() */
4345 regs[BPF_REG_0].id = id;
4346 /* For release_reference() */
4347 regs[BPF_REG_0].ref_obj_id = id;
4348 }
1b986589 4349
849fa506
YS
4350 do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
4351
61bd5218 4352 err = check_map_func_compatibility(env, meta.map_ptr, func_id);
35578d79
KX
4353 if (err)
4354 return err;
04fd61ab 4355
c195651e
YS
4356 if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
4357 const char *err_str;
4358
4359#ifdef CONFIG_PERF_EVENTS
4360 err = get_callchain_buffers(sysctl_perf_event_max_stack);
4361 err_str = "cannot get callchain buffer for func %s#%d\n";
4362#else
4363 err = -ENOTSUPP;
4364 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
4365#endif
4366 if (err) {
4367 verbose(env, err_str, func_id_name(func_id), func_id);
4368 return err;
4369 }
4370
4371 env->prog->has_callchain_buf = true;
4372 }
4373
969bf05e
AS
4374 if (changes_data)
4375 clear_all_pkt_pointers(env);
4376 return 0;
4377}
4378
b03c9f9f
EC
4379static bool signed_add_overflows(s64 a, s64 b)
4380{
4381 /* Do the add in u64, where overflow is well-defined */
4382 s64 res = (s64)((u64)a + (u64)b);
4383
4384 if (b < 0)
4385 return res > a;
4386 return res < a;
4387}
4388
4389static bool signed_sub_overflows(s64 a, s64 b)
4390{
4391 /* Do the sub in u64, where overflow is well-defined */
4392 s64 res = (s64)((u64)a - (u64)b);
4393
4394 if (b < 0)
4395 return res < a;
4396 return res > a;
969bf05e
AS
4397}
4398
bb7f0f98
AS
4399static bool check_reg_sane_offset(struct bpf_verifier_env *env,
4400 const struct bpf_reg_state *reg,
4401 enum bpf_reg_type type)
4402{
4403 bool known = tnum_is_const(reg->var_off);
4404 s64 val = reg->var_off.value;
4405 s64 smin = reg->smin_value;
4406
4407 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
4408 verbose(env, "math between %s pointer and %lld is not allowed\n",
4409 reg_type_str[type], val);
4410 return false;
4411 }
4412
4413 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
4414 verbose(env, "%s pointer offset %d is not allowed\n",
4415 reg_type_str[type], reg->off);
4416 return false;
4417 }
4418
4419 if (smin == S64_MIN) {
4420 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
4421 reg_type_str[type]);
4422 return false;
4423 }
4424
4425 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
4426 verbose(env, "value %lld makes %s pointer be out of bounds\n",
4427 smin, reg_type_str[type]);
4428 return false;
4429 }
4430
4431 return true;
4432}
4433
979d63d5
DB
4434static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
4435{
4436 return &env->insn_aux_data[env->insn_idx];
4437}
4438
4439static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
4440 u32 *ptr_limit, u8 opcode, bool off_is_neg)
4441{
4442 bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
4443 (opcode == BPF_SUB && !off_is_neg);
4444 u32 off;
4445
4446 switch (ptr_reg->type) {
4447 case PTR_TO_STACK:
088ec26d
AI
4448 /* Indirect variable offset stack access is prohibited in
4449 * unprivileged mode so it's not handled here.
4450 */
979d63d5
DB
4451 off = ptr_reg->off + ptr_reg->var_off.value;
4452 if (mask_to_left)
4453 *ptr_limit = MAX_BPF_STACK + off;
4454 else
4455 *ptr_limit = -off;
4456 return 0;
4457 case PTR_TO_MAP_VALUE:
4458 if (mask_to_left) {
4459 *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
4460 } else {
4461 off = ptr_reg->smin_value + ptr_reg->off;
4462 *ptr_limit = ptr_reg->map_ptr->value_size - off;
4463 }
4464 return 0;
4465 default:
4466 return -EINVAL;
4467 }
4468}
4469
d3bd7413
DB
4470static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
4471 const struct bpf_insn *insn)
4472{
4473 return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K;
4474}
4475
4476static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
4477 u32 alu_state, u32 alu_limit)
4478{
4479 /* If we arrived here from different branches with different
4480 * state or limits to sanitize, then this won't work.
4481 */
4482 if (aux->alu_state &&
4483 (aux->alu_state != alu_state ||
4484 aux->alu_limit != alu_limit))
4485 return -EACCES;
4486
4487 /* Corresponding fixup done in fixup_bpf_calls(). */
4488 aux->alu_state = alu_state;
4489 aux->alu_limit = alu_limit;
4490 return 0;
4491}
4492
4493static int sanitize_val_alu(struct bpf_verifier_env *env,
4494 struct bpf_insn *insn)
4495{
4496 struct bpf_insn_aux_data *aux = cur_aux(env);
4497
4498 if (can_skip_alu_sanitation(env, insn))
4499 return 0;
4500
4501 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
4502}
4503
979d63d5
DB
4504static int sanitize_ptr_alu(struct bpf_verifier_env *env,
4505 struct bpf_insn *insn,
4506 const struct bpf_reg_state *ptr_reg,
4507 struct bpf_reg_state *dst_reg,
4508 bool off_is_neg)
4509{
4510 struct bpf_verifier_state *vstate = env->cur_state;
4511 struct bpf_insn_aux_data *aux = cur_aux(env);
4512 bool ptr_is_dst_reg = ptr_reg == dst_reg;
4513 u8 opcode = BPF_OP(insn->code);
4514 u32 alu_state, alu_limit;
4515 struct bpf_reg_state tmp;
4516 bool ret;
4517
d3bd7413 4518 if (can_skip_alu_sanitation(env, insn))
979d63d5
DB
4519 return 0;
4520
4521 /* We already marked aux for masking from non-speculative
4522 * paths, thus we got here in the first place. We only care
4523 * to explore bad access from here.
4524 */
4525 if (vstate->speculative)
4526 goto do_sim;
4527
4528 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
4529 alu_state |= ptr_is_dst_reg ?
4530 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
4531
4532 if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
4533 return 0;
d3bd7413 4534 if (update_alu_sanitation_state(aux, alu_state, alu_limit))
979d63d5 4535 return -EACCES;
979d63d5
DB
4536do_sim:
4537 /* Simulate and find potential out-of-bounds access under
4538 * speculative execution from truncation as a result of
4539 * masking when off was not within expected range. If off
4540 * sits in dst, then we temporarily need to move ptr there
4541 * to simulate dst (== 0) +/-= ptr. Needed, for example,
4542 * for cases where we use K-based arithmetic in one direction
4543 * and truncated reg-based in the other in order to explore
4544 * bad access.
4545 */
4546 if (!ptr_is_dst_reg) {
4547 tmp = *dst_reg;
4548 *dst_reg = *ptr_reg;
4549 }
4550 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
0803278b 4551 if (!ptr_is_dst_reg && ret)
979d63d5
DB
4552 *dst_reg = tmp;
4553 return !ret ? -EFAULT : 0;
4554}
4555
f1174f77 4556/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
f1174f77
EC
4557 * Caller should also handle BPF_MOV case separately.
4558 * If we return -EACCES, caller may want to try again treating pointer as a
4559 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks.
4560 */
4561static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
4562 struct bpf_insn *insn,
4563 const struct bpf_reg_state *ptr_reg,
4564 const struct bpf_reg_state *off_reg)
969bf05e 4565{
f4d7e40a
AS
4566 struct bpf_verifier_state *vstate = env->cur_state;
4567 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4568 struct bpf_reg_state *regs = state->regs, *dst_reg;
f1174f77 4569 bool known = tnum_is_const(off_reg->var_off);
b03c9f9f
EC
4570 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value,
4571 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
4572 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
4573 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
9d7eceed 4574 u32 dst = insn->dst_reg, src = insn->src_reg;
969bf05e 4575 u8 opcode = BPF_OP(insn->code);
979d63d5 4576 int ret;
969bf05e 4577
f1174f77 4578 dst_reg = &regs[dst];
969bf05e 4579
6f16101e
DB
4580 if ((known && (smin_val != smax_val || umin_val != umax_val)) ||
4581 smin_val > smax_val || umin_val > umax_val) {
4582 /* Taint dst register if offset had invalid bounds derived from
4583 * e.g. dead branches.
4584 */
4585 __mark_reg_unknown(dst_reg);
4586 return 0;
f1174f77
EC
4587 }
4588
4589 if (BPF_CLASS(insn->code) != BPF_ALU64) {
4590 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
82abbf8d
AS
4591 verbose(env,
4592 "R%d 32-bit pointer arithmetic prohibited\n",
4593 dst);
f1174f77 4594 return -EACCES;
969bf05e
AS
4595 }
4596
aad2eeaf
JS
4597 switch (ptr_reg->type) {
4598 case PTR_TO_MAP_VALUE_OR_NULL:
4599 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
4600 dst, reg_type_str[ptr_reg->type]);
f1174f77 4601 return -EACCES;
aad2eeaf
JS
4602 case CONST_PTR_TO_MAP:
4603 case PTR_TO_PACKET_END:
c64b7983
JS
4604 case PTR_TO_SOCKET:
4605 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
4606 case PTR_TO_SOCK_COMMON:
4607 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
4608 case PTR_TO_TCP_SOCK:
4609 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 4610 case PTR_TO_XDP_SOCK:
aad2eeaf
JS
4611 verbose(env, "R%d pointer arithmetic on %s prohibited\n",
4612 dst, reg_type_str[ptr_reg->type]);
f1174f77 4613 return -EACCES;
9d7eceed
DB
4614 case PTR_TO_MAP_VALUE:
4615 if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
4616 verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
4617 off_reg == dst_reg ? dst : src);
4618 return -EACCES;
4619 }
4620 /* fall-through */
aad2eeaf
JS
4621 default:
4622 break;
f1174f77
EC
4623 }
4624
4625 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
4626 * The id may be overwritten later if we create a new variable offset.
969bf05e 4627 */
f1174f77
EC
4628 dst_reg->type = ptr_reg->type;
4629 dst_reg->id = ptr_reg->id;
969bf05e 4630
bb7f0f98
AS
4631 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
4632 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
4633 return -EINVAL;
4634
f1174f77
EC
4635 switch (opcode) {
4636 case BPF_ADD:
979d63d5
DB
4637 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4638 if (ret < 0) {
4639 verbose(env, "R%d tried to add from different maps or paths\n", dst);
4640 return ret;
4641 }
f1174f77
EC
4642 /* We can take a fixed offset as long as it doesn't overflow
4643 * the s32 'off' field
969bf05e 4644 */
b03c9f9f
EC
4645 if (known && (ptr_reg->off + smin_val ==
4646 (s64)(s32)(ptr_reg->off + smin_val))) {
f1174f77 4647 /* pointer += K. Accumulate it into fixed offset */
b03c9f9f
EC
4648 dst_reg->smin_value = smin_ptr;
4649 dst_reg->smax_value = smax_ptr;
4650 dst_reg->umin_value = umin_ptr;
4651 dst_reg->umax_value = umax_ptr;
f1174f77 4652 dst_reg->var_off = ptr_reg->var_off;
b03c9f9f 4653 dst_reg->off = ptr_reg->off + smin_val;
0962590e 4654 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
4655 break;
4656 }
f1174f77
EC
4657 /* A new variable offset is created. Note that off_reg->off
4658 * == 0, since it's a scalar.
4659 * dst_reg gets the pointer type and since some positive
4660 * integer value was added to the pointer, give it a new 'id'
4661 * if it's a PTR_TO_PACKET.
4662 * this creates a new 'base' pointer, off_reg (variable) gets
4663 * added into the variable offset, and we copy the fixed offset
4664 * from ptr_reg.
969bf05e 4665 */
b03c9f9f
EC
4666 if (signed_add_overflows(smin_ptr, smin_val) ||
4667 signed_add_overflows(smax_ptr, smax_val)) {
4668 dst_reg->smin_value = S64_MIN;
4669 dst_reg->smax_value = S64_MAX;
4670 } else {
4671 dst_reg->smin_value = smin_ptr + smin_val;
4672 dst_reg->smax_value = smax_ptr + smax_val;
4673 }
4674 if (umin_ptr + umin_val < umin_ptr ||
4675 umax_ptr + umax_val < umax_ptr) {
4676 dst_reg->umin_value = 0;
4677 dst_reg->umax_value = U64_MAX;
4678 } else {
4679 dst_reg->umin_value = umin_ptr + umin_val;
4680 dst_reg->umax_value = umax_ptr + umax_val;
4681 }
f1174f77
EC
4682 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
4683 dst_reg->off = ptr_reg->off;
0962590e 4684 dst_reg->raw = ptr_reg->raw;
de8f3a83 4685 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
4686 dst_reg->id = ++env->id_gen;
4687 /* something was added to pkt_ptr, set range to zero */
0962590e 4688 dst_reg->raw = 0;
f1174f77
EC
4689 }
4690 break;
4691 case BPF_SUB:
979d63d5
DB
4692 ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
4693 if (ret < 0) {
4694 verbose(env, "R%d tried to sub from different maps or paths\n", dst);
4695 return ret;
4696 }
f1174f77
EC
4697 if (dst_reg == off_reg) {
4698 /* scalar -= pointer. Creates an unknown scalar */
82abbf8d
AS
4699 verbose(env, "R%d tried to subtract pointer from scalar\n",
4700 dst);
f1174f77
EC
4701 return -EACCES;
4702 }
4703 /* We don't allow subtraction from FP, because (according to
4704 * test_verifier.c test "invalid fp arithmetic", JITs might not
4705 * be able to deal with it.
969bf05e 4706 */
f1174f77 4707 if (ptr_reg->type == PTR_TO_STACK) {
82abbf8d
AS
4708 verbose(env, "R%d subtraction from stack pointer prohibited\n",
4709 dst);
f1174f77
EC
4710 return -EACCES;
4711 }
b03c9f9f
EC
4712 if (known && (ptr_reg->off - smin_val ==
4713 (s64)(s32)(ptr_reg->off - smin_val))) {
f1174f77 4714 /* pointer -= K. Subtract it from fixed offset */
b03c9f9f
EC
4715 dst_reg->smin_value = smin_ptr;
4716 dst_reg->smax_value = smax_ptr;
4717 dst_reg->umin_value = umin_ptr;
4718 dst_reg->umax_value = umax_ptr;
f1174f77
EC
4719 dst_reg->var_off = ptr_reg->var_off;
4720 dst_reg->id = ptr_reg->id;
b03c9f9f 4721 dst_reg->off = ptr_reg->off - smin_val;
0962590e 4722 dst_reg->raw = ptr_reg->raw;
f1174f77
EC
4723 break;
4724 }
f1174f77
EC
4725 /* A new variable offset is created. If the subtrahend is known
4726 * nonnegative, then any reg->range we had before is still good.
969bf05e 4727 */
b03c9f9f
EC
4728 if (signed_sub_overflows(smin_ptr, smax_val) ||
4729 signed_sub_overflows(smax_ptr, smin_val)) {
4730 /* Overflow possible, we know nothing */
4731 dst_reg->smin_value = S64_MIN;
4732 dst_reg->smax_value = S64_MAX;
4733 } else {
4734 dst_reg->smin_value = smin_ptr - smax_val;
4735 dst_reg->smax_value = smax_ptr - smin_val;
4736 }
4737 if (umin_ptr < umax_val) {
4738 /* Overflow possible, we know nothing */
4739 dst_reg->umin_value = 0;
4740 dst_reg->umax_value = U64_MAX;
4741 } else {
4742 /* Cannot overflow (as long as bounds are consistent) */
4743 dst_reg->umin_value = umin_ptr - umax_val;
4744 dst_reg->umax_value = umax_ptr - umin_val;
4745 }
f1174f77
EC
4746 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
4747 dst_reg->off = ptr_reg->off;
0962590e 4748 dst_reg->raw = ptr_reg->raw;
de8f3a83 4749 if (reg_is_pkt_pointer(ptr_reg)) {
f1174f77
EC
4750 dst_reg->id = ++env->id_gen;
4751 /* something was added to pkt_ptr, set range to zero */
b03c9f9f 4752 if (smin_val < 0)
0962590e 4753 dst_reg->raw = 0;
43188702 4754 }
f1174f77
EC
4755 break;
4756 case BPF_AND:
4757 case BPF_OR:
4758 case BPF_XOR:
82abbf8d
AS
4759 /* bitwise ops on pointers are troublesome, prohibit. */
4760 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
4761 dst, bpf_alu_string[opcode >> 4]);
f1174f77
EC
4762 return -EACCES;
4763 default:
4764 /* other operators (e.g. MUL,LSH) produce non-pointer results */
82abbf8d
AS
4765 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
4766 dst, bpf_alu_string[opcode >> 4]);
f1174f77 4767 return -EACCES;
43188702
JF
4768 }
4769
bb7f0f98
AS
4770 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
4771 return -EINVAL;
4772
b03c9f9f
EC
4773 __update_reg_bounds(dst_reg);
4774 __reg_deduce_bounds(dst_reg);
4775 __reg_bound_offset(dst_reg);
0d6303db
DB
4776
4777 /* For unprivileged we require that resulting offset must be in bounds
4778 * in order to be able to sanitize access later on.
4779 */
e4298d25
DB
4780 if (!env->allow_ptr_leaks) {
4781 if (dst_reg->type == PTR_TO_MAP_VALUE &&
4782 check_map_access(env, dst, dst_reg->off, 1, false)) {
4783 verbose(env, "R%d pointer arithmetic of map value goes out of range, "
4784 "prohibited for !root\n", dst);
4785 return -EACCES;
4786 } else if (dst_reg->type == PTR_TO_STACK &&
4787 check_stack_access(env, dst_reg, dst_reg->off +
4788 dst_reg->var_off.value, 1)) {
4789 verbose(env, "R%d stack pointer arithmetic goes out of range, "
4790 "prohibited for !root\n", dst);
4791 return -EACCES;
4792 }
0d6303db
DB
4793 }
4794
43188702
JF
4795 return 0;
4796}
4797
468f6eaf
JH
4798/* WARNING: This function does calculations on 64-bit values, but the actual
4799 * execution may occur on 32-bit values. Therefore, things like bitshifts
4800 * need extra checks in the 32-bit case.
4801 */
f1174f77
EC
4802static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
4803 struct bpf_insn *insn,
4804 struct bpf_reg_state *dst_reg,
4805 struct bpf_reg_state src_reg)
969bf05e 4806{
638f5b90 4807 struct bpf_reg_state *regs = cur_regs(env);
48461135 4808 u8 opcode = BPF_OP(insn->code);
f1174f77 4809 bool src_known, dst_known;
b03c9f9f
EC
4810 s64 smin_val, smax_val;
4811 u64 umin_val, umax_val;
468f6eaf 4812 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
d3bd7413
DB
4813 u32 dst = insn->dst_reg;
4814 int ret;
48461135 4815
b799207e
JH
4816 if (insn_bitness == 32) {
4817 /* Relevant for 32-bit RSH: Information can propagate towards
4818 * LSB, so it isn't sufficient to only truncate the output to
4819 * 32 bits.
4820 */
4821 coerce_reg_to_size(dst_reg, 4);
4822 coerce_reg_to_size(&src_reg, 4);
4823 }
4824
b03c9f9f
EC
4825 smin_val = src_reg.smin_value;
4826 smax_val = src_reg.smax_value;
4827 umin_val = src_reg.umin_value;
4828 umax_val = src_reg.umax_value;
f1174f77
EC
4829 src_known = tnum_is_const(src_reg.var_off);
4830 dst_known = tnum_is_const(dst_reg->var_off);
f23cc643 4831
6f16101e
DB
4832 if ((src_known && (smin_val != smax_val || umin_val != umax_val)) ||
4833 smin_val > smax_val || umin_val > umax_val) {
4834 /* Taint dst register if offset had invalid bounds derived from
4835 * e.g. dead branches.
4836 */
4837 __mark_reg_unknown(dst_reg);
4838 return 0;
4839 }
4840
bb7f0f98
AS
4841 if (!src_known &&
4842 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
4843 __mark_reg_unknown(dst_reg);
4844 return 0;
4845 }
4846
48461135
JB
4847 switch (opcode) {
4848 case BPF_ADD:
d3bd7413
DB
4849 ret = sanitize_val_alu(env, insn);
4850 if (ret < 0) {
4851 verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
4852 return ret;
4853 }
b03c9f9f
EC
4854 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
4855 signed_add_overflows(dst_reg->smax_value, smax_val)) {
4856 dst_reg->smin_value = S64_MIN;
4857 dst_reg->smax_value = S64_MAX;
4858 } else {
4859 dst_reg->smin_value += smin_val;
4860 dst_reg->smax_value += smax_val;
4861 }
4862 if (dst_reg->umin_value + umin_val < umin_val ||
4863 dst_reg->umax_value + umax_val < umax_val) {
4864 dst_reg->umin_value = 0;
4865 dst_reg->umax_value = U64_MAX;
4866 } else {
4867 dst_reg->umin_value += umin_val;
4868 dst_reg->umax_value += umax_val;
4869 }
f1174f77 4870 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
48461135
JB
4871 break;
4872 case BPF_SUB:
d3bd7413
DB
4873 ret = sanitize_val_alu(env, insn);
4874 if (ret < 0) {
4875 verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
4876 return ret;
4877 }
b03c9f9f
EC
4878 if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
4879 signed_sub_overflows(dst_reg->smax_value, smin_val)) {
4880 /* Overflow possible, we know nothing */
4881 dst_reg->smin_value = S64_MIN;
4882 dst_reg->smax_value = S64_MAX;
4883 } else {
4884 dst_reg->smin_value -= smax_val;
4885 dst_reg->smax_value -= smin_val;
4886 }
4887 if (dst_reg->umin_value < umax_val) {
4888 /* Overflow possible, we know nothing */
4889 dst_reg->umin_value = 0;
4890 dst_reg->umax_value = U64_MAX;
4891 } else {
4892 /* Cannot overflow (as long as bounds are consistent) */
4893 dst_reg->umin_value -= umax_val;
4894 dst_reg->umax_value -= umin_val;
4895 }
f1174f77 4896 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
48461135
JB
4897 break;
4898 case BPF_MUL:
b03c9f9f
EC
4899 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
4900 if (smin_val < 0 || dst_reg->smin_value < 0) {
f1174f77 4901 /* Ain't nobody got time to multiply that sign */
b03c9f9f
EC
4902 __mark_reg_unbounded(dst_reg);
4903 __update_reg_bounds(dst_reg);
f1174f77
EC
4904 break;
4905 }
b03c9f9f
EC
4906 /* Both values are positive, so we can work with unsigned and
4907 * copy the result to signed (unless it exceeds S64_MAX).
f1174f77 4908 */
b03c9f9f
EC
4909 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) {
4910 /* Potential overflow, we know nothing */
4911 __mark_reg_unbounded(dst_reg);
4912 /* (except what we can learn from the var_off) */
4913 __update_reg_bounds(dst_reg);
4914 break;
4915 }
4916 dst_reg->umin_value *= umin_val;
4917 dst_reg->umax_value *= umax_val;
4918 if (dst_reg->umax_value > S64_MAX) {
4919 /* Overflow possible, we know nothing */
4920 dst_reg->smin_value = S64_MIN;
4921 dst_reg->smax_value = S64_MAX;
4922 } else {
4923 dst_reg->smin_value = dst_reg->umin_value;
4924 dst_reg->smax_value = dst_reg->umax_value;
4925 }
48461135
JB
4926 break;
4927 case BPF_AND:
f1174f77 4928 if (src_known && dst_known) {
b03c9f9f
EC
4929 __mark_reg_known(dst_reg, dst_reg->var_off.value &
4930 src_reg.var_off.value);
f1174f77
EC
4931 break;
4932 }
b03c9f9f
EC
4933 /* We get our minimum from the var_off, since that's inherently
4934 * bitwise. Our maximum is the minimum of the operands' maxima.
f23cc643 4935 */
f1174f77 4936 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
b03c9f9f
EC
4937 dst_reg->umin_value = dst_reg->var_off.value;
4938 dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
4939 if (dst_reg->smin_value < 0 || smin_val < 0) {
4940 /* Lose signed bounds when ANDing negative numbers,
4941 * ain't nobody got time for that.
4942 */
4943 dst_reg->smin_value = S64_MIN;
4944 dst_reg->smax_value = S64_MAX;
4945 } else {
4946 /* ANDing two positives gives a positive, so safe to
4947 * cast result into s64.
4948 */
4949 dst_reg->smin_value = dst_reg->umin_value;
4950 dst_reg->smax_value = dst_reg->umax_value;
4951 }
4952 /* We may learn something more from the var_off */
4953 __update_reg_bounds(dst_reg);
f1174f77
EC
4954 break;
4955 case BPF_OR:
4956 if (src_known && dst_known) {
b03c9f9f
EC
4957 __mark_reg_known(dst_reg, dst_reg->var_off.value |
4958 src_reg.var_off.value);
f1174f77
EC
4959 break;
4960 }
b03c9f9f
EC
4961 /* We get our maximum from the var_off, and our minimum is the
4962 * maximum of the operands' minima
f1174f77
EC
4963 */
4964 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
b03c9f9f
EC
4965 dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
4966 dst_reg->umax_value = dst_reg->var_off.value |
4967 dst_reg->var_off.mask;
4968 if (dst_reg->smin_value < 0 || smin_val < 0) {
4969 /* Lose signed bounds when ORing negative numbers,
4970 * ain't nobody got time for that.
4971 */
4972 dst_reg->smin_value = S64_MIN;
4973 dst_reg->smax_value = S64_MAX;
f1174f77 4974 } else {
b03c9f9f
EC
4975 /* ORing two positives gives a positive, so safe to
4976 * cast result into s64.
4977 */
4978 dst_reg->smin_value = dst_reg->umin_value;
4979 dst_reg->smax_value = dst_reg->umax_value;
f1174f77 4980 }
b03c9f9f
EC
4981 /* We may learn something more from the var_off */
4982 __update_reg_bounds(dst_reg);
48461135
JB
4983 break;
4984 case BPF_LSH:
468f6eaf
JH
4985 if (umax_val >= insn_bitness) {
4986 /* Shifts greater than 31 or 63 are undefined.
4987 * This includes shifts by a negative number.
b03c9f9f 4988 */
61bd5218 4989 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
4990 break;
4991 }
b03c9f9f
EC
4992 /* We lose all sign bit information (except what we can pick
4993 * up from var_off)
48461135 4994 */
b03c9f9f
EC
4995 dst_reg->smin_value = S64_MIN;
4996 dst_reg->smax_value = S64_MAX;
4997 /* If we might shift our top bit out, then we know nothing */
4998 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
4999 dst_reg->umin_value = 0;
5000 dst_reg->umax_value = U64_MAX;
d1174416 5001 } else {
b03c9f9f
EC
5002 dst_reg->umin_value <<= umin_val;
5003 dst_reg->umax_value <<= umax_val;
d1174416 5004 }
afbe1a5b 5005 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
b03c9f9f
EC
5006 /* We may learn something more from the var_off */
5007 __update_reg_bounds(dst_reg);
48461135
JB
5008 break;
5009 case BPF_RSH:
468f6eaf
JH
5010 if (umax_val >= insn_bitness) {
5011 /* Shifts greater than 31 or 63 are undefined.
5012 * This includes shifts by a negative number.
b03c9f9f 5013 */
61bd5218 5014 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77
EC
5015 break;
5016 }
4374f256
EC
5017 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
5018 * be negative, then either:
5019 * 1) src_reg might be zero, so the sign bit of the result is
5020 * unknown, so we lose our signed bounds
5021 * 2) it's known negative, thus the unsigned bounds capture the
5022 * signed bounds
5023 * 3) the signed bounds cross zero, so they tell us nothing
5024 * about the result
5025 * If the value in dst_reg is known nonnegative, then again the
5026 * unsigned bounts capture the signed bounds.
5027 * Thus, in all cases it suffices to blow away our signed bounds
5028 * and rely on inferring new ones from the unsigned bounds and
5029 * var_off of the result.
5030 */
5031 dst_reg->smin_value = S64_MIN;
5032 dst_reg->smax_value = S64_MAX;
afbe1a5b 5033 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
b03c9f9f
EC
5034 dst_reg->umin_value >>= umax_val;
5035 dst_reg->umax_value >>= umin_val;
5036 /* We may learn something more from the var_off */
5037 __update_reg_bounds(dst_reg);
48461135 5038 break;
9cbe1f5a
YS
5039 case BPF_ARSH:
5040 if (umax_val >= insn_bitness) {
5041 /* Shifts greater than 31 or 63 are undefined.
5042 * This includes shifts by a negative number.
5043 */
5044 mark_reg_unknown(env, regs, insn->dst_reg);
5045 break;
5046 }
5047
5048 /* Upon reaching here, src_known is true and
5049 * umax_val is equal to umin_val.
5050 */
5051 dst_reg->smin_value >>= umin_val;
5052 dst_reg->smax_value >>= umin_val;
5053 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
5054
5055 /* blow away the dst_reg umin_value/umax_value and rely on
5056 * dst_reg var_off to refine the result.
5057 */
5058 dst_reg->umin_value = 0;
5059 dst_reg->umax_value = U64_MAX;
5060 __update_reg_bounds(dst_reg);
5061 break;
48461135 5062 default:
61bd5218 5063 mark_reg_unknown(env, regs, insn->dst_reg);
48461135
JB
5064 break;
5065 }
5066
468f6eaf
JH
5067 if (BPF_CLASS(insn->code) != BPF_ALU64) {
5068 /* 32-bit ALU ops are (32,32)->32 */
5069 coerce_reg_to_size(dst_reg, 4);
468f6eaf
JH
5070 }
5071
b03c9f9f
EC
5072 __reg_deduce_bounds(dst_reg);
5073 __reg_bound_offset(dst_reg);
f1174f77
EC
5074 return 0;
5075}
5076
5077/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
5078 * and var_off.
5079 */
5080static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
5081 struct bpf_insn *insn)
5082{
f4d7e40a
AS
5083 struct bpf_verifier_state *vstate = env->cur_state;
5084 struct bpf_func_state *state = vstate->frame[vstate->curframe];
5085 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
f1174f77
EC
5086 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
5087 u8 opcode = BPF_OP(insn->code);
b5dc0163 5088 int err;
f1174f77
EC
5089
5090 dst_reg = &regs[insn->dst_reg];
f1174f77
EC
5091 src_reg = NULL;
5092 if (dst_reg->type != SCALAR_VALUE)
5093 ptr_reg = dst_reg;
5094 if (BPF_SRC(insn->code) == BPF_X) {
5095 src_reg = &regs[insn->src_reg];
f1174f77
EC
5096 if (src_reg->type != SCALAR_VALUE) {
5097 if (dst_reg->type != SCALAR_VALUE) {
5098 /* Combining two pointers by any ALU op yields
82abbf8d
AS
5099 * an arbitrary scalar. Disallow all math except
5100 * pointer subtraction
f1174f77 5101 */
dd066823 5102 if (opcode == BPF_SUB && env->allow_ptr_leaks) {
82abbf8d
AS
5103 mark_reg_unknown(env, regs, insn->dst_reg);
5104 return 0;
f1174f77 5105 }
82abbf8d
AS
5106 verbose(env, "R%d pointer %s pointer prohibited\n",
5107 insn->dst_reg,
5108 bpf_alu_string[opcode >> 4]);
5109 return -EACCES;
f1174f77
EC
5110 } else {
5111 /* scalar += pointer
5112 * This is legal, but we have to reverse our
5113 * src/dest handling in computing the range
5114 */
b5dc0163
AS
5115 err = mark_chain_precision(env, insn->dst_reg);
5116 if (err)
5117 return err;
82abbf8d
AS
5118 return adjust_ptr_min_max_vals(env, insn,
5119 src_reg, dst_reg);
f1174f77
EC
5120 }
5121 } else if (ptr_reg) {
5122 /* pointer += scalar */
b5dc0163
AS
5123 err = mark_chain_precision(env, insn->src_reg);
5124 if (err)
5125 return err;
82abbf8d
AS
5126 return adjust_ptr_min_max_vals(env, insn,
5127 dst_reg, src_reg);
f1174f77
EC
5128 }
5129 } else {
5130 /* Pretend the src is a reg with a known value, since we only
5131 * need to be able to read from this state.
5132 */
5133 off_reg.type = SCALAR_VALUE;
b03c9f9f 5134 __mark_reg_known(&off_reg, insn->imm);
f1174f77 5135 src_reg = &off_reg;
82abbf8d
AS
5136 if (ptr_reg) /* pointer += K */
5137 return adjust_ptr_min_max_vals(env, insn,
5138 ptr_reg, src_reg);
f1174f77
EC
5139 }
5140
5141 /* Got here implies adding two SCALAR_VALUEs */
5142 if (WARN_ON_ONCE(ptr_reg)) {
f4d7e40a 5143 print_verifier_state(env, state);
61bd5218 5144 verbose(env, "verifier internal error: unexpected ptr_reg\n");
f1174f77
EC
5145 return -EINVAL;
5146 }
5147 if (WARN_ON(!src_reg)) {
f4d7e40a 5148 print_verifier_state(env, state);
61bd5218 5149 verbose(env, "verifier internal error: no src_reg\n");
f1174f77
EC
5150 return -EINVAL;
5151 }
5152 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg);
48461135
JB
5153}
5154
17a52670 5155/* check validity of 32-bit and 64-bit arithmetic operations */
58e2af8b 5156static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 5157{
638f5b90 5158 struct bpf_reg_state *regs = cur_regs(env);
17a52670
AS
5159 u8 opcode = BPF_OP(insn->code);
5160 int err;
5161
5162 if (opcode == BPF_END || opcode == BPF_NEG) {
5163 if (opcode == BPF_NEG) {
5164 if (BPF_SRC(insn->code) != 0 ||
5165 insn->src_reg != BPF_REG_0 ||
5166 insn->off != 0 || insn->imm != 0) {
61bd5218 5167 verbose(env, "BPF_NEG uses reserved fields\n");
17a52670
AS
5168 return -EINVAL;
5169 }
5170 } else {
5171 if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
e67b8a68
EC
5172 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
5173 BPF_CLASS(insn->code) == BPF_ALU64) {
61bd5218 5174 verbose(env, "BPF_END uses reserved fields\n");
17a52670
AS
5175 return -EINVAL;
5176 }
5177 }
5178
5179 /* check src operand */
dc503a8a 5180 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
5181 if (err)
5182 return err;
5183
1be7f75d 5184 if (is_pointer_value(env, insn->dst_reg)) {
61bd5218 5185 verbose(env, "R%d pointer arithmetic prohibited\n",
1be7f75d
AS
5186 insn->dst_reg);
5187 return -EACCES;
5188 }
5189
17a52670 5190 /* check dest operand */
dc503a8a 5191 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
5192 if (err)
5193 return err;
5194
5195 } else if (opcode == BPF_MOV) {
5196
5197 if (BPF_SRC(insn->code) == BPF_X) {
5198 if (insn->imm != 0 || insn->off != 0) {
61bd5218 5199 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
5200 return -EINVAL;
5201 }
5202
5203 /* check src operand */
dc503a8a 5204 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
5205 if (err)
5206 return err;
5207 } else {
5208 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 5209 verbose(env, "BPF_MOV uses reserved fields\n");
17a52670
AS
5210 return -EINVAL;
5211 }
5212 }
5213
fbeb1603
AF
5214 /* check dest operand, mark as required later */
5215 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
5216 if (err)
5217 return err;
5218
5219 if (BPF_SRC(insn->code) == BPF_X) {
e434b8cd
JW
5220 struct bpf_reg_state *src_reg = regs + insn->src_reg;
5221 struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
5222
17a52670
AS
5223 if (BPF_CLASS(insn->code) == BPF_ALU64) {
5224 /* case: R1 = R2
5225 * copy register state to dest reg
5226 */
e434b8cd
JW
5227 *dst_reg = *src_reg;
5228 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 5229 dst_reg->subreg_def = DEF_NOT_SUBREG;
17a52670 5230 } else {
f1174f77 5231 /* R1 = (u32) R2 */
1be7f75d 5232 if (is_pointer_value(env, insn->src_reg)) {
61bd5218
JK
5233 verbose(env,
5234 "R%d partial copy of pointer\n",
1be7f75d
AS
5235 insn->src_reg);
5236 return -EACCES;
e434b8cd
JW
5237 } else if (src_reg->type == SCALAR_VALUE) {
5238 *dst_reg = *src_reg;
5239 dst_reg->live |= REG_LIVE_WRITTEN;
5327ed3d 5240 dst_reg->subreg_def = env->insn_idx + 1;
e434b8cd
JW
5241 } else {
5242 mark_reg_unknown(env, regs,
5243 insn->dst_reg);
1be7f75d 5244 }
e434b8cd 5245 coerce_reg_to_size(dst_reg, 4);
17a52670
AS
5246 }
5247 } else {
5248 /* case: R = imm
5249 * remember the value we stored into this reg
5250 */
fbeb1603
AF
5251 /* clear any state __mark_reg_known doesn't set */
5252 mark_reg_unknown(env, regs, insn->dst_reg);
f1174f77 5253 regs[insn->dst_reg].type = SCALAR_VALUE;
95a762e2
JH
5254 if (BPF_CLASS(insn->code) == BPF_ALU64) {
5255 __mark_reg_known(regs + insn->dst_reg,
5256 insn->imm);
5257 } else {
5258 __mark_reg_known(regs + insn->dst_reg,
5259 (u32)insn->imm);
5260 }
17a52670
AS
5261 }
5262
5263 } else if (opcode > BPF_END) {
61bd5218 5264 verbose(env, "invalid BPF_ALU opcode %x\n", opcode);
17a52670
AS
5265 return -EINVAL;
5266
5267 } else { /* all other ALU ops: and, sub, xor, add, ... */
5268
17a52670
AS
5269 if (BPF_SRC(insn->code) == BPF_X) {
5270 if (insn->imm != 0 || insn->off != 0) {
61bd5218 5271 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
5272 return -EINVAL;
5273 }
5274 /* check src1 operand */
dc503a8a 5275 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
5276 if (err)
5277 return err;
5278 } else {
5279 if (insn->src_reg != BPF_REG_0 || insn->off != 0) {
61bd5218 5280 verbose(env, "BPF_ALU uses reserved fields\n");
17a52670
AS
5281 return -EINVAL;
5282 }
5283 }
5284
5285 /* check src2 operand */
dc503a8a 5286 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
5287 if (err)
5288 return err;
5289
5290 if ((opcode == BPF_MOD || opcode == BPF_DIV) &&
5291 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) {
61bd5218 5292 verbose(env, "div by zero\n");
17a52670
AS
5293 return -EINVAL;
5294 }
5295
229394e8
RV
5296 if ((opcode == BPF_LSH || opcode == BPF_RSH ||
5297 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
5298 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
5299
5300 if (insn->imm < 0 || insn->imm >= size) {
61bd5218 5301 verbose(env, "invalid shift %d\n", insn->imm);
229394e8
RV
5302 return -EINVAL;
5303 }
5304 }
5305
1a0dc1ac 5306 /* check dest operand */
dc503a8a 5307 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
1a0dc1ac
AS
5308 if (err)
5309 return err;
5310
f1174f77 5311 return adjust_reg_min_max_vals(env, insn);
17a52670
AS
5312 }
5313
5314 return 0;
5315}
5316
c6a9efa1
PC
5317static void __find_good_pkt_pointers(struct bpf_func_state *state,
5318 struct bpf_reg_state *dst_reg,
5319 enum bpf_reg_type type, u16 new_range)
5320{
5321 struct bpf_reg_state *reg;
5322 int i;
5323
5324 for (i = 0; i < MAX_BPF_REG; i++) {
5325 reg = &state->regs[i];
5326 if (reg->type == type && reg->id == dst_reg->id)
5327 /* keep the maximum range already checked */
5328 reg->range = max(reg->range, new_range);
5329 }
5330
5331 bpf_for_each_spilled_reg(i, state, reg) {
5332 if (!reg)
5333 continue;
5334 if (reg->type == type && reg->id == dst_reg->id)
5335 reg->range = max(reg->range, new_range);
5336 }
5337}
5338
f4d7e40a 5339static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
de8f3a83 5340 struct bpf_reg_state *dst_reg,
f8ddadc4 5341 enum bpf_reg_type type,
fb2a311a 5342 bool range_right_open)
969bf05e 5343{
fb2a311a 5344 u16 new_range;
c6a9efa1 5345 int i;
2d2be8ca 5346
fb2a311a
DB
5347 if (dst_reg->off < 0 ||
5348 (dst_reg->off == 0 && range_right_open))
f1174f77
EC
5349 /* This doesn't give us any range */
5350 return;
5351
b03c9f9f
EC
5352 if (dst_reg->umax_value > MAX_PACKET_OFF ||
5353 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF)
f1174f77
EC
5354 /* Risk of overflow. For instance, ptr + (1<<63) may be less
5355 * than pkt_end, but that's because it's also less than pkt.
5356 */
5357 return;
5358
fb2a311a
DB
5359 new_range = dst_reg->off;
5360 if (range_right_open)
5361 new_range--;
5362
5363 /* Examples for register markings:
2d2be8ca 5364 *
fb2a311a 5365 * pkt_data in dst register:
2d2be8ca
DB
5366 *
5367 * r2 = r3;
5368 * r2 += 8;
5369 * if (r2 > pkt_end) goto <handle exception>
5370 * <access okay>
5371 *
b4e432f1
DB
5372 * r2 = r3;
5373 * r2 += 8;
5374 * if (r2 < pkt_end) goto <access okay>
5375 * <handle exception>
5376 *
2d2be8ca
DB
5377 * Where:
5378 * r2 == dst_reg, pkt_end == src_reg
5379 * r2=pkt(id=n,off=8,r=0)
5380 * r3=pkt(id=n,off=0,r=0)
5381 *
fb2a311a 5382 * pkt_data in src register:
2d2be8ca
DB
5383 *
5384 * r2 = r3;
5385 * r2 += 8;
5386 * if (pkt_end >= r2) goto <access okay>
5387 * <handle exception>
5388 *
b4e432f1
DB
5389 * r2 = r3;
5390 * r2 += 8;
5391 * if (pkt_end <= r2) goto <handle exception>
5392 * <access okay>
5393 *
2d2be8ca
DB
5394 * Where:
5395 * pkt_end == dst_reg, r2 == src_reg
5396 * r2=pkt(id=n,off=8,r=0)
5397 * r3=pkt(id=n,off=0,r=0)
5398 *
5399 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
fb2a311a
DB
5400 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
5401 * and [r3, r3 + 8-1) respectively is safe to access depending on
5402 * the check.
969bf05e 5403 */
2d2be8ca 5404
f1174f77
EC
5405 /* If our ids match, then we must have the same max_value. And we
5406 * don't care about the other reg's fixed offset, since if it's too big
5407 * the range won't allow anything.
5408 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16.
5409 */
c6a9efa1
PC
5410 for (i = 0; i <= vstate->curframe; i++)
5411 __find_good_pkt_pointers(vstate->frame[i], dst_reg, type,
5412 new_range);
969bf05e
AS
5413}
5414
4f7b3e82
AS
5415/* compute branch direction of the expression "if (reg opcode val) goto target;"
5416 * and return:
5417 * 1 - branch will be taken and "goto target" will be executed
5418 * 0 - branch will not be taken and fall-through to next insn
5419 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
5420 */
092ed096
JW
5421static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
5422 bool is_jmp32)
4f7b3e82 5423{
092ed096 5424 struct bpf_reg_state reg_lo;
a72dafaf
JW
5425 s64 sval;
5426
4f7b3e82
AS
5427 if (__is_pointer_value(false, reg))
5428 return -1;
5429
092ed096
JW
5430 if (is_jmp32) {
5431 reg_lo = *reg;
5432 reg = &reg_lo;
5433 /* For JMP32, only low 32 bits are compared, coerce_reg_to_size
5434 * could truncate high bits and update umin/umax according to
5435 * information of low bits.
5436 */
5437 coerce_reg_to_size(reg, 4);
5438 /* smin/smax need special handling. For example, after coerce,
5439 * if smin_value is 0x00000000ffffffffLL, the value is -1 when
5440 * used as operand to JMP32. It is a negative number from s32's
5441 * point of view, while it is a positive number when seen as
5442 * s64. The smin/smax are kept as s64, therefore, when used with
5443 * JMP32, they need to be transformed into s32, then sign
5444 * extended back to s64.
5445 *
5446 * Also, smin/smax were copied from umin/umax. If umin/umax has
5447 * different sign bit, then min/max relationship doesn't
5448 * maintain after casting into s32, for this case, set smin/smax
5449 * to safest range.
5450 */
5451 if ((reg->umax_value ^ reg->umin_value) &
5452 (1ULL << 31)) {
5453 reg->smin_value = S32_MIN;
5454 reg->smax_value = S32_MAX;
5455 }
5456 reg->smin_value = (s64)(s32)reg->smin_value;
5457 reg->smax_value = (s64)(s32)reg->smax_value;
5458
5459 val = (u32)val;
5460 sval = (s64)(s32)val;
5461 } else {
5462 sval = (s64)val;
5463 }
a72dafaf 5464
4f7b3e82
AS
5465 switch (opcode) {
5466 case BPF_JEQ:
5467 if (tnum_is_const(reg->var_off))
5468 return !!tnum_equals_const(reg->var_off, val);
5469 break;
5470 case BPF_JNE:
5471 if (tnum_is_const(reg->var_off))
5472 return !tnum_equals_const(reg->var_off, val);
5473 break;
960ea056
JK
5474 case BPF_JSET:
5475 if ((~reg->var_off.mask & reg->var_off.value) & val)
5476 return 1;
5477 if (!((reg->var_off.mask | reg->var_off.value) & val))
5478 return 0;
5479 break;
4f7b3e82
AS
5480 case BPF_JGT:
5481 if (reg->umin_value > val)
5482 return 1;
5483 else if (reg->umax_value <= val)
5484 return 0;
5485 break;
5486 case BPF_JSGT:
a72dafaf 5487 if (reg->smin_value > sval)
4f7b3e82 5488 return 1;
a72dafaf 5489 else if (reg->smax_value < sval)
4f7b3e82
AS
5490 return 0;
5491 break;
5492 case BPF_JLT:
5493 if (reg->umax_value < val)
5494 return 1;
5495 else if (reg->umin_value >= val)
5496 return 0;
5497 break;
5498 case BPF_JSLT:
a72dafaf 5499 if (reg->smax_value < sval)
4f7b3e82 5500 return 1;
a72dafaf 5501 else if (reg->smin_value >= sval)
4f7b3e82
AS
5502 return 0;
5503 break;
5504 case BPF_JGE:
5505 if (reg->umin_value >= val)
5506 return 1;
5507 else if (reg->umax_value < val)
5508 return 0;
5509 break;
5510 case BPF_JSGE:
a72dafaf 5511 if (reg->smin_value >= sval)
4f7b3e82 5512 return 1;
a72dafaf 5513 else if (reg->smax_value < sval)
4f7b3e82
AS
5514 return 0;
5515 break;
5516 case BPF_JLE:
5517 if (reg->umax_value <= val)
5518 return 1;
5519 else if (reg->umin_value > val)
5520 return 0;
5521 break;
5522 case BPF_JSLE:
a72dafaf 5523 if (reg->smax_value <= sval)
4f7b3e82 5524 return 1;
a72dafaf 5525 else if (reg->smin_value > sval)
4f7b3e82
AS
5526 return 0;
5527 break;
5528 }
5529
5530 return -1;
5531}
5532
092ed096
JW
5533/* Generate min value of the high 32-bit from TNUM info. */
5534static u64 gen_hi_min(struct tnum var)
5535{
5536 return var.value & ~0xffffffffULL;
5537}
5538
5539/* Generate max value of the high 32-bit from TNUM info. */
5540static u64 gen_hi_max(struct tnum var)
5541{
5542 return (var.value | var.mask) & ~0xffffffffULL;
5543}
5544
5545/* Return true if VAL is compared with a s64 sign extended from s32, and they
5546 * are with the same signedness.
5547 */
5548static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg)
5549{
5550 return ((s32)sval >= 0 &&
5551 reg->smin_value >= 0 && reg->smax_value <= S32_MAX) ||
5552 ((s32)sval < 0 &&
5553 reg->smax_value <= 0 && reg->smin_value >= S32_MIN);
5554}
5555
48461135
JB
5556/* Adjusts the register min/max values in the case that the dst_reg is the
5557 * variable register that we are working on, and src_reg is a constant or we're
5558 * simply doing a BPF_K check.
f1174f77 5559 * In JEQ/JNE cases we also adjust the var_off values.
48461135
JB
5560 */
5561static void reg_set_min_max(struct bpf_reg_state *true_reg,
5562 struct bpf_reg_state *false_reg, u64 val,
092ed096 5563 u8 opcode, bool is_jmp32)
48461135 5564{
a72dafaf
JW
5565 s64 sval;
5566
f1174f77
EC
5567 /* If the dst_reg is a pointer, we can't learn anything about its
5568 * variable offset from the compare (unless src_reg were a pointer into
5569 * the same object, but we don't bother with that.
5570 * Since false_reg and true_reg have the same type by construction, we
5571 * only need to check one of them for pointerness.
5572 */
5573 if (__is_pointer_value(false, false_reg))
5574 return;
4cabc5b1 5575
092ed096
JW
5576 val = is_jmp32 ? (u32)val : val;
5577 sval = is_jmp32 ? (s64)(s32)val : (s64)val;
a72dafaf 5578
48461135
JB
5579 switch (opcode) {
5580 case BPF_JEQ:
48461135 5581 case BPF_JNE:
a72dafaf
JW
5582 {
5583 struct bpf_reg_state *reg =
5584 opcode == BPF_JEQ ? true_reg : false_reg;
5585
5586 /* For BPF_JEQ, if this is false we know nothing Jon Snow, but
5587 * if it is true we know the value for sure. Likewise for
5588 * BPF_JNE.
48461135 5589 */
092ed096
JW
5590 if (is_jmp32) {
5591 u64 old_v = reg->var_off.value;
5592 u64 hi_mask = ~0xffffffffULL;
5593
5594 reg->var_off.value = (old_v & hi_mask) | val;
5595 reg->var_off.mask &= hi_mask;
5596 } else {
5597 __mark_reg_known(reg, val);
5598 }
48461135 5599 break;
a72dafaf 5600 }
960ea056
JK
5601 case BPF_JSET:
5602 false_reg->var_off = tnum_and(false_reg->var_off,
5603 tnum_const(~val));
5604 if (is_power_of_2(val))
5605 true_reg->var_off = tnum_or(true_reg->var_off,
5606 tnum_const(val));
5607 break;
48461135 5608 case BPF_JGE:
a72dafaf
JW
5609 case BPF_JGT:
5610 {
5611 u64 false_umax = opcode == BPF_JGT ? val : val - 1;
5612 u64 true_umin = opcode == BPF_JGT ? val + 1 : val;
5613
092ed096
JW
5614 if (is_jmp32) {
5615 false_umax += gen_hi_max(false_reg->var_off);
5616 true_umin += gen_hi_min(true_reg->var_off);
5617 }
a72dafaf
JW
5618 false_reg->umax_value = min(false_reg->umax_value, false_umax);
5619 true_reg->umin_value = max(true_reg->umin_value, true_umin);
b03c9f9f 5620 break;
a72dafaf 5621 }
48461135 5622 case BPF_JSGE:
a72dafaf
JW
5623 case BPF_JSGT:
5624 {
5625 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1;
5626 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval;
5627
092ed096
JW
5628 /* If the full s64 was not sign-extended from s32 then don't
5629 * deduct further info.
5630 */
5631 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5632 break;
a72dafaf
JW
5633 false_reg->smax_value = min(false_reg->smax_value, false_smax);
5634 true_reg->smin_value = max(true_reg->smin_value, true_smin);
48461135 5635 break;
a72dafaf 5636 }
b4e432f1 5637 case BPF_JLE:
a72dafaf
JW
5638 case BPF_JLT:
5639 {
5640 u64 false_umin = opcode == BPF_JLT ? val : val + 1;
5641 u64 true_umax = opcode == BPF_JLT ? val - 1 : val;
5642
092ed096
JW
5643 if (is_jmp32) {
5644 false_umin += gen_hi_min(false_reg->var_off);
5645 true_umax += gen_hi_max(true_reg->var_off);
5646 }
a72dafaf
JW
5647 false_reg->umin_value = max(false_reg->umin_value, false_umin);
5648 true_reg->umax_value = min(true_reg->umax_value, true_umax);
b4e432f1 5649 break;
a72dafaf 5650 }
b4e432f1 5651 case BPF_JSLE:
a72dafaf
JW
5652 case BPF_JSLT:
5653 {
5654 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1;
5655 s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval;
5656
092ed096
JW
5657 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5658 break;
a72dafaf
JW
5659 false_reg->smin_value = max(false_reg->smin_value, false_smin);
5660 true_reg->smax_value = min(true_reg->smax_value, true_smax);
b4e432f1 5661 break;
a72dafaf 5662 }
48461135
JB
5663 default:
5664 break;
5665 }
5666
b03c9f9f
EC
5667 __reg_deduce_bounds(false_reg);
5668 __reg_deduce_bounds(true_reg);
5669 /* We might have learned some bits from the bounds. */
5670 __reg_bound_offset(false_reg);
5671 __reg_bound_offset(true_reg);
581738a6
YS
5672 if (is_jmp32) {
5673 __reg_bound_offset32(false_reg);
5674 __reg_bound_offset32(true_reg);
5675 }
b03c9f9f
EC
5676 /* Intersecting with the old var_off might have improved our bounds
5677 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5678 * then new var_off is (0; 0x7f...fc) which improves our umax.
5679 */
5680 __update_reg_bounds(false_reg);
5681 __update_reg_bounds(true_reg);
48461135
JB
5682}
5683
f1174f77
EC
5684/* Same as above, but for the case that dst_reg holds a constant and src_reg is
5685 * the variable reg.
48461135
JB
5686 */
5687static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
5688 struct bpf_reg_state *false_reg, u64 val,
092ed096 5689 u8 opcode, bool is_jmp32)
48461135 5690{
a72dafaf
JW
5691 s64 sval;
5692
f1174f77
EC
5693 if (__is_pointer_value(false, false_reg))
5694 return;
4cabc5b1 5695
092ed096
JW
5696 val = is_jmp32 ? (u32)val : val;
5697 sval = is_jmp32 ? (s64)(s32)val : (s64)val;
a72dafaf 5698
48461135
JB
5699 switch (opcode) {
5700 case BPF_JEQ:
48461135 5701 case BPF_JNE:
a72dafaf
JW
5702 {
5703 struct bpf_reg_state *reg =
5704 opcode == BPF_JEQ ? true_reg : false_reg;
5705
092ed096
JW
5706 if (is_jmp32) {
5707 u64 old_v = reg->var_off.value;
5708 u64 hi_mask = ~0xffffffffULL;
5709
5710 reg->var_off.value = (old_v & hi_mask) | val;
5711 reg->var_off.mask &= hi_mask;
5712 } else {
5713 __mark_reg_known(reg, val);
5714 }
48461135 5715 break;
a72dafaf 5716 }
960ea056
JK
5717 case BPF_JSET:
5718 false_reg->var_off = tnum_and(false_reg->var_off,
5719 tnum_const(~val));
5720 if (is_power_of_2(val))
5721 true_reg->var_off = tnum_or(true_reg->var_off,
5722 tnum_const(val));
5723 break;
48461135 5724 case BPF_JGE:
a72dafaf
JW
5725 case BPF_JGT:
5726 {
5727 u64 false_umin = opcode == BPF_JGT ? val : val + 1;
5728 u64 true_umax = opcode == BPF_JGT ? val - 1 : val;
5729
092ed096
JW
5730 if (is_jmp32) {
5731 false_umin += gen_hi_min(false_reg->var_off);
5732 true_umax += gen_hi_max(true_reg->var_off);
5733 }
a72dafaf
JW
5734 false_reg->umin_value = max(false_reg->umin_value, false_umin);
5735 true_reg->umax_value = min(true_reg->umax_value, true_umax);
b03c9f9f 5736 break;
a72dafaf 5737 }
48461135 5738 case BPF_JSGE:
a72dafaf
JW
5739 case BPF_JSGT:
5740 {
5741 s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1;
5742 s64 true_smax = opcode == BPF_JSGT ? sval - 1 : sval;
5743
092ed096
JW
5744 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5745 break;
a72dafaf
JW
5746 false_reg->smin_value = max(false_reg->smin_value, false_smin);
5747 true_reg->smax_value = min(true_reg->smax_value, true_smax);
48461135 5748 break;
a72dafaf 5749 }
b4e432f1 5750 case BPF_JLE:
a72dafaf
JW
5751 case BPF_JLT:
5752 {
5753 u64 false_umax = opcode == BPF_JLT ? val : val - 1;
5754 u64 true_umin = opcode == BPF_JLT ? val + 1 : val;
5755
092ed096
JW
5756 if (is_jmp32) {
5757 false_umax += gen_hi_max(false_reg->var_off);
5758 true_umin += gen_hi_min(true_reg->var_off);
5759 }
a72dafaf
JW
5760 false_reg->umax_value = min(false_reg->umax_value, false_umax);
5761 true_reg->umin_value = max(true_reg->umin_value, true_umin);
b4e432f1 5762 break;
a72dafaf 5763 }
b4e432f1 5764 case BPF_JSLE:
a72dafaf
JW
5765 case BPF_JSLT:
5766 {
5767 s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1;
5768 s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval;
5769
092ed096
JW
5770 if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg))
5771 break;
a72dafaf
JW
5772 false_reg->smax_value = min(false_reg->smax_value, false_smax);
5773 true_reg->smin_value = max(true_reg->smin_value, true_smin);
b4e432f1 5774 break;
a72dafaf 5775 }
48461135
JB
5776 default:
5777 break;
5778 }
5779
b03c9f9f
EC
5780 __reg_deduce_bounds(false_reg);
5781 __reg_deduce_bounds(true_reg);
5782 /* We might have learned some bits from the bounds. */
5783 __reg_bound_offset(false_reg);
5784 __reg_bound_offset(true_reg);
581738a6
YS
5785 if (is_jmp32) {
5786 __reg_bound_offset32(false_reg);
5787 __reg_bound_offset32(true_reg);
5788 }
b03c9f9f
EC
5789 /* Intersecting with the old var_off might have improved our bounds
5790 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5791 * then new var_off is (0; 0x7f...fc) which improves our umax.
5792 */
5793 __update_reg_bounds(false_reg);
5794 __update_reg_bounds(true_reg);
f1174f77
EC
5795}
5796
5797/* Regs are known to be equal, so intersect their min/max/var_off */
5798static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
5799 struct bpf_reg_state *dst_reg)
5800{
b03c9f9f
EC
5801 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value,
5802 dst_reg->umin_value);
5803 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value,
5804 dst_reg->umax_value);
5805 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value,
5806 dst_reg->smin_value);
5807 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value,
5808 dst_reg->smax_value);
f1174f77
EC
5809 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
5810 dst_reg->var_off);
b03c9f9f
EC
5811 /* We might have learned new bounds from the var_off. */
5812 __update_reg_bounds(src_reg);
5813 __update_reg_bounds(dst_reg);
5814 /* We might have learned something about the sign bit. */
5815 __reg_deduce_bounds(src_reg);
5816 __reg_deduce_bounds(dst_reg);
5817 /* We might have learned some bits from the bounds. */
5818 __reg_bound_offset(src_reg);
5819 __reg_bound_offset(dst_reg);
5820 /* Intersecting with the old var_off might have improved our bounds
5821 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
5822 * then new var_off is (0; 0x7f...fc) which improves our umax.
5823 */
5824 __update_reg_bounds(src_reg);
5825 __update_reg_bounds(dst_reg);
f1174f77
EC
5826}
5827
5828static void reg_combine_min_max(struct bpf_reg_state *true_src,
5829 struct bpf_reg_state *true_dst,
5830 struct bpf_reg_state *false_src,
5831 struct bpf_reg_state *false_dst,
5832 u8 opcode)
5833{
5834 switch (opcode) {
5835 case BPF_JEQ:
5836 __reg_combine_min_max(true_src, true_dst);
5837 break;
5838 case BPF_JNE:
5839 __reg_combine_min_max(false_src, false_dst);
b03c9f9f 5840 break;
4cabc5b1 5841 }
48461135
JB
5842}
5843
fd978bf7
JS
5844static void mark_ptr_or_null_reg(struct bpf_func_state *state,
5845 struct bpf_reg_state *reg, u32 id,
840b9615 5846 bool is_null)
57a09bf0 5847{
840b9615 5848 if (reg_type_may_be_null(reg->type) && reg->id == id) {
f1174f77
EC
5849 /* Old offset (both fixed and variable parts) should
5850 * have been known-zero, because we don't allow pointer
5851 * arithmetic on pointers that might be NULL.
5852 */
b03c9f9f
EC
5853 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value ||
5854 !tnum_equals_const(reg->var_off, 0) ||
f1174f77 5855 reg->off)) {
b03c9f9f
EC
5856 __mark_reg_known_zero(reg);
5857 reg->off = 0;
f1174f77
EC
5858 }
5859 if (is_null) {
5860 reg->type = SCALAR_VALUE;
840b9615
JS
5861 } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
5862 if (reg->map_ptr->inner_map_meta) {
5863 reg->type = CONST_PTR_TO_MAP;
5864 reg->map_ptr = reg->map_ptr->inner_map_meta;
fada7fdc
JL
5865 } else if (reg->map_ptr->map_type ==
5866 BPF_MAP_TYPE_XSKMAP) {
5867 reg->type = PTR_TO_XDP_SOCK;
840b9615
JS
5868 } else {
5869 reg->type = PTR_TO_MAP_VALUE;
5870 }
c64b7983
JS
5871 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
5872 reg->type = PTR_TO_SOCKET;
46f8bc92
MKL
5873 } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) {
5874 reg->type = PTR_TO_SOCK_COMMON;
655a51e5
MKL
5875 } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
5876 reg->type = PTR_TO_TCP_SOCK;
56f668df 5877 }
1b986589
MKL
5878 if (is_null) {
5879 /* We don't need id and ref_obj_id from this point
5880 * onwards anymore, thus we should better reset it,
5881 * so that state pruning has chances to take effect.
5882 */
5883 reg->id = 0;
5884 reg->ref_obj_id = 0;
5885 } else if (!reg_may_point_to_spin_lock(reg)) {
5886 /* For not-NULL ptr, reg->ref_obj_id will be reset
5887 * in release_reg_references().
5888 *
5889 * reg->id is still used by spin_lock ptr. Other
5890 * than spin_lock ptr type, reg->id can be reset.
fd978bf7
JS
5891 */
5892 reg->id = 0;
56f668df 5893 }
57a09bf0
TG
5894 }
5895}
5896
c6a9efa1
PC
5897static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id,
5898 bool is_null)
5899{
5900 struct bpf_reg_state *reg;
5901 int i;
5902
5903 for (i = 0; i < MAX_BPF_REG; i++)
5904 mark_ptr_or_null_reg(state, &state->regs[i], id, is_null);
5905
5906 bpf_for_each_spilled_reg(i, state, reg) {
5907 if (!reg)
5908 continue;
5909 mark_ptr_or_null_reg(state, reg, id, is_null);
5910 }
5911}
5912
57a09bf0
TG
5913/* The logic is similar to find_good_pkt_pointers(), both could eventually
5914 * be folded together at some point.
5915 */
840b9615
JS
5916static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
5917 bool is_null)
57a09bf0 5918{
f4d7e40a 5919 struct bpf_func_state *state = vstate->frame[vstate->curframe];
c6a9efa1 5920 struct bpf_reg_state *regs = state->regs;
1b986589 5921 u32 ref_obj_id = regs[regno].ref_obj_id;
a08dd0da 5922 u32 id = regs[regno].id;
c6a9efa1 5923 int i;
57a09bf0 5924
1b986589
MKL
5925 if (ref_obj_id && ref_obj_id == id && is_null)
5926 /* regs[regno] is in the " == NULL" branch.
5927 * No one could have freed the reference state before
5928 * doing the NULL check.
5929 */
5930 WARN_ON_ONCE(release_reference_state(state, id));
fd978bf7 5931
c6a9efa1
PC
5932 for (i = 0; i <= vstate->curframe; i++)
5933 __mark_ptr_or_null_regs(vstate->frame[i], id, is_null);
57a09bf0
TG
5934}
5935
5beca081
DB
5936static bool try_match_pkt_pointers(const struct bpf_insn *insn,
5937 struct bpf_reg_state *dst_reg,
5938 struct bpf_reg_state *src_reg,
5939 struct bpf_verifier_state *this_branch,
5940 struct bpf_verifier_state *other_branch)
5941{
5942 if (BPF_SRC(insn->code) != BPF_X)
5943 return false;
5944
092ed096
JW
5945 /* Pointers are always 64-bit. */
5946 if (BPF_CLASS(insn->code) == BPF_JMP32)
5947 return false;
5948
5beca081
DB
5949 switch (BPF_OP(insn->code)) {
5950 case BPF_JGT:
5951 if ((dst_reg->type == PTR_TO_PACKET &&
5952 src_reg->type == PTR_TO_PACKET_END) ||
5953 (dst_reg->type == PTR_TO_PACKET_META &&
5954 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5955 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
5956 find_good_pkt_pointers(this_branch, dst_reg,
5957 dst_reg->type, false);
5958 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
5959 src_reg->type == PTR_TO_PACKET) ||
5960 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
5961 src_reg->type == PTR_TO_PACKET_META)) {
5962 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
5963 find_good_pkt_pointers(other_branch, src_reg,
5964 src_reg->type, true);
5965 } else {
5966 return false;
5967 }
5968 break;
5969 case BPF_JLT:
5970 if ((dst_reg->type == PTR_TO_PACKET &&
5971 src_reg->type == PTR_TO_PACKET_END) ||
5972 (dst_reg->type == PTR_TO_PACKET_META &&
5973 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5974 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
5975 find_good_pkt_pointers(other_branch, dst_reg,
5976 dst_reg->type, true);
5977 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
5978 src_reg->type == PTR_TO_PACKET) ||
5979 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
5980 src_reg->type == PTR_TO_PACKET_META)) {
5981 /* pkt_end < pkt_data', pkt_data > pkt_meta' */
5982 find_good_pkt_pointers(this_branch, src_reg,
5983 src_reg->type, false);
5984 } else {
5985 return false;
5986 }
5987 break;
5988 case BPF_JGE:
5989 if ((dst_reg->type == PTR_TO_PACKET &&
5990 src_reg->type == PTR_TO_PACKET_END) ||
5991 (dst_reg->type == PTR_TO_PACKET_META &&
5992 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
5993 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
5994 find_good_pkt_pointers(this_branch, dst_reg,
5995 dst_reg->type, true);
5996 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
5997 src_reg->type == PTR_TO_PACKET) ||
5998 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
5999 src_reg->type == PTR_TO_PACKET_META)) {
6000 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
6001 find_good_pkt_pointers(other_branch, src_reg,
6002 src_reg->type, false);
6003 } else {
6004 return false;
6005 }
6006 break;
6007 case BPF_JLE:
6008 if ((dst_reg->type == PTR_TO_PACKET &&
6009 src_reg->type == PTR_TO_PACKET_END) ||
6010 (dst_reg->type == PTR_TO_PACKET_META &&
6011 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
6012 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
6013 find_good_pkt_pointers(other_branch, dst_reg,
6014 dst_reg->type, false);
6015 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
6016 src_reg->type == PTR_TO_PACKET) ||
6017 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
6018 src_reg->type == PTR_TO_PACKET_META)) {
6019 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
6020 find_good_pkt_pointers(this_branch, src_reg,
6021 src_reg->type, true);
6022 } else {
6023 return false;
6024 }
6025 break;
6026 default:
6027 return false;
6028 }
6029
6030 return true;
6031}
6032
58e2af8b 6033static int check_cond_jmp_op(struct bpf_verifier_env *env,
17a52670
AS
6034 struct bpf_insn *insn, int *insn_idx)
6035{
f4d7e40a
AS
6036 struct bpf_verifier_state *this_branch = env->cur_state;
6037 struct bpf_verifier_state *other_branch;
6038 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
fb8d251e 6039 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
17a52670 6040 u8 opcode = BPF_OP(insn->code);
092ed096 6041 bool is_jmp32;
fb8d251e 6042 int pred = -1;
17a52670
AS
6043 int err;
6044
092ed096
JW
6045 /* Only conditional jumps are expected to reach here. */
6046 if (opcode == BPF_JA || opcode > BPF_JSLE) {
6047 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
17a52670
AS
6048 return -EINVAL;
6049 }
6050
6051 if (BPF_SRC(insn->code) == BPF_X) {
6052 if (insn->imm != 0) {
092ed096 6053 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
6054 return -EINVAL;
6055 }
6056
6057 /* check src1 operand */
dc503a8a 6058 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
6059 if (err)
6060 return err;
1be7f75d
AS
6061
6062 if (is_pointer_value(env, insn->src_reg)) {
61bd5218 6063 verbose(env, "R%d pointer comparison prohibited\n",
1be7f75d
AS
6064 insn->src_reg);
6065 return -EACCES;
6066 }
fb8d251e 6067 src_reg = &regs[insn->src_reg];
17a52670
AS
6068 } else {
6069 if (insn->src_reg != BPF_REG_0) {
092ed096 6070 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
17a52670
AS
6071 return -EINVAL;
6072 }
6073 }
6074
6075 /* check src2 operand */
dc503a8a 6076 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
6077 if (err)
6078 return err;
6079
1a0dc1ac 6080 dst_reg = &regs[insn->dst_reg];
092ed096 6081 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
1a0dc1ac 6082
fb8d251e
AS
6083 if (BPF_SRC(insn->code) == BPF_K)
6084 pred = is_branch_taken(dst_reg, insn->imm,
6085 opcode, is_jmp32);
6086 else if (src_reg->type == SCALAR_VALUE &&
6087 tnum_is_const(src_reg->var_off))
6088 pred = is_branch_taken(dst_reg, src_reg->var_off.value,
6089 opcode, is_jmp32);
b5dc0163
AS
6090 if (pred >= 0) {
6091 err = mark_chain_precision(env, insn->dst_reg);
6092 if (BPF_SRC(insn->code) == BPF_X && !err)
6093 err = mark_chain_precision(env, insn->src_reg);
6094 if (err)
6095 return err;
6096 }
fb8d251e
AS
6097 if (pred == 1) {
6098 /* only follow the goto, ignore fall-through */
6099 *insn_idx += insn->off;
6100 return 0;
6101 } else if (pred == 0) {
6102 /* only follow fall-through branch, since
6103 * that's where the program will go
6104 */
6105 return 0;
17a52670
AS
6106 }
6107
979d63d5
DB
6108 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
6109 false);
17a52670
AS
6110 if (!other_branch)
6111 return -EFAULT;
f4d7e40a 6112 other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
17a52670 6113
48461135
JB
6114 /* detect if we are comparing against a constant value so we can adjust
6115 * our min/max values for our dst register.
f1174f77
EC
6116 * this is only legit if both are scalars (or pointers to the same
6117 * object, I suppose, but we don't support that right now), because
6118 * otherwise the different base pointers mean the offsets aren't
6119 * comparable.
48461135
JB
6120 */
6121 if (BPF_SRC(insn->code) == BPF_X) {
092ed096
JW
6122 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
6123 struct bpf_reg_state lo_reg0 = *dst_reg;
6124 struct bpf_reg_state lo_reg1 = *src_reg;
6125 struct bpf_reg_state *src_lo, *dst_lo;
6126
6127 dst_lo = &lo_reg0;
6128 src_lo = &lo_reg1;
6129 coerce_reg_to_size(dst_lo, 4);
6130 coerce_reg_to_size(src_lo, 4);
6131
f1174f77 6132 if (dst_reg->type == SCALAR_VALUE &&
092ed096
JW
6133 src_reg->type == SCALAR_VALUE) {
6134 if (tnum_is_const(src_reg->var_off) ||
6135 (is_jmp32 && tnum_is_const(src_lo->var_off)))
f4d7e40a 6136 reg_set_min_max(&other_branch_regs[insn->dst_reg],
092ed096
JW
6137 dst_reg,
6138 is_jmp32
6139 ? src_lo->var_off.value
6140 : src_reg->var_off.value,
6141 opcode, is_jmp32);
6142 else if (tnum_is_const(dst_reg->var_off) ||
6143 (is_jmp32 && tnum_is_const(dst_lo->var_off)))
f4d7e40a 6144 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
092ed096
JW
6145 src_reg,
6146 is_jmp32
6147 ? dst_lo->var_off.value
6148 : dst_reg->var_off.value,
6149 opcode, is_jmp32);
6150 else if (!is_jmp32 &&
6151 (opcode == BPF_JEQ || opcode == BPF_JNE))
f1174f77 6152 /* Comparing for equality, we can combine knowledge */
f4d7e40a
AS
6153 reg_combine_min_max(&other_branch_regs[insn->src_reg],
6154 &other_branch_regs[insn->dst_reg],
092ed096 6155 src_reg, dst_reg, opcode);
f1174f77
EC
6156 }
6157 } else if (dst_reg->type == SCALAR_VALUE) {
f4d7e40a 6158 reg_set_min_max(&other_branch_regs[insn->dst_reg],
092ed096 6159 dst_reg, insn->imm, opcode, is_jmp32);
48461135
JB
6160 }
6161
092ed096
JW
6162 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
6163 * NOTE: these optimizations below are related with pointer comparison
6164 * which will never be JMP32.
6165 */
6166 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
1a0dc1ac 6167 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
840b9615
JS
6168 reg_type_may_be_null(dst_reg->type)) {
6169 /* Mark all identical registers in each branch as either
57a09bf0
TG
6170 * safe or unknown depending R == 0 or R != 0 conditional.
6171 */
840b9615
JS
6172 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
6173 opcode == BPF_JNE);
6174 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
6175 opcode == BPF_JEQ);
5beca081
DB
6176 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
6177 this_branch, other_branch) &&
6178 is_pointer_value(env, insn->dst_reg)) {
61bd5218
JK
6179 verbose(env, "R%d pointer comparison prohibited\n",
6180 insn->dst_reg);
1be7f75d 6181 return -EACCES;
17a52670 6182 }
06ee7115 6183 if (env->log.level & BPF_LOG_LEVEL)
f4d7e40a 6184 print_verifier_state(env, this_branch->frame[this_branch->curframe]);
17a52670
AS
6185 return 0;
6186}
6187
17a52670 6188/* verify BPF_LD_IMM64 instruction */
58e2af8b 6189static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
17a52670 6190{
d8eca5bb 6191 struct bpf_insn_aux_data *aux = cur_aux(env);
638f5b90 6192 struct bpf_reg_state *regs = cur_regs(env);
d8eca5bb 6193 struct bpf_map *map;
17a52670
AS
6194 int err;
6195
6196 if (BPF_SIZE(insn->code) != BPF_DW) {
61bd5218 6197 verbose(env, "invalid BPF_LD_IMM insn\n");
17a52670
AS
6198 return -EINVAL;
6199 }
6200 if (insn->off != 0) {
61bd5218 6201 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
17a52670
AS
6202 return -EINVAL;
6203 }
6204
dc503a8a 6205 err = check_reg_arg(env, insn->dst_reg, DST_OP);
17a52670
AS
6206 if (err)
6207 return err;
6208
6b173873 6209 if (insn->src_reg == 0) {
6b173873
JK
6210 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
6211
f1174f77 6212 regs[insn->dst_reg].type = SCALAR_VALUE;
b03c9f9f 6213 __mark_reg_known(&regs[insn->dst_reg], imm);
17a52670 6214 return 0;
6b173873 6215 }
17a52670 6216
d8eca5bb
DB
6217 map = env->used_maps[aux->map_index];
6218 mark_reg_known_zero(env, regs, insn->dst_reg);
6219 regs[insn->dst_reg].map_ptr = map;
6220
6221 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
6222 regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
6223 regs[insn->dst_reg].off = aux->map_off;
6224 if (map_value_has_spin_lock(map))
6225 regs[insn->dst_reg].id = ++env->id_gen;
6226 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
6227 regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
6228 } else {
6229 verbose(env, "bpf verifier is misconfigured\n");
6230 return -EINVAL;
6231 }
17a52670 6232
17a52670
AS
6233 return 0;
6234}
6235
96be4325
DB
6236static bool may_access_skb(enum bpf_prog_type type)
6237{
6238 switch (type) {
6239 case BPF_PROG_TYPE_SOCKET_FILTER:
6240 case BPF_PROG_TYPE_SCHED_CLS:
94caee8c 6241 case BPF_PROG_TYPE_SCHED_ACT:
96be4325
DB
6242 return true;
6243 default:
6244 return false;
6245 }
6246}
6247
ddd872bc
AS
6248/* verify safety of LD_ABS|LD_IND instructions:
6249 * - they can only appear in the programs where ctx == skb
6250 * - since they are wrappers of function calls, they scratch R1-R5 registers,
6251 * preserve R6-R9, and store return value into R0
6252 *
6253 * Implicit input:
6254 * ctx == skb == R6 == CTX
6255 *
6256 * Explicit input:
6257 * SRC == any register
6258 * IMM == 32-bit immediate
6259 *
6260 * Output:
6261 * R0 - 8/16/32-bit skb data converted to cpu endianness
6262 */
58e2af8b 6263static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
ddd872bc 6264{
638f5b90 6265 struct bpf_reg_state *regs = cur_regs(env);
ddd872bc 6266 u8 mode = BPF_MODE(insn->code);
ddd872bc
AS
6267 int i, err;
6268
24701ece 6269 if (!may_access_skb(env->prog->type)) {
61bd5218 6270 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
ddd872bc
AS
6271 return -EINVAL;
6272 }
6273
e0cea7ce
DB
6274 if (!env->ops->gen_ld_abs) {
6275 verbose(env, "bpf verifier is misconfigured\n");
6276 return -EINVAL;
6277 }
6278
f910cefa 6279 if (env->subprog_cnt > 1) {
f4d7e40a
AS
6280 /* when program has LD_ABS insn JITs and interpreter assume
6281 * that r1 == ctx == skb which is not the case for callees
6282 * that can have arbitrary arguments. It's problematic
6283 * for main prog as well since JITs would need to analyze
6284 * all functions in order to make proper register save/restore
6285 * decisions in the main prog. Hence disallow LD_ABS with calls
6286 */
6287 verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
6288 return -EINVAL;
6289 }
6290
ddd872bc 6291 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
d82bccc6 6292 BPF_SIZE(insn->code) == BPF_DW ||
ddd872bc 6293 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
61bd5218 6294 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n");
ddd872bc
AS
6295 return -EINVAL;
6296 }
6297
6298 /* check whether implicit source operand (register R6) is readable */
dc503a8a 6299 err = check_reg_arg(env, BPF_REG_6, SRC_OP);
ddd872bc
AS
6300 if (err)
6301 return err;
6302
fd978bf7
JS
6303 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
6304 * gen_ld_abs() may terminate the program at runtime, leading to
6305 * reference leak.
6306 */
6307 err = check_reference_leak(env);
6308 if (err) {
6309 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
6310 return err;
6311 }
6312
d83525ca
AS
6313 if (env->cur_state->active_spin_lock) {
6314 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
6315 return -EINVAL;
6316 }
6317
ddd872bc 6318 if (regs[BPF_REG_6].type != PTR_TO_CTX) {
61bd5218
JK
6319 verbose(env,
6320 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
ddd872bc
AS
6321 return -EINVAL;
6322 }
6323
6324 if (mode == BPF_IND) {
6325 /* check explicit source operand */
dc503a8a 6326 err = check_reg_arg(env, insn->src_reg, SRC_OP);
ddd872bc
AS
6327 if (err)
6328 return err;
6329 }
6330
6331 /* reset caller saved regs to unreadable */
dc503a8a 6332 for (i = 0; i < CALLER_SAVED_REGS; i++) {
61bd5218 6333 mark_reg_not_init(env, regs, caller_saved[i]);
dc503a8a
EC
6334 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
6335 }
ddd872bc
AS
6336
6337 /* mark destination R0 register as readable, since it contains
dc503a8a
EC
6338 * the value fetched from the packet.
6339 * Already marked as written above.
ddd872bc 6340 */
61bd5218 6341 mark_reg_unknown(env, regs, BPF_REG_0);
5327ed3d
JW
6342 /* ld_abs load up to 32-bit skb data. */
6343 regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
ddd872bc
AS
6344 return 0;
6345}
6346
390ee7e2
AS
6347static int check_return_code(struct bpf_verifier_env *env)
6348{
5cf1e914 6349 struct tnum enforce_attach_type_range = tnum_unknown;
390ee7e2
AS
6350 struct bpf_reg_state *reg;
6351 struct tnum range = tnum_range(0, 1);
6352
6353 switch (env->prog->type) {
983695fa
DB
6354 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
6355 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
6356 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
6357 range = tnum_range(1, 1);
ed4ed404 6358 break;
390ee7e2 6359 case BPF_PROG_TYPE_CGROUP_SKB:
5cf1e914 6360 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
6361 range = tnum_range(0, 3);
6362 enforce_attach_type_range = tnum_range(2, 3);
6363 }
ed4ed404 6364 break;
390ee7e2
AS
6365 case BPF_PROG_TYPE_CGROUP_SOCK:
6366 case BPF_PROG_TYPE_SOCK_OPS:
ebc614f6 6367 case BPF_PROG_TYPE_CGROUP_DEVICE:
7b146ceb 6368 case BPF_PROG_TYPE_CGROUP_SYSCTL:
0d01da6a 6369 case BPF_PROG_TYPE_CGROUP_SOCKOPT:
390ee7e2 6370 break;
15ab09bd
AS
6371 case BPF_PROG_TYPE_RAW_TRACEPOINT:
6372 if (!env->prog->aux->attach_btf_id)
6373 return 0;
6374 range = tnum_const(0);
6375 break;
390ee7e2
AS
6376 default:
6377 return 0;
6378 }
6379
638f5b90 6380 reg = cur_regs(env) + BPF_REG_0;
390ee7e2 6381 if (reg->type != SCALAR_VALUE) {
61bd5218 6382 verbose(env, "At program exit the register R0 is not a known value (%s)\n",
390ee7e2
AS
6383 reg_type_str[reg->type]);
6384 return -EINVAL;
6385 }
6386
6387 if (!tnum_in(range, reg->var_off)) {
5cf1e914 6388 char tn_buf[48];
6389
61bd5218 6390 verbose(env, "At program exit the register R0 ");
390ee7e2 6391 if (!tnum_is_unknown(reg->var_off)) {
390ee7e2 6392 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
61bd5218 6393 verbose(env, "has value %s", tn_buf);
390ee7e2 6394 } else {
61bd5218 6395 verbose(env, "has unknown scalar value");
390ee7e2 6396 }
5cf1e914 6397 tnum_strn(tn_buf, sizeof(tn_buf), range);
983695fa 6398 verbose(env, " should have been in %s\n", tn_buf);
390ee7e2
AS
6399 return -EINVAL;
6400 }
5cf1e914 6401
6402 if (!tnum_is_unknown(enforce_attach_type_range) &&
6403 tnum_in(enforce_attach_type_range, reg->var_off))
6404 env->prog->enforce_expected_attach_type = 1;
390ee7e2
AS
6405 return 0;
6406}
6407
475fb78f
AS
6408/* non-recursive DFS pseudo code
6409 * 1 procedure DFS-iterative(G,v):
6410 * 2 label v as discovered
6411 * 3 let S be a stack
6412 * 4 S.push(v)
6413 * 5 while S is not empty
6414 * 6 t <- S.pop()
6415 * 7 if t is what we're looking for:
6416 * 8 return t
6417 * 9 for all edges e in G.adjacentEdges(t) do
6418 * 10 if edge e is already labelled
6419 * 11 continue with the next edge
6420 * 12 w <- G.adjacentVertex(t,e)
6421 * 13 if vertex w is not discovered and not explored
6422 * 14 label e as tree-edge
6423 * 15 label w as discovered
6424 * 16 S.push(w)
6425 * 17 continue at 5
6426 * 18 else if vertex w is discovered
6427 * 19 label e as back-edge
6428 * 20 else
6429 * 21 // vertex w is explored
6430 * 22 label e as forward- or cross-edge
6431 * 23 label t as explored
6432 * 24 S.pop()
6433 *
6434 * convention:
6435 * 0x10 - discovered
6436 * 0x11 - discovered and fall-through edge labelled
6437 * 0x12 - discovered and fall-through and branch edges labelled
6438 * 0x20 - explored
6439 */
6440
6441enum {
6442 DISCOVERED = 0x10,
6443 EXPLORED = 0x20,
6444 FALLTHROUGH = 1,
6445 BRANCH = 2,
6446};
6447
dc2a4ebc
AS
6448static u32 state_htab_size(struct bpf_verifier_env *env)
6449{
6450 return env->prog->len;
6451}
6452
5d839021
AS
6453static struct bpf_verifier_state_list **explored_state(
6454 struct bpf_verifier_env *env,
6455 int idx)
6456{
dc2a4ebc
AS
6457 struct bpf_verifier_state *cur = env->cur_state;
6458 struct bpf_func_state *state = cur->frame[cur->curframe];
6459
6460 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
5d839021
AS
6461}
6462
6463static void init_explored_state(struct bpf_verifier_env *env, int idx)
6464{
a8f500af 6465 env->insn_aux_data[idx].prune_point = true;
5d839021 6466}
f1bca824 6467
475fb78f
AS
6468/* t, w, e - match pseudo-code above:
6469 * t - index of current instruction
6470 * w - next instruction
6471 * e - edge
6472 */
2589726d
AS
6473static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
6474 bool loop_ok)
475fb78f 6475{
7df737e9
AS
6476 int *insn_stack = env->cfg.insn_stack;
6477 int *insn_state = env->cfg.insn_state;
6478
475fb78f
AS
6479 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
6480 return 0;
6481
6482 if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
6483 return 0;
6484
6485 if (w < 0 || w >= env->prog->len) {
d9762e84 6486 verbose_linfo(env, t, "%d: ", t);
61bd5218 6487 verbose(env, "jump out of range from insn %d to %d\n", t, w);
475fb78f
AS
6488 return -EINVAL;
6489 }
6490
f1bca824
AS
6491 if (e == BRANCH)
6492 /* mark branch target for state pruning */
5d839021 6493 init_explored_state(env, w);
f1bca824 6494
475fb78f
AS
6495 if (insn_state[w] == 0) {
6496 /* tree-edge */
6497 insn_state[t] = DISCOVERED | e;
6498 insn_state[w] = DISCOVERED;
7df737e9 6499 if (env->cfg.cur_stack >= env->prog->len)
475fb78f 6500 return -E2BIG;
7df737e9 6501 insn_stack[env->cfg.cur_stack++] = w;
475fb78f
AS
6502 return 1;
6503 } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
2589726d
AS
6504 if (loop_ok && env->allow_ptr_leaks)
6505 return 0;
d9762e84
MKL
6506 verbose_linfo(env, t, "%d: ", t);
6507 verbose_linfo(env, w, "%d: ", w);
61bd5218 6508 verbose(env, "back-edge from insn %d to %d\n", t, w);
475fb78f
AS
6509 return -EINVAL;
6510 } else if (insn_state[w] == EXPLORED) {
6511 /* forward- or cross-edge */
6512 insn_state[t] = DISCOVERED | e;
6513 } else {
61bd5218 6514 verbose(env, "insn state internal bug\n");
475fb78f
AS
6515 return -EFAULT;
6516 }
6517 return 0;
6518}
6519
6520/* non-recursive depth-first-search to detect loops in BPF program
6521 * loop == back-edge in directed graph
6522 */
58e2af8b 6523static int check_cfg(struct bpf_verifier_env *env)
475fb78f
AS
6524{
6525 struct bpf_insn *insns = env->prog->insnsi;
6526 int insn_cnt = env->prog->len;
7df737e9 6527 int *insn_stack, *insn_state;
475fb78f
AS
6528 int ret = 0;
6529 int i, t;
6530
7df737e9 6531 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f
AS
6532 if (!insn_state)
6533 return -ENOMEM;
6534
7df737e9 6535 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
475fb78f 6536 if (!insn_stack) {
71dde681 6537 kvfree(insn_state);
475fb78f
AS
6538 return -ENOMEM;
6539 }
6540
6541 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */
6542 insn_stack[0] = 0; /* 0 is the first instruction */
7df737e9 6543 env->cfg.cur_stack = 1;
475fb78f
AS
6544
6545peek_stack:
7df737e9 6546 if (env->cfg.cur_stack == 0)
475fb78f 6547 goto check_state;
7df737e9 6548 t = insn_stack[env->cfg.cur_stack - 1];
475fb78f 6549
092ed096
JW
6550 if (BPF_CLASS(insns[t].code) == BPF_JMP ||
6551 BPF_CLASS(insns[t].code) == BPF_JMP32) {
475fb78f
AS
6552 u8 opcode = BPF_OP(insns[t].code);
6553
6554 if (opcode == BPF_EXIT) {
6555 goto mark_explored;
6556 } else if (opcode == BPF_CALL) {
2589726d 6557 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
475fb78f
AS
6558 if (ret == 1)
6559 goto peek_stack;
6560 else if (ret < 0)
6561 goto err_free;
07016151 6562 if (t + 1 < insn_cnt)
5d839021 6563 init_explored_state(env, t + 1);
cc8b0b92 6564 if (insns[t].src_reg == BPF_PSEUDO_CALL) {
5d839021 6565 init_explored_state(env, t);
2589726d
AS
6566 ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
6567 env, false);
cc8b0b92
AS
6568 if (ret == 1)
6569 goto peek_stack;
6570 else if (ret < 0)
6571 goto err_free;
6572 }
475fb78f
AS
6573 } else if (opcode == BPF_JA) {
6574 if (BPF_SRC(insns[t].code) != BPF_K) {
6575 ret = -EINVAL;
6576 goto err_free;
6577 }
6578 /* unconditional jump with single edge */
6579 ret = push_insn(t, t + insns[t].off + 1,
2589726d 6580 FALLTHROUGH, env, true);
475fb78f
AS
6581 if (ret == 1)
6582 goto peek_stack;
6583 else if (ret < 0)
6584 goto err_free;
b5dc0163
AS
6585 /* unconditional jmp is not a good pruning point,
6586 * but it's marked, since backtracking needs
6587 * to record jmp history in is_state_visited().
6588 */
6589 init_explored_state(env, t + insns[t].off + 1);
f1bca824
AS
6590 /* tell verifier to check for equivalent states
6591 * after every call and jump
6592 */
c3de6317 6593 if (t + 1 < insn_cnt)
5d839021 6594 init_explored_state(env, t + 1);
475fb78f
AS
6595 } else {
6596 /* conditional jump with two edges */
5d839021 6597 init_explored_state(env, t);
2589726d 6598 ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
475fb78f
AS
6599 if (ret == 1)
6600 goto peek_stack;
6601 else if (ret < 0)
6602 goto err_free;
6603
2589726d 6604 ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
475fb78f
AS
6605 if (ret == 1)
6606 goto peek_stack;
6607 else if (ret < 0)
6608 goto err_free;
6609 }
6610 } else {
6611 /* all other non-branch instructions with single
6612 * fall-through edge
6613 */
2589726d 6614 ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
475fb78f
AS
6615 if (ret == 1)
6616 goto peek_stack;
6617 else if (ret < 0)
6618 goto err_free;
6619 }
6620
6621mark_explored:
6622 insn_state[t] = EXPLORED;
7df737e9 6623 if (env->cfg.cur_stack-- <= 0) {
61bd5218 6624 verbose(env, "pop stack internal bug\n");
475fb78f
AS
6625 ret = -EFAULT;
6626 goto err_free;
6627 }
6628 goto peek_stack;
6629
6630check_state:
6631 for (i = 0; i < insn_cnt; i++) {
6632 if (insn_state[i] != EXPLORED) {
61bd5218 6633 verbose(env, "unreachable insn %d\n", i);
475fb78f
AS
6634 ret = -EINVAL;
6635 goto err_free;
6636 }
6637 }
6638 ret = 0; /* cfg looks good */
6639
6640err_free:
71dde681
AS
6641 kvfree(insn_state);
6642 kvfree(insn_stack);
7df737e9 6643 env->cfg.insn_state = env->cfg.insn_stack = NULL;
475fb78f
AS
6644 return ret;
6645}
6646
838e9690
YS
6647/* The minimum supported BTF func info size */
6648#define MIN_BPF_FUNCINFO_SIZE 8
6649#define MAX_FUNCINFO_REC_SIZE 252
6650
c454a46b
MKL
6651static int check_btf_func(struct bpf_verifier_env *env,
6652 const union bpf_attr *attr,
6653 union bpf_attr __user *uattr)
838e9690 6654{
d0b2818e 6655 u32 i, nfuncs, urec_size, min_size;
838e9690 6656 u32 krec_size = sizeof(struct bpf_func_info);
c454a46b 6657 struct bpf_func_info *krecord;
8c1b6e69 6658 struct bpf_func_info_aux *info_aux = NULL;
838e9690 6659 const struct btf_type *type;
c454a46b
MKL
6660 struct bpf_prog *prog;
6661 const struct btf *btf;
838e9690 6662 void __user *urecord;
d0b2818e 6663 u32 prev_offset = 0;
838e9690
YS
6664 int ret = 0;
6665
6666 nfuncs = attr->func_info_cnt;
6667 if (!nfuncs)
6668 return 0;
6669
6670 if (nfuncs != env->subprog_cnt) {
6671 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
6672 return -EINVAL;
6673 }
6674
6675 urec_size = attr->func_info_rec_size;
6676 if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
6677 urec_size > MAX_FUNCINFO_REC_SIZE ||
6678 urec_size % sizeof(u32)) {
6679 verbose(env, "invalid func info rec size %u\n", urec_size);
6680 return -EINVAL;
6681 }
6682
c454a46b
MKL
6683 prog = env->prog;
6684 btf = prog->aux->btf;
838e9690
YS
6685
6686 urecord = u64_to_user_ptr(attr->func_info);
6687 min_size = min_t(u32, krec_size, urec_size);
6688
ba64e7d8 6689 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
c454a46b
MKL
6690 if (!krecord)
6691 return -ENOMEM;
8c1b6e69
AS
6692 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
6693 if (!info_aux)
6694 goto err_free;
ba64e7d8 6695
838e9690
YS
6696 for (i = 0; i < nfuncs; i++) {
6697 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
6698 if (ret) {
6699 if (ret == -E2BIG) {
6700 verbose(env, "nonzero tailing record in func info");
6701 /* set the size kernel expects so loader can zero
6702 * out the rest of the record.
6703 */
6704 if (put_user(min_size, &uattr->func_info_rec_size))
6705 ret = -EFAULT;
6706 }
c454a46b 6707 goto err_free;
838e9690
YS
6708 }
6709
ba64e7d8 6710 if (copy_from_user(&krecord[i], urecord, min_size)) {
838e9690 6711 ret = -EFAULT;
c454a46b 6712 goto err_free;
838e9690
YS
6713 }
6714
d30d42e0 6715 /* check insn_off */
838e9690 6716 if (i == 0) {
d30d42e0 6717 if (krecord[i].insn_off) {
838e9690 6718 verbose(env,
d30d42e0
MKL
6719 "nonzero insn_off %u for the first func info record",
6720 krecord[i].insn_off);
838e9690 6721 ret = -EINVAL;
c454a46b 6722 goto err_free;
838e9690 6723 }
d30d42e0 6724 } else if (krecord[i].insn_off <= prev_offset) {
838e9690
YS
6725 verbose(env,
6726 "same or smaller insn offset (%u) than previous func info record (%u)",
d30d42e0 6727 krecord[i].insn_off, prev_offset);
838e9690 6728 ret = -EINVAL;
c454a46b 6729 goto err_free;
838e9690
YS
6730 }
6731
d30d42e0 6732 if (env->subprog_info[i].start != krecord[i].insn_off) {
838e9690
YS
6733 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
6734 ret = -EINVAL;
c454a46b 6735 goto err_free;
838e9690
YS
6736 }
6737
6738 /* check type_id */
ba64e7d8 6739 type = btf_type_by_id(btf, krecord[i].type_id);
838e9690
YS
6740 if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
6741 verbose(env, "invalid type id %d in func info",
ba64e7d8 6742 krecord[i].type_id);
838e9690 6743 ret = -EINVAL;
c454a46b 6744 goto err_free;
838e9690 6745 }
d30d42e0 6746 prev_offset = krecord[i].insn_off;
838e9690
YS
6747 urecord += urec_size;
6748 }
6749
ba64e7d8
YS
6750 prog->aux->func_info = krecord;
6751 prog->aux->func_info_cnt = nfuncs;
8c1b6e69 6752 prog->aux->func_info_aux = info_aux;
838e9690
YS
6753 return 0;
6754
c454a46b 6755err_free:
ba64e7d8 6756 kvfree(krecord);
8c1b6e69 6757 kfree(info_aux);
838e9690
YS
6758 return ret;
6759}
6760
ba64e7d8
YS
6761static void adjust_btf_func(struct bpf_verifier_env *env)
6762{
8c1b6e69 6763 struct bpf_prog_aux *aux = env->prog->aux;
ba64e7d8
YS
6764 int i;
6765
8c1b6e69 6766 if (!aux->func_info)
ba64e7d8
YS
6767 return;
6768
6769 for (i = 0; i < env->subprog_cnt; i++)
8c1b6e69 6770 aux->func_info[i].insn_off = env->subprog_info[i].start;
ba64e7d8
YS
6771}
6772
c454a46b
MKL
6773#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
6774 sizeof(((struct bpf_line_info *)(0))->line_col))
6775#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
6776
6777static int check_btf_line(struct bpf_verifier_env *env,
6778 const union bpf_attr *attr,
6779 union bpf_attr __user *uattr)
6780{
6781 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
6782 struct bpf_subprog_info *sub;
6783 struct bpf_line_info *linfo;
6784 struct bpf_prog *prog;
6785 const struct btf *btf;
6786 void __user *ulinfo;
6787 int err;
6788
6789 nr_linfo = attr->line_info_cnt;
6790 if (!nr_linfo)
6791 return 0;
6792
6793 rec_size = attr->line_info_rec_size;
6794 if (rec_size < MIN_BPF_LINEINFO_SIZE ||
6795 rec_size > MAX_LINEINFO_REC_SIZE ||
6796 rec_size & (sizeof(u32) - 1))
6797 return -EINVAL;
6798
6799 /* Need to zero it in case the userspace may
6800 * pass in a smaller bpf_line_info object.
6801 */
6802 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
6803 GFP_KERNEL | __GFP_NOWARN);
6804 if (!linfo)
6805 return -ENOMEM;
6806
6807 prog = env->prog;
6808 btf = prog->aux->btf;
6809
6810 s = 0;
6811 sub = env->subprog_info;
6812 ulinfo = u64_to_user_ptr(attr->line_info);
6813 expected_size = sizeof(struct bpf_line_info);
6814 ncopy = min_t(u32, expected_size, rec_size);
6815 for (i = 0; i < nr_linfo; i++) {
6816 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
6817 if (err) {
6818 if (err == -E2BIG) {
6819 verbose(env, "nonzero tailing record in line_info");
6820 if (put_user(expected_size,
6821 &uattr->line_info_rec_size))
6822 err = -EFAULT;
6823 }
6824 goto err_free;
6825 }
6826
6827 if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
6828 err = -EFAULT;
6829 goto err_free;
6830 }
6831
6832 /*
6833 * Check insn_off to ensure
6834 * 1) strictly increasing AND
6835 * 2) bounded by prog->len
6836 *
6837 * The linfo[0].insn_off == 0 check logically falls into
6838 * the later "missing bpf_line_info for func..." case
6839 * because the first linfo[0].insn_off must be the
6840 * first sub also and the first sub must have
6841 * subprog_info[0].start == 0.
6842 */
6843 if ((i && linfo[i].insn_off <= prev_offset) ||
6844 linfo[i].insn_off >= prog->len) {
6845 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
6846 i, linfo[i].insn_off, prev_offset,
6847 prog->len);
6848 err = -EINVAL;
6849 goto err_free;
6850 }
6851
fdbaa0be
MKL
6852 if (!prog->insnsi[linfo[i].insn_off].code) {
6853 verbose(env,
6854 "Invalid insn code at line_info[%u].insn_off\n",
6855 i);
6856 err = -EINVAL;
6857 goto err_free;
6858 }
6859
23127b33
MKL
6860 if (!btf_name_by_offset(btf, linfo[i].line_off) ||
6861 !btf_name_by_offset(btf, linfo[i].file_name_off)) {
c454a46b
MKL
6862 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
6863 err = -EINVAL;
6864 goto err_free;
6865 }
6866
6867 if (s != env->subprog_cnt) {
6868 if (linfo[i].insn_off == sub[s].start) {
6869 sub[s].linfo_idx = i;
6870 s++;
6871 } else if (sub[s].start < linfo[i].insn_off) {
6872 verbose(env, "missing bpf_line_info for func#%u\n", s);
6873 err = -EINVAL;
6874 goto err_free;
6875 }
6876 }
6877
6878 prev_offset = linfo[i].insn_off;
6879 ulinfo += rec_size;
6880 }
6881
6882 if (s != env->subprog_cnt) {
6883 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
6884 env->subprog_cnt - s, s);
6885 err = -EINVAL;
6886 goto err_free;
6887 }
6888
6889 prog->aux->linfo = linfo;
6890 prog->aux->nr_linfo = nr_linfo;
6891
6892 return 0;
6893
6894err_free:
6895 kvfree(linfo);
6896 return err;
6897}
6898
6899static int check_btf_info(struct bpf_verifier_env *env,
6900 const union bpf_attr *attr,
6901 union bpf_attr __user *uattr)
6902{
6903 struct btf *btf;
6904 int err;
6905
6906 if (!attr->func_info_cnt && !attr->line_info_cnt)
6907 return 0;
6908
6909 btf = btf_get_by_fd(attr->prog_btf_fd);
6910 if (IS_ERR(btf))
6911 return PTR_ERR(btf);
6912 env->prog->aux->btf = btf;
6913
6914 err = check_btf_func(env, attr, uattr);
6915 if (err)
6916 return err;
6917
6918 err = check_btf_line(env, attr, uattr);
6919 if (err)
6920 return err;
6921
6922 return 0;
ba64e7d8
YS
6923}
6924
f1174f77
EC
6925/* check %cur's range satisfies %old's */
6926static bool range_within(struct bpf_reg_state *old,
6927 struct bpf_reg_state *cur)
6928{
b03c9f9f
EC
6929 return old->umin_value <= cur->umin_value &&
6930 old->umax_value >= cur->umax_value &&
6931 old->smin_value <= cur->smin_value &&
6932 old->smax_value >= cur->smax_value;
f1174f77
EC
6933}
6934
6935/* Maximum number of register states that can exist at once */
6936#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
6937struct idpair {
6938 u32 old;
6939 u32 cur;
6940};
6941
6942/* If in the old state two registers had the same id, then they need to have
6943 * the same id in the new state as well. But that id could be different from
6944 * the old state, so we need to track the mapping from old to new ids.
6945 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent
6946 * regs with old id 5 must also have new id 9 for the new state to be safe. But
6947 * regs with a different old id could still have new id 9, we don't care about
6948 * that.
6949 * So we look through our idmap to see if this old id has been seen before. If
6950 * so, we require the new id to match; otherwise, we add the id pair to the map.
969bf05e 6951 */
f1174f77 6952static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
969bf05e 6953{
f1174f77 6954 unsigned int i;
969bf05e 6955
f1174f77
EC
6956 for (i = 0; i < ID_MAP_SIZE; i++) {
6957 if (!idmap[i].old) {
6958 /* Reached an empty slot; haven't seen this id before */
6959 idmap[i].old = old_id;
6960 idmap[i].cur = cur_id;
6961 return true;
6962 }
6963 if (idmap[i].old == old_id)
6964 return idmap[i].cur == cur_id;
6965 }
6966 /* We ran out of idmap slots, which should be impossible */
6967 WARN_ON_ONCE(1);
6968 return false;
6969}
6970
9242b5f5
AS
6971static void clean_func_state(struct bpf_verifier_env *env,
6972 struct bpf_func_state *st)
6973{
6974 enum bpf_reg_liveness live;
6975 int i, j;
6976
6977 for (i = 0; i < BPF_REG_FP; i++) {
6978 live = st->regs[i].live;
6979 /* liveness must not touch this register anymore */
6980 st->regs[i].live |= REG_LIVE_DONE;
6981 if (!(live & REG_LIVE_READ))
6982 /* since the register is unused, clear its state
6983 * to make further comparison simpler
6984 */
6985 __mark_reg_not_init(&st->regs[i]);
6986 }
6987
6988 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
6989 live = st->stack[i].spilled_ptr.live;
6990 /* liveness must not touch this stack slot anymore */
6991 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
6992 if (!(live & REG_LIVE_READ)) {
6993 __mark_reg_not_init(&st->stack[i].spilled_ptr);
6994 for (j = 0; j < BPF_REG_SIZE; j++)
6995 st->stack[i].slot_type[j] = STACK_INVALID;
6996 }
6997 }
6998}
6999
7000static void clean_verifier_state(struct bpf_verifier_env *env,
7001 struct bpf_verifier_state *st)
7002{
7003 int i;
7004
7005 if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
7006 /* all regs in this state in all frames were already marked */
7007 return;
7008
7009 for (i = 0; i <= st->curframe; i++)
7010 clean_func_state(env, st->frame[i]);
7011}
7012
7013/* the parentage chains form a tree.
7014 * the verifier states are added to state lists at given insn and
7015 * pushed into state stack for future exploration.
7016 * when the verifier reaches bpf_exit insn some of the verifer states
7017 * stored in the state lists have their final liveness state already,
7018 * but a lot of states will get revised from liveness point of view when
7019 * the verifier explores other branches.
7020 * Example:
7021 * 1: r0 = 1
7022 * 2: if r1 == 100 goto pc+1
7023 * 3: r0 = 2
7024 * 4: exit
7025 * when the verifier reaches exit insn the register r0 in the state list of
7026 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
7027 * of insn 2 and goes exploring further. At the insn 4 it will walk the
7028 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
7029 *
7030 * Since the verifier pushes the branch states as it sees them while exploring
7031 * the program the condition of walking the branch instruction for the second
7032 * time means that all states below this branch were already explored and
7033 * their final liveness markes are already propagated.
7034 * Hence when the verifier completes the search of state list in is_state_visited()
7035 * we can call this clean_live_states() function to mark all liveness states
7036 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state'
7037 * will not be used.
7038 * This function also clears the registers and stack for states that !READ
7039 * to simplify state merging.
7040 *
7041 * Important note here that walking the same branch instruction in the callee
7042 * doesn't meant that the states are DONE. The verifier has to compare
7043 * the callsites
7044 */
7045static void clean_live_states(struct bpf_verifier_env *env, int insn,
7046 struct bpf_verifier_state *cur)
7047{
7048 struct bpf_verifier_state_list *sl;
7049 int i;
7050
5d839021 7051 sl = *explored_state(env, insn);
a8f500af 7052 while (sl) {
2589726d
AS
7053 if (sl->state.branches)
7054 goto next;
dc2a4ebc
AS
7055 if (sl->state.insn_idx != insn ||
7056 sl->state.curframe != cur->curframe)
9242b5f5
AS
7057 goto next;
7058 for (i = 0; i <= cur->curframe; i++)
7059 if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
7060 goto next;
7061 clean_verifier_state(env, &sl->state);
7062next:
7063 sl = sl->next;
7064 }
7065}
7066
f1174f77 7067/* Returns true if (rold safe implies rcur safe) */
1b688a19
EC
7068static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
7069 struct idpair *idmap)
f1174f77 7070{
f4d7e40a
AS
7071 bool equal;
7072
dc503a8a
EC
7073 if (!(rold->live & REG_LIVE_READ))
7074 /* explored state didn't use this */
7075 return true;
7076
679c782d 7077 equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0;
f4d7e40a
AS
7078
7079 if (rold->type == PTR_TO_STACK)
7080 /* two stack pointers are equal only if they're pointing to
7081 * the same stack frame, since fp-8 in foo != fp-8 in bar
7082 */
7083 return equal && rold->frameno == rcur->frameno;
7084
7085 if (equal)
969bf05e
AS
7086 return true;
7087
f1174f77
EC
7088 if (rold->type == NOT_INIT)
7089 /* explored state can't have used this */
969bf05e 7090 return true;
f1174f77
EC
7091 if (rcur->type == NOT_INIT)
7092 return false;
7093 switch (rold->type) {
7094 case SCALAR_VALUE:
7095 if (rcur->type == SCALAR_VALUE) {
b5dc0163
AS
7096 if (!rold->precise && !rcur->precise)
7097 return true;
f1174f77
EC
7098 /* new val must satisfy old val knowledge */
7099 return range_within(rold, rcur) &&
7100 tnum_in(rold->var_off, rcur->var_off);
7101 } else {
179d1c56
JH
7102 /* We're trying to use a pointer in place of a scalar.
7103 * Even if the scalar was unbounded, this could lead to
7104 * pointer leaks because scalars are allowed to leak
7105 * while pointers are not. We could make this safe in
7106 * special cases if root is calling us, but it's
7107 * probably not worth the hassle.
f1174f77 7108 */
179d1c56 7109 return false;
f1174f77
EC
7110 }
7111 case PTR_TO_MAP_VALUE:
1b688a19
EC
7112 /* If the new min/max/var_off satisfy the old ones and
7113 * everything else matches, we are OK.
d83525ca
AS
7114 * 'id' is not compared, since it's only used for maps with
7115 * bpf_spin_lock inside map element and in such cases if
7116 * the rest of the prog is valid for one map element then
7117 * it's valid for all map elements regardless of the key
7118 * used in bpf_map_lookup()
1b688a19
EC
7119 */
7120 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
7121 range_within(rold, rcur) &&
7122 tnum_in(rold->var_off, rcur->var_off);
f1174f77
EC
7123 case PTR_TO_MAP_VALUE_OR_NULL:
7124 /* a PTR_TO_MAP_VALUE could be safe to use as a
7125 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
7126 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
7127 * checked, doing so could have affected others with the same
7128 * id, and we can't check for that because we lost the id when
7129 * we converted to a PTR_TO_MAP_VALUE.
7130 */
7131 if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
7132 return false;
7133 if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
7134 return false;
7135 /* Check our ids match any regs they're supposed to */
7136 return check_ids(rold->id, rcur->id, idmap);
de8f3a83 7137 case PTR_TO_PACKET_META:
f1174f77 7138 case PTR_TO_PACKET:
de8f3a83 7139 if (rcur->type != rold->type)
f1174f77
EC
7140 return false;
7141 /* We must have at least as much range as the old ptr
7142 * did, so that any accesses which were safe before are
7143 * still safe. This is true even if old range < old off,
7144 * since someone could have accessed through (ptr - k), or
7145 * even done ptr -= k in a register, to get a safe access.
7146 */
7147 if (rold->range > rcur->range)
7148 return false;
7149 /* If the offsets don't match, we can't trust our alignment;
7150 * nor can we be sure that we won't fall out of range.
7151 */
7152 if (rold->off != rcur->off)
7153 return false;
7154 /* id relations must be preserved */
7155 if (rold->id && !check_ids(rold->id, rcur->id, idmap))
7156 return false;
7157 /* new val must satisfy old val knowledge */
7158 return range_within(rold, rcur) &&
7159 tnum_in(rold->var_off, rcur->var_off);
7160 case PTR_TO_CTX:
7161 case CONST_PTR_TO_MAP:
f1174f77 7162 case PTR_TO_PACKET_END:
d58e468b 7163 case PTR_TO_FLOW_KEYS:
c64b7983
JS
7164 case PTR_TO_SOCKET:
7165 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
7166 case PTR_TO_SOCK_COMMON:
7167 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
7168 case PTR_TO_TCP_SOCK:
7169 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 7170 case PTR_TO_XDP_SOCK:
f1174f77
EC
7171 /* Only valid matches are exact, which memcmp() above
7172 * would have accepted
7173 */
7174 default:
7175 /* Don't know what's going on, just say it's not safe */
7176 return false;
7177 }
969bf05e 7178
f1174f77
EC
7179 /* Shouldn't get here; if we do, say it's not safe */
7180 WARN_ON_ONCE(1);
969bf05e
AS
7181 return false;
7182}
7183
f4d7e40a
AS
7184static bool stacksafe(struct bpf_func_state *old,
7185 struct bpf_func_state *cur,
638f5b90
AS
7186 struct idpair *idmap)
7187{
7188 int i, spi;
7189
638f5b90
AS
7190 /* walk slots of the explored stack and ignore any additional
7191 * slots in the current stack, since explored(safe) state
7192 * didn't use them
7193 */
7194 for (i = 0; i < old->allocated_stack; i++) {
7195 spi = i / BPF_REG_SIZE;
7196
b233920c
AS
7197 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
7198 i += BPF_REG_SIZE - 1;
cc2b14d5 7199 /* explored state didn't use this */
fd05e57b 7200 continue;
b233920c 7201 }
cc2b14d5 7202
638f5b90
AS
7203 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
7204 continue;
19e2dbb7
AS
7205
7206 /* explored stack has more populated slots than current stack
7207 * and these slots were used
7208 */
7209 if (i >= cur->allocated_stack)
7210 return false;
7211
cc2b14d5
AS
7212 /* if old state was safe with misc data in the stack
7213 * it will be safe with zero-initialized stack.
7214 * The opposite is not true
7215 */
7216 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
7217 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
7218 continue;
638f5b90
AS
7219 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
7220 cur->stack[spi].slot_type[i % BPF_REG_SIZE])
7221 /* Ex: old explored (safe) state has STACK_SPILL in
7222 * this stack slot, but current has has STACK_MISC ->
7223 * this verifier states are not equivalent,
7224 * return false to continue verification of this path
7225 */
7226 return false;
7227 if (i % BPF_REG_SIZE)
7228 continue;
7229 if (old->stack[spi].slot_type[0] != STACK_SPILL)
7230 continue;
7231 if (!regsafe(&old->stack[spi].spilled_ptr,
7232 &cur->stack[spi].spilled_ptr,
7233 idmap))
7234 /* when explored and current stack slot are both storing
7235 * spilled registers, check that stored pointers types
7236 * are the same as well.
7237 * Ex: explored safe path could have stored
7238 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
7239 * but current path has stored:
7240 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
7241 * such verifier states are not equivalent.
7242 * return false to continue verification of this path
7243 */
7244 return false;
7245 }
7246 return true;
7247}
7248
fd978bf7
JS
7249static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
7250{
7251 if (old->acquired_refs != cur->acquired_refs)
7252 return false;
7253 return !memcmp(old->refs, cur->refs,
7254 sizeof(*old->refs) * old->acquired_refs);
7255}
7256
f1bca824
AS
7257/* compare two verifier states
7258 *
7259 * all states stored in state_list are known to be valid, since
7260 * verifier reached 'bpf_exit' instruction through them
7261 *
7262 * this function is called when verifier exploring different branches of
7263 * execution popped from the state stack. If it sees an old state that has
7264 * more strict register state and more strict stack state then this execution
7265 * branch doesn't need to be explored further, since verifier already
7266 * concluded that more strict state leads to valid finish.
7267 *
7268 * Therefore two states are equivalent if register state is more conservative
7269 * and explored stack state is more conservative than the current one.
7270 * Example:
7271 * explored current
7272 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC)
7273 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC)
7274 *
7275 * In other words if current stack state (one being explored) has more
7276 * valid slots than old one that already passed validation, it means
7277 * the verifier can stop exploring and conclude that current state is valid too
7278 *
7279 * Similarly with registers. If explored state has register type as invalid
7280 * whereas register type in current state is meaningful, it means that
7281 * the current state will reach 'bpf_exit' instruction safely
7282 */
f4d7e40a
AS
7283static bool func_states_equal(struct bpf_func_state *old,
7284 struct bpf_func_state *cur)
f1bca824 7285{
f1174f77
EC
7286 struct idpair *idmap;
7287 bool ret = false;
f1bca824
AS
7288 int i;
7289
f1174f77
EC
7290 idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
7291 /* If we failed to allocate the idmap, just say it's not safe */
7292 if (!idmap)
1a0dc1ac 7293 return false;
f1174f77
EC
7294
7295 for (i = 0; i < MAX_BPF_REG; i++) {
1b688a19 7296 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
f1174f77 7297 goto out_free;
f1bca824
AS
7298 }
7299
638f5b90
AS
7300 if (!stacksafe(old, cur, idmap))
7301 goto out_free;
fd978bf7
JS
7302
7303 if (!refsafe(old, cur))
7304 goto out_free;
f1174f77
EC
7305 ret = true;
7306out_free:
7307 kfree(idmap);
7308 return ret;
f1bca824
AS
7309}
7310
f4d7e40a
AS
7311static bool states_equal(struct bpf_verifier_env *env,
7312 struct bpf_verifier_state *old,
7313 struct bpf_verifier_state *cur)
7314{
7315 int i;
7316
7317 if (old->curframe != cur->curframe)
7318 return false;
7319
979d63d5
DB
7320 /* Verification state from speculative execution simulation
7321 * must never prune a non-speculative execution one.
7322 */
7323 if (old->speculative && !cur->speculative)
7324 return false;
7325
d83525ca
AS
7326 if (old->active_spin_lock != cur->active_spin_lock)
7327 return false;
7328
f4d7e40a
AS
7329 /* for states to be equal callsites have to be the same
7330 * and all frame states need to be equivalent
7331 */
7332 for (i = 0; i <= old->curframe; i++) {
7333 if (old->frame[i]->callsite != cur->frame[i]->callsite)
7334 return false;
7335 if (!func_states_equal(old->frame[i], cur->frame[i]))
7336 return false;
7337 }
7338 return true;
7339}
7340
5327ed3d
JW
7341/* Return 0 if no propagation happened. Return negative error code if error
7342 * happened. Otherwise, return the propagated bit.
7343 */
55e7f3b5
JW
7344static int propagate_liveness_reg(struct bpf_verifier_env *env,
7345 struct bpf_reg_state *reg,
7346 struct bpf_reg_state *parent_reg)
7347{
5327ed3d
JW
7348 u8 parent_flag = parent_reg->live & REG_LIVE_READ;
7349 u8 flag = reg->live & REG_LIVE_READ;
55e7f3b5
JW
7350 int err;
7351
5327ed3d
JW
7352 /* When comes here, read flags of PARENT_REG or REG could be any of
7353 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
7354 * of propagation if PARENT_REG has strongest REG_LIVE_READ64.
7355 */
7356 if (parent_flag == REG_LIVE_READ64 ||
7357 /* Or if there is no read flag from REG. */
7358 !flag ||
7359 /* Or if the read flag from REG is the same as PARENT_REG. */
7360 parent_flag == flag)
55e7f3b5
JW
7361 return 0;
7362
5327ed3d 7363 err = mark_reg_read(env, reg, parent_reg, flag);
55e7f3b5
JW
7364 if (err)
7365 return err;
7366
5327ed3d 7367 return flag;
55e7f3b5
JW
7368}
7369
8e9cd9ce 7370/* A write screens off any subsequent reads; but write marks come from the
f4d7e40a
AS
7371 * straight-line code between a state and its parent. When we arrive at an
7372 * equivalent state (jump target or such) we didn't arrive by the straight-line
7373 * code, so read marks in the state must propagate to the parent regardless
7374 * of the state's write marks. That's what 'parent == state->parent' comparison
679c782d 7375 * in mark_reg_read() is for.
8e9cd9ce 7376 */
f4d7e40a
AS
7377static int propagate_liveness(struct bpf_verifier_env *env,
7378 const struct bpf_verifier_state *vstate,
7379 struct bpf_verifier_state *vparent)
dc503a8a 7380{
3f8cafa4 7381 struct bpf_reg_state *state_reg, *parent_reg;
f4d7e40a 7382 struct bpf_func_state *state, *parent;
3f8cafa4 7383 int i, frame, err = 0;
dc503a8a 7384
f4d7e40a
AS
7385 if (vparent->curframe != vstate->curframe) {
7386 WARN(1, "propagate_live: parent frame %d current frame %d\n",
7387 vparent->curframe, vstate->curframe);
7388 return -EFAULT;
7389 }
dc503a8a
EC
7390 /* Propagate read liveness of registers... */
7391 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
83d16312 7392 for (frame = 0; frame <= vstate->curframe; frame++) {
3f8cafa4
JW
7393 parent = vparent->frame[frame];
7394 state = vstate->frame[frame];
7395 parent_reg = parent->regs;
7396 state_reg = state->regs;
83d16312
JK
7397 /* We don't need to worry about FP liveness, it's read-only */
7398 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
55e7f3b5
JW
7399 err = propagate_liveness_reg(env, &state_reg[i],
7400 &parent_reg[i]);
5327ed3d 7401 if (err < 0)
3f8cafa4 7402 return err;
5327ed3d
JW
7403 if (err == REG_LIVE_READ64)
7404 mark_insn_zext(env, &parent_reg[i]);
dc503a8a 7405 }
f4d7e40a 7406
1b04aee7 7407 /* Propagate stack slots. */
f4d7e40a
AS
7408 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE &&
7409 i < parent->allocated_stack / BPF_REG_SIZE; i++) {
3f8cafa4
JW
7410 parent_reg = &parent->stack[i].spilled_ptr;
7411 state_reg = &state->stack[i].spilled_ptr;
55e7f3b5
JW
7412 err = propagate_liveness_reg(env, state_reg,
7413 parent_reg);
5327ed3d 7414 if (err < 0)
3f8cafa4 7415 return err;
dc503a8a
EC
7416 }
7417 }
5327ed3d 7418 return 0;
dc503a8a
EC
7419}
7420
a3ce685d
AS
7421/* find precise scalars in the previous equivalent state and
7422 * propagate them into the current state
7423 */
7424static int propagate_precision(struct bpf_verifier_env *env,
7425 const struct bpf_verifier_state *old)
7426{
7427 struct bpf_reg_state *state_reg;
7428 struct bpf_func_state *state;
7429 int i, err = 0;
7430
7431 state = old->frame[old->curframe];
7432 state_reg = state->regs;
7433 for (i = 0; i < BPF_REG_FP; i++, state_reg++) {
7434 if (state_reg->type != SCALAR_VALUE ||
7435 !state_reg->precise)
7436 continue;
7437 if (env->log.level & BPF_LOG_LEVEL2)
7438 verbose(env, "propagating r%d\n", i);
7439 err = mark_chain_precision(env, i);
7440 if (err < 0)
7441 return err;
7442 }
7443
7444 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
7445 if (state->stack[i].slot_type[0] != STACK_SPILL)
7446 continue;
7447 state_reg = &state->stack[i].spilled_ptr;
7448 if (state_reg->type != SCALAR_VALUE ||
7449 !state_reg->precise)
7450 continue;
7451 if (env->log.level & BPF_LOG_LEVEL2)
7452 verbose(env, "propagating fp%d\n",
7453 (-i - 1) * BPF_REG_SIZE);
7454 err = mark_chain_precision_stack(env, i);
7455 if (err < 0)
7456 return err;
7457 }
7458 return 0;
7459}
7460
2589726d
AS
7461static bool states_maybe_looping(struct bpf_verifier_state *old,
7462 struct bpf_verifier_state *cur)
7463{
7464 struct bpf_func_state *fold, *fcur;
7465 int i, fr = cur->curframe;
7466
7467 if (old->curframe != fr)
7468 return false;
7469
7470 fold = old->frame[fr];
7471 fcur = cur->frame[fr];
7472 for (i = 0; i < MAX_BPF_REG; i++)
7473 if (memcmp(&fold->regs[i], &fcur->regs[i],
7474 offsetof(struct bpf_reg_state, parent)))
7475 return false;
7476 return true;
7477}
7478
7479
58e2af8b 7480static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
f1bca824 7481{
58e2af8b 7482 struct bpf_verifier_state_list *new_sl;
9f4686c4 7483 struct bpf_verifier_state_list *sl, **pprev;
679c782d 7484 struct bpf_verifier_state *cur = env->cur_state, *new;
ceefbc96 7485 int i, j, err, states_cnt = 0;
10d274e8 7486 bool add_new_state = env->test_state_freq ? true : false;
f1bca824 7487
b5dc0163 7488 cur->last_insn_idx = env->prev_insn_idx;
a8f500af 7489 if (!env->insn_aux_data[insn_idx].prune_point)
f1bca824
AS
7490 /* this 'insn_idx' instruction wasn't marked, so we will not
7491 * be doing state search here
7492 */
7493 return 0;
7494
2589726d
AS
7495 /* bpf progs typically have pruning point every 4 instructions
7496 * http://vger.kernel.org/bpfconf2019.html#session-1
7497 * Do not add new state for future pruning if the verifier hasn't seen
7498 * at least 2 jumps and at least 8 instructions.
7499 * This heuristics helps decrease 'total_states' and 'peak_states' metric.
7500 * In tests that amounts to up to 50% reduction into total verifier
7501 * memory consumption and 20% verifier time speedup.
7502 */
7503 if (env->jmps_processed - env->prev_jmps_processed >= 2 &&
7504 env->insn_processed - env->prev_insn_processed >= 8)
7505 add_new_state = true;
7506
a8f500af
AS
7507 pprev = explored_state(env, insn_idx);
7508 sl = *pprev;
7509
9242b5f5
AS
7510 clean_live_states(env, insn_idx, cur);
7511
a8f500af 7512 while (sl) {
dc2a4ebc
AS
7513 states_cnt++;
7514 if (sl->state.insn_idx != insn_idx)
7515 goto next;
2589726d
AS
7516 if (sl->state.branches) {
7517 if (states_maybe_looping(&sl->state, cur) &&
7518 states_equal(env, &sl->state, cur)) {
7519 verbose_linfo(env, insn_idx, "; ");
7520 verbose(env, "infinite loop detected at insn %d\n", insn_idx);
7521 return -EINVAL;
7522 }
7523 /* if the verifier is processing a loop, avoid adding new state
7524 * too often, since different loop iterations have distinct
7525 * states and may not help future pruning.
7526 * This threshold shouldn't be too low to make sure that
7527 * a loop with large bound will be rejected quickly.
7528 * The most abusive loop will be:
7529 * r1 += 1
7530 * if r1 < 1000000 goto pc-2
7531 * 1M insn_procssed limit / 100 == 10k peak states.
7532 * This threshold shouldn't be too high either, since states
7533 * at the end of the loop are likely to be useful in pruning.
7534 */
7535 if (env->jmps_processed - env->prev_jmps_processed < 20 &&
7536 env->insn_processed - env->prev_insn_processed < 100)
7537 add_new_state = false;
7538 goto miss;
7539 }
638f5b90 7540 if (states_equal(env, &sl->state, cur)) {
9f4686c4 7541 sl->hit_cnt++;
f1bca824 7542 /* reached equivalent register/stack state,
dc503a8a
EC
7543 * prune the search.
7544 * Registers read by the continuation are read by us.
8e9cd9ce
EC
7545 * If we have any write marks in env->cur_state, they
7546 * will prevent corresponding reads in the continuation
7547 * from reaching our parent (an explored_state). Our
7548 * own state will get the read marks recorded, but
7549 * they'll be immediately forgotten as we're pruning
7550 * this state and will pop a new one.
f1bca824 7551 */
f4d7e40a 7552 err = propagate_liveness(env, &sl->state, cur);
a3ce685d
AS
7553
7554 /* if previous state reached the exit with precision and
7555 * current state is equivalent to it (except precsion marks)
7556 * the precision needs to be propagated back in
7557 * the current state.
7558 */
7559 err = err ? : push_jmp_history(env, cur);
7560 err = err ? : propagate_precision(env, &sl->state);
f4d7e40a
AS
7561 if (err)
7562 return err;
f1bca824 7563 return 1;
dc503a8a 7564 }
2589726d
AS
7565miss:
7566 /* when new state is not going to be added do not increase miss count.
7567 * Otherwise several loop iterations will remove the state
7568 * recorded earlier. The goal of these heuristics is to have
7569 * states from some iterations of the loop (some in the beginning
7570 * and some at the end) to help pruning.
7571 */
7572 if (add_new_state)
7573 sl->miss_cnt++;
9f4686c4
AS
7574 /* heuristic to determine whether this state is beneficial
7575 * to keep checking from state equivalence point of view.
7576 * Higher numbers increase max_states_per_insn and verification time,
7577 * but do not meaningfully decrease insn_processed.
7578 */
7579 if (sl->miss_cnt > sl->hit_cnt * 3 + 3) {
7580 /* the state is unlikely to be useful. Remove it to
7581 * speed up verification
7582 */
7583 *pprev = sl->next;
7584 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) {
2589726d
AS
7585 u32 br = sl->state.branches;
7586
7587 WARN_ONCE(br,
7588 "BUG live_done but branches_to_explore %d\n",
7589 br);
9f4686c4
AS
7590 free_verifier_state(&sl->state, false);
7591 kfree(sl);
7592 env->peak_states--;
7593 } else {
7594 /* cannot free this state, since parentage chain may
7595 * walk it later. Add it for free_list instead to
7596 * be freed at the end of verification
7597 */
7598 sl->next = env->free_list;
7599 env->free_list = sl;
7600 }
7601 sl = *pprev;
7602 continue;
7603 }
dc2a4ebc 7604next:
9f4686c4
AS
7605 pprev = &sl->next;
7606 sl = *pprev;
f1bca824
AS
7607 }
7608
06ee7115
AS
7609 if (env->max_states_per_insn < states_cnt)
7610 env->max_states_per_insn = states_cnt;
7611
ceefbc96 7612 if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
b5dc0163 7613 return push_jmp_history(env, cur);
ceefbc96 7614
2589726d 7615 if (!add_new_state)
b5dc0163 7616 return push_jmp_history(env, cur);
ceefbc96 7617
2589726d
AS
7618 /* There were no equivalent states, remember the current one.
7619 * Technically the current state is not proven to be safe yet,
f4d7e40a 7620 * but it will either reach outer most bpf_exit (which means it's safe)
2589726d 7621 * or it will be rejected. When there are no loops the verifier won't be
f4d7e40a 7622 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx)
2589726d
AS
7623 * again on the way to bpf_exit.
7624 * When looping the sl->state.branches will be > 0 and this state
7625 * will not be considered for equivalence until branches == 0.
f1bca824 7626 */
638f5b90 7627 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
f1bca824
AS
7628 if (!new_sl)
7629 return -ENOMEM;
06ee7115
AS
7630 env->total_states++;
7631 env->peak_states++;
2589726d
AS
7632 env->prev_jmps_processed = env->jmps_processed;
7633 env->prev_insn_processed = env->insn_processed;
f1bca824
AS
7634
7635 /* add new state to the head of linked list */
679c782d
EC
7636 new = &new_sl->state;
7637 err = copy_verifier_state(new, cur);
1969db47 7638 if (err) {
679c782d 7639 free_verifier_state(new, false);
1969db47
AS
7640 kfree(new_sl);
7641 return err;
7642 }
dc2a4ebc 7643 new->insn_idx = insn_idx;
2589726d
AS
7644 WARN_ONCE(new->branches != 1,
7645 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx);
b5dc0163 7646
2589726d 7647 cur->parent = new;
b5dc0163
AS
7648 cur->first_insn_idx = insn_idx;
7649 clear_jmp_history(cur);
5d839021
AS
7650 new_sl->next = *explored_state(env, insn_idx);
7651 *explored_state(env, insn_idx) = new_sl;
7640ead9
JK
7652 /* connect new state to parentage chain. Current frame needs all
7653 * registers connected. Only r6 - r9 of the callers are alive (pushed
7654 * to the stack implicitly by JITs) so in callers' frames connect just
7655 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
7656 * the state of the call instruction (with WRITTEN set), and r0 comes
7657 * from callee with its full parentage chain, anyway.
7658 */
8e9cd9ce
EC
7659 /* clear write marks in current state: the writes we did are not writes
7660 * our child did, so they don't screen off its reads from us.
7661 * (There are no read marks in current state, because reads always mark
7662 * their parent and current state never has children yet. Only
7663 * explored_states can get read marks.)
7664 */
eea1c227
AS
7665 for (j = 0; j <= cur->curframe; j++) {
7666 for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
7667 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
7668 for (i = 0; i < BPF_REG_FP; i++)
7669 cur->frame[j]->regs[i].live = REG_LIVE_NONE;
7670 }
f4d7e40a
AS
7671
7672 /* all stack frames are accessible from callee, clear them all */
7673 for (j = 0; j <= cur->curframe; j++) {
7674 struct bpf_func_state *frame = cur->frame[j];
679c782d 7675 struct bpf_func_state *newframe = new->frame[j];
f4d7e40a 7676
679c782d 7677 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) {
cc2b14d5 7678 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
679c782d
EC
7679 frame->stack[i].spilled_ptr.parent =
7680 &newframe->stack[i].spilled_ptr;
7681 }
f4d7e40a 7682 }
f1bca824
AS
7683 return 0;
7684}
7685
c64b7983
JS
7686/* Return true if it's OK to have the same insn return a different type. */
7687static bool reg_type_mismatch_ok(enum bpf_reg_type type)
7688{
7689 switch (type) {
7690 case PTR_TO_CTX:
7691 case PTR_TO_SOCKET:
7692 case PTR_TO_SOCKET_OR_NULL:
46f8bc92
MKL
7693 case PTR_TO_SOCK_COMMON:
7694 case PTR_TO_SOCK_COMMON_OR_NULL:
655a51e5
MKL
7695 case PTR_TO_TCP_SOCK:
7696 case PTR_TO_TCP_SOCK_OR_NULL:
fada7fdc 7697 case PTR_TO_XDP_SOCK:
2a02759e 7698 case PTR_TO_BTF_ID:
c64b7983
JS
7699 return false;
7700 default:
7701 return true;
7702 }
7703}
7704
7705/* If an instruction was previously used with particular pointer types, then we
7706 * need to be careful to avoid cases such as the below, where it may be ok
7707 * for one branch accessing the pointer, but not ok for the other branch:
7708 *
7709 * R1 = sock_ptr
7710 * goto X;
7711 * ...
7712 * R1 = some_other_valid_ptr;
7713 * goto X;
7714 * ...
7715 * R2 = *(u32 *)(R1 + 0);
7716 */
7717static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)
7718{
7719 return src != prev && (!reg_type_mismatch_ok(src) ||
7720 !reg_type_mismatch_ok(prev));
7721}
7722
58e2af8b 7723static int do_check(struct bpf_verifier_env *env)
17a52670 7724{
638f5b90 7725 struct bpf_verifier_state *state;
17a52670 7726 struct bpf_insn *insns = env->prog->insnsi;
638f5b90 7727 struct bpf_reg_state *regs;
06ee7115 7728 int insn_cnt = env->prog->len;
17a52670 7729 bool do_print_state = false;
b5dc0163 7730 int prev_insn_idx = -1;
17a52670 7731
d9762e84
MKL
7732 env->prev_linfo = NULL;
7733
638f5b90
AS
7734 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
7735 if (!state)
7736 return -ENOMEM;
f4d7e40a 7737 state->curframe = 0;
979d63d5 7738 state->speculative = false;
2589726d 7739 state->branches = 1;
f4d7e40a
AS
7740 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
7741 if (!state->frame[0]) {
7742 kfree(state);
7743 return -ENOMEM;
7744 }
7745 env->cur_state = state;
7746 init_func_state(env, state->frame[0],
7747 BPF_MAIN_FUNC /* callsite */,
7748 0 /* frameno */,
7749 0 /* subprogno, zero == main subprog */);
c08435ec 7750
8c1b6e69
AS
7751 if (btf_check_func_arg_match(env, 0))
7752 return -EINVAL;
7753
17a52670
AS
7754 for (;;) {
7755 struct bpf_insn *insn;
7756 u8 class;
7757 int err;
7758
b5dc0163 7759 env->prev_insn_idx = prev_insn_idx;
c08435ec 7760 if (env->insn_idx >= insn_cnt) {
61bd5218 7761 verbose(env, "invalid insn idx %d insn_cnt %d\n",
c08435ec 7762 env->insn_idx, insn_cnt);
17a52670
AS
7763 return -EFAULT;
7764 }
7765
c08435ec 7766 insn = &insns[env->insn_idx];
17a52670
AS
7767 class = BPF_CLASS(insn->code);
7768
06ee7115 7769 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
61bd5218
JK
7770 verbose(env,
7771 "BPF program is too large. Processed %d insn\n",
06ee7115 7772 env->insn_processed);
17a52670
AS
7773 return -E2BIG;
7774 }
7775
c08435ec 7776 err = is_state_visited(env, env->insn_idx);
f1bca824
AS
7777 if (err < 0)
7778 return err;
7779 if (err == 1) {
7780 /* found equivalent state, can prune the search */
06ee7115 7781 if (env->log.level & BPF_LOG_LEVEL) {
f1bca824 7782 if (do_print_state)
979d63d5
DB
7783 verbose(env, "\nfrom %d to %d%s: safe\n",
7784 env->prev_insn_idx, env->insn_idx,
7785 env->cur_state->speculative ?
7786 " (speculative execution)" : "");
f1bca824 7787 else
c08435ec 7788 verbose(env, "%d: safe\n", env->insn_idx);
f1bca824
AS
7789 }
7790 goto process_bpf_exit;
7791 }
7792
c3494801
AS
7793 if (signal_pending(current))
7794 return -EAGAIN;
7795
3c2ce60b
DB
7796 if (need_resched())
7797 cond_resched();
7798
06ee7115
AS
7799 if (env->log.level & BPF_LOG_LEVEL2 ||
7800 (env->log.level & BPF_LOG_LEVEL && do_print_state)) {
7801 if (env->log.level & BPF_LOG_LEVEL2)
c08435ec 7802 verbose(env, "%d:", env->insn_idx);
c5fc9692 7803 else
979d63d5
DB
7804 verbose(env, "\nfrom %d to %d%s:",
7805 env->prev_insn_idx, env->insn_idx,
7806 env->cur_state->speculative ?
7807 " (speculative execution)" : "");
f4d7e40a 7808 print_verifier_state(env, state->frame[state->curframe]);
17a52670
AS
7809 do_print_state = false;
7810 }
7811
06ee7115 7812 if (env->log.level & BPF_LOG_LEVEL) {
7105e828
DB
7813 const struct bpf_insn_cbs cbs = {
7814 .cb_print = verbose,
abe08840 7815 .private_data = env,
7105e828
DB
7816 };
7817
c08435ec
DB
7818 verbose_linfo(env, env->insn_idx, "; ");
7819 verbose(env, "%d: ", env->insn_idx);
abe08840 7820 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
17a52670
AS
7821 }
7822
cae1927c 7823 if (bpf_prog_is_dev_bound(env->prog->aux)) {
c08435ec
DB
7824 err = bpf_prog_offload_verify_insn(env, env->insn_idx,
7825 env->prev_insn_idx);
cae1927c
JK
7826 if (err)
7827 return err;
7828 }
13a27dfc 7829
638f5b90 7830 regs = cur_regs(env);
c08435ec 7831 env->insn_aux_data[env->insn_idx].seen = true;
b5dc0163 7832 prev_insn_idx = env->insn_idx;
fd978bf7 7833
17a52670 7834 if (class == BPF_ALU || class == BPF_ALU64) {
1be7f75d 7835 err = check_alu_op(env, insn);
17a52670
AS
7836 if (err)
7837 return err;
7838
7839 } else if (class == BPF_LDX) {
3df126f3 7840 enum bpf_reg_type *prev_src_type, src_reg_type;
9bac3d6d
AS
7841
7842 /* check for reserved fields is already done */
7843
17a52670 7844 /* check src operand */
dc503a8a 7845 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
7846 if (err)
7847 return err;
7848
dc503a8a 7849 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
17a52670
AS
7850 if (err)
7851 return err;
7852
725f9dcd
AS
7853 src_reg_type = regs[insn->src_reg].type;
7854
17a52670
AS
7855 /* check that memory (src_reg + off) is readable,
7856 * the state of dst_reg will be updated by this func
7857 */
c08435ec
DB
7858 err = check_mem_access(env, env->insn_idx, insn->src_reg,
7859 insn->off, BPF_SIZE(insn->code),
7860 BPF_READ, insn->dst_reg, false);
17a52670
AS
7861 if (err)
7862 return err;
7863
c08435ec 7864 prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3df126f3
JK
7865
7866 if (*prev_src_type == NOT_INIT) {
9bac3d6d
AS
7867 /* saw a valid insn
7868 * dst_reg = *(u32 *)(src_reg + off)
3df126f3 7869 * save type to validate intersecting paths
9bac3d6d 7870 */
3df126f3 7871 *prev_src_type = src_reg_type;
9bac3d6d 7872
c64b7983 7873 } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) {
9bac3d6d
AS
7874 /* ABuser program is trying to use the same insn
7875 * dst_reg = *(u32*) (src_reg + off)
7876 * with different pointer types:
7877 * src_reg == ctx in one branch and
7878 * src_reg == stack|map in some other branch.
7879 * Reject it.
7880 */
61bd5218 7881 verbose(env, "same insn cannot be used with different pointers\n");
9bac3d6d
AS
7882 return -EINVAL;
7883 }
7884
17a52670 7885 } else if (class == BPF_STX) {
3df126f3 7886 enum bpf_reg_type *prev_dst_type, dst_reg_type;
d691f9e8 7887
17a52670 7888 if (BPF_MODE(insn->code) == BPF_XADD) {
c08435ec 7889 err = check_xadd(env, env->insn_idx, insn);
17a52670
AS
7890 if (err)
7891 return err;
c08435ec 7892 env->insn_idx++;
17a52670
AS
7893 continue;
7894 }
7895
17a52670 7896 /* check src1 operand */
dc503a8a 7897 err = check_reg_arg(env, insn->src_reg, SRC_OP);
17a52670
AS
7898 if (err)
7899 return err;
7900 /* check src2 operand */
dc503a8a 7901 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
7902 if (err)
7903 return err;
7904
d691f9e8
AS
7905 dst_reg_type = regs[insn->dst_reg].type;
7906
17a52670 7907 /* check that memory (dst_reg + off) is writeable */
c08435ec
DB
7908 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
7909 insn->off, BPF_SIZE(insn->code),
7910 BPF_WRITE, insn->src_reg, false);
17a52670
AS
7911 if (err)
7912 return err;
7913
c08435ec 7914 prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
3df126f3
JK
7915
7916 if (*prev_dst_type == NOT_INIT) {
7917 *prev_dst_type = dst_reg_type;
c64b7983 7918 } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) {
61bd5218 7919 verbose(env, "same insn cannot be used with different pointers\n");
d691f9e8
AS
7920 return -EINVAL;
7921 }
7922
17a52670
AS
7923 } else if (class == BPF_ST) {
7924 if (BPF_MODE(insn->code) != BPF_MEM ||
7925 insn->src_reg != BPF_REG_0) {
61bd5218 7926 verbose(env, "BPF_ST uses reserved fields\n");
17a52670
AS
7927 return -EINVAL;
7928 }
7929 /* check src operand */
dc503a8a 7930 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
17a52670
AS
7931 if (err)
7932 return err;
7933
f37a8cb8 7934 if (is_ctx_reg(env, insn->dst_reg)) {
9d2be44a 7935 verbose(env, "BPF_ST stores into R%d %s is not allowed\n",
2a159c6f
DB
7936 insn->dst_reg,
7937 reg_type_str[reg_state(env, insn->dst_reg)->type]);
f37a8cb8
DB
7938 return -EACCES;
7939 }
7940
17a52670 7941 /* check that memory (dst_reg + off) is writeable */
c08435ec
DB
7942 err = check_mem_access(env, env->insn_idx, insn->dst_reg,
7943 insn->off, BPF_SIZE(insn->code),
7944 BPF_WRITE, -1, false);
17a52670
AS
7945 if (err)
7946 return err;
7947
092ed096 7948 } else if (class == BPF_JMP || class == BPF_JMP32) {
17a52670
AS
7949 u8 opcode = BPF_OP(insn->code);
7950
2589726d 7951 env->jmps_processed++;
17a52670
AS
7952 if (opcode == BPF_CALL) {
7953 if (BPF_SRC(insn->code) != BPF_K ||
7954 insn->off != 0 ||
f4d7e40a
AS
7955 (insn->src_reg != BPF_REG_0 &&
7956 insn->src_reg != BPF_PSEUDO_CALL) ||
092ed096
JW
7957 insn->dst_reg != BPF_REG_0 ||
7958 class == BPF_JMP32) {
61bd5218 7959 verbose(env, "BPF_CALL uses reserved fields\n");
17a52670
AS
7960 return -EINVAL;
7961 }
7962
d83525ca
AS
7963 if (env->cur_state->active_spin_lock &&
7964 (insn->src_reg == BPF_PSEUDO_CALL ||
7965 insn->imm != BPF_FUNC_spin_unlock)) {
7966 verbose(env, "function calls are not allowed while holding a lock\n");
7967 return -EINVAL;
7968 }
f4d7e40a 7969 if (insn->src_reg == BPF_PSEUDO_CALL)
c08435ec 7970 err = check_func_call(env, insn, &env->insn_idx);
f4d7e40a 7971 else
c08435ec 7972 err = check_helper_call(env, insn->imm, env->insn_idx);
17a52670
AS
7973 if (err)
7974 return err;
7975
7976 } else if (opcode == BPF_JA) {
7977 if (BPF_SRC(insn->code) != BPF_K ||
7978 insn->imm != 0 ||
7979 insn->src_reg != BPF_REG_0 ||
092ed096
JW
7980 insn->dst_reg != BPF_REG_0 ||
7981 class == BPF_JMP32) {
61bd5218 7982 verbose(env, "BPF_JA uses reserved fields\n");
17a52670
AS
7983 return -EINVAL;
7984 }
7985
c08435ec 7986 env->insn_idx += insn->off + 1;
17a52670
AS
7987 continue;
7988
7989 } else if (opcode == BPF_EXIT) {
7990 if (BPF_SRC(insn->code) != BPF_K ||
7991 insn->imm != 0 ||
7992 insn->src_reg != BPF_REG_0 ||
092ed096
JW
7993 insn->dst_reg != BPF_REG_0 ||
7994 class == BPF_JMP32) {
61bd5218 7995 verbose(env, "BPF_EXIT uses reserved fields\n");
17a52670
AS
7996 return -EINVAL;
7997 }
7998
d83525ca
AS
7999 if (env->cur_state->active_spin_lock) {
8000 verbose(env, "bpf_spin_unlock is missing\n");
8001 return -EINVAL;
8002 }
8003
f4d7e40a
AS
8004 if (state->curframe) {
8005 /* exit from nested function */
c08435ec 8006 err = prepare_func_exit(env, &env->insn_idx);
f4d7e40a
AS
8007 if (err)
8008 return err;
8009 do_print_state = true;
8010 continue;
8011 }
8012
fd978bf7
JS
8013 err = check_reference_leak(env);
8014 if (err)
8015 return err;
8016
17a52670
AS
8017 /* eBPF calling convetion is such that R0 is used
8018 * to return the value from eBPF program.
8019 * Make sure that it's readable at this time
8020 * of bpf_exit, which means that program wrote
8021 * something into it earlier
8022 */
dc503a8a 8023 err = check_reg_arg(env, BPF_REG_0, SRC_OP);
17a52670
AS
8024 if (err)
8025 return err;
8026
1be7f75d 8027 if (is_pointer_value(env, BPF_REG_0)) {
61bd5218 8028 verbose(env, "R0 leaks addr as return value\n");
1be7f75d
AS
8029 return -EACCES;
8030 }
8031
390ee7e2
AS
8032 err = check_return_code(env);
8033 if (err)
8034 return err;
f1bca824 8035process_bpf_exit:
2589726d 8036 update_branch_counts(env, env->cur_state);
b5dc0163 8037 err = pop_stack(env, &prev_insn_idx,
c08435ec 8038 &env->insn_idx);
638f5b90
AS
8039 if (err < 0) {
8040 if (err != -ENOENT)
8041 return err;
17a52670
AS
8042 break;
8043 } else {
8044 do_print_state = true;
8045 continue;
8046 }
8047 } else {
c08435ec 8048 err = check_cond_jmp_op(env, insn, &env->insn_idx);
17a52670
AS
8049 if (err)
8050 return err;
8051 }
8052 } else if (class == BPF_LD) {
8053 u8 mode = BPF_MODE(insn->code);
8054
8055 if (mode == BPF_ABS || mode == BPF_IND) {
ddd872bc
AS
8056 err = check_ld_abs(env, insn);
8057 if (err)
8058 return err;
8059
17a52670
AS
8060 } else if (mode == BPF_IMM) {
8061 err = check_ld_imm(env, insn);
8062 if (err)
8063 return err;
8064
c08435ec
DB
8065 env->insn_idx++;
8066 env->insn_aux_data[env->insn_idx].seen = true;
17a52670 8067 } else {
61bd5218 8068 verbose(env, "invalid BPF_LD mode\n");
17a52670
AS
8069 return -EINVAL;
8070 }
8071 } else {
61bd5218 8072 verbose(env, "unknown insn class %d\n", class);
17a52670
AS
8073 return -EINVAL;
8074 }
8075
c08435ec 8076 env->insn_idx++;
17a52670
AS
8077 }
8078
9c8105bd 8079 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
17a52670
AS
8080 return 0;
8081}
8082
56f668df
MKL
8083static int check_map_prealloc(struct bpf_map *map)
8084{
8085 return (map->map_type != BPF_MAP_TYPE_HASH &&
bcc6b1b7
MKL
8086 map->map_type != BPF_MAP_TYPE_PERCPU_HASH &&
8087 map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) ||
56f668df
MKL
8088 !(map->map_flags & BPF_F_NO_PREALLOC);
8089}
8090
d83525ca
AS
8091static bool is_tracing_prog_type(enum bpf_prog_type type)
8092{
8093 switch (type) {
8094 case BPF_PROG_TYPE_KPROBE:
8095 case BPF_PROG_TYPE_TRACEPOINT:
8096 case BPF_PROG_TYPE_PERF_EVENT:
8097 case BPF_PROG_TYPE_RAW_TRACEPOINT:
8098 return true;
8099 default:
8100 return false;
8101 }
8102}
8103
61bd5218
JK
8104static int check_map_prog_compatibility(struct bpf_verifier_env *env,
8105 struct bpf_map *map,
fdc15d38
AS
8106 struct bpf_prog *prog)
8107
8108{
56f668df
MKL
8109 /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use
8110 * preallocated hash maps, since doing memory allocation
8111 * in overflow_handler can crash depending on where nmi got
8112 * triggered.
8113 */
8114 if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
8115 if (!check_map_prealloc(map)) {
61bd5218 8116 verbose(env, "perf_event programs can only use preallocated hash map\n");
56f668df
MKL
8117 return -EINVAL;
8118 }
8119 if (map->inner_map_meta &&
8120 !check_map_prealloc(map->inner_map_meta)) {
61bd5218 8121 verbose(env, "perf_event programs can only use preallocated inner hash map\n");
56f668df
MKL
8122 return -EINVAL;
8123 }
fdc15d38 8124 }
a3884572 8125
d83525ca
AS
8126 if ((is_tracing_prog_type(prog->type) ||
8127 prog->type == BPF_PROG_TYPE_SOCKET_FILTER) &&
8128 map_value_has_spin_lock(map)) {
8129 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
8130 return -EINVAL;
8131 }
8132
a3884572 8133 if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
09728266 8134 !bpf_offload_prog_map_match(prog, map)) {
a3884572
JK
8135 verbose(env, "offload device mismatch between prog and map\n");
8136 return -EINVAL;
8137 }
8138
fdc15d38
AS
8139 return 0;
8140}
8141
b741f163
RG
8142static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
8143{
8144 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
8145 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
8146}
8147
0246e64d
AS
8148/* look for pseudo eBPF instructions that access map FDs and
8149 * replace them with actual map pointers
8150 */
58e2af8b 8151static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
0246e64d
AS
8152{
8153 struct bpf_insn *insn = env->prog->insnsi;
8154 int insn_cnt = env->prog->len;
fdc15d38 8155 int i, j, err;
0246e64d 8156
f1f7714e 8157 err = bpf_prog_calc_tag(env->prog);
aafe6ae9
DB
8158 if (err)
8159 return err;
8160
0246e64d 8161 for (i = 0; i < insn_cnt; i++, insn++) {
9bac3d6d 8162 if (BPF_CLASS(insn->code) == BPF_LDX &&
d691f9e8 8163 (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
61bd5218 8164 verbose(env, "BPF_LDX uses reserved fields\n");
9bac3d6d
AS
8165 return -EINVAL;
8166 }
8167
d691f9e8
AS
8168 if (BPF_CLASS(insn->code) == BPF_STX &&
8169 ((BPF_MODE(insn->code) != BPF_MEM &&
8170 BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) {
61bd5218 8171 verbose(env, "BPF_STX uses reserved fields\n");
d691f9e8
AS
8172 return -EINVAL;
8173 }
8174
0246e64d 8175 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
d8eca5bb 8176 struct bpf_insn_aux_data *aux;
0246e64d
AS
8177 struct bpf_map *map;
8178 struct fd f;
d8eca5bb 8179 u64 addr;
0246e64d
AS
8180
8181 if (i == insn_cnt - 1 || insn[1].code != 0 ||
8182 insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
8183 insn[1].off != 0) {
61bd5218 8184 verbose(env, "invalid bpf_ld_imm64 insn\n");
0246e64d
AS
8185 return -EINVAL;
8186 }
8187
d8eca5bb 8188 if (insn[0].src_reg == 0)
0246e64d
AS
8189 /* valid generic load 64-bit imm */
8190 goto next_insn;
8191
d8eca5bb
DB
8192 /* In final convert_pseudo_ld_imm64() step, this is
8193 * converted into regular 64-bit imm load insn.
8194 */
8195 if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
8196 insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
8197 (insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
8198 insn[1].imm != 0)) {
8199 verbose(env,
8200 "unrecognized bpf_ld_imm64 insn\n");
0246e64d
AS
8201 return -EINVAL;
8202 }
8203
20182390 8204 f = fdget(insn[0].imm);
c2101297 8205 map = __bpf_map_get(f);
0246e64d 8206 if (IS_ERR(map)) {
61bd5218 8207 verbose(env, "fd %d is not pointing to valid bpf_map\n",
20182390 8208 insn[0].imm);
0246e64d
AS
8209 return PTR_ERR(map);
8210 }
8211
61bd5218 8212 err = check_map_prog_compatibility(env, map, env->prog);
fdc15d38
AS
8213 if (err) {
8214 fdput(f);
8215 return err;
8216 }
8217
d8eca5bb
DB
8218 aux = &env->insn_aux_data[i];
8219 if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
8220 addr = (unsigned long)map;
8221 } else {
8222 u32 off = insn[1].imm;
8223
8224 if (off >= BPF_MAX_VAR_OFF) {
8225 verbose(env, "direct value offset of %u is not allowed\n", off);
8226 fdput(f);
8227 return -EINVAL;
8228 }
8229
8230 if (!map->ops->map_direct_value_addr) {
8231 verbose(env, "no direct value access support for this map type\n");
8232 fdput(f);
8233 return -EINVAL;
8234 }
8235
8236 err = map->ops->map_direct_value_addr(map, &addr, off);
8237 if (err) {
8238 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n",
8239 map->value_size, off);
8240 fdput(f);
8241 return err;
8242 }
8243
8244 aux->map_off = off;
8245 addr += off;
8246 }
8247
8248 insn[0].imm = (u32)addr;
8249 insn[1].imm = addr >> 32;
0246e64d
AS
8250
8251 /* check whether we recorded this map already */
d8eca5bb 8252 for (j = 0; j < env->used_map_cnt; j++) {
0246e64d 8253 if (env->used_maps[j] == map) {
d8eca5bb 8254 aux->map_index = j;
0246e64d
AS
8255 fdput(f);
8256 goto next_insn;
8257 }
d8eca5bb 8258 }
0246e64d
AS
8259
8260 if (env->used_map_cnt >= MAX_USED_MAPS) {
8261 fdput(f);
8262 return -E2BIG;
8263 }
8264
0246e64d
AS
8265 /* hold the map. If the program is rejected by verifier,
8266 * the map will be released by release_maps() or it
8267 * will be used by the valid program until it's unloaded
ab7f5bf0 8268 * and all maps are released in free_used_maps()
0246e64d 8269 */
1e0bd5a0 8270 bpf_map_inc(map);
d8eca5bb
DB
8271
8272 aux->map_index = env->used_map_cnt;
92117d84
AS
8273 env->used_maps[env->used_map_cnt++] = map;
8274
b741f163 8275 if (bpf_map_is_cgroup_storage(map) &&
e4730423 8276 bpf_cgroup_storage_assign(env->prog->aux, map)) {
b741f163 8277 verbose(env, "only one cgroup storage of each type is allowed\n");
de9cbbaa
RG
8278 fdput(f);
8279 return -EBUSY;
8280 }
8281
0246e64d
AS
8282 fdput(f);
8283next_insn:
8284 insn++;
8285 i++;
5e581dad
DB
8286 continue;
8287 }
8288
8289 /* Basic sanity check before we invest more work here. */
8290 if (!bpf_opcode_in_insntable(insn->code)) {
8291 verbose(env, "unknown opcode %02x\n", insn->code);
8292 return -EINVAL;
0246e64d
AS
8293 }
8294 }
8295
8296 /* now all pseudo BPF_LD_IMM64 instructions load valid
8297 * 'struct bpf_map *' into a register instead of user map_fd.
8298 * These pointers will be used later by verifier to validate map access.
8299 */
8300 return 0;
8301}
8302
8303/* drop refcnt of maps used by the rejected program */
58e2af8b 8304static void release_maps(struct bpf_verifier_env *env)
0246e64d 8305{
a2ea0746
DB
8306 __bpf_free_used_maps(env->prog->aux, env->used_maps,
8307 env->used_map_cnt);
0246e64d
AS
8308}
8309
8310/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
58e2af8b 8311static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
0246e64d
AS
8312{
8313 struct bpf_insn *insn = env->prog->insnsi;
8314 int insn_cnt = env->prog->len;
8315 int i;
8316
8317 for (i = 0; i < insn_cnt; i++, insn++)
8318 if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
8319 insn->src_reg = 0;
8320}
8321
8041902d
AS
8322/* single env->prog->insni[off] instruction was replaced with the range
8323 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
8324 * [0, off) and [off, end) to new locations, so the patched range stays zero
8325 */
b325fbca
JW
8326static int adjust_insn_aux_data(struct bpf_verifier_env *env,
8327 struct bpf_prog *new_prog, u32 off, u32 cnt)
8041902d
AS
8328{
8329 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
b325fbca
JW
8330 struct bpf_insn *insn = new_prog->insnsi;
8331 u32 prog_len;
c131187d 8332 int i;
8041902d 8333
b325fbca
JW
8334 /* aux info at OFF always needs adjustment, no matter fast path
8335 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the
8336 * original insn at old prog.
8337 */
8338 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
8339
8041902d
AS
8340 if (cnt == 1)
8341 return 0;
b325fbca 8342 prog_len = new_prog->len;
fad953ce
KC
8343 new_data = vzalloc(array_size(prog_len,
8344 sizeof(struct bpf_insn_aux_data)));
8041902d
AS
8345 if (!new_data)
8346 return -ENOMEM;
8347 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
8348 memcpy(new_data + off + cnt - 1, old_data + off,
8349 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
b325fbca 8350 for (i = off; i < off + cnt - 1; i++) {
c131187d 8351 new_data[i].seen = true;
b325fbca
JW
8352 new_data[i].zext_dst = insn_has_def32(env, insn + i);
8353 }
8041902d
AS
8354 env->insn_aux_data = new_data;
8355 vfree(old_data);
8356 return 0;
8357}
8358
cc8b0b92
AS
8359static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
8360{
8361 int i;
8362
8363 if (len == 1)
8364 return;
4cb3d99c
JW
8365 /* NOTE: fake 'exit' subprog should be updated as well. */
8366 for (i = 0; i <= env->subprog_cnt; i++) {
afd59424 8367 if (env->subprog_info[i].start <= off)
cc8b0b92 8368 continue;
9c8105bd 8369 env->subprog_info[i].start += len - 1;
cc8b0b92
AS
8370 }
8371}
8372
8041902d
AS
8373static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
8374 const struct bpf_insn *patch, u32 len)
8375{
8376 struct bpf_prog *new_prog;
8377
8378 new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
4f73379e
AS
8379 if (IS_ERR(new_prog)) {
8380 if (PTR_ERR(new_prog) == -ERANGE)
8381 verbose(env,
8382 "insn %d cannot be patched due to 16-bit range\n",
8383 env->insn_aux_data[off].orig_idx);
8041902d 8384 return NULL;
4f73379e 8385 }
b325fbca 8386 if (adjust_insn_aux_data(env, new_prog, off, len))
8041902d 8387 return NULL;
cc8b0b92 8388 adjust_subprog_starts(env, off, len);
8041902d
AS
8389 return new_prog;
8390}
8391
52875a04
JK
8392static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env,
8393 u32 off, u32 cnt)
8394{
8395 int i, j;
8396
8397 /* find first prog starting at or after off (first to remove) */
8398 for (i = 0; i < env->subprog_cnt; i++)
8399 if (env->subprog_info[i].start >= off)
8400 break;
8401 /* find first prog starting at or after off + cnt (first to stay) */
8402 for (j = i; j < env->subprog_cnt; j++)
8403 if (env->subprog_info[j].start >= off + cnt)
8404 break;
8405 /* if j doesn't start exactly at off + cnt, we are just removing
8406 * the front of previous prog
8407 */
8408 if (env->subprog_info[j].start != off + cnt)
8409 j--;
8410
8411 if (j > i) {
8412 struct bpf_prog_aux *aux = env->prog->aux;
8413 int move;
8414
8415 /* move fake 'exit' subprog as well */
8416 move = env->subprog_cnt + 1 - j;
8417
8418 memmove(env->subprog_info + i,
8419 env->subprog_info + j,
8420 sizeof(*env->subprog_info) * move);
8421 env->subprog_cnt -= j - i;
8422
8423 /* remove func_info */
8424 if (aux->func_info) {
8425 move = aux->func_info_cnt - j;
8426
8427 memmove(aux->func_info + i,
8428 aux->func_info + j,
8429 sizeof(*aux->func_info) * move);
8430 aux->func_info_cnt -= j - i;
8431 /* func_info->insn_off is set after all code rewrites,
8432 * in adjust_btf_func() - no need to adjust
8433 */
8434 }
8435 } else {
8436 /* convert i from "first prog to remove" to "first to adjust" */
8437 if (env->subprog_info[i].start == off)
8438 i++;
8439 }
8440
8441 /* update fake 'exit' subprog as well */
8442 for (; i <= env->subprog_cnt; i++)
8443 env->subprog_info[i].start -= cnt;
8444
8445 return 0;
8446}
8447
8448static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off,
8449 u32 cnt)
8450{
8451 struct bpf_prog *prog = env->prog;
8452 u32 i, l_off, l_cnt, nr_linfo;
8453 struct bpf_line_info *linfo;
8454
8455 nr_linfo = prog->aux->nr_linfo;
8456 if (!nr_linfo)
8457 return 0;
8458
8459 linfo = prog->aux->linfo;
8460
8461 /* find first line info to remove, count lines to be removed */
8462 for (i = 0; i < nr_linfo; i++)
8463 if (linfo[i].insn_off >= off)
8464 break;
8465
8466 l_off = i;
8467 l_cnt = 0;
8468 for (; i < nr_linfo; i++)
8469 if (linfo[i].insn_off < off + cnt)
8470 l_cnt++;
8471 else
8472 break;
8473
8474 /* First live insn doesn't match first live linfo, it needs to "inherit"
8475 * last removed linfo. prog is already modified, so prog->len == off
8476 * means no live instructions after (tail of the program was removed).
8477 */
8478 if (prog->len != off && l_cnt &&
8479 (i == nr_linfo || linfo[i].insn_off != off + cnt)) {
8480 l_cnt--;
8481 linfo[--i].insn_off = off + cnt;
8482 }
8483
8484 /* remove the line info which refer to the removed instructions */
8485 if (l_cnt) {
8486 memmove(linfo + l_off, linfo + i,
8487 sizeof(*linfo) * (nr_linfo - i));
8488
8489 prog->aux->nr_linfo -= l_cnt;
8490 nr_linfo = prog->aux->nr_linfo;
8491 }
8492
8493 /* pull all linfo[i].insn_off >= off + cnt in by cnt */
8494 for (i = l_off; i < nr_linfo; i++)
8495 linfo[i].insn_off -= cnt;
8496
8497 /* fix up all subprogs (incl. 'exit') which start >= off */
8498 for (i = 0; i <= env->subprog_cnt; i++)
8499 if (env->subprog_info[i].linfo_idx > l_off) {
8500 /* program may have started in the removed region but
8501 * may not be fully removed
8502 */
8503 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt)
8504 env->subprog_info[i].linfo_idx -= l_cnt;
8505 else
8506 env->subprog_info[i].linfo_idx = l_off;
8507 }
8508
8509 return 0;
8510}
8511
8512static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
8513{
8514 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8515 unsigned int orig_prog_len = env->prog->len;
8516 int err;
8517
08ca90af
JK
8518 if (bpf_prog_is_dev_bound(env->prog->aux))
8519 bpf_prog_offload_remove_insns(env, off, cnt);
8520
52875a04
JK
8521 err = bpf_remove_insns(env->prog, off, cnt);
8522 if (err)
8523 return err;
8524
8525 err = adjust_subprog_starts_after_remove(env, off, cnt);
8526 if (err)
8527 return err;
8528
8529 err = bpf_adj_linfo_after_remove(env, off, cnt);
8530 if (err)
8531 return err;
8532
8533 memmove(aux_data + off, aux_data + off + cnt,
8534 sizeof(*aux_data) * (orig_prog_len - off - cnt));
8535
8536 return 0;
8537}
8538
2a5418a1
DB
8539/* The verifier does more data flow analysis than llvm and will not
8540 * explore branches that are dead at run time. Malicious programs can
8541 * have dead code too. Therefore replace all dead at-run-time code
8542 * with 'ja -1'.
8543 *
8544 * Just nops are not optimal, e.g. if they would sit at the end of the
8545 * program and through another bug we would manage to jump there, then
8546 * we'd execute beyond program memory otherwise. Returning exception
8547 * code also wouldn't work since we can have subprogs where the dead
8548 * code could be located.
c131187d
AS
8549 */
8550static void sanitize_dead_code(struct bpf_verifier_env *env)
8551{
8552 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
2a5418a1 8553 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1);
c131187d
AS
8554 struct bpf_insn *insn = env->prog->insnsi;
8555 const int insn_cnt = env->prog->len;
8556 int i;
8557
8558 for (i = 0; i < insn_cnt; i++) {
8559 if (aux_data[i].seen)
8560 continue;
2a5418a1 8561 memcpy(insn + i, &trap, sizeof(trap));
c131187d
AS
8562 }
8563}
8564
e2ae4ca2
JK
8565static bool insn_is_cond_jump(u8 code)
8566{
8567 u8 op;
8568
092ed096
JW
8569 if (BPF_CLASS(code) == BPF_JMP32)
8570 return true;
8571
e2ae4ca2
JK
8572 if (BPF_CLASS(code) != BPF_JMP)
8573 return false;
8574
8575 op = BPF_OP(code);
8576 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL;
8577}
8578
8579static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
8580{
8581 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8582 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
8583 struct bpf_insn *insn = env->prog->insnsi;
8584 const int insn_cnt = env->prog->len;
8585 int i;
8586
8587 for (i = 0; i < insn_cnt; i++, insn++) {
8588 if (!insn_is_cond_jump(insn->code))
8589 continue;
8590
8591 if (!aux_data[i + 1].seen)
8592 ja.off = insn->off;
8593 else if (!aux_data[i + 1 + insn->off].seen)
8594 ja.off = 0;
8595 else
8596 continue;
8597
08ca90af
JK
8598 if (bpf_prog_is_dev_bound(env->prog->aux))
8599 bpf_prog_offload_replace_insn(env, i, &ja);
8600
e2ae4ca2
JK
8601 memcpy(insn, &ja, sizeof(ja));
8602 }
8603}
8604
52875a04
JK
8605static int opt_remove_dead_code(struct bpf_verifier_env *env)
8606{
8607 struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
8608 int insn_cnt = env->prog->len;
8609 int i, err;
8610
8611 for (i = 0; i < insn_cnt; i++) {
8612 int j;
8613
8614 j = 0;
8615 while (i + j < insn_cnt && !aux_data[i + j].seen)
8616 j++;
8617 if (!j)
8618 continue;
8619
8620 err = verifier_remove_insns(env, i, j);
8621 if (err)
8622 return err;
8623 insn_cnt = env->prog->len;
8624 }
8625
8626 return 0;
8627}
8628
a1b14abc
JK
8629static int opt_remove_nops(struct bpf_verifier_env *env)
8630{
8631 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
8632 struct bpf_insn *insn = env->prog->insnsi;
8633 int insn_cnt = env->prog->len;
8634 int i, err;
8635
8636 for (i = 0; i < insn_cnt; i++) {
8637 if (memcmp(&insn[i], &ja, sizeof(ja)))
8638 continue;
8639
8640 err = verifier_remove_insns(env, i, 1);
8641 if (err)
8642 return err;
8643 insn_cnt--;
8644 i--;
8645 }
8646
8647 return 0;
8648}
8649
d6c2308c
JW
8650static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
8651 const union bpf_attr *attr)
a4b1d3c1 8652{
d6c2308c 8653 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
a4b1d3c1 8654 struct bpf_insn_aux_data *aux = env->insn_aux_data;
d6c2308c 8655 int i, patch_len, delta = 0, len = env->prog->len;
a4b1d3c1 8656 struct bpf_insn *insns = env->prog->insnsi;
a4b1d3c1 8657 struct bpf_prog *new_prog;
d6c2308c 8658 bool rnd_hi32;
a4b1d3c1 8659
d6c2308c 8660 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
a4b1d3c1 8661 zext_patch[1] = BPF_ZEXT_REG(0);
d6c2308c
JW
8662 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
8663 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
8664 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
a4b1d3c1
JW
8665 for (i = 0; i < len; i++) {
8666 int adj_idx = i + delta;
8667 struct bpf_insn insn;
8668
d6c2308c
JW
8669 insn = insns[adj_idx];
8670 if (!aux[adj_idx].zext_dst) {
8671 u8 code, class;
8672 u32 imm_rnd;
8673
8674 if (!rnd_hi32)
8675 continue;
8676
8677 code = insn.code;
8678 class = BPF_CLASS(code);
8679 if (insn_no_def(&insn))
8680 continue;
8681
8682 /* NOTE: arg "reg" (the fourth one) is only used for
8683 * BPF_STX which has been ruled out in above
8684 * check, it is safe to pass NULL here.
8685 */
8686 if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
8687 if (class == BPF_LD &&
8688 BPF_MODE(code) == BPF_IMM)
8689 i++;
8690 continue;
8691 }
8692
8693 /* ctx load could be transformed into wider load. */
8694 if (class == BPF_LDX &&
8695 aux[adj_idx].ptr_type == PTR_TO_CTX)
8696 continue;
8697
8698 imm_rnd = get_random_int();
8699 rnd_hi32_patch[0] = insn;
8700 rnd_hi32_patch[1].imm = imm_rnd;
8701 rnd_hi32_patch[3].dst_reg = insn.dst_reg;
8702 patch = rnd_hi32_patch;
8703 patch_len = 4;
8704 goto apply_patch_buffer;
8705 }
8706
8707 if (!bpf_jit_needs_zext())
a4b1d3c1
JW
8708 continue;
8709
a4b1d3c1
JW
8710 zext_patch[0] = insn;
8711 zext_patch[1].dst_reg = insn.dst_reg;
8712 zext_patch[1].src_reg = insn.dst_reg;
d6c2308c
JW
8713 patch = zext_patch;
8714 patch_len = 2;
8715apply_patch_buffer:
8716 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
a4b1d3c1
JW
8717 if (!new_prog)
8718 return -ENOMEM;
8719 env->prog = new_prog;
8720 insns = new_prog->insnsi;
8721 aux = env->insn_aux_data;
d6c2308c 8722 delta += patch_len - 1;
a4b1d3c1
JW
8723 }
8724
8725 return 0;
8726}
8727
c64b7983
JS
8728/* convert load instructions that access fields of a context type into a
8729 * sequence of instructions that access fields of the underlying structure:
8730 * struct __sk_buff -> struct sk_buff
8731 * struct bpf_sock_ops -> struct sock
9bac3d6d 8732 */
58e2af8b 8733static int convert_ctx_accesses(struct bpf_verifier_env *env)
9bac3d6d 8734{
00176a34 8735 const struct bpf_verifier_ops *ops = env->ops;
f96da094 8736 int i, cnt, size, ctx_field_size, delta = 0;
3df126f3 8737 const int insn_cnt = env->prog->len;
36bbef52 8738 struct bpf_insn insn_buf[16], *insn;
46f53a65 8739 u32 target_size, size_default, off;
9bac3d6d 8740 struct bpf_prog *new_prog;
d691f9e8 8741 enum bpf_access_type type;
f96da094 8742 bool is_narrower_load;
9bac3d6d 8743
b09928b9
DB
8744 if (ops->gen_prologue || env->seen_direct_write) {
8745 if (!ops->gen_prologue) {
8746 verbose(env, "bpf verifier is misconfigured\n");
8747 return -EINVAL;
8748 }
36bbef52
DB
8749 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
8750 env->prog);
8751 if (cnt >= ARRAY_SIZE(insn_buf)) {
61bd5218 8752 verbose(env, "bpf verifier is misconfigured\n");
36bbef52
DB
8753 return -EINVAL;
8754 } else if (cnt) {
8041902d 8755 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
36bbef52
DB
8756 if (!new_prog)
8757 return -ENOMEM;
8041902d 8758
36bbef52 8759 env->prog = new_prog;
3df126f3 8760 delta += cnt - 1;
36bbef52
DB
8761 }
8762 }
8763
c64b7983 8764 if (bpf_prog_is_dev_bound(env->prog->aux))
9bac3d6d
AS
8765 return 0;
8766
3df126f3 8767 insn = env->prog->insnsi + delta;
36bbef52 8768
9bac3d6d 8769 for (i = 0; i < insn_cnt; i++, insn++) {
c64b7983
JS
8770 bpf_convert_ctx_access_t convert_ctx_access;
8771
62c7989b
DB
8772 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
8773 insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
8774 insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
ea2e7ce5 8775 insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
d691f9e8 8776 type = BPF_READ;
62c7989b
DB
8777 else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
8778 insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
8779 insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
ea2e7ce5 8780 insn->code == (BPF_STX | BPF_MEM | BPF_DW))
d691f9e8
AS
8781 type = BPF_WRITE;
8782 else
9bac3d6d
AS
8783 continue;
8784
af86ca4e
AS
8785 if (type == BPF_WRITE &&
8786 env->insn_aux_data[i + delta].sanitize_stack_off) {
8787 struct bpf_insn patch[] = {
8788 /* Sanitize suspicious stack slot with zero.
8789 * There are no memory dependencies for this store,
8790 * since it's only using frame pointer and immediate
8791 * constant of zero
8792 */
8793 BPF_ST_MEM(BPF_DW, BPF_REG_FP,
8794 env->insn_aux_data[i + delta].sanitize_stack_off,
8795 0),
8796 /* the original STX instruction will immediately
8797 * overwrite the same stack slot with appropriate value
8798 */
8799 *insn,
8800 };
8801
8802 cnt = ARRAY_SIZE(patch);
8803 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
8804 if (!new_prog)
8805 return -ENOMEM;
8806
8807 delta += cnt - 1;
8808 env->prog = new_prog;
8809 insn = new_prog->insnsi + i + delta;
8810 continue;
8811 }
8812
c64b7983
JS
8813 switch (env->insn_aux_data[i + delta].ptr_type) {
8814 case PTR_TO_CTX:
8815 if (!ops->convert_ctx_access)
8816 continue;
8817 convert_ctx_access = ops->convert_ctx_access;
8818 break;
8819 case PTR_TO_SOCKET:
46f8bc92 8820 case PTR_TO_SOCK_COMMON:
c64b7983
JS
8821 convert_ctx_access = bpf_sock_convert_ctx_access;
8822 break;
655a51e5
MKL
8823 case PTR_TO_TCP_SOCK:
8824 convert_ctx_access = bpf_tcp_sock_convert_ctx_access;
8825 break;
fada7fdc
JL
8826 case PTR_TO_XDP_SOCK:
8827 convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
8828 break;
2a02759e
AS
8829 case PTR_TO_BTF_ID:
8830 if (type == BPF_WRITE) {
8831 verbose(env, "Writes through BTF pointers are not allowed\n");
8832 return -EINVAL;
8833 }
8834 insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE((insn)->code);
3dec541b 8835 env->prog->aux->num_exentries++;
2a02759e 8836 continue;
c64b7983 8837 default:
9bac3d6d 8838 continue;
c64b7983 8839 }
9bac3d6d 8840
31fd8581 8841 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size;
f96da094 8842 size = BPF_LDST_BYTES(insn);
31fd8581
YS
8843
8844 /* If the read access is a narrower load of the field,
8845 * convert to a 4/8-byte load, to minimum program type specific
8846 * convert_ctx_access changes. If conversion is successful,
8847 * we will apply proper mask to the result.
8848 */
f96da094 8849 is_narrower_load = size < ctx_field_size;
46f53a65
AI
8850 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
8851 off = insn->off;
31fd8581 8852 if (is_narrower_load) {
f96da094
DB
8853 u8 size_code;
8854
8855 if (type == BPF_WRITE) {
61bd5218 8856 verbose(env, "bpf verifier narrow ctx access misconfigured\n");
f96da094
DB
8857 return -EINVAL;
8858 }
31fd8581 8859
f96da094 8860 size_code = BPF_H;
31fd8581
YS
8861 if (ctx_field_size == 4)
8862 size_code = BPF_W;
8863 else if (ctx_field_size == 8)
8864 size_code = BPF_DW;
f96da094 8865
bc23105c 8866 insn->off = off & ~(size_default - 1);
31fd8581
YS
8867 insn->code = BPF_LDX | BPF_MEM | size_code;
8868 }
f96da094
DB
8869
8870 target_size = 0;
c64b7983
JS
8871 cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
8872 &target_size);
f96da094
DB
8873 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
8874 (ctx_field_size && !target_size)) {
61bd5218 8875 verbose(env, "bpf verifier is misconfigured\n");
9bac3d6d
AS
8876 return -EINVAL;
8877 }
f96da094
DB
8878
8879 if (is_narrower_load && size < target_size) {
d895a0f1
IL
8880 u8 shift = bpf_ctx_narrow_access_offset(
8881 off, size, size_default) * 8;
46f53a65
AI
8882 if (ctx_field_size <= 4) {
8883 if (shift)
8884 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
8885 insn->dst_reg,
8886 shift);
31fd8581 8887 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
f96da094 8888 (1 << size * 8) - 1);
46f53a65
AI
8889 } else {
8890 if (shift)
8891 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
8892 insn->dst_reg,
8893 shift);
31fd8581 8894 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
e2f7fc0a 8895 (1ULL << size * 8) - 1);
46f53a65 8896 }
31fd8581 8897 }
9bac3d6d 8898
8041902d 8899 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9bac3d6d
AS
8900 if (!new_prog)
8901 return -ENOMEM;
8902
3df126f3 8903 delta += cnt - 1;
9bac3d6d
AS
8904
8905 /* keep walking new program and skip insns we just inserted */
8906 env->prog = new_prog;
3df126f3 8907 insn = new_prog->insnsi + i + delta;
9bac3d6d
AS
8908 }
8909
8910 return 0;
8911}
8912
1c2a088a
AS
8913static int jit_subprogs(struct bpf_verifier_env *env)
8914{
8915 struct bpf_prog *prog = env->prog, **func, *tmp;
8916 int i, j, subprog_start, subprog_end = 0, len, subprog;
7105e828 8917 struct bpf_insn *insn;
1c2a088a 8918 void *old_bpf_func;
c454a46b 8919 int err;
1c2a088a 8920
f910cefa 8921 if (env->subprog_cnt <= 1)
1c2a088a
AS
8922 return 0;
8923
7105e828 8924 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
1c2a088a
AS
8925 if (insn->code != (BPF_JMP | BPF_CALL) ||
8926 insn->src_reg != BPF_PSEUDO_CALL)
8927 continue;
c7a89784
DB
8928 /* Upon error here we cannot fall back to interpreter but
8929 * need a hard reject of the program. Thus -EFAULT is
8930 * propagated in any case.
8931 */
1c2a088a
AS
8932 subprog = find_subprog(env, i + insn->imm + 1);
8933 if (subprog < 0) {
8934 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
8935 i + insn->imm + 1);
8936 return -EFAULT;
8937 }
8938 /* temporarily remember subprog id inside insn instead of
8939 * aux_data, since next loop will split up all insns into funcs
8940 */
f910cefa 8941 insn->off = subprog;
1c2a088a
AS
8942 /* remember original imm in case JIT fails and fallback
8943 * to interpreter will be needed
8944 */
8945 env->insn_aux_data[i].call_imm = insn->imm;
8946 /* point imm to __bpf_call_base+1 from JITs point of view */
8947 insn->imm = 1;
8948 }
8949
c454a46b
MKL
8950 err = bpf_prog_alloc_jited_linfo(prog);
8951 if (err)
8952 goto out_undo_insn;
8953
8954 err = -ENOMEM;
6396bb22 8955 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
1c2a088a 8956 if (!func)
c7a89784 8957 goto out_undo_insn;
1c2a088a 8958
f910cefa 8959 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a 8960 subprog_start = subprog_end;
4cb3d99c 8961 subprog_end = env->subprog_info[i + 1].start;
1c2a088a
AS
8962
8963 len = subprog_end - subprog_start;
492ecee8
AS
8964 /* BPF_PROG_RUN doesn't call subprogs directly,
8965 * hence main prog stats include the runtime of subprogs.
8966 * subprogs don't have IDs and not reachable via prog_get_next_id
8967 * func[i]->aux->stats will never be accessed and stays NULL
8968 */
8969 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER);
1c2a088a
AS
8970 if (!func[i])
8971 goto out_free;
8972 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
8973 len * sizeof(struct bpf_insn));
4f74d809 8974 func[i]->type = prog->type;
1c2a088a 8975 func[i]->len = len;
4f74d809
DB
8976 if (bpf_prog_calc_tag(func[i]))
8977 goto out_free;
1c2a088a 8978 func[i]->is_func = 1;
ba64e7d8
YS
8979 func[i]->aux->func_idx = i;
8980 /* the btf and func_info will be freed only at prog->aux */
8981 func[i]->aux->btf = prog->aux->btf;
8982 func[i]->aux->func_info = prog->aux->func_info;
8983
1c2a088a
AS
8984 /* Use bpf_prog_F_tag to indicate functions in stack traces.
8985 * Long term would need debug info to populate names
8986 */
8987 func[i]->aux->name[0] = 'F';
9c8105bd 8988 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
1c2a088a 8989 func[i]->jit_requested = 1;
c454a46b
MKL
8990 func[i]->aux->linfo = prog->aux->linfo;
8991 func[i]->aux->nr_linfo = prog->aux->nr_linfo;
8992 func[i]->aux->jited_linfo = prog->aux->jited_linfo;
8993 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
1c2a088a
AS
8994 func[i] = bpf_int_jit_compile(func[i]);
8995 if (!func[i]->jited) {
8996 err = -ENOTSUPP;
8997 goto out_free;
8998 }
8999 cond_resched();
9000 }
9001 /* at this point all bpf functions were successfully JITed
9002 * now populate all bpf_calls with correct addresses and
9003 * run last pass of JIT
9004 */
f910cefa 9005 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9006 insn = func[i]->insnsi;
9007 for (j = 0; j < func[i]->len; j++, insn++) {
9008 if (insn->code != (BPF_JMP | BPF_CALL) ||
9009 insn->src_reg != BPF_PSEUDO_CALL)
9010 continue;
9011 subprog = insn->off;
0d306c31
PB
9012 insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) -
9013 __bpf_call_base;
1c2a088a 9014 }
2162fed4
SD
9015
9016 /* we use the aux data to keep a list of the start addresses
9017 * of the JITed images for each function in the program
9018 *
9019 * for some architectures, such as powerpc64, the imm field
9020 * might not be large enough to hold the offset of the start
9021 * address of the callee's JITed image from __bpf_call_base
9022 *
9023 * in such cases, we can lookup the start address of a callee
9024 * by using its subprog id, available from the off field of
9025 * the call instruction, as an index for this list
9026 */
9027 func[i]->aux->func = func;
9028 func[i]->aux->func_cnt = env->subprog_cnt;
1c2a088a 9029 }
f910cefa 9030 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9031 old_bpf_func = func[i]->bpf_func;
9032 tmp = bpf_int_jit_compile(func[i]);
9033 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
9034 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
c7a89784 9035 err = -ENOTSUPP;
1c2a088a
AS
9036 goto out_free;
9037 }
9038 cond_resched();
9039 }
9040
9041 /* finally lock prog and jit images for all functions and
9042 * populate kallsysm
9043 */
f910cefa 9044 for (i = 0; i < env->subprog_cnt; i++) {
1c2a088a
AS
9045 bpf_prog_lock_ro(func[i]);
9046 bpf_prog_kallsyms_add(func[i]);
9047 }
7105e828
DB
9048
9049 /* Last step: make now unused interpreter insns from main
9050 * prog consistent for later dump requests, so they can
9051 * later look the same as if they were interpreted only.
9052 */
9053 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
7105e828
DB
9054 if (insn->code != (BPF_JMP | BPF_CALL) ||
9055 insn->src_reg != BPF_PSEUDO_CALL)
9056 continue;
9057 insn->off = env->insn_aux_data[i].call_imm;
9058 subprog = find_subprog(env, i + insn->off + 1);
dbecd738 9059 insn->imm = subprog;
7105e828
DB
9060 }
9061
1c2a088a
AS
9062 prog->jited = 1;
9063 prog->bpf_func = func[0]->bpf_func;
9064 prog->aux->func = func;
f910cefa 9065 prog->aux->func_cnt = env->subprog_cnt;
c454a46b 9066 bpf_prog_free_unused_jited_linfo(prog);
1c2a088a
AS
9067 return 0;
9068out_free:
f910cefa 9069 for (i = 0; i < env->subprog_cnt; i++)
1c2a088a
AS
9070 if (func[i])
9071 bpf_jit_free(func[i]);
9072 kfree(func);
c7a89784 9073out_undo_insn:
1c2a088a
AS
9074 /* cleanup main prog to be interpreted */
9075 prog->jit_requested = 0;
9076 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
9077 if (insn->code != (BPF_JMP | BPF_CALL) ||
9078 insn->src_reg != BPF_PSEUDO_CALL)
9079 continue;
9080 insn->off = 0;
9081 insn->imm = env->insn_aux_data[i].call_imm;
9082 }
c454a46b 9083 bpf_prog_free_jited_linfo(prog);
1c2a088a
AS
9084 return err;
9085}
9086
1ea47e01
AS
9087static int fixup_call_args(struct bpf_verifier_env *env)
9088{
19d28fbd 9089#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
9090 struct bpf_prog *prog = env->prog;
9091 struct bpf_insn *insn = prog->insnsi;
9092 int i, depth;
19d28fbd 9093#endif
e4052d06 9094 int err = 0;
1ea47e01 9095
e4052d06
QM
9096 if (env->prog->jit_requested &&
9097 !bpf_prog_is_dev_bound(env->prog->aux)) {
19d28fbd
DM
9098 err = jit_subprogs(env);
9099 if (err == 0)
1c2a088a 9100 return 0;
c7a89784
DB
9101 if (err == -EFAULT)
9102 return err;
19d28fbd
DM
9103 }
9104#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1ea47e01
AS
9105 for (i = 0; i < prog->len; i++, insn++) {
9106 if (insn->code != (BPF_JMP | BPF_CALL) ||
9107 insn->src_reg != BPF_PSEUDO_CALL)
9108 continue;
9109 depth = get_callee_stack_depth(env, insn, i);
9110 if (depth < 0)
9111 return depth;
9112 bpf_patch_call_args(insn, depth);
9113 }
19d28fbd
DM
9114 err = 0;
9115#endif
9116 return err;
1ea47e01
AS
9117}
9118
79741b3b 9119/* fixup insn->imm field of bpf_call instructions
81ed18ab 9120 * and inline eligible helpers as explicit sequence of BPF instructions
e245c5c6
AS
9121 *
9122 * this function is called after eBPF program passed verification
9123 */
79741b3b 9124static int fixup_bpf_calls(struct bpf_verifier_env *env)
e245c5c6 9125{
79741b3b 9126 struct bpf_prog *prog = env->prog;
d2e4c1e6 9127 bool expect_blinding = bpf_jit_blinding_enabled(prog);
79741b3b 9128 struct bpf_insn *insn = prog->insnsi;
e245c5c6 9129 const struct bpf_func_proto *fn;
79741b3b 9130 const int insn_cnt = prog->len;
09772d92 9131 const struct bpf_map_ops *ops;
c93552c4 9132 struct bpf_insn_aux_data *aux;
81ed18ab
AS
9133 struct bpf_insn insn_buf[16];
9134 struct bpf_prog *new_prog;
9135 struct bpf_map *map_ptr;
d2e4c1e6 9136 int i, ret, cnt, delta = 0;
e245c5c6 9137
79741b3b 9138 for (i = 0; i < insn_cnt; i++, insn++) {
f6b1b3bf
DB
9139 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
9140 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
9141 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
68fda450 9142 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
f6b1b3bf
DB
9143 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
9144 struct bpf_insn mask_and_div[] = {
9145 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
9146 /* Rx div 0 -> 0 */
9147 BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
9148 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
9149 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
9150 *insn,
9151 };
9152 struct bpf_insn mask_and_mod[] = {
9153 BPF_MOV32_REG(insn->src_reg, insn->src_reg),
9154 /* Rx mod 0 -> Rx */
9155 BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
9156 *insn,
9157 };
9158 struct bpf_insn *patchlet;
9159
9160 if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
9161 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
9162 patchlet = mask_and_div + (is64 ? 1 : 0);
9163 cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
9164 } else {
9165 patchlet = mask_and_mod + (is64 ? 1 : 0);
9166 cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
9167 }
9168
9169 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
68fda450
AS
9170 if (!new_prog)
9171 return -ENOMEM;
9172
9173 delta += cnt - 1;
9174 env->prog = prog = new_prog;
9175 insn = new_prog->insnsi + i + delta;
9176 continue;
9177 }
9178
e0cea7ce
DB
9179 if (BPF_CLASS(insn->code) == BPF_LD &&
9180 (BPF_MODE(insn->code) == BPF_ABS ||
9181 BPF_MODE(insn->code) == BPF_IND)) {
9182 cnt = env->ops->gen_ld_abs(insn, insn_buf);
9183 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9184 verbose(env, "bpf verifier is misconfigured\n");
9185 return -EINVAL;
9186 }
9187
9188 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9189 if (!new_prog)
9190 return -ENOMEM;
9191
9192 delta += cnt - 1;
9193 env->prog = prog = new_prog;
9194 insn = new_prog->insnsi + i + delta;
9195 continue;
9196 }
9197
979d63d5
DB
9198 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
9199 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
9200 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
9201 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
9202 struct bpf_insn insn_buf[16];
9203 struct bpf_insn *patch = &insn_buf[0];
9204 bool issrc, isneg;
9205 u32 off_reg;
9206
9207 aux = &env->insn_aux_data[i + delta];
3612af78
DB
9208 if (!aux->alu_state ||
9209 aux->alu_state == BPF_ALU_NON_POINTER)
979d63d5
DB
9210 continue;
9211
9212 isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
9213 issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
9214 BPF_ALU_SANITIZE_SRC;
9215
9216 off_reg = issrc ? insn->src_reg : insn->dst_reg;
9217 if (isneg)
9218 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
9219 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
9220 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
9221 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
9222 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
9223 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
9224 if (issrc) {
9225 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
9226 off_reg);
9227 insn->src_reg = BPF_REG_AX;
9228 } else {
9229 *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
9230 BPF_REG_AX);
9231 }
9232 if (isneg)
9233 insn->code = insn->code == code_add ?
9234 code_sub : code_add;
9235 *patch++ = *insn;
9236 if (issrc && isneg)
9237 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
9238 cnt = patch - insn_buf;
9239
9240 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9241 if (!new_prog)
9242 return -ENOMEM;
9243
9244 delta += cnt - 1;
9245 env->prog = prog = new_prog;
9246 insn = new_prog->insnsi + i + delta;
9247 continue;
9248 }
9249
79741b3b
AS
9250 if (insn->code != (BPF_JMP | BPF_CALL))
9251 continue;
cc8b0b92
AS
9252 if (insn->src_reg == BPF_PSEUDO_CALL)
9253 continue;
e245c5c6 9254
79741b3b
AS
9255 if (insn->imm == BPF_FUNC_get_route_realm)
9256 prog->dst_needed = 1;
9257 if (insn->imm == BPF_FUNC_get_prandom_u32)
9258 bpf_user_rnd_init_once();
9802d865
JB
9259 if (insn->imm == BPF_FUNC_override_return)
9260 prog->kprobe_override = 1;
79741b3b 9261 if (insn->imm == BPF_FUNC_tail_call) {
7b9f6da1
DM
9262 /* If we tail call into other programs, we
9263 * cannot make any assumptions since they can
9264 * be replaced dynamically during runtime in
9265 * the program array.
9266 */
9267 prog->cb_access = 1;
80a58d02 9268 env->prog->aux->stack_depth = MAX_BPF_STACK;
e647815a 9269 env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
7b9f6da1 9270
79741b3b
AS
9271 /* mark bpf_tail_call as different opcode to avoid
9272 * conditional branch in the interpeter for every normal
9273 * call and to prevent accidental JITing by JIT compiler
9274 * that doesn't support bpf_tail_call yet
e245c5c6 9275 */
79741b3b 9276 insn->imm = 0;
71189fa9 9277 insn->code = BPF_JMP | BPF_TAIL_CALL;
b2157399 9278
c93552c4 9279 aux = &env->insn_aux_data[i + delta];
cc52d914
DB
9280 if (env->allow_ptr_leaks && !expect_blinding &&
9281 prog->jit_requested &&
d2e4c1e6
DB
9282 !bpf_map_key_poisoned(aux) &&
9283 !bpf_map_ptr_poisoned(aux) &&
9284 !bpf_map_ptr_unpriv(aux)) {
9285 struct bpf_jit_poke_descriptor desc = {
9286 .reason = BPF_POKE_REASON_TAIL_CALL,
9287 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
9288 .tail_call.key = bpf_map_key_immediate(aux),
9289 };
9290
9291 ret = bpf_jit_add_poke_descriptor(prog, &desc);
9292 if (ret < 0) {
9293 verbose(env, "adding tail call poke descriptor failed\n");
9294 return ret;
9295 }
9296
9297 insn->imm = ret + 1;
9298 continue;
9299 }
9300
c93552c4
DB
9301 if (!bpf_map_ptr_unpriv(aux))
9302 continue;
9303
b2157399
AS
9304 /* instead of changing every JIT dealing with tail_call
9305 * emit two extra insns:
9306 * if (index >= max_entries) goto out;
9307 * index &= array->index_mask;
9308 * to avoid out-of-bounds cpu speculation
9309 */
c93552c4 9310 if (bpf_map_ptr_poisoned(aux)) {
40950343 9311 verbose(env, "tail_call abusing map_ptr\n");
b2157399
AS
9312 return -EINVAL;
9313 }
c93552c4 9314
d2e4c1e6 9315 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
b2157399
AS
9316 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
9317 map_ptr->max_entries, 2);
9318 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
9319 container_of(map_ptr,
9320 struct bpf_array,
9321 map)->index_mask);
9322 insn_buf[2] = *insn;
9323 cnt = 3;
9324 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
9325 if (!new_prog)
9326 return -ENOMEM;
9327
9328 delta += cnt - 1;
9329 env->prog = prog = new_prog;
9330 insn = new_prog->insnsi + i + delta;
79741b3b
AS
9331 continue;
9332 }
e245c5c6 9333
89c63074 9334 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
09772d92
DB
9335 * and other inlining handlers are currently limited to 64 bit
9336 * only.
89c63074 9337 */
60b58afc 9338 if (prog->jit_requested && BITS_PER_LONG == 64 &&
09772d92
DB
9339 (insn->imm == BPF_FUNC_map_lookup_elem ||
9340 insn->imm == BPF_FUNC_map_update_elem ||
84430d42
DB
9341 insn->imm == BPF_FUNC_map_delete_elem ||
9342 insn->imm == BPF_FUNC_map_push_elem ||
9343 insn->imm == BPF_FUNC_map_pop_elem ||
9344 insn->imm == BPF_FUNC_map_peek_elem)) {
c93552c4
DB
9345 aux = &env->insn_aux_data[i + delta];
9346 if (bpf_map_ptr_poisoned(aux))
9347 goto patch_call_imm;
9348
d2e4c1e6 9349 map_ptr = BPF_MAP_PTR(aux->map_ptr_state);
09772d92
DB
9350 ops = map_ptr->ops;
9351 if (insn->imm == BPF_FUNC_map_lookup_elem &&
9352 ops->map_gen_lookup) {
9353 cnt = ops->map_gen_lookup(map_ptr, insn_buf);
9354 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
9355 verbose(env, "bpf verifier is misconfigured\n");
9356 return -EINVAL;
9357 }
81ed18ab 9358
09772d92
DB
9359 new_prog = bpf_patch_insn_data(env, i + delta,
9360 insn_buf, cnt);
9361 if (!new_prog)
9362 return -ENOMEM;
81ed18ab 9363
09772d92
DB
9364 delta += cnt - 1;
9365 env->prog = prog = new_prog;
9366 insn = new_prog->insnsi + i + delta;
9367 continue;
9368 }
81ed18ab 9369
09772d92
DB
9370 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem,
9371 (void *(*)(struct bpf_map *map, void *key))NULL));
9372 BUILD_BUG_ON(!__same_type(ops->map_delete_elem,
9373 (int (*)(struct bpf_map *map, void *key))NULL));
9374 BUILD_BUG_ON(!__same_type(ops->map_update_elem,
9375 (int (*)(struct bpf_map *map, void *key, void *value,
9376 u64 flags))NULL));
84430d42
DB
9377 BUILD_BUG_ON(!__same_type(ops->map_push_elem,
9378 (int (*)(struct bpf_map *map, void *value,
9379 u64 flags))NULL));
9380 BUILD_BUG_ON(!__same_type(ops->map_pop_elem,
9381 (int (*)(struct bpf_map *map, void *value))NULL));
9382 BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
9383 (int (*)(struct bpf_map *map, void *value))NULL));
9384
09772d92
DB
9385 switch (insn->imm) {
9386 case BPF_FUNC_map_lookup_elem:
9387 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
9388 __bpf_call_base;
9389 continue;
9390 case BPF_FUNC_map_update_elem:
9391 insn->imm = BPF_CAST_CALL(ops->map_update_elem) -
9392 __bpf_call_base;
9393 continue;
9394 case BPF_FUNC_map_delete_elem:
9395 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) -
9396 __bpf_call_base;
9397 continue;
84430d42
DB
9398 case BPF_FUNC_map_push_elem:
9399 insn->imm = BPF_CAST_CALL(ops->map_push_elem) -
9400 __bpf_call_base;
9401 continue;
9402 case BPF_FUNC_map_pop_elem:
9403 insn->imm = BPF_CAST_CALL(ops->map_pop_elem) -
9404 __bpf_call_base;
9405 continue;
9406 case BPF_FUNC_map_peek_elem:
9407 insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
9408 __bpf_call_base;
9409 continue;
09772d92 9410 }
81ed18ab 9411
09772d92 9412 goto patch_call_imm;
81ed18ab
AS
9413 }
9414
9415patch_call_imm:
5e43f899 9416 fn = env->ops->get_func_proto(insn->imm, env->prog);
79741b3b
AS
9417 /* all functions that have prototype and verifier allowed
9418 * programs to call them, must be real in-kernel functions
9419 */
9420 if (!fn->func) {
61bd5218
JK
9421 verbose(env,
9422 "kernel subsystem misconfigured func %s#%d\n",
79741b3b
AS
9423 func_id_name(insn->imm), insn->imm);
9424 return -EFAULT;
e245c5c6 9425 }
79741b3b 9426 insn->imm = fn->func - __bpf_call_base;
e245c5c6 9427 }
e245c5c6 9428
d2e4c1e6
DB
9429 /* Since poke tab is now finalized, publish aux to tracker. */
9430 for (i = 0; i < prog->aux->size_poke_tab; i++) {
9431 map_ptr = prog->aux->poke_tab[i].tail_call.map;
9432 if (!map_ptr->ops->map_poke_track ||
9433 !map_ptr->ops->map_poke_untrack ||
9434 !map_ptr->ops->map_poke_run) {
9435 verbose(env, "bpf verifier is misconfigured\n");
9436 return -EINVAL;
9437 }
9438
9439 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux);
9440 if (ret < 0) {
9441 verbose(env, "tracking tail call prog failed\n");
9442 return ret;
9443 }
9444 }
9445
79741b3b
AS
9446 return 0;
9447}
e245c5c6 9448
58e2af8b 9449static void free_states(struct bpf_verifier_env *env)
f1bca824 9450{
58e2af8b 9451 struct bpf_verifier_state_list *sl, *sln;
f1bca824
AS
9452 int i;
9453
9f4686c4
AS
9454 sl = env->free_list;
9455 while (sl) {
9456 sln = sl->next;
9457 free_verifier_state(&sl->state, false);
9458 kfree(sl);
9459 sl = sln;
9460 }
9461
f1bca824
AS
9462 if (!env->explored_states)
9463 return;
9464
dc2a4ebc 9465 for (i = 0; i < state_htab_size(env); i++) {
f1bca824
AS
9466 sl = env->explored_states[i];
9467
a8f500af
AS
9468 while (sl) {
9469 sln = sl->next;
9470 free_verifier_state(&sl->state, false);
9471 kfree(sl);
9472 sl = sln;
9473 }
f1bca824
AS
9474 }
9475
71dde681 9476 kvfree(env->explored_states);
f1bca824
AS
9477}
9478
06ee7115
AS
9479static void print_verification_stats(struct bpf_verifier_env *env)
9480{
9481 int i;
9482
9483 if (env->log.level & BPF_LOG_STATS) {
9484 verbose(env, "verification time %lld usec\n",
9485 div_u64(env->verification_time, 1000));
9486 verbose(env, "stack depth ");
9487 for (i = 0; i < env->subprog_cnt; i++) {
9488 u32 depth = env->subprog_info[i].stack_depth;
9489
9490 verbose(env, "%d", depth);
9491 if (i + 1 < env->subprog_cnt)
9492 verbose(env, "+");
9493 }
9494 verbose(env, "\n");
9495 }
9496 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d "
9497 "total_states %d peak_states %d mark_read %d\n",
9498 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS,
9499 env->max_states_per_insn, env->total_states,
9500 env->peak_states, env->longest_mark_read_walk);
f1bca824
AS
9501}
9502
38207291
MKL
9503static int check_attach_btf_id(struct bpf_verifier_env *env)
9504{
9505 struct bpf_prog *prog = env->prog;
5b92a28a 9506 struct bpf_prog *tgt_prog = prog->aux->linked_prog;
38207291 9507 u32 btf_id = prog->aux->attach_btf_id;
f1b9509c 9508 const char prefix[] = "btf_trace_";
5b92a28a 9509 int ret = 0, subprog = -1, i;
fec56f58 9510 struct bpf_trampoline *tr;
38207291 9511 const struct btf_type *t;
5b92a28a 9512 bool conservative = true;
38207291 9513 const char *tname;
5b92a28a 9514 struct btf *btf;
fec56f58 9515 long addr;
5b92a28a 9516 u64 key;
38207291 9517
f1b9509c
AS
9518 if (prog->type != BPF_PROG_TYPE_TRACING)
9519 return 0;
38207291 9520
f1b9509c
AS
9521 if (!btf_id) {
9522 verbose(env, "Tracing programs must provide btf_id\n");
9523 return -EINVAL;
9524 }
5b92a28a
AS
9525 btf = bpf_prog_get_target_btf(prog);
9526 if (!btf) {
9527 verbose(env,
9528 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
9529 return -EINVAL;
9530 }
9531 t = btf_type_by_id(btf, btf_id);
f1b9509c
AS
9532 if (!t) {
9533 verbose(env, "attach_btf_id %u is invalid\n", btf_id);
9534 return -EINVAL;
9535 }
5b92a28a 9536 tname = btf_name_by_offset(btf, t->name_off);
f1b9509c
AS
9537 if (!tname) {
9538 verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
9539 return -EINVAL;
9540 }
5b92a28a
AS
9541 if (tgt_prog) {
9542 struct bpf_prog_aux *aux = tgt_prog->aux;
9543
9544 for (i = 0; i < aux->func_info_cnt; i++)
9545 if (aux->func_info[i].type_id == btf_id) {
9546 subprog = i;
9547 break;
9548 }
9549 if (subprog == -1) {
9550 verbose(env, "Subprog %s doesn't exist\n", tname);
9551 return -EINVAL;
9552 }
9553 conservative = aux->func_info_aux[subprog].unreliable;
9554 key = ((u64)aux->id) << 32 | btf_id;
9555 } else {
9556 key = btf_id;
9557 }
f1b9509c
AS
9558
9559 switch (prog->expected_attach_type) {
9560 case BPF_TRACE_RAW_TP:
5b92a28a
AS
9561 if (tgt_prog) {
9562 verbose(env,
9563 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
9564 return -EINVAL;
9565 }
38207291
MKL
9566 if (!btf_type_is_typedef(t)) {
9567 verbose(env, "attach_btf_id %u is not a typedef\n",
9568 btf_id);
9569 return -EINVAL;
9570 }
f1b9509c 9571 if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
38207291
MKL
9572 verbose(env, "attach_btf_id %u points to wrong type name %s\n",
9573 btf_id, tname);
9574 return -EINVAL;
9575 }
9576 tname += sizeof(prefix) - 1;
5b92a28a 9577 t = btf_type_by_id(btf, t->type);
38207291
MKL
9578 if (!btf_type_is_ptr(t))
9579 /* should never happen in valid vmlinux build */
9580 return -EINVAL;
5b92a28a 9581 t = btf_type_by_id(btf, t->type);
38207291
MKL
9582 if (!btf_type_is_func_proto(t))
9583 /* should never happen in valid vmlinux build */
9584 return -EINVAL;
9585
9586 /* remember two read only pointers that are valid for
9587 * the life time of the kernel
9588 */
9589 prog->aux->attach_func_name = tname;
9590 prog->aux->attach_func_proto = t;
9591 prog->aux->attach_btf_trace = true;
f1b9509c 9592 return 0;
fec56f58
AS
9593 case BPF_TRACE_FENTRY:
9594 case BPF_TRACE_FEXIT:
9595 if (!btf_type_is_func(t)) {
9596 verbose(env, "attach_btf_id %u is not a function\n",
9597 btf_id);
9598 return -EINVAL;
9599 }
5b92a28a 9600 t = btf_type_by_id(btf, t->type);
fec56f58
AS
9601 if (!btf_type_is_func_proto(t))
9602 return -EINVAL;
5b92a28a 9603 tr = bpf_trampoline_lookup(key);
fec56f58
AS
9604 if (!tr)
9605 return -ENOMEM;
9606 prog->aux->attach_func_name = tname;
5b92a28a 9607 /* t is either vmlinux type or another program's type */
fec56f58
AS
9608 prog->aux->attach_func_proto = t;
9609 mutex_lock(&tr->mutex);
9610 if (tr->func.addr) {
9611 prog->aux->trampoline = tr;
9612 goto out;
9613 }
5b92a28a
AS
9614 if (tgt_prog && conservative) {
9615 prog->aux->attach_func_proto = NULL;
9616 t = NULL;
9617 }
9618 ret = btf_distill_func_proto(&env->log, btf, t,
fec56f58
AS
9619 tname, &tr->func.model);
9620 if (ret < 0)
9621 goto out;
5b92a28a
AS
9622 if (tgt_prog) {
9623 if (!tgt_prog->jited) {
9624 /* for now */
9625 verbose(env, "Can trace only JITed BPF progs\n");
9626 ret = -EINVAL;
9627 goto out;
9628 }
9629 if (tgt_prog->type == BPF_PROG_TYPE_TRACING) {
9630 /* prevent cycles */
9631 verbose(env, "Cannot recursively attach\n");
9632 ret = -EINVAL;
9633 goto out;
9634 }
e9eeec58
YS
9635 if (subprog == 0)
9636 addr = (long) tgt_prog->bpf_func;
9637 else
9638 addr = (long) tgt_prog->aux->func[subprog]->bpf_func;
5b92a28a
AS
9639 } else {
9640 addr = kallsyms_lookup_name(tname);
9641 if (!addr) {
9642 verbose(env,
9643 "The address of function %s cannot be found\n",
9644 tname);
9645 ret = -ENOENT;
9646 goto out;
9647 }
fec56f58
AS
9648 }
9649 tr->func.addr = (void *)addr;
9650 prog->aux->trampoline = tr;
9651out:
9652 mutex_unlock(&tr->mutex);
9653 if (ret)
9654 bpf_trampoline_put(tr);
9655 return ret;
f1b9509c
AS
9656 default:
9657 return -EINVAL;
38207291 9658 }
38207291
MKL
9659}
9660
838e9690
YS
9661int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
9662 union bpf_attr __user *uattr)
51580e79 9663{
06ee7115 9664 u64 start_time = ktime_get_ns();
58e2af8b 9665 struct bpf_verifier_env *env;
b9193c1b 9666 struct bpf_verifier_log *log;
9e4c24e7 9667 int i, len, ret = -EINVAL;
e2ae4ca2 9668 bool is_priv;
51580e79 9669
eba0c929
AB
9670 /* no program is valid */
9671 if (ARRAY_SIZE(bpf_verifier_ops) == 0)
9672 return -EINVAL;
9673
58e2af8b 9674 /* 'struct bpf_verifier_env' can be global, but since it's not small,
cbd35700
AS
9675 * allocate/free it every time bpf_check() is called
9676 */
58e2af8b 9677 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
cbd35700
AS
9678 if (!env)
9679 return -ENOMEM;
61bd5218 9680 log = &env->log;
cbd35700 9681
9e4c24e7 9682 len = (*prog)->len;
fad953ce 9683 env->insn_aux_data =
9e4c24e7 9684 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len));
3df126f3
JK
9685 ret = -ENOMEM;
9686 if (!env->insn_aux_data)
9687 goto err_free_env;
9e4c24e7
JK
9688 for (i = 0; i < len; i++)
9689 env->insn_aux_data[i].orig_idx = i;
9bac3d6d 9690 env->prog = *prog;
00176a34 9691 env->ops = bpf_verifier_ops[env->prog->type];
45a73c17 9692 is_priv = capable(CAP_SYS_ADMIN);
0246e64d 9693
8580ac94
AS
9694 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
9695 mutex_lock(&bpf_verifier_lock);
9696 if (!btf_vmlinux)
9697 btf_vmlinux = btf_parse_vmlinux();
9698 mutex_unlock(&bpf_verifier_lock);
9699 }
9700
cbd35700 9701 /* grab the mutex to protect few globals used by verifier */
45a73c17
AS
9702 if (!is_priv)
9703 mutex_lock(&bpf_verifier_lock);
cbd35700
AS
9704
9705 if (attr->log_level || attr->log_buf || attr->log_size) {
9706 /* user requested verbose verifier output
9707 * and supplied buffer to store the verification trace
9708 */
e7bf8249
JK
9709 log->level = attr->log_level;
9710 log->ubuf = (char __user *) (unsigned long) attr->log_buf;
9711 log->len_total = attr->log_size;
cbd35700
AS
9712
9713 ret = -EINVAL;
e7bf8249 9714 /* log attributes have to be sane */
7a9f5c65 9715 if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 ||
06ee7115 9716 !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK)
3df126f3 9717 goto err_unlock;
cbd35700 9718 }
1ad2f583 9719
8580ac94
AS
9720 if (IS_ERR(btf_vmlinux)) {
9721 /* Either gcc or pahole or kernel are broken. */
9722 verbose(env, "in-kernel BTF is malformed\n");
9723 ret = PTR_ERR(btf_vmlinux);
38207291 9724 goto skip_full_check;
8580ac94
AS
9725 }
9726
38207291
MKL
9727 ret = check_attach_btf_id(env);
9728 if (ret)
9729 goto skip_full_check;
9730
1ad2f583
DB
9731 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
9732 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
e07b98d9 9733 env->strict_alignment = true;
e9ee9efc
DM
9734 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
9735 env->strict_alignment = false;
cbd35700 9736
e2ae4ca2
JK
9737 env->allow_ptr_leaks = is_priv;
9738
10d274e8
AS
9739 if (is_priv)
9740 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
9741
f4e3ec0d
JK
9742 ret = replace_map_fd_with_map_ptr(env);
9743 if (ret < 0)
9744 goto skip_full_check;
9745
cae1927c 9746 if (bpf_prog_is_dev_bound(env->prog->aux)) {
a40a2632 9747 ret = bpf_prog_offload_verifier_prep(env->prog);
ab3f0063 9748 if (ret)
f4e3ec0d 9749 goto skip_full_check;
ab3f0063
JK
9750 }
9751
dc2a4ebc 9752 env->explored_states = kvcalloc(state_htab_size(env),
58e2af8b 9753 sizeof(struct bpf_verifier_state_list *),
f1bca824
AS
9754 GFP_USER);
9755 ret = -ENOMEM;
9756 if (!env->explored_states)
9757 goto skip_full_check;
9758
d9762e84 9759 ret = check_subprogs(env);
475fb78f
AS
9760 if (ret < 0)
9761 goto skip_full_check;
9762
c454a46b 9763 ret = check_btf_info(env, attr, uattr);
838e9690
YS
9764 if (ret < 0)
9765 goto skip_full_check;
9766
d9762e84
MKL
9767 ret = check_cfg(env);
9768 if (ret < 0)
9769 goto skip_full_check;
9770
17a52670 9771 ret = do_check(env);
8c01c4f8
CG
9772 if (env->cur_state) {
9773 free_verifier_state(env->cur_state, true);
9774 env->cur_state = NULL;
9775 }
cbd35700 9776
c941ce9c
QM
9777 if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
9778 ret = bpf_prog_offload_finalize(env);
9779
0246e64d 9780skip_full_check:
638f5b90 9781 while (!pop_stack(env, NULL, NULL));
f1bca824 9782 free_states(env);
0246e64d 9783
c131187d 9784 if (ret == 0)
9b38c405 9785 ret = check_max_stack_depth(env);
c131187d 9786
9b38c405 9787 /* instruction rewrites happen after this point */
e2ae4ca2
JK
9788 if (is_priv) {
9789 if (ret == 0)
9790 opt_hard_wire_dead_code_branches(env);
52875a04
JK
9791 if (ret == 0)
9792 ret = opt_remove_dead_code(env);
a1b14abc
JK
9793 if (ret == 0)
9794 ret = opt_remove_nops(env);
52875a04
JK
9795 } else {
9796 if (ret == 0)
9797 sanitize_dead_code(env);
e2ae4ca2
JK
9798 }
9799
9bac3d6d
AS
9800 if (ret == 0)
9801 /* program is valid, convert *(u32*)(ctx + off) accesses */
9802 ret = convert_ctx_accesses(env);
9803
e245c5c6 9804 if (ret == 0)
79741b3b 9805 ret = fixup_bpf_calls(env);
e245c5c6 9806
a4b1d3c1
JW
9807 /* do 32-bit optimization after insn patching has done so those patched
9808 * insns could be handled correctly.
9809 */
d6c2308c
JW
9810 if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
9811 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
9812 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
9813 : false;
a4b1d3c1
JW
9814 }
9815
1ea47e01
AS
9816 if (ret == 0)
9817 ret = fixup_call_args(env);
9818
06ee7115
AS
9819 env->verification_time = ktime_get_ns() - start_time;
9820 print_verification_stats(env);
9821
a2a7d570 9822 if (log->level && bpf_verifier_log_full(log))
cbd35700 9823 ret = -ENOSPC;
a2a7d570 9824 if (log->level && !log->ubuf) {
cbd35700 9825 ret = -EFAULT;
a2a7d570 9826 goto err_release_maps;
cbd35700
AS
9827 }
9828
0246e64d
AS
9829 if (ret == 0 && env->used_map_cnt) {
9830 /* if program passed verifier, update used_maps in bpf_prog_info */
9bac3d6d
AS
9831 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
9832 sizeof(env->used_maps[0]),
9833 GFP_KERNEL);
0246e64d 9834
9bac3d6d 9835 if (!env->prog->aux->used_maps) {
0246e64d 9836 ret = -ENOMEM;
a2a7d570 9837 goto err_release_maps;
0246e64d
AS
9838 }
9839
9bac3d6d 9840 memcpy(env->prog->aux->used_maps, env->used_maps,
0246e64d 9841 sizeof(env->used_maps[0]) * env->used_map_cnt);
9bac3d6d 9842 env->prog->aux->used_map_cnt = env->used_map_cnt;
0246e64d
AS
9843
9844 /* program is valid. Convert pseudo bpf_ld_imm64 into generic
9845 * bpf_ld_imm64 instructions
9846 */
9847 convert_pseudo_ld_imm64(env);
9848 }
cbd35700 9849
ba64e7d8
YS
9850 if (ret == 0)
9851 adjust_btf_func(env);
9852
a2a7d570 9853err_release_maps:
9bac3d6d 9854 if (!env->prog->aux->used_maps)
0246e64d 9855 /* if we didn't copy map pointers into bpf_prog_info, release
ab7f5bf0 9856 * them now. Otherwise free_used_maps() will release them.
0246e64d
AS
9857 */
9858 release_maps(env);
9bac3d6d 9859 *prog = env->prog;
3df126f3 9860err_unlock:
45a73c17
AS
9861 if (!is_priv)
9862 mutex_unlock(&bpf_verifier_lock);
3df126f3
JK
9863 vfree(env->insn_aux_data);
9864err_free_env:
9865 kfree(env);
51580e79
AS
9866 return ret;
9867}