| 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
| 3 | * Copyright (c) 2016 Facebook |
| 4 | * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io |
| 5 | */ |
| 6 | #include <uapi/linux/btf.h> |
| 7 | #include <linux/bpf-cgroup.h> |
| 8 | #include <linux/kernel.h> |
| 9 | #include <linux/types.h> |
| 10 | #include <linux/slab.h> |
| 11 | #include <linux/bpf.h> |
| 12 | #include <linux/btf.h> |
| 13 | #include <linux/bpf_verifier.h> |
| 14 | #include <linux/filter.h> |
| 15 | #include <net/netlink.h> |
| 16 | #include <linux/file.h> |
| 17 | #include <linux/vmalloc.h> |
| 18 | #include <linux/stringify.h> |
| 19 | #include <linux/bsearch.h> |
| 20 | #include <linux/sort.h> |
| 21 | #include <linux/perf_event.h> |
| 22 | #include <linux/ctype.h> |
| 23 | #include <linux/error-injection.h> |
| 24 | #include <linux/bpf_lsm.h> |
| 25 | #include <linux/btf_ids.h> |
| 26 | #include <linux/poison.h> |
| 27 | #include <linux/module.h> |
| 28 | #include <linux/cpumask.h> |
| 29 | #include <linux/bpf_mem_alloc.h> |
| 30 | #include <net/xdp.h> |
| 31 | #include <linux/trace_events.h> |
| 32 | #include <linux/kallsyms.h> |
| 33 | |
| 34 | #include "disasm.h" |
| 35 | |
| 36 | static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { |
| 37 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
| 38 | [_id] = & _name ## _verifier_ops, |
| 39 | #define BPF_MAP_TYPE(_id, _ops) |
| 40 | #define BPF_LINK_TYPE(_id, _name) |
| 41 | #include <linux/bpf_types.h> |
| 42 | #undef BPF_PROG_TYPE |
| 43 | #undef BPF_MAP_TYPE |
| 44 | #undef BPF_LINK_TYPE |
| 45 | }; |
| 46 | |
| 47 | struct bpf_mem_alloc bpf_global_percpu_ma; |
| 48 | static bool bpf_global_percpu_ma_set; |
| 49 | |
| 50 | /* bpf_check() is a static code analyzer that walks eBPF program |
| 51 | * instruction by instruction and updates register/stack state. |
| 52 | * All paths of conditional branches are analyzed until 'bpf_exit' insn. |
| 53 | * |
| 54 | * The first pass is depth-first-search to check that the program is a DAG. |
| 55 | * It rejects the following programs: |
| 56 | * - larger than BPF_MAXINSNS insns |
| 57 | * - if loop is present (detected via back-edge) |
| 58 | * - unreachable insns exist (shouldn't be a forest. program = one function) |
| 59 | * - out of bounds or malformed jumps |
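 *
 * For illustration only (a hypothetical two-insn program), a back-edge such as
 *   BPF_MOV64_IMM(BPF_REG_0, 0),
 *   BPF_JMP_A(-2),               // jumps back to insn 0
 * is rejected: unprivileged programs may not contain back-edges at all, and
 * even where bounded loops are allowed this program can never reach 'bpf_exit'.
 *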
| 60 | * The second pass is all possible path descent from the 1st insn. |
| 61 | * Since it's analyzing all paths through the program, the length of the |
 * analysis is limited to 64k insn, which may be hit even if the total number of
 * insns is less than 4K, when there are too many branches that change stack/regs.
| 64 | * Number of 'branches to be analyzed' is limited to 1k |
| 65 | * |
| 66 | * On entry to each instruction, each register has a type, and the instruction |
| 67 | * changes the types of the registers depending on instruction semantics. |
| 68 | * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is |
| 69 | * copied to R1. |
| 70 | * |
| 71 | * All registers are 64-bit. |
| 72 | * R0 - return register |
| 73 | * R1-R5 argument passing registers |
| 74 | * R6-R9 callee saved registers |
| 75 | * R10 - frame pointer read-only |
| 76 | * |
| 77 | * At the start of BPF program the register R1 contains a pointer to bpf_context |
| 78 | * and has type PTR_TO_CTX. |
| 79 | * |
 * The verifier tracks arithmetic operations on pointers. For example:
| 81 | * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), |
| 82 | * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), |
 * 1st insn copies R10 (which has type FRAME_PTR) into R1
| 84 | * and 2nd arithmetic instruction is pattern matched to recognize |
| 85 | * that it wants to construct a pointer to some element within stack. |
| 86 | * So after 2nd insn, the register R1 has type PTR_TO_STACK |
| 87 | * (and -20 constant is saved for further stack bounds checking). |
| 88 | * Meaning that this reg is a pointer to stack plus known immediate constant. |
| 89 | * |
| 90 | * Most of the time the registers have SCALAR_VALUE type, which |
| 91 | * means the register has some value, but it's not a valid pointer. |
| 92 | * (like pointer plus pointer becomes SCALAR_VALUE type) |
| 93 | * |
 * When the verifier sees load or store instructions, the type of the base
 * register can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET.
 * These are four pointer types recognized by the check_mem_access() function.
| 97 | * |
| 98 | * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' |
| 99 | * and the range of [ptr, ptr + map's value_size) is accessible. |
| 100 | * |
| 101 | * registers used to pass values to function calls are checked against |
| 102 | * function argument constraints. |
| 103 | * |
| 104 | * ARG_PTR_TO_MAP_KEY is one of such argument constraints. |
| 105 | * It means that the register type passed to this function must be |
| 106 | * PTR_TO_STACK and it will be used inside the function as |
| 107 | * 'pointer to map element key' |
| 108 | * |
| 109 | * For example the argument constraints for bpf_map_lookup_elem(): |
| 110 | * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, |
| 111 | * .arg1_type = ARG_CONST_MAP_PTR, |
| 112 | * .arg2_type = ARG_PTR_TO_MAP_KEY, |
| 113 | * |
 * ret_type says that this function returns 'pointer to map elem value or null'.
 * The function expects the 1st argument to be a const pointer to 'struct bpf_map'
 * and the 2nd argument to be a pointer to the stack, which will be used inside
 * the helper function as a pointer to the map element key.
| 118 | * |
| 119 | * On the kernel side the helper function looks like: |
| 120 | * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) |
| 121 | * { |
| 122 | * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; |
| 123 | * void *key = (void *) (unsigned long) r2; |
| 124 | * void *value; |
| 125 | * |
| 126 | * here kernel can access 'key' and 'map' pointers safely, knowing that |
| 127 | * [key, key + map->key_size) bytes are valid and were initialized on |
| 128 | * the stack of eBPF program. |
| 129 | * } |
| 130 | * |
| 131 | * Corresponding eBPF program may look like: |
| 132 | * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR |
| 133 | * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK |
| 134 | * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP |
| 135 | * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), |
| 136 | * here verifier looks at prototype of map_lookup_elem() and sees: |
| 137 | * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, |
| 138 | * Now verifier knows that this map has key of R1->map_ptr->key_size bytes |
| 139 | * |
| 140 | * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, |
| 141 | * Now verifier checks that [R2, R2 + map's key_size) are within stack limits |
| 142 | * and were initialized prior to this call. |
| 143 | * If it's ok, then verifier allows this BPF_CALL insn and looks at |
| 144 | * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets |
| 145 | * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function |
| 146 | * returns either pointer to map value or NULL. |
| 147 | * |
| 148 | * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' |
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register becomes a SCALAR_VALUE known to be
 * zero in the false branch. See check_cond_jmp_op().
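 *
 * Continuing that example, a minimal illustrative encoding of the NULL check
 * (assuming the map's value_size is at least 8 bytes) could be:
 *   BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),  // if R0 == NULL, skip the access
 *   BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),   // here R0 has type PTR_TO_MAP_VALUE
 *   BPF_MOV64_IMM(BPF_REG_0, 0),
 *   BPF_EXIT_INSN(),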
| 152 | * |
| 153 | * After the call R0 is set to return type of the function and registers R1-R5 |
| 154 | * are set to NOT_INIT to indicate that they are no longer readable. |
| 155 | * |
| 156 | * The following reference types represent a potential reference to a kernel |
| 157 | * resource which, after first being allocated, must be checked and freed by |
| 158 | * the BPF program: |
| 159 | * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET |
| 160 | * |
| 161 | * When the verifier sees a helper call return a reference type, it allocates a |
| 162 | * pointer id for the reference and stores it in the current function state. |
| 163 | * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into |
| 164 | * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type |
 * passes through a NULL-check conditional. For the branch wherein the pointer
 * is known to be NULL, the verifier releases the reference.
| 167 | * |
| 168 | * For each helper function that allocates a reference, such as |
| 169 | * bpf_sk_lookup_tcp(), there is a corresponding release function, such as |
| 170 | * bpf_sk_release(). When a reference type passes into the release function, |
| 171 | * the verifier also releases the reference. If any unchecked or unreleased |
| 172 | * reference remains at the end of the program, the verifier rejects it. |
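 *
 * As an illustrative sketch (not a complete program), the acquire/release
 * pairing for sockets looks roughly like:
 *   sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
 *   if (sk)
 *           bpf_sk_release(sk);
 * Dropping the bpf_sk_release() call leaves an unreleased reference behind and
 * the program is rejected.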
| 173 | */ |
| 174 | |
| 175 | /* verifier_state + insn_idx are pushed to stack when branch is encountered */ |
| 176 | struct bpf_verifier_stack_elem { |
| 177 | /* verifier state is 'st' |
| 178 | * before processing instruction 'insn_idx' |
| 179 | * and after processing instruction 'prev_insn_idx' |
| 180 | */ |
| 181 | struct bpf_verifier_state st; |
| 182 | int insn_idx; |
| 183 | int prev_insn_idx; |
| 184 | struct bpf_verifier_stack_elem *next; |
| 185 | /* length of verifier log at the time this state was pushed on stack */ |
| 186 | u32 log_pos; |
| 187 | }; |
| 188 | |
| 189 | #define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192 |
| 190 | #define BPF_COMPLEXITY_LIMIT_STATES 64 |
| 191 | |
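/* Layout of bpf_insn_aux_data::map_key_state: bit 63 marks the key as poisoned
 * (not a single known constant across all paths), bit 62 records that a key was
 * seen at all, and the remaining low bits hold the constant key value (see
 * bpf_map_key_immediate() below).
 */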
| 192 | #define BPF_MAP_KEY_POISON (1ULL << 63) |
| 193 | #define BPF_MAP_KEY_SEEN (1ULL << 62) |
| 194 | |
| 195 | #define BPF_GLOBAL_PERCPU_MA_MAX_SIZE 512 |
| 196 | |
| 197 | #define BPF_PRIV_STACK_MIN_SIZE 64 |
| 198 | |
| 199 | static int acquire_reference(struct bpf_verifier_env *env, int insn_idx); |
| 200 | static int release_reference_nomark(struct bpf_verifier_state *state, int ref_obj_id); |
| 201 | static int release_reference(struct bpf_verifier_env *env, int ref_obj_id); |
| 202 | static void invalidate_non_owning_refs(struct bpf_verifier_env *env); |
| 203 | static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env); |
| 204 | static int ref_set_non_owning(struct bpf_verifier_env *env, |
| 205 | struct bpf_reg_state *reg); |
| 206 | static void specialize_kfunc(struct bpf_verifier_env *env, |
| 207 | u32 func_id, u16 offset, unsigned long *addr); |
| 208 | static bool is_trusted_reg(const struct bpf_reg_state *reg); |
| 209 | |
| 210 | static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux) |
| 211 | { |
| 212 | return aux->map_ptr_state.poison; |
| 213 | } |
| 214 | |
| 215 | static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux) |
| 216 | { |
| 217 | return aux->map_ptr_state.unpriv; |
| 218 | } |
| 219 | |
| 220 | static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux, |
| 221 | struct bpf_map *map, |
| 222 | bool unpriv, bool poison) |
| 223 | { |
| 224 | unpriv |= bpf_map_ptr_unpriv(aux); |
| 225 | aux->map_ptr_state.unpriv = unpriv; |
| 226 | aux->map_ptr_state.poison = poison; |
| 227 | aux->map_ptr_state.map_ptr = map; |
| 228 | } |
| 229 | |
| 230 | static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux) |
| 231 | { |
| 232 | return aux->map_key_state & BPF_MAP_KEY_POISON; |
| 233 | } |
| 234 | |
| 235 | static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux) |
| 236 | { |
| 237 | return !(aux->map_key_state & BPF_MAP_KEY_SEEN); |
| 238 | } |
| 239 | |
| 240 | static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux) |
| 241 | { |
| 242 | return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON); |
| 243 | } |
| 244 | |
| 245 | static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state) |
| 246 | { |
| 247 | bool poisoned = bpf_map_key_poisoned(aux); |
| 248 | |
| 249 | aux->map_key_state = state | BPF_MAP_KEY_SEEN | |
| 250 | (poisoned ? BPF_MAP_KEY_POISON : 0ULL); |
| 251 | } |
| 252 | |
| 253 | static bool bpf_helper_call(const struct bpf_insn *insn) |
| 254 | { |
| 255 | return insn->code == (BPF_JMP | BPF_CALL) && |
| 256 | insn->src_reg == 0; |
| 257 | } |
| 258 | |
| 259 | static bool bpf_pseudo_call(const struct bpf_insn *insn) |
| 260 | { |
| 261 | return insn->code == (BPF_JMP | BPF_CALL) && |
| 262 | insn->src_reg == BPF_PSEUDO_CALL; |
| 263 | } |
| 264 | |
| 265 | static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn) |
| 266 | { |
| 267 | return insn->code == (BPF_JMP | BPF_CALL) && |
| 268 | insn->src_reg == BPF_PSEUDO_KFUNC_CALL; |
| 269 | } |
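/* For illustration, the three call flavours above differ only in src_reg
 * (using the BPF_RAW_INSN() macro as in the comment at the top of this file):
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem)
 *       helper call: src_reg == 0, imm selects the helper
 *   BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, callee_off)
 *       bpf-to-bpf call: imm encodes the callee's insn offset relative to this
 *       call ('callee_off' is a placeholder)
 * while kfunc calls carry BPF_PSEUDO_KFUNC_CALL in src_reg and the kfunc's
 * BTF id in imm.
 */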
| 270 | |
| 271 | struct bpf_call_arg_meta { |
| 272 | struct bpf_map *map_ptr; |
| 273 | bool raw_mode; |
| 274 | bool pkt_access; |
| 275 | u8 release_regno; |
| 276 | int regno; |
| 277 | int access_size; |
| 278 | int mem_size; |
| 279 | u64 msize_max_value; |
| 280 | int ref_obj_id; |
| 281 | int dynptr_id; |
| 282 | int map_uid; |
| 283 | int func_id; |
| 284 | struct btf *btf; |
| 285 | u32 btf_id; |
| 286 | struct btf *ret_btf; |
| 287 | u32 ret_btf_id; |
| 288 | u32 subprogno; |
| 289 | struct btf_field *kptr_field; |
| 290 | s64 const_map_key; |
| 291 | }; |
| 292 | |
| 293 | struct bpf_kfunc_call_arg_meta { |
| 294 | /* In parameters */ |
| 295 | struct btf *btf; |
| 296 | u32 func_id; |
| 297 | u32 kfunc_flags; |
| 298 | const struct btf_type *func_proto; |
| 299 | const char *func_name; |
| 300 | /* Out parameters */ |
| 301 | u32 ref_obj_id; |
| 302 | u8 release_regno; |
| 303 | bool r0_rdonly; |
| 304 | u32 ret_btf_id; |
| 305 | u64 r0_size; |
| 306 | u32 subprogno; |
| 307 | struct { |
| 308 | u64 value; |
| 309 | bool found; |
| 310 | } arg_constant; |
| 311 | |
| 312 | /* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling, |
| 313 | * generally to pass info about user-defined local kptr types to later |
| 314 | * verification logic |
| 315 | * bpf_obj_drop/bpf_percpu_obj_drop |
| 316 | * Record the local kptr type to be drop'd |
| 317 | * bpf_refcount_acquire (via KF_ARG_PTR_TO_REFCOUNTED_KPTR arg type) |
| 318 | * Record the local kptr type to be refcount_incr'd and use |
| 319 | * arg_owning_ref to determine whether refcount_acquire should be |
| 320 | * fallible |
| 321 | */ |
| 322 | struct btf *arg_btf; |
| 323 | u32 arg_btf_id; |
| 324 | bool arg_owning_ref; |
| 325 | bool arg_prog; |
| 326 | |
| 327 | struct { |
| 328 | struct btf_field *field; |
| 329 | } arg_list_head; |
| 330 | struct { |
| 331 | struct btf_field *field; |
| 332 | } arg_rbtree_root; |
| 333 | struct { |
| 334 | enum bpf_dynptr_type type; |
| 335 | u32 id; |
| 336 | u32 ref_obj_id; |
| 337 | } initialized_dynptr; |
| 338 | struct { |
| 339 | u8 spi; |
| 340 | u8 frameno; |
| 341 | } iter; |
| 342 | struct { |
| 343 | struct bpf_map *ptr; |
| 344 | int uid; |
| 345 | } map; |
| 346 | u64 mem_size; |
| 347 | }; |
| 348 | |
| 349 | struct btf *btf_vmlinux; |
| 350 | |
| 351 | static const char *btf_type_name(const struct btf *btf, u32 id) |
| 352 | { |
| 353 | return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); |
| 354 | } |
| 355 | |
| 356 | static DEFINE_MUTEX(bpf_verifier_lock); |
| 357 | static DEFINE_MUTEX(bpf_percpu_ma_lock); |
| 358 | |
| 359 | __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...) |
| 360 | { |
| 361 | struct bpf_verifier_env *env = private_data; |
| 362 | va_list args; |
| 363 | |
| 364 | if (!bpf_verifier_log_needed(&env->log)) |
| 365 | return; |
| 366 | |
| 367 | va_start(args, fmt); |
| 368 | bpf_verifier_vlog(&env->log, fmt, args); |
| 369 | va_end(args); |
| 370 | } |
| 371 | |
| 372 | static void verbose_invalid_scalar(struct bpf_verifier_env *env, |
| 373 | struct bpf_reg_state *reg, |
| 374 | struct bpf_retval_range range, const char *ctx, |
| 375 | const char *reg_name) |
| 376 | { |
| 377 | bool unknown = true; |
| 378 | |
| 379 | verbose(env, "%s the register %s has", ctx, reg_name); |
| 380 | if (reg->smin_value > S64_MIN) { |
| 381 | verbose(env, " smin=%lld", reg->smin_value); |
| 382 | unknown = false; |
| 383 | } |
| 384 | if (reg->smax_value < S64_MAX) { |
| 385 | verbose(env, " smax=%lld", reg->smax_value); |
| 386 | unknown = false; |
| 387 | } |
| 388 | if (unknown) |
| 389 | verbose(env, " unknown scalar value"); |
| 390 | verbose(env, " should have been in [%d, %d]\n", range.minval, range.maxval); |
| 391 | } |
| 392 | |
| 393 | static bool reg_not_null(const struct bpf_reg_state *reg) |
| 394 | { |
| 395 | enum bpf_reg_type type; |
| 396 | |
| 397 | type = reg->type; |
| 398 | if (type_may_be_null(type)) |
| 399 | return false; |
| 400 | |
| 401 | type = base_type(type); |
| 402 | return type == PTR_TO_SOCKET || |
| 403 | type == PTR_TO_TCP_SOCK || |
| 404 | type == PTR_TO_MAP_VALUE || |
| 405 | type == PTR_TO_MAP_KEY || |
| 406 | type == PTR_TO_SOCK_COMMON || |
| 407 | (type == PTR_TO_BTF_ID && is_trusted_reg(reg)) || |
| 408 | type == PTR_TO_MEM; |
| 409 | } |
| 410 | |
| 411 | static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg) |
| 412 | { |
| 413 | struct btf_record *rec = NULL; |
| 414 | struct btf_struct_meta *meta; |
| 415 | |
| 416 | if (reg->type == PTR_TO_MAP_VALUE) { |
| 417 | rec = reg->map_ptr->record; |
| 418 | } else if (type_is_ptr_alloc_obj(reg->type)) { |
| 419 | meta = btf_find_struct_meta(reg->btf, reg->btf_id); |
| 420 | if (meta) |
| 421 | rec = meta->record; |
| 422 | } |
| 423 | return rec; |
| 424 | } |
| 425 | |
| 426 | static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog) |
| 427 | { |
| 428 | struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux; |
| 429 | |
| 430 | return aux && aux[subprog].linkage == BTF_FUNC_GLOBAL; |
| 431 | } |
| 432 | |
| 433 | static const char *subprog_name(const struct bpf_verifier_env *env, int subprog) |
| 434 | { |
| 435 | struct bpf_func_info *info; |
| 436 | |
| 437 | if (!env->prog->aux->func_info) |
| 438 | return ""; |
| 439 | |
| 440 | info = &env->prog->aux->func_info[subprog]; |
| 441 | return btf_type_name(env->prog->aux->btf, info->type_id); |
| 442 | } |
| 443 | |
| 444 | static void mark_subprog_exc_cb(struct bpf_verifier_env *env, int subprog) |
| 445 | { |
| 446 | struct bpf_subprog_info *info = subprog_info(env, subprog); |
| 447 | |
| 448 | info->is_cb = true; |
| 449 | info->is_async_cb = true; |
| 450 | info->is_exception_cb = true; |
| 451 | } |
| 452 | |
| 453 | static bool subprog_is_exc_cb(struct bpf_verifier_env *env, int subprog) |
| 454 | { |
| 455 | return subprog_info(env, subprog)->is_exception_cb; |
| 456 | } |
| 457 | |
| 458 | static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) |
| 459 | { |
| 460 | return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK); |
| 461 | } |
| 462 | |
| 463 | static bool type_is_rdonly_mem(u32 type) |
| 464 | { |
| 465 | return type & MEM_RDONLY; |
| 466 | } |
| 467 | |
| 468 | static bool is_acquire_function(enum bpf_func_id func_id, |
| 469 | const struct bpf_map *map) |
| 470 | { |
| 471 | enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC; |
| 472 | |
| 473 | if (func_id == BPF_FUNC_sk_lookup_tcp || |
| 474 | func_id == BPF_FUNC_sk_lookup_udp || |
| 475 | func_id == BPF_FUNC_skc_lookup_tcp || |
| 476 | func_id == BPF_FUNC_ringbuf_reserve || |
| 477 | func_id == BPF_FUNC_kptr_xchg) |
| 478 | return true; |
| 479 | |
| 480 | if (func_id == BPF_FUNC_map_lookup_elem && |
| 481 | (map_type == BPF_MAP_TYPE_SOCKMAP || |
| 482 | map_type == BPF_MAP_TYPE_SOCKHASH)) |
| 483 | return true; |
| 484 | |
| 485 | return false; |
| 486 | } |
| 487 | |
| 488 | static bool is_ptr_cast_function(enum bpf_func_id func_id) |
| 489 | { |
| 490 | return func_id == BPF_FUNC_tcp_sock || |
| 491 | func_id == BPF_FUNC_sk_fullsock || |
| 492 | func_id == BPF_FUNC_skc_to_tcp_sock || |
| 493 | func_id == BPF_FUNC_skc_to_tcp6_sock || |
| 494 | func_id == BPF_FUNC_skc_to_udp6_sock || |
| 495 | func_id == BPF_FUNC_skc_to_mptcp_sock || |
| 496 | func_id == BPF_FUNC_skc_to_tcp_timewait_sock || |
| 497 | func_id == BPF_FUNC_skc_to_tcp_request_sock; |
| 498 | } |
| 499 | |
| 500 | static bool is_dynptr_ref_function(enum bpf_func_id func_id) |
| 501 | { |
| 502 | return func_id == BPF_FUNC_dynptr_data; |
| 503 | } |
| 504 | |
| 505 | static bool is_sync_callback_calling_kfunc(u32 btf_id); |
| 506 | static bool is_async_callback_calling_kfunc(u32 btf_id); |
| 507 | static bool is_callback_calling_kfunc(u32 btf_id); |
| 508 | static bool is_bpf_throw_kfunc(struct bpf_insn *insn); |
| 509 | |
| 510 | static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id); |
| 511 | |
| 512 | static bool is_sync_callback_calling_function(enum bpf_func_id func_id) |
| 513 | { |
| 514 | return func_id == BPF_FUNC_for_each_map_elem || |
| 515 | func_id == BPF_FUNC_find_vma || |
| 516 | func_id == BPF_FUNC_loop || |
| 517 | func_id == BPF_FUNC_user_ringbuf_drain; |
| 518 | } |
| 519 | |
| 520 | static bool is_async_callback_calling_function(enum bpf_func_id func_id) |
| 521 | { |
| 522 | return func_id == BPF_FUNC_timer_set_callback; |
| 523 | } |
| 524 | |
| 525 | static bool is_callback_calling_function(enum bpf_func_id func_id) |
| 526 | { |
| 527 | return is_sync_callback_calling_function(func_id) || |
| 528 | is_async_callback_calling_function(func_id); |
| 529 | } |
| 530 | |
| 531 | static bool is_sync_callback_calling_insn(struct bpf_insn *insn) |
| 532 | { |
| 533 | return (bpf_helper_call(insn) && is_sync_callback_calling_function(insn->imm)) || |
| 534 | (bpf_pseudo_kfunc_call(insn) && is_sync_callback_calling_kfunc(insn->imm)); |
| 535 | } |
| 536 | |
| 537 | static bool is_async_callback_calling_insn(struct bpf_insn *insn) |
| 538 | { |
| 539 | return (bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm)) || |
| 540 | (bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm)); |
| 541 | } |
| 542 | |
| 543 | static bool is_may_goto_insn(struct bpf_insn *insn) |
| 544 | { |
| 545 | return insn->code == (BPF_JMP | BPF_JCOND) && insn->src_reg == BPF_MAY_GOTO; |
| 546 | } |
| 547 | |
| 548 | static bool is_may_goto_insn_at(struct bpf_verifier_env *env, int insn_idx) |
| 549 | { |
| 550 | return is_may_goto_insn(&env->prog->insnsi[insn_idx]); |
| 551 | } |
| 552 | |
| 553 | static bool is_storage_get_function(enum bpf_func_id func_id) |
| 554 | { |
| 555 | return func_id == BPF_FUNC_sk_storage_get || |
| 556 | func_id == BPF_FUNC_inode_storage_get || |
| 557 | func_id == BPF_FUNC_task_storage_get || |
| 558 | func_id == BPF_FUNC_cgrp_storage_get; |
| 559 | } |
| 560 | |
| 561 | static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id, |
| 562 | const struct bpf_map *map) |
| 563 | { |
| 564 | int ref_obj_uses = 0; |
| 565 | |
| 566 | if (is_ptr_cast_function(func_id)) |
| 567 | ref_obj_uses++; |
| 568 | if (is_acquire_function(func_id, map)) |
| 569 | ref_obj_uses++; |
| 570 | if (is_dynptr_ref_function(func_id)) |
| 571 | ref_obj_uses++; |
| 572 | |
| 573 | return ref_obj_uses > 1; |
| 574 | } |
| 575 | |
| 576 | static bool is_cmpxchg_insn(const struct bpf_insn *insn) |
| 577 | { |
| 578 | return BPF_CLASS(insn->code) == BPF_STX && |
| 579 | BPF_MODE(insn->code) == BPF_ATOMIC && |
| 580 | insn->imm == BPF_CMPXCHG; |
| 581 | } |
| 582 | |
| 583 | static bool is_atomic_load_insn(const struct bpf_insn *insn) |
| 584 | { |
| 585 | return BPF_CLASS(insn->code) == BPF_STX && |
| 586 | BPF_MODE(insn->code) == BPF_ATOMIC && |
| 587 | insn->imm == BPF_LOAD_ACQ; |
| 588 | } |
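/* Illustrative encodings matched by the two predicates above, assuming the
 * BPF_ATOMIC_OP() insn macro (its argument order here is an assumption):
 *   BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8)
 *   BPF_ATOMIC_OP(BPF_DW, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_1, 0)
 * Both use insn class BPF_STX with mode BPF_ATOMIC; the specific operation
 * (BPF_CMPXCHG, BPF_LOAD_ACQ, ...) is carried in the imm field.
 */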
| 589 | |
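/* Convert a negative stack offset into a stack slot index ("spi"): for
 * example, off == -8 maps to spi 0 (the 8-byte slot just below the frame
 * pointer), off == -16 maps to spi 1, and so on.
 */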
| 590 | static int __get_spi(s32 off) |
| 591 | { |
| 592 | return (-off - 1) / BPF_REG_SIZE; |
| 593 | } |
| 594 | |
| 595 | static struct bpf_func_state *func(struct bpf_verifier_env *env, |
| 596 | const struct bpf_reg_state *reg) |
| 597 | { |
| 598 | struct bpf_verifier_state *cur = env->cur_state; |
| 599 | |
| 600 | return cur->frame[reg->frameno]; |
| 601 | } |
| 602 | |
| 603 | static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots) |
| 604 | { |
| 605 | int allocated_slots = state->allocated_stack / BPF_REG_SIZE; |
| 606 | |
	/* We need to check that slots between [spi - nr_slots + 1, spi] are
	 * within [0, allocated_slots).
| 609 | * |
| 610 | * Please note that the spi grows downwards. For example, a dynptr |
| 611 | * takes the size of two stack slots; the first slot will be at |
| 612 | * spi and the second slot will be at spi - 1. |
| 613 | */ |
| 614 | return spi - nr_slots + 1 >= 0 && spi < allocated_slots; |
| 615 | } |
| 616 | |
| 617 | static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, |
| 618 | const char *obj_kind, int nr_slots) |
| 619 | { |
| 620 | int off, spi; |
| 621 | |
| 622 | if (!tnum_is_const(reg->var_off)) { |
| 623 | verbose(env, "%s has to be at a constant offset\n", obj_kind); |
| 624 | return -EINVAL; |
| 625 | } |
| 626 | |
| 627 | off = reg->off + reg->var_off.value; |
| 628 | if (off % BPF_REG_SIZE) { |
| 629 | verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off); |
| 630 | return -EINVAL; |
| 631 | } |
| 632 | |
| 633 | spi = __get_spi(off); |
| 634 | if (spi + 1 < nr_slots) { |
| 635 | verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off); |
| 636 | return -EINVAL; |
| 637 | } |
| 638 | |
| 639 | if (!is_spi_bounds_valid(func(env, reg), spi, nr_slots)) |
| 640 | return -ERANGE; |
| 641 | return spi; |
| 642 | } |
| 643 | |
| 644 | static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 645 | { |
| 646 | return stack_slot_obj_get_spi(env, reg, "dynptr", BPF_DYNPTR_NR_SLOTS); |
| 647 | } |
| 648 | |
| 649 | static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots) |
| 650 | { |
| 651 | return stack_slot_obj_get_spi(env, reg, "iter", nr_slots); |
| 652 | } |
| 653 | |
| 654 | static int irq_flag_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 655 | { |
| 656 | return stack_slot_obj_get_spi(env, reg, "irq_flag", 1); |
| 657 | } |
| 658 | |
| 659 | static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type) |
| 660 | { |
| 661 | switch (arg_type & DYNPTR_TYPE_FLAG_MASK) { |
| 662 | case DYNPTR_TYPE_LOCAL: |
| 663 | return BPF_DYNPTR_TYPE_LOCAL; |
| 664 | case DYNPTR_TYPE_RINGBUF: |
| 665 | return BPF_DYNPTR_TYPE_RINGBUF; |
| 666 | case DYNPTR_TYPE_SKB: |
| 667 | return BPF_DYNPTR_TYPE_SKB; |
| 668 | case DYNPTR_TYPE_XDP: |
| 669 | return BPF_DYNPTR_TYPE_XDP; |
| 670 | default: |
| 671 | return BPF_DYNPTR_TYPE_INVALID; |
| 672 | } |
| 673 | } |
| 674 | |
| 675 | static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type) |
| 676 | { |
| 677 | switch (type) { |
| 678 | case BPF_DYNPTR_TYPE_LOCAL: |
| 679 | return DYNPTR_TYPE_LOCAL; |
| 680 | case BPF_DYNPTR_TYPE_RINGBUF: |
| 681 | return DYNPTR_TYPE_RINGBUF; |
| 682 | case BPF_DYNPTR_TYPE_SKB: |
| 683 | return DYNPTR_TYPE_SKB; |
| 684 | case BPF_DYNPTR_TYPE_XDP: |
| 685 | return DYNPTR_TYPE_XDP; |
| 686 | default: |
| 687 | return 0; |
| 688 | } |
| 689 | } |
| 690 | |
| 691 | static bool dynptr_type_refcounted(enum bpf_dynptr_type type) |
| 692 | { |
| 693 | return type == BPF_DYNPTR_TYPE_RINGBUF; |
| 694 | } |
| 695 | |
| 696 | static void __mark_dynptr_reg(struct bpf_reg_state *reg, |
| 697 | enum bpf_dynptr_type type, |
| 698 | bool first_slot, int dynptr_id); |
| 699 | |
| 700 | static void __mark_reg_not_init(const struct bpf_verifier_env *env, |
| 701 | struct bpf_reg_state *reg); |
| 702 | |
| 703 | static void mark_dynptr_stack_regs(struct bpf_verifier_env *env, |
| 704 | struct bpf_reg_state *sreg1, |
| 705 | struct bpf_reg_state *sreg2, |
| 706 | enum bpf_dynptr_type type) |
| 707 | { |
| 708 | int id = ++env->id_gen; |
| 709 | |
| 710 | __mark_dynptr_reg(sreg1, type, true, id); |
| 711 | __mark_dynptr_reg(sreg2, type, false, id); |
| 712 | } |
| 713 | |
| 714 | static void mark_dynptr_cb_reg(struct bpf_verifier_env *env, |
| 715 | struct bpf_reg_state *reg, |
| 716 | enum bpf_dynptr_type type) |
| 717 | { |
| 718 | __mark_dynptr_reg(reg, type, true, ++env->id_gen); |
| 719 | } |
| 720 | |
| 721 | static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, |
| 722 | struct bpf_func_state *state, int spi); |
| 723 | |
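/* From the BPF program's point of view, the slots marked below back a
 * struct bpf_dynptr living on the program stack. A hedged, illustrative usage
 * (assuming 'ringbuf' is a BPF_MAP_TYPE_RINGBUF map) is:
 *
 *   struct bpf_dynptr dptr;
 *
 *   bpf_ringbuf_reserve_dynptr(&ringbuf, 16, 0, &dptr);
 *   ...
 *   bpf_ringbuf_submit_dynptr(&dptr, 0);
 *
 * where reserve/submit form the acquire/release pair for the refcounted
 * BPF_DYNPTR_TYPE_RINGBUF flavour tracked via ref_obj_id below.
 */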
| 724 | static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg, |
| 725 | enum bpf_arg_type arg_type, int insn_idx, int clone_ref_obj_id) |
| 726 | { |
| 727 | struct bpf_func_state *state = func(env, reg); |
| 728 | enum bpf_dynptr_type type; |
| 729 | int spi, i, err; |
| 730 | |
| 731 | spi = dynptr_get_spi(env, reg); |
| 732 | if (spi < 0) |
| 733 | return spi; |
| 734 | |
| 735 | /* We cannot assume both spi and spi - 1 belong to the same dynptr, |
| 736 | * hence we need to call destroy_if_dynptr_stack_slot twice for both, |
| 737 | * to ensure that for the following example: |
| 738 | * [d1][d1][d2][d2] |
| 739 | * spi 3 2 1 0 |
| 740 | * So marking spi = 2 should lead to destruction of both d1 and d2. In |
| 741 | * case they do belong to same dynptr, second call won't see slot_type |
| 742 | * as STACK_DYNPTR and will simply skip destruction. |
| 743 | */ |
| 744 | err = destroy_if_dynptr_stack_slot(env, state, spi); |
| 745 | if (err) |
| 746 | return err; |
| 747 | err = destroy_if_dynptr_stack_slot(env, state, spi - 1); |
| 748 | if (err) |
| 749 | return err; |
| 750 | |
| 751 | for (i = 0; i < BPF_REG_SIZE; i++) { |
| 752 | state->stack[spi].slot_type[i] = STACK_DYNPTR; |
| 753 | state->stack[spi - 1].slot_type[i] = STACK_DYNPTR; |
| 754 | } |
| 755 | |
| 756 | type = arg_to_dynptr_type(arg_type); |
| 757 | if (type == BPF_DYNPTR_TYPE_INVALID) |
| 758 | return -EINVAL; |
| 759 | |
| 760 | mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr, |
| 761 | &state->stack[spi - 1].spilled_ptr, type); |
| 762 | |
| 763 | if (dynptr_type_refcounted(type)) { |
| 764 | /* The id is used to track proper releasing */ |
| 765 | int id; |
| 766 | |
| 767 | if (clone_ref_obj_id) |
| 768 | id = clone_ref_obj_id; |
| 769 | else |
| 770 | id = acquire_reference(env, insn_idx); |
| 771 | |
| 772 | if (id < 0) |
| 773 | return id; |
| 774 | |
| 775 | state->stack[spi].spilled_ptr.ref_obj_id = id; |
| 776 | state->stack[spi - 1].spilled_ptr.ref_obj_id = id; |
| 777 | } |
| 778 | |
| 779 | state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; |
| 780 | state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; |
| 781 | |
| 782 | return 0; |
| 783 | } |
| 784 | |
| 785 | static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi) |
| 786 | { |
| 787 | int i; |
| 788 | |
| 789 | for (i = 0; i < BPF_REG_SIZE; i++) { |
| 790 | state->stack[spi].slot_type[i] = STACK_INVALID; |
| 791 | state->stack[spi - 1].slot_type[i] = STACK_INVALID; |
| 792 | } |
| 793 | |
| 794 | __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); |
| 795 | __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); |
| 796 | |
| 797 | /* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot? |
| 798 | * |
| 799 | * While we don't allow reading STACK_INVALID, it is still possible to |
| 800 | * do <8 byte writes marking some but not all slots as STACK_MISC. Then, |
| 801 | * helpers or insns can do partial read of that part without failing, |
| 802 | * but check_stack_range_initialized, check_stack_read_var_off, and |
| 803 | * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of |
| 804 | * the slot conservatively. Hence we need to prevent those liveness |
| 805 | * marking walks. |
| 806 | * |
| 807 | * This was not a problem before because STACK_INVALID is only set by |
| 808 | * default (where the default reg state has its reg->parent as NULL), or |
| 809 | * in clean_live_states after REG_LIVE_DONE (at which point |
| 810 | * mark_reg_read won't walk reg->parent chain), but not randomly during |
| 811 | * verifier state exploration (like we did above). Hence, for our case |
| 812 | * parentage chain will still be live (i.e. reg->parent may be |
| 813 | * non-NULL), while earlier reg->parent was NULL, so we need |
	 * REG_LIVE_WRITTEN to screen off read marker propagation when it is
	 * done later on reads or by mark_dynptr_read, which would otherwise
	 * unnecessarily mark registers in the verifier state.
| 817 | */ |
| 818 | state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; |
| 819 | state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; |
| 820 | } |
| 821 | |
| 822 | static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 823 | { |
| 824 | struct bpf_func_state *state = func(env, reg); |
| 825 | int spi, ref_obj_id, i; |
| 826 | |
| 827 | spi = dynptr_get_spi(env, reg); |
| 828 | if (spi < 0) |
| 829 | return spi; |
| 830 | |
| 831 | if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { |
| 832 | invalidate_dynptr(env, state, spi); |
| 833 | return 0; |
| 834 | } |
| 835 | |
| 836 | ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id; |
| 837 | |
| 838 | /* If the dynptr has a ref_obj_id, then we need to invalidate |
| 839 | * two things: |
| 840 | * |
| 841 | * 1) Any dynptrs with a matching ref_obj_id (clones) |
| 842 | * 2) Any slices derived from this dynptr. |
| 843 | */ |
| 844 | |
| 845 | /* Invalidate any slices associated with this dynptr */ |
| 846 | WARN_ON_ONCE(release_reference(env, ref_obj_id)); |
| 847 | |
| 848 | /* Invalidate any dynptr clones */ |
| 849 | for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) { |
| 850 | if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id) |
| 851 | continue; |
| 852 | |
| 853 | /* it should always be the case that if the ref obj id |
| 854 | * matches then the stack slot also belongs to a |
| 855 | * dynptr |
| 856 | */ |
| 857 | if (state->stack[i].slot_type[0] != STACK_DYNPTR) { |
| 858 | verbose(env, "verifier internal error: misconfigured ref_obj_id\n"); |
| 859 | return -EFAULT; |
| 860 | } |
| 861 | if (state->stack[i].spilled_ptr.dynptr.first_slot) |
| 862 | invalidate_dynptr(env, state, i); |
| 863 | } |
| 864 | |
| 865 | return 0; |
| 866 | } |
| 867 | |
| 868 | static void __mark_reg_unknown(const struct bpf_verifier_env *env, |
| 869 | struct bpf_reg_state *reg); |
| 870 | |
| 871 | static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 872 | { |
| 873 | if (!env->allow_ptr_leaks) |
| 874 | __mark_reg_not_init(env, reg); |
| 875 | else |
| 876 | __mark_reg_unknown(env, reg); |
| 877 | } |
| 878 | |
| 879 | static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, |
| 880 | struct bpf_func_state *state, int spi) |
| 881 | { |
| 882 | struct bpf_func_state *fstate; |
| 883 | struct bpf_reg_state *dreg; |
| 884 | int i, dynptr_id; |
| 885 | |
| 886 | /* We always ensure that STACK_DYNPTR is never set partially, |
| 887 | * hence just checking for slot_type[0] is enough. This is |
| 888 | * different for STACK_SPILL, where it may be only set for |
| 889 | * 1 byte, so code has to use is_spilled_reg. |
| 890 | */ |
| 891 | if (state->stack[spi].slot_type[0] != STACK_DYNPTR) |
| 892 | return 0; |
| 893 | |
| 894 | /* Reposition spi to first slot */ |
| 895 | if (!state->stack[spi].spilled_ptr.dynptr.first_slot) |
| 896 | spi = spi + 1; |
| 897 | |
| 898 | if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { |
| 899 | verbose(env, "cannot overwrite referenced dynptr\n"); |
| 900 | return -EINVAL; |
| 901 | } |
| 902 | |
| 903 | mark_stack_slot_scratched(env, spi); |
| 904 | mark_stack_slot_scratched(env, spi - 1); |
| 905 | |
| 906 | /* Writing partially to one dynptr stack slot destroys both. */ |
| 907 | for (i = 0; i < BPF_REG_SIZE; i++) { |
| 908 | state->stack[spi].slot_type[i] = STACK_INVALID; |
| 909 | state->stack[spi - 1].slot_type[i] = STACK_INVALID; |
| 910 | } |
| 911 | |
| 912 | dynptr_id = state->stack[spi].spilled_ptr.id; |
| 913 | /* Invalidate any slices associated with this dynptr */ |
| 914 | bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({ |
| 915 | /* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */ |
| 916 | if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM) |
| 917 | continue; |
| 918 | if (dreg->dynptr_id == dynptr_id) |
| 919 | mark_reg_invalid(env, dreg); |
| 920 | })); |
| 921 | |
| 922 | /* Do not release reference state, we are destroying dynptr on stack, |
| 923 | * not using some helper to release it. Just reset register. |
| 924 | */ |
| 925 | __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); |
| 926 | __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); |
| 927 | |
| 928 | /* Same reason as unmark_stack_slots_dynptr above */ |
| 929 | state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; |
| 930 | state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; |
| 931 | |
| 932 | return 0; |
| 933 | } |
| 934 | |
| 935 | static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 936 | { |
| 937 | int spi; |
| 938 | |
| 939 | if (reg->type == CONST_PTR_TO_DYNPTR) |
| 940 | return false; |
| 941 | |
| 942 | spi = dynptr_get_spi(env, reg); |
| 943 | |
| 944 | /* -ERANGE (i.e. spi not falling into allocated stack slots) isn't an |
| 945 | * error because this just means the stack state hasn't been updated yet. |
| 946 | * We will do check_mem_access to check and update stack bounds later. |
| 947 | */ |
| 948 | if (spi < 0 && spi != -ERANGE) |
| 949 | return false; |
| 950 | |
| 951 | /* We don't need to check if the stack slots are marked by previous |
| 952 | * dynptr initializations because we allow overwriting existing unreferenced |
| 953 | * STACK_DYNPTR slots, see mark_stack_slots_dynptr which calls |
| 954 | * destroy_if_dynptr_stack_slot to ensure dynptr objects at the slots we are |
| 955 | * touching are completely destructed before we reinitialize them for a new |
| 956 | * one. For referenced ones, destroy_if_dynptr_stack_slot returns an error early |
| 957 | * instead of delaying it until the end where the user will get "Unreleased |
| 958 | * reference" error. |
| 959 | */ |
| 960 | return true; |
| 961 | } |
| 962 | |
| 963 | static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 964 | { |
| 965 | struct bpf_func_state *state = func(env, reg); |
| 966 | int i, spi; |
| 967 | |
| 968 | /* This already represents first slot of initialized bpf_dynptr. |
| 969 | * |
| 970 | * CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to |
| 971 | * check_func_arg_reg_off's logic, so we don't need to check its |
| 972 | * offset and alignment. |
| 973 | */ |
| 974 | if (reg->type == CONST_PTR_TO_DYNPTR) |
| 975 | return true; |
| 976 | |
| 977 | spi = dynptr_get_spi(env, reg); |
| 978 | if (spi < 0) |
| 979 | return false; |
| 980 | if (!state->stack[spi].spilled_ptr.dynptr.first_slot) |
| 981 | return false; |
| 982 | |
| 983 | for (i = 0; i < BPF_REG_SIZE; i++) { |
| 984 | if (state->stack[spi].slot_type[i] != STACK_DYNPTR || |
| 985 | state->stack[spi - 1].slot_type[i] != STACK_DYNPTR) |
| 986 | return false; |
| 987 | } |
| 988 | |
| 989 | return true; |
| 990 | } |
| 991 | |
| 992 | static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg, |
| 993 | enum bpf_arg_type arg_type) |
| 994 | { |
| 995 | struct bpf_func_state *state = func(env, reg); |
| 996 | enum bpf_dynptr_type dynptr_type; |
| 997 | int spi; |
| 998 | |
| 999 | /* ARG_PTR_TO_DYNPTR takes any type of dynptr */ |
| 1000 | if (arg_type == ARG_PTR_TO_DYNPTR) |
| 1001 | return true; |
| 1002 | |
| 1003 | dynptr_type = arg_to_dynptr_type(arg_type); |
| 1004 | if (reg->type == CONST_PTR_TO_DYNPTR) { |
| 1005 | return reg->dynptr.type == dynptr_type; |
| 1006 | } else { |
| 1007 | spi = dynptr_get_spi(env, reg); |
| 1008 | if (spi < 0) |
| 1009 | return false; |
| 1010 | return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type; |
| 1011 | } |
| 1012 | } |
| 1013 | |
| 1014 | static void __mark_reg_known_zero(struct bpf_reg_state *reg); |
| 1015 | |
| 1016 | static bool in_rcu_cs(struct bpf_verifier_env *env); |
| 1017 | |
| 1018 | static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta); |
| 1019 | |
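/* Iterator state is also kept in stack slots. From the BPF program's point of
 * view the (illustrative) pattern is:
 *
 *   struct bpf_iter_num it;
 *   int *v;
 *
 *   bpf_iter_num_new(&it, 0, 10);
 *   while ((v = bpf_iter_num_next(&it)))
 *           ...;
 *   bpf_iter_num_destroy(&it);
 *
 * The _new kfunc acquires a reference recorded in the slots marked below and
 * the _destroy kfunc releases it.
 */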
| 1020 | static int mark_stack_slots_iter(struct bpf_verifier_env *env, |
| 1021 | struct bpf_kfunc_call_arg_meta *meta, |
| 1022 | struct bpf_reg_state *reg, int insn_idx, |
| 1023 | struct btf *btf, u32 btf_id, int nr_slots) |
| 1024 | { |
| 1025 | struct bpf_func_state *state = func(env, reg); |
| 1026 | int spi, i, j, id; |
| 1027 | |
| 1028 | spi = iter_get_spi(env, reg, nr_slots); |
| 1029 | if (spi < 0) |
| 1030 | return spi; |
| 1031 | |
| 1032 | id = acquire_reference(env, insn_idx); |
| 1033 | if (id < 0) |
| 1034 | return id; |
| 1035 | |
| 1036 | for (i = 0; i < nr_slots; i++) { |
| 1037 | struct bpf_stack_state *slot = &state->stack[spi - i]; |
| 1038 | struct bpf_reg_state *st = &slot->spilled_ptr; |
| 1039 | |
| 1040 | __mark_reg_known_zero(st); |
| 1041 | st->type = PTR_TO_STACK; /* we don't have dedicated reg type */ |
| 1042 | if (is_kfunc_rcu_protected(meta)) { |
| 1043 | if (in_rcu_cs(env)) |
| 1044 | st->type |= MEM_RCU; |
| 1045 | else |
| 1046 | st->type |= PTR_UNTRUSTED; |
| 1047 | } |
| 1048 | st->live |= REG_LIVE_WRITTEN; |
| 1049 | st->ref_obj_id = i == 0 ? id : 0; |
| 1050 | st->iter.btf = btf; |
| 1051 | st->iter.btf_id = btf_id; |
| 1052 | st->iter.state = BPF_ITER_STATE_ACTIVE; |
| 1053 | st->iter.depth = 0; |
| 1054 | |
| 1055 | for (j = 0; j < BPF_REG_SIZE; j++) |
| 1056 | slot->slot_type[j] = STACK_ITER; |
| 1057 | |
| 1058 | mark_stack_slot_scratched(env, spi - i); |
| 1059 | } |
| 1060 | |
| 1061 | return 0; |
| 1062 | } |
| 1063 | |
| 1064 | static int unmark_stack_slots_iter(struct bpf_verifier_env *env, |
| 1065 | struct bpf_reg_state *reg, int nr_slots) |
| 1066 | { |
| 1067 | struct bpf_func_state *state = func(env, reg); |
| 1068 | int spi, i, j; |
| 1069 | |
| 1070 | spi = iter_get_spi(env, reg, nr_slots); |
| 1071 | if (spi < 0) |
| 1072 | return spi; |
| 1073 | |
| 1074 | for (i = 0; i < nr_slots; i++) { |
| 1075 | struct bpf_stack_state *slot = &state->stack[spi - i]; |
| 1076 | struct bpf_reg_state *st = &slot->spilled_ptr; |
| 1077 | |
| 1078 | if (i == 0) |
| 1079 | WARN_ON_ONCE(release_reference(env, st->ref_obj_id)); |
| 1080 | |
| 1081 | __mark_reg_not_init(env, st); |
| 1082 | |
| 1083 | /* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */ |
| 1084 | st->live |= REG_LIVE_WRITTEN; |
| 1085 | |
| 1086 | for (j = 0; j < BPF_REG_SIZE; j++) |
| 1087 | slot->slot_type[j] = STACK_INVALID; |
| 1088 | |
| 1089 | mark_stack_slot_scratched(env, spi - i); |
| 1090 | } |
| 1091 | |
| 1092 | return 0; |
| 1093 | } |
| 1094 | |
| 1095 | static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env, |
| 1096 | struct bpf_reg_state *reg, int nr_slots) |
| 1097 | { |
| 1098 | struct bpf_func_state *state = func(env, reg); |
| 1099 | int spi, i, j; |
| 1100 | |
| 1101 | /* For -ERANGE (i.e. spi not falling into allocated stack slots), we |
| 1102 | * will do check_mem_access to check and update stack bounds later, so |
| 1103 | * return true for that case. |
| 1104 | */ |
| 1105 | spi = iter_get_spi(env, reg, nr_slots); |
| 1106 | if (spi == -ERANGE) |
| 1107 | return true; |
| 1108 | if (spi < 0) |
| 1109 | return false; |
| 1110 | |
| 1111 | for (i = 0; i < nr_slots; i++) { |
| 1112 | struct bpf_stack_state *slot = &state->stack[spi - i]; |
| 1113 | |
| 1114 | for (j = 0; j < BPF_REG_SIZE; j++) |
| 1115 | if (slot->slot_type[j] == STACK_ITER) |
| 1116 | return false; |
| 1117 | } |
| 1118 | |
| 1119 | return true; |
| 1120 | } |
| 1121 | |
| 1122 | static int is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg, |
| 1123 | struct btf *btf, u32 btf_id, int nr_slots) |
| 1124 | { |
| 1125 | struct bpf_func_state *state = func(env, reg); |
| 1126 | int spi, i, j; |
| 1127 | |
| 1128 | spi = iter_get_spi(env, reg, nr_slots); |
| 1129 | if (spi < 0) |
| 1130 | return -EINVAL; |
| 1131 | |
| 1132 | for (i = 0; i < nr_slots; i++) { |
| 1133 | struct bpf_stack_state *slot = &state->stack[spi - i]; |
| 1134 | struct bpf_reg_state *st = &slot->spilled_ptr; |
| 1135 | |
| 1136 | if (st->type & PTR_UNTRUSTED) |
| 1137 | return -EPROTO; |
| 1138 | /* only main (first) slot has ref_obj_id set */ |
| 1139 | if (i == 0 && !st->ref_obj_id) |
| 1140 | return -EINVAL; |
| 1141 | if (i != 0 && st->ref_obj_id) |
| 1142 | return -EINVAL; |
| 1143 | if (st->iter.btf != btf || st->iter.btf_id != btf_id) |
| 1144 | return -EINVAL; |
| 1145 | |
| 1146 | for (j = 0; j < BPF_REG_SIZE; j++) |
| 1147 | if (slot->slot_type[j] != STACK_ITER) |
| 1148 | return -EINVAL; |
| 1149 | } |
| 1150 | |
| 1151 | return 0; |
| 1152 | } |
| 1153 | |
| 1154 | static int acquire_irq_state(struct bpf_verifier_env *env, int insn_idx); |
| 1155 | static int release_irq_state(struct bpf_verifier_state *state, int id); |
| 1156 | |
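/* IRQ flag state likewise lives in a stack slot. The (illustrative) usage from
 * a BPF program is:
 *
 *   unsigned long flags;
 *
 *   bpf_local_irq_save(&flags);
 *   ...
 *   bpf_local_irq_restore(&flags);
 *
 * Restores must happen in the reverse order of saves; see the out-of-order
 * check in unmark_stack_slot_irq_flag().
 */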
| 1157 | static int mark_stack_slot_irq_flag(struct bpf_verifier_env *env, |
| 1158 | struct bpf_kfunc_call_arg_meta *meta, |
| 1159 | struct bpf_reg_state *reg, int insn_idx, |
| 1160 | int kfunc_class) |
| 1161 | { |
| 1162 | struct bpf_func_state *state = func(env, reg); |
| 1163 | struct bpf_stack_state *slot; |
| 1164 | struct bpf_reg_state *st; |
| 1165 | int spi, i, id; |
| 1166 | |
| 1167 | spi = irq_flag_get_spi(env, reg); |
| 1168 | if (spi < 0) |
| 1169 | return spi; |
| 1170 | |
| 1171 | id = acquire_irq_state(env, insn_idx); |
| 1172 | if (id < 0) |
| 1173 | return id; |
| 1174 | |
| 1175 | slot = &state->stack[spi]; |
| 1176 | st = &slot->spilled_ptr; |
| 1177 | |
| 1178 | __mark_reg_known_zero(st); |
| 1179 | st->type = PTR_TO_STACK; /* we don't have dedicated reg type */ |
| 1180 | st->live |= REG_LIVE_WRITTEN; |
| 1181 | st->ref_obj_id = id; |
| 1182 | st->irq.kfunc_class = kfunc_class; |
| 1183 | |
| 1184 | for (i = 0; i < BPF_REG_SIZE; i++) |
| 1185 | slot->slot_type[i] = STACK_IRQ_FLAG; |
| 1186 | |
| 1187 | mark_stack_slot_scratched(env, spi); |
| 1188 | return 0; |
| 1189 | } |
| 1190 | |
| 1191 | static int unmark_stack_slot_irq_flag(struct bpf_verifier_env *env, struct bpf_reg_state *reg, |
| 1192 | int kfunc_class) |
| 1193 | { |
| 1194 | struct bpf_func_state *state = func(env, reg); |
| 1195 | struct bpf_stack_state *slot; |
| 1196 | struct bpf_reg_state *st; |
| 1197 | int spi, i, err; |
| 1198 | |
| 1199 | spi = irq_flag_get_spi(env, reg); |
| 1200 | if (spi < 0) |
| 1201 | return spi; |
| 1202 | |
| 1203 | slot = &state->stack[spi]; |
| 1204 | st = &slot->spilled_ptr; |
| 1205 | |
| 1206 | if (st->irq.kfunc_class != kfunc_class) { |
| 1207 | const char *flag_kfunc = st->irq.kfunc_class == IRQ_NATIVE_KFUNC ? "native" : "lock"; |
| 1208 | const char *used_kfunc = kfunc_class == IRQ_NATIVE_KFUNC ? "native" : "lock"; |
| 1209 | |
| 1210 | verbose(env, "irq flag acquired by %s kfuncs cannot be restored with %s kfuncs\n", |
| 1211 | flag_kfunc, used_kfunc); |
| 1212 | return -EINVAL; |
| 1213 | } |
| 1214 | |
| 1215 | err = release_irq_state(env->cur_state, st->ref_obj_id); |
| 1216 | WARN_ON_ONCE(err && err != -EACCES); |
| 1217 | if (err) { |
| 1218 | int insn_idx = 0; |
| 1219 | |
| 1220 | for (int i = 0; i < env->cur_state->acquired_refs; i++) { |
| 1221 | if (env->cur_state->refs[i].id == env->cur_state->active_irq_id) { |
| 1222 | insn_idx = env->cur_state->refs[i].insn_idx; |
| 1223 | break; |
| 1224 | } |
| 1225 | } |
| 1226 | |
| 1227 | verbose(env, "cannot restore irq state out of order, expected id=%d acquired at insn_idx=%d\n", |
| 1228 | env->cur_state->active_irq_id, insn_idx); |
| 1229 | return err; |
| 1230 | } |
| 1231 | |
| 1232 | __mark_reg_not_init(env, st); |
| 1233 | |
| 1234 | /* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */ |
| 1235 | st->live |= REG_LIVE_WRITTEN; |
| 1236 | |
| 1237 | for (i = 0; i < BPF_REG_SIZE; i++) |
| 1238 | slot->slot_type[i] = STACK_INVALID; |
| 1239 | |
| 1240 | mark_stack_slot_scratched(env, spi); |
| 1241 | return 0; |
| 1242 | } |
| 1243 | |
| 1244 | static bool is_irq_flag_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 1245 | { |
| 1246 | struct bpf_func_state *state = func(env, reg); |
| 1247 | struct bpf_stack_state *slot; |
| 1248 | int spi, i; |
| 1249 | |
| 1250 | /* For -ERANGE (i.e. spi not falling into allocated stack slots), we |
| 1251 | * will do check_mem_access to check and update stack bounds later, so |
| 1252 | * return true for that case. |
| 1253 | */ |
| 1254 | spi = irq_flag_get_spi(env, reg); |
| 1255 | if (spi == -ERANGE) |
| 1256 | return true; |
| 1257 | if (spi < 0) |
| 1258 | return false; |
| 1259 | |
| 1260 | slot = &state->stack[spi]; |
| 1261 | |
| 1262 | for (i = 0; i < BPF_REG_SIZE; i++) |
| 1263 | if (slot->slot_type[i] == STACK_IRQ_FLAG) |
| 1264 | return false; |
| 1265 | return true; |
| 1266 | } |
| 1267 | |
| 1268 | static int is_irq_flag_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 1269 | { |
| 1270 | struct bpf_func_state *state = func(env, reg); |
| 1271 | struct bpf_stack_state *slot; |
| 1272 | struct bpf_reg_state *st; |
| 1273 | int spi, i; |
| 1274 | |
| 1275 | spi = irq_flag_get_spi(env, reg); |
| 1276 | if (spi < 0) |
| 1277 | return -EINVAL; |
| 1278 | |
| 1279 | slot = &state->stack[spi]; |
| 1280 | st = &slot->spilled_ptr; |
| 1281 | |
| 1282 | if (!st->ref_obj_id) |
| 1283 | return -EINVAL; |
| 1284 | |
| 1285 | for (i = 0; i < BPF_REG_SIZE; i++) |
| 1286 | if (slot->slot_type[i] != STACK_IRQ_FLAG) |
| 1287 | return -EINVAL; |
| 1288 | return 0; |
| 1289 | } |
| 1290 | |
| 1291 | /* Check if given stack slot is "special": |
| 1292 | * - spilled register state (STACK_SPILL); |
| 1293 | * - dynptr state (STACK_DYNPTR); |
 * - iter state (STACK_ITER);
 * - irq flag state (STACK_IRQ_FLAG).
| 1296 | */ |
| 1297 | static bool is_stack_slot_special(const struct bpf_stack_state *stack) |
| 1298 | { |
| 1299 | enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1]; |
| 1300 | |
| 1301 | switch (type) { |
| 1302 | case STACK_SPILL: |
| 1303 | case STACK_DYNPTR: |
| 1304 | case STACK_ITER: |
| 1305 | case STACK_IRQ_FLAG: |
| 1306 | return true; |
| 1307 | case STACK_INVALID: |
| 1308 | case STACK_MISC: |
| 1309 | case STACK_ZERO: |
| 1310 | return false; |
| 1311 | default: |
| 1312 | WARN_ONCE(1, "unknown stack slot type %d\n", type); |
| 1313 | return true; |
| 1314 | } |
| 1315 | } |
| 1316 | |
| 1317 | /* The reg state of a pointer or a bounded scalar was saved when |
| 1318 | * it was spilled to the stack. |
| 1319 | */ |
| 1320 | static bool is_spilled_reg(const struct bpf_stack_state *stack) |
| 1321 | { |
| 1322 | return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; |
| 1323 | } |
| 1324 | |
| 1325 | static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack) |
| 1326 | { |
| 1327 | return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL && |
| 1328 | stack->spilled_ptr.type == SCALAR_VALUE; |
| 1329 | } |
| 1330 | |
| 1331 | static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack) |
| 1332 | { |
| 1333 | return stack->slot_type[0] == STACK_SPILL && |
| 1334 | stack->spilled_ptr.type == SCALAR_VALUE; |
| 1335 | } |
| 1336 | |
| 1337 | /* Mark stack slot as STACK_MISC, unless it is already STACK_INVALID, in which |
| 1338 | * case they are equivalent, or it's STACK_ZERO, in which case we preserve |
| 1339 | * more precise STACK_ZERO. |
| 1340 | * Regardless of allow_ptr_leaks setting (i.e., privileged or unprivileged |
 * mode), we won't promote STACK_INVALID to STACK_MISC. In the privileged case
 * it is unnecessary, as both are considered equivalent when loading data and
 * pruning; in unprivileged mode it would be incorrect to allow reads of
 * invalid slots.
| 1345 | */ |
| 1346 | static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype) |
| 1347 | { |
| 1348 | if (*stype == STACK_ZERO) |
| 1349 | return; |
| 1350 | if (*stype == STACK_INVALID) |
| 1351 | return; |
| 1352 | *stype = STACK_MISC; |
| 1353 | } |
| 1354 | |
| 1355 | static void scrub_spilled_slot(u8 *stype) |
| 1356 | { |
| 1357 | if (*stype != STACK_INVALID) |
| 1358 | *stype = STACK_MISC; |
| 1359 | } |
| 1360 | |
| 1361 | /* copy array src of length n * size bytes to dst. dst is reallocated if it's too |
| 1362 | * small to hold src. This is different from krealloc since we don't want to preserve |
| 1363 | * the contents of dst. |
| 1364 | * |
| 1365 | * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could |
| 1366 | * not be allocated. |
| 1367 | */ |
| 1368 | static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags) |
| 1369 | { |
| 1370 | size_t alloc_bytes; |
| 1371 | void *orig = dst; |
| 1372 | size_t bytes; |
| 1373 | |
| 1374 | if (ZERO_OR_NULL_PTR(src)) |
| 1375 | goto out; |
| 1376 | |
| 1377 | if (unlikely(check_mul_overflow(n, size, &bytes))) |
| 1378 | return NULL; |
| 1379 | |
| 1380 | alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes)); |
| 1381 | dst = krealloc(orig, alloc_bytes, flags); |
| 1382 | if (!dst) { |
| 1383 | kfree(orig); |
| 1384 | return NULL; |
| 1385 | } |
| 1386 | |
| 1387 | memcpy(dst, src, bytes); |
| 1388 | out: |
| 1389 | return dst ? dst : ZERO_SIZE_PTR; |
| 1390 | } |
| 1391 | |
| 1392 | /* resize an array from old_n items to new_n items. the array is reallocated if it's too |
| 1393 | * small to hold new_n items. new items are zeroed out if the array grows. |
| 1394 | * |
| 1395 | * Contrary to krealloc_array, does not free arr if new_n is zero. |
| 1396 | */ |
| 1397 | static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size) |
| 1398 | { |
| 1399 | size_t alloc_size; |
| 1400 | void *new_arr; |
| 1401 | |
| 1402 | if (!new_n || old_n == new_n) |
| 1403 | goto out; |
| 1404 | |
| 1405 | alloc_size = kmalloc_size_roundup(size_mul(new_n, size)); |
| 1406 | new_arr = krealloc(arr, alloc_size, GFP_KERNEL); |
| 1407 | if (!new_arr) { |
| 1408 | kfree(arr); |
| 1409 | return NULL; |
| 1410 | } |
| 1411 | arr = new_arr; |
| 1412 | |
| 1413 | if (new_n > old_n) |
| 1414 | memset(arr + old_n * size, 0, (new_n - old_n) * size); |
| 1415 | |
| 1416 | out: |
| 1417 | return arr ? arr : ZERO_SIZE_PTR; |
| 1418 | } |
| 1419 | |
| 1420 | static int copy_reference_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src) |
| 1421 | { |
| 1422 | dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs, |
| 1423 | sizeof(struct bpf_reference_state), GFP_KERNEL); |
| 1424 | if (!dst->refs) |
| 1425 | return -ENOMEM; |
| 1426 | |
| 1427 | dst->acquired_refs = src->acquired_refs; |
| 1428 | dst->active_locks = src->active_locks; |
| 1429 | dst->active_preempt_locks = src->active_preempt_locks; |
| 1430 | dst->active_rcu_lock = src->active_rcu_lock; |
| 1431 | dst->active_irq_id = src->active_irq_id; |
| 1432 | dst->active_lock_id = src->active_lock_id; |
| 1433 | dst->active_lock_ptr = src->active_lock_ptr; |
| 1434 | return 0; |
| 1435 | } |
| 1436 | |
| 1437 | static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src) |
| 1438 | { |
| 1439 | size_t n = src->allocated_stack / BPF_REG_SIZE; |
| 1440 | |
| 1441 | dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state), |
| 1442 | GFP_KERNEL); |
| 1443 | if (!dst->stack) |
| 1444 | return -ENOMEM; |
| 1445 | |
| 1446 | dst->allocated_stack = src->allocated_stack; |
| 1447 | return 0; |
| 1448 | } |
| 1449 | |
| 1450 | static int resize_reference_state(struct bpf_verifier_state *state, size_t n) |
| 1451 | { |
| 1452 | state->refs = realloc_array(state->refs, state->acquired_refs, n, |
| 1453 | sizeof(struct bpf_reference_state)); |
| 1454 | if (!state->refs) |
| 1455 | return -ENOMEM; |
| 1456 | |
| 1457 | state->acquired_refs = n; |
| 1458 | return 0; |
| 1459 | } |
| 1460 | |
| 1461 | /* Possibly update state->allocated_stack to be at least size bytes. Also |
| 1462 | * possibly update the function's high-water mark in its bpf_subprog_info. |
| 1463 | */ |
| 1464 | static int grow_stack_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int size) |
| 1465 | { |
| 1466 | size_t old_n = state->allocated_stack / BPF_REG_SIZE, n; |
| 1467 | |
| 1468 | /* The stack size is always a multiple of BPF_REG_SIZE. */ |
| 1469 | size = round_up(size, BPF_REG_SIZE); |
| 1470 | n = size / BPF_REG_SIZE; |
| 1471 | |
| 1472 | if (old_n >= n) |
| 1473 | return 0; |
| 1474 | |
| 1475 | state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state)); |
| 1476 | if (!state->stack) |
| 1477 | return -ENOMEM; |
| 1478 | |
| 1479 | state->allocated_stack = size; |
| 1480 | |
| 1481 | /* update known max for given subprogram */ |
| 1482 | if (env->subprog_info[state->subprogno].stack_depth < size) |
| 1483 | env->subprog_info[state->subprogno].stack_depth = size; |
| 1484 | |
| 1485 | return 0; |
| 1486 | } |
| 1487 | |
/* Grow state->refs and carve out a new reference state slot, recording the
 * instruction that acquires it.
 * On success, returns a pointer to the new slot for the caller to fill in.
 * On failure, returns NULL.
 */
| 1493 | static struct bpf_reference_state *acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) |
| 1494 | { |
| 1495 | struct bpf_verifier_state *state = env->cur_state; |
| 1496 | int new_ofs = state->acquired_refs; |
| 1497 | int err; |
| 1498 | |
| 1499 | err = resize_reference_state(state, state->acquired_refs + 1); |
| 1500 | if (err) |
| 1501 | return NULL; |
| 1502 | state->refs[new_ofs].insn_idx = insn_idx; |
| 1503 | |
| 1504 | return &state->refs[new_ofs]; |
| 1505 | } |
| 1506 | |
| 1507 | static int acquire_reference(struct bpf_verifier_env *env, int insn_idx) |
| 1508 | { |
| 1509 | struct bpf_reference_state *s; |
| 1510 | |
| 1511 | s = acquire_reference_state(env, insn_idx); |
| 1512 | if (!s) |
| 1513 | return -ENOMEM; |
| 1514 | s->type = REF_TYPE_PTR; |
| 1515 | s->id = ++env->id_gen; |
| 1516 | return s->id; |
| 1517 | } |
| 1518 | |
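/* Record a lock acquisition of the given ref_state_type as a reference state
 * entry and update the current state's active-lock bookkeeping.
 */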
| 1519 | static int acquire_lock_state(struct bpf_verifier_env *env, int insn_idx, enum ref_state_type type, |
| 1520 | int id, void *ptr) |
| 1521 | { |
| 1522 | struct bpf_verifier_state *state = env->cur_state; |
| 1523 | struct bpf_reference_state *s; |
| 1524 | |
| 1525 | s = acquire_reference_state(env, insn_idx); |
| 1526 | if (!s) |
| 1527 | return -ENOMEM; |
| 1528 | s->type = type; |
| 1529 | s->id = id; |
| 1530 | s->ptr = ptr; |
| 1531 | |
| 1532 | state->active_locks++; |
| 1533 | state->active_lock_id = id; |
| 1534 | state->active_lock_ptr = ptr; |
| 1535 | return 0; |
| 1536 | } |
| 1537 | |
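/* Record an IRQ-state acquisition as a reference state entry with a freshly
 * generated id and remember it as the currently active IRQ id.
 */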
| 1538 | static int acquire_irq_state(struct bpf_verifier_env *env, int insn_idx) |
| 1539 | { |
| 1540 | struct bpf_verifier_state *state = env->cur_state; |
| 1541 | struct bpf_reference_state *s; |
| 1542 | |
| 1543 | s = acquire_reference_state(env, insn_idx); |
| 1544 | if (!s) |
| 1545 | return -ENOMEM; |
| 1546 | s->type = REF_TYPE_IRQ; |
| 1547 | s->id = ++env->id_gen; |
| 1548 | |
| 1549 | state->active_irq_id = s->id; |
| 1550 | return s->id; |
| 1551 | } |
| 1552 | |
| 1553 | static void release_reference_state(struct bpf_verifier_state *state, int idx) |
| 1554 | { |
| 1555 | int last_idx; |
| 1556 | size_t rem; |
| 1557 | |
	/* IRQ state requires that the relative ordering of elements remain the
	 * same, since it relies on the refs array behaving as a stack, so that
	 * it can detect an out-of-order IRQ restore. Hence use memmove to shift
	 * the array instead of swapping the final element into the deleted idx.
| 1562 | */ |
| 1563 | last_idx = state->acquired_refs - 1; |
| 1564 | rem = state->acquired_refs - idx - 1; |
| 1565 | if (last_idx && idx != last_idx) |
| 1566 | memmove(&state->refs[idx], &state->refs[idx + 1], sizeof(*state->refs) * rem); |
| 1567 | memset(&state->refs[last_idx], 0, sizeof(*state->refs)); |
| 1568 | state->acquired_refs--; |
| 1570 | } |
| 1571 | |
| 1572 | static bool find_reference_state(struct bpf_verifier_state *state, int ptr_id) |
| 1573 | { |
| 1574 | int i; |
| 1575 | |
| 1576 | for (i = 0; i < state->acquired_refs; i++) |
| 1577 | if (state->refs[i].id == ptr_id) |
| 1578 | return true; |
| 1579 | |
| 1580 | return false; |
| 1581 | } |
| 1582 | |
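/* Release the lock reference matching (type, id, ptr) and point the active
 * lock bookkeeping back at the lock acquired just before it, if any.
 * Returns -EINVAL if no matching lock is currently held.
 */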
| 1583 | static int release_lock_state(struct bpf_verifier_state *state, int type, int id, void *ptr) |
| 1584 | { |
| 1585 | void *prev_ptr = NULL; |
| 1586 | u32 prev_id = 0; |
| 1587 | int i; |
| 1588 | |
| 1589 | for (i = 0; i < state->acquired_refs; i++) { |
| 1590 | if (state->refs[i].type == type && state->refs[i].id == id && |
| 1591 | state->refs[i].ptr == ptr) { |
| 1592 | release_reference_state(state, i); |
| 1593 | state->active_locks--; |
| 1594 | /* Reassign active lock (id, ptr). */ |
| 1595 | state->active_lock_id = prev_id; |
| 1596 | state->active_lock_ptr = prev_ptr; |
| 1597 | return 0; |
| 1598 | } |
| 1599 | if (state->refs[i].type & REF_TYPE_LOCK_MASK) { |
| 1600 | prev_id = state->refs[i].id; |
| 1601 | prev_ptr = state->refs[i].ptr; |
| 1602 | } |
| 1603 | } |
| 1604 | return -EINVAL; |
| 1605 | } |
| 1606 | |
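/* Release the IRQ reference with the given id. Only the most recently
 * acquired IRQ state may be released; an out-of-order restore is rejected
 * with -EACCES.
 */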
| 1607 | static int release_irq_state(struct bpf_verifier_state *state, int id) |
| 1608 | { |
| 1609 | u32 prev_id = 0; |
| 1610 | int i; |
| 1611 | |
| 1612 | if (id != state->active_irq_id) |
| 1613 | return -EACCES; |
| 1614 | |
| 1615 | for (i = 0; i < state->acquired_refs; i++) { |
| 1616 | if (state->refs[i].type != REF_TYPE_IRQ) |
| 1617 | continue; |
| 1618 | if (state->refs[i].id == id) { |
| 1619 | release_reference_state(state, i); |
| 1620 | state->active_irq_id = prev_id; |
| 1621 | return 0; |
| 1622 | } else { |
| 1623 | prev_id = state->refs[i].id; |
| 1624 | } |
| 1625 | } |
| 1626 | return -EINVAL; |
| 1627 | } |
| 1628 | |
| 1629 | static struct bpf_reference_state *find_lock_state(struct bpf_verifier_state *state, enum ref_state_type type, |
| 1630 | int id, void *ptr) |
| 1631 | { |
| 1632 | int i; |
| 1633 | |
| 1634 | for (i = 0; i < state->acquired_refs; i++) { |
| 1635 | struct bpf_reference_state *s = &state->refs[i]; |
| 1636 | |
| 1637 | if (!(s->type & type)) |
| 1638 | continue; |
| 1639 | |
| 1640 | if (s->id == id && s->ptr == ptr) |
| 1641 | return s; |
| 1642 | } |
| 1643 | return NULL; |
| 1644 | } |
| 1645 | |
| 1646 | static void update_peak_states(struct bpf_verifier_env *env) |
| 1647 | { |
| 1648 | u32 cur_states; |
| 1649 | |
| 1650 | cur_states = env->explored_states_size + env->free_list_size; |
| 1651 | env->peak_states = max(env->peak_states, cur_states); |
| 1652 | } |
| 1653 | |
| 1654 | static void free_func_state(struct bpf_func_state *state) |
| 1655 | { |
| 1656 | if (!state) |
| 1657 | return; |
| 1658 | kfree(state->stack); |
| 1659 | kfree(state); |
| 1660 | } |
| 1661 | |
| 1662 | static void free_verifier_state(struct bpf_verifier_state *state, |
| 1663 | bool free_self) |
| 1664 | { |
| 1665 | int i; |
| 1666 | |
| 1667 | for (i = 0; i <= state->curframe; i++) { |
| 1668 | free_func_state(state->frame[i]); |
| 1669 | state->frame[i] = NULL; |
| 1670 | } |
| 1671 | kfree(state->refs); |
| 1672 | if (free_self) |
| 1673 | kfree(state); |
| 1674 | } |
| 1675 | |
| 1676 | /* struct bpf_verifier_state->{parent,loop_entry} refer to states |
 * that are in either of env->{explored_states,free_list}.
| 1678 | * In both cases the state is contained in struct bpf_verifier_state_list. |
| 1679 | */ |
| 1680 | static struct bpf_verifier_state_list *state_parent_as_list(struct bpf_verifier_state *st) |
| 1681 | { |
| 1682 | if (st->parent) |
| 1683 | return container_of(st->parent, struct bpf_verifier_state_list, state); |
| 1684 | return NULL; |
| 1685 | } |
| 1686 | |
| 1687 | static struct bpf_verifier_state_list *state_loop_entry_as_list(struct bpf_verifier_state *st) |
| 1688 | { |
| 1689 | if (st->loop_entry) |
| 1690 | return container_of(st->loop_entry, struct bpf_verifier_state_list, state); |
| 1691 | return NULL; |
| 1692 | } |
| 1693 | |
/* A state can be freed if it is no longer referenced:
 * - it is in the env->free_list;
 * - it has no child states;
 * - it is not used as a loop_entry.
 *
 * Freeing a state can make its loop_entry freeable.
| 1700 | */ |
| 1701 | static void maybe_free_verifier_state(struct bpf_verifier_env *env, |
| 1702 | struct bpf_verifier_state_list *sl) |
| 1703 | { |
| 1704 | struct bpf_verifier_state_list *loop_entry_sl; |
| 1705 | |
| 1706 | while (sl && sl->in_free_list && |
| 1707 | sl->state.branches == 0 && |
| 1708 | sl->state.used_as_loop_entry == 0) { |
| 1709 | loop_entry_sl = state_loop_entry_as_list(&sl->state); |
| 1710 | if (loop_entry_sl) |
| 1711 | loop_entry_sl->state.used_as_loop_entry--; |
| 1712 | list_del(&sl->node); |
| 1713 | free_verifier_state(&sl->state, false); |
| 1714 | kfree(sl); |
| 1715 | env->free_list_size--; |
| 1716 | sl = loop_entry_sl; |
| 1717 | } |
| 1718 | } |
| 1719 | |
/* copy func state from src to dst, growing dst stack space
 * when necessary to accommodate a larger src stack
| 1722 | */ |
| 1723 | static int copy_func_state(struct bpf_func_state *dst, |
| 1724 | const struct bpf_func_state *src) |
| 1725 | { |
| 1726 | memcpy(dst, src, offsetof(struct bpf_func_state, stack)); |
| 1727 | return copy_stack_state(dst, src); |
| 1728 | } |
| 1729 | |
| 1730 | static int copy_verifier_state(struct bpf_verifier_state *dst_state, |
| 1731 | const struct bpf_verifier_state *src) |
| 1732 | { |
| 1733 | struct bpf_func_state *dst; |
| 1734 | int i, err; |
| 1735 | |
	/* if dst has more stack frames than src, free the extra ones; this is
	 * also necessary in case of exceptional exits using bpf_throw.
| 1738 | */ |
| 1739 | for (i = src->curframe + 1; i <= dst_state->curframe; i++) { |
| 1740 | free_func_state(dst_state->frame[i]); |
| 1741 | dst_state->frame[i] = NULL; |
| 1742 | } |
| 1743 | err = copy_reference_state(dst_state, src); |
| 1744 | if (err) |
| 1745 | return err; |
| 1746 | dst_state->speculative = src->speculative; |
| 1747 | dst_state->in_sleepable = src->in_sleepable; |
| 1748 | dst_state->curframe = src->curframe; |
| 1749 | dst_state->branches = src->branches; |
| 1750 | dst_state->parent = src->parent; |
| 1751 | dst_state->first_insn_idx = src->first_insn_idx; |
| 1752 | dst_state->last_insn_idx = src->last_insn_idx; |
| 1753 | dst_state->insn_hist_start = src->insn_hist_start; |
| 1754 | dst_state->insn_hist_end = src->insn_hist_end; |
| 1755 | dst_state->dfs_depth = src->dfs_depth; |
| 1756 | dst_state->callback_unroll_depth = src->callback_unroll_depth; |
| 1757 | dst_state->used_as_loop_entry = src->used_as_loop_entry; |
| 1758 | dst_state->may_goto_depth = src->may_goto_depth; |
| 1759 | dst_state->loop_entry = src->loop_entry; |
| 1760 | for (i = 0; i <= src->curframe; i++) { |
| 1761 | dst = dst_state->frame[i]; |
| 1762 | if (!dst) { |
| 1763 | dst = kzalloc(sizeof(*dst), GFP_KERNEL); |
| 1764 | if (!dst) |
| 1765 | return -ENOMEM; |
| 1766 | dst_state->frame[i] = dst; |
| 1767 | } |
| 1768 | err = copy_func_state(dst, src->frame[i]); |
| 1769 | if (err) |
| 1770 | return err; |
| 1771 | } |
| 1772 | return 0; |
| 1773 | } |
| 1774 | |
| 1775 | static u32 state_htab_size(struct bpf_verifier_env *env) |
| 1776 | { |
| 1777 | return env->prog->len; |
| 1778 | } |
| 1779 | |
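/* Select the explored_states hash bucket for instruction 'idx'. The current
 * frame's callsite is mixed into the hash so that the same instruction
 * reached through different call chains tends to land in different buckets.
 */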
| 1780 | static struct list_head *explored_state(struct bpf_verifier_env *env, int idx) |
| 1781 | { |
| 1782 | struct bpf_verifier_state *cur = env->cur_state; |
| 1783 | struct bpf_func_state *state = cur->frame[cur->curframe]; |
| 1784 | |
| 1785 | return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; |
| 1786 | } |
| 1787 | |
| 1788 | static bool same_callsites(struct bpf_verifier_state *a, struct bpf_verifier_state *b) |
| 1789 | { |
| 1790 | int fr; |
| 1791 | |
| 1792 | if (a->curframe != b->curframe) |
| 1793 | return false; |
| 1794 | |
| 1795 | for (fr = a->curframe; fr >= 0; fr--) |
| 1796 | if (a->frame[fr]->callsite != b->frame[fr]->callsite) |
| 1797 | return false; |
| 1798 | |
| 1799 | return true; |
| 1800 | } |
| 1801 | |
| 1802 | /* Open coded iterators allow back-edges in the state graph in order to |
 * check unbounded loops that use iterators.
| 1804 | * |
| 1805 | * In is_state_visited() it is necessary to know if explored states are |
| 1806 | * part of some loops in order to decide whether non-exact states |
| 1807 | * comparison could be used: |
| 1808 | * - non-exact states comparison establishes sub-state relation and uses |
| 1809 | * read and precision marks to do so, these marks are propagated from |
| 1810 | * children states and thus are not guaranteed to be final in a loop; |
| 1811 | * - exact states comparison just checks if current and explored states |
| 1812 | * are identical (and thus form a back-edge). |
| 1813 | * |
| 1814 | * Paper "A New Algorithm for Identifying Loops in Decompilation" |
| 1815 | * by Tao Wei, Jian Mao, Wei Zou and Yu Chen [1] presents a convenient |
| 1816 | * algorithm for loop structure detection and gives an overview of |
| 1817 | * relevant terminology. It also has helpful illustrations. |
| 1818 | * |
| 1819 | * [1] https://api.semanticscholar.org/CorpusID:15784067 |
| 1820 | * |
 * We use a similar algorithm, but because the loop nesting structure is
 * irrelevant to the verifier, ours is significantly simpler and resembles
 * the strongly connected components algorithm from Sedgewick's textbook.
| 1824 | * |
| 1825 | * Define topmost loop entry as a first node of the loop traversed in a |
| 1826 | * depth first search starting from initial state. The goal of the loop |
| 1827 | * tracking algorithm is to associate topmost loop entries with states |
| 1828 | * derived from these entries. |
| 1829 | * |
 * At each step of the DFS the state traversal algorithm needs to identify
 * one of the following situations:
| 1832 | * |
| 1833 | * initial initial initial |
| 1834 | * | | | |
| 1835 | * V V V |
| 1836 | * ... ... .---------> hdr |
| 1837 | * | | | | |
| 1838 | * V V | V |
| 1839 | * cur .-> succ | .------... |
| 1840 | * | | | | | | |
| 1841 | * V | V | V V |
| 1842 | * succ '-- cur | ... ... |
| 1843 | * | | | |
| 1844 | * | V V |
| 1845 | * | succ <- cur |
| 1846 | * | | |
| 1847 | * | V |
| 1848 | * | ... |
| 1849 | * | | |
| 1850 | * '----' |
| 1851 | * |
 * (A) successor state of cur   (B) successor state of cur or its entry
| 1853 | * not yet traversed are in current DFS path, thus cur and succ |
| 1854 | * are members of the same outermost loop |
| 1855 | * |
| 1856 | * initial initial |
| 1857 | * | | |
| 1858 | * V V |
| 1859 | * ... ... |
| 1860 | * | | |
| 1861 | * V V |
| 1862 | * .------... .------... |
| 1863 | * | | | | |
| 1864 | * V V V V |
| 1865 | * .-> hdr ... ... ... |
| 1866 | * | | | | | |
| 1867 | * | V V V V |
| 1868 | * | succ <- cur succ <- cur |
| 1869 | * | | | |
| 1870 | * | V V |
| 1871 | * | ... ... |
| 1872 | * | | | |
| 1873 | * '----' exit |
| 1874 | * |
| 1875 | * (C) successor state of cur is a part of some loop but this loop |
 *     does not include cur, or the successor state is not in a loop at all.
| 1877 | * |
 * The algorithm can be described by the following Python code:
| 1879 | * |
| 1880 | * traversed = set() # Set of traversed nodes |
| 1881 | * entries = {} # Mapping from node to loop entry |
| 1882 | * depths = {} # Depth level assigned to graph node |
| 1883 | * path = set() # Current DFS path |
| 1884 | * |
| 1885 | * # Find outermost loop entry known for n |
| 1886 | * def get_loop_entry(n): |
| 1887 | * h = entries.get(n, None) |
| 1888 | * while h in entries: |
| 1889 | * h = entries[h] |
| 1890 | * return h |
| 1891 | * |
 *   # Update n's loop entry if h is in the current DFS path and comes
 *   # before n's current loop entry.
 *   def update_loop_entry(n, h):
 *       if h in path and depths[h] < depths[entries.get(n, n)]:
 *           entries[n] = h
| 1896 | * |
| 1897 | * def dfs(n, depth): |
| 1898 | * traversed.add(n) |
| 1899 | * path.add(n) |
| 1900 | * depths[n] = depth |
| 1901 | * for succ in G.successors(n): |
| 1902 | * if succ not in traversed: |
| 1903 | * # Case A: explore succ and update cur's loop entry |
| 1904 | * # only if succ's entry is in current DFS path. |
| 1905 | * dfs(succ, depth + 1) |
| 1906 | * h = entries.get(succ, None) |
| 1907 | * update_loop_entry(n, h) |
| 1908 | * else: |
 *               # Case B or C depending on `h in path` check in update_loop_entry().
| 1910 | * update_loop_entry(n, succ) |
| 1911 | * path.remove(n) |
| 1912 | * |
| 1913 | * To adapt this algorithm for use with verifier: |
 * - use st->branches == 0 as a signal that DFS of succ had been finished
 *   and cur's loop entry has to be updated (case A), handle this in
 *   update_branch_counts();
 * - use st->branches > 0 as a signal that st is in the current DFS path;
| 1918 | * - handle cases B and C in is_state_visited(). |
| 1919 | */ |
| 1920 | static struct bpf_verifier_state *get_loop_entry(struct bpf_verifier_env *env, |
| 1921 | struct bpf_verifier_state *st) |
| 1922 | { |
| 1923 | struct bpf_verifier_state *topmost = st->loop_entry; |
| 1924 | u32 steps = 0; |
| 1925 | |
| 1926 | while (topmost && topmost->loop_entry) { |
| 1927 | if (verifier_bug_if(steps++ > st->dfs_depth, env, "infinite loop")) |
| 1928 | return ERR_PTR(-EFAULT); |
| 1929 | topmost = topmost->loop_entry; |
| 1930 | } |
| 1931 | return topmost; |
| 1932 | } |
| 1933 | |
| 1934 | static void update_loop_entry(struct bpf_verifier_env *env, |
| 1935 | struct bpf_verifier_state *cur, struct bpf_verifier_state *hdr) |
| 1936 | { |
| 1937 | /* The hdr->branches check decides between cases B and C in |
| 1938 | * comment for get_loop_entry(). If hdr->branches == 0 then |
	 * hdr is not in the current DFS path, hence 'cur' and 'hdr'
	 * are not in the same loop and there is no need to update
	 * cur->loop_entry.
| 1942 | */ |
| 1943 | if (hdr->branches && hdr->dfs_depth < (cur->loop_entry ?: cur)->dfs_depth) { |
| 1944 | if (cur->loop_entry) { |
| 1945 | cur->loop_entry->used_as_loop_entry--; |
| 1946 | maybe_free_verifier_state(env, state_loop_entry_as_list(cur)); |
| 1947 | } |
| 1948 | cur->loop_entry = hdr; |
| 1949 | hdr->used_as_loop_entry++; |
| 1950 | } |
| 1951 | } |
| 1952 | |
| 1953 | static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) |
| 1954 | { |
| 1955 | struct bpf_verifier_state_list *sl = NULL, *parent_sl; |
| 1956 | struct bpf_verifier_state *parent; |
| 1957 | |
| 1958 | while (st) { |
| 1959 | u32 br = --st->branches; |
| 1960 | |
| 1961 | /* br == 0 signals that DFS exploration for 'st' is finished, |
| 1962 | * thus it is necessary to update parent's loop entry if it |
| 1963 | * turned out that st is a part of some loop. |
| 1964 | * This is a part of 'case A' in get_loop_entry() comment. |
| 1965 | */ |
| 1966 | if (br == 0 && st->parent && st->loop_entry) |
| 1967 | update_loop_entry(env, st->parent, st->loop_entry); |
| 1968 | |
| 1969 | /* WARN_ON(br > 1) technically makes sense here, |
| 1970 | * but see comment in push_stack(), hence: |
| 1971 | */ |
| 1972 | WARN_ONCE((int)br < 0, |
| 1973 | "BUG update_branch_counts:branches_to_explore=%d\n", |
| 1974 | br); |
| 1975 | if (br) |
| 1976 | break; |
| 1977 | parent = st->parent; |
| 1978 | parent_sl = state_parent_as_list(st); |
| 1979 | if (sl) |
| 1980 | maybe_free_verifier_state(env, sl); |
| 1981 | st = parent; |
| 1982 | sl = parent_sl; |
| 1983 | } |
| 1984 | } |
| 1985 | |
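/* Pop the top element of the DFS stack: copy its saved state into
 * env->cur_state (if one is set), optionally reset the verifier log to the
 * position recorded at push time, and report the associated instruction
 * indices. Returns -ENOENT if the stack is empty.
 */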
| 1986 | static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, |
| 1987 | int *insn_idx, bool pop_log) |
| 1988 | { |
| 1989 | struct bpf_verifier_state *cur = env->cur_state; |
| 1990 | struct bpf_verifier_stack_elem *elem, *head = env->head; |
| 1991 | int err; |
| 1992 | |
| 1993 | if (env->head == NULL) |
| 1994 | return -ENOENT; |
| 1995 | |
| 1996 | if (cur) { |
| 1997 | err = copy_verifier_state(cur, &head->st); |
| 1998 | if (err) |
| 1999 | return err; |
| 2000 | } |
| 2001 | if (pop_log) |
| 2002 | bpf_vlog_reset(&env->log, head->log_pos); |
| 2003 | if (insn_idx) |
| 2004 | *insn_idx = head->insn_idx; |
| 2005 | if (prev_insn_idx) |
| 2006 | *prev_insn_idx = head->prev_insn_idx; |
| 2007 | elem = head->next; |
| 2008 | free_verifier_state(&head->st, false); |
| 2009 | kfree(head); |
| 2010 | env->head = elem; |
| 2011 | env->stack_size--; |
| 2012 | return 0; |
| 2013 | } |
| 2014 | |
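/* Push a copy of the current verifier state onto the DFS stack so that it is
 * explored later starting at 'insn_idx'. Returns the pushed state, or NULL on
 * failure, in which case the current state and all pending stack elements are
 * freed.
 */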
| 2015 | static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, |
| 2016 | int insn_idx, int prev_insn_idx, |
| 2017 | bool speculative) |
| 2018 | { |
| 2019 | struct bpf_verifier_state *cur = env->cur_state; |
| 2020 | struct bpf_verifier_stack_elem *elem; |
| 2021 | int err; |
| 2022 | |
| 2023 | elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); |
| 2024 | if (!elem) |
| 2025 | goto err; |
| 2026 | |
| 2027 | elem->insn_idx = insn_idx; |
| 2028 | elem->prev_insn_idx = prev_insn_idx; |
| 2029 | elem->next = env->head; |
| 2030 | elem->log_pos = env->log.end_pos; |
| 2031 | env->head = elem; |
| 2032 | env->stack_size++; |
| 2033 | err = copy_verifier_state(&elem->st, cur); |
| 2034 | if (err) |
| 2035 | goto err; |
| 2036 | elem->st.speculative |= speculative; |
| 2037 | if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { |
| 2038 | verbose(env, "The sequence of %d jumps is too complex.\n", |
| 2039 | env->stack_size); |
| 2040 | goto err; |
| 2041 | } |
| 2042 | if (elem->st.parent) { |
| 2043 | ++elem->st.parent->branches; |
| 2044 | /* WARN_ON(branches > 2) technically makes sense here, |
| 2045 | * but |
| 2046 | * 1. speculative states will bump 'branches' for non-branch |
| 2047 | * instructions |
| 2048 | * 2. is_state_visited() heuristics may decide not to create |
| 2049 | * a new state for a sequence of branches and all such current |
| 2050 | * and cloned states will be pointing to a single parent state |
| 2051 | * which might have large 'branches' count. |
| 2052 | */ |
| 2053 | } |
| 2054 | return &elem->st; |
| 2055 | err: |
| 2056 | free_verifier_state(env->cur_state, true); |
| 2057 | env->cur_state = NULL; |
| 2058 | /* pop all elements and return */ |
| 2059 | while (!pop_stack(env, NULL, NULL, false)); |
| 2060 | return NULL; |
| 2061 | } |
| 2062 | |
| 2063 | #define CALLER_SAVED_REGS 6 |
| 2064 | static const int caller_saved[CALLER_SAVED_REGS] = { |
| 2065 | BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 |
| 2066 | }; |
| 2067 | |
| 2068 | /* This helper doesn't clear reg->id */ |
| 2069 | static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm) |
| 2070 | { |
| 2071 | reg->var_off = tnum_const(imm); |
| 2072 | reg->smin_value = (s64)imm; |
| 2073 | reg->smax_value = (s64)imm; |
| 2074 | reg->umin_value = imm; |
| 2075 | reg->umax_value = imm; |
| 2076 | |
| 2077 | reg->s32_min_value = (s32)imm; |
| 2078 | reg->s32_max_value = (s32)imm; |
| 2079 | reg->u32_min_value = (u32)imm; |
| 2080 | reg->u32_max_value = (u32)imm; |
| 2081 | } |
| 2082 | |
| 2083 | /* Mark the unknown part of a register (variable offset or scalar value) as |
| 2084 | * known to have the value @imm. |
| 2085 | */ |
| 2086 | static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) |
| 2087 | { |
| 2088 | /* Clear off and union(map_ptr, range) */ |
| 2089 | memset(((u8 *)reg) + sizeof(reg->type), 0, |
| 2090 | offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); |
| 2091 | reg->id = 0; |
| 2092 | reg->ref_obj_id = 0; |
| 2093 | ___mark_reg_known(reg, imm); |
| 2094 | } |
| 2095 | |
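/* Mark only the low 32 bits (subregister) of a register as known to have the
 * value @imm; the 64-bit bounds are left untouched.
 */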
| 2096 | static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm) |
| 2097 | { |
| 2098 | reg->var_off = tnum_const_subreg(reg->var_off, imm); |
| 2099 | reg->s32_min_value = (s32)imm; |
| 2100 | reg->s32_max_value = (s32)imm; |
| 2101 | reg->u32_min_value = (u32)imm; |
| 2102 | reg->u32_max_value = (u32)imm; |
| 2103 | } |
| 2104 | |
| 2105 | /* Mark the 'variable offset' part of a register as zero. This should be |
| 2106 | * used only on registers holding a pointer type. |
| 2107 | */ |
| 2108 | static void __mark_reg_known_zero(struct bpf_reg_state *reg) |
| 2109 | { |
| 2110 | __mark_reg_known(reg, 0); |
| 2111 | } |
| 2112 | |
| 2113 | static void __mark_reg_const_zero(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 2114 | { |
| 2115 | __mark_reg_known(reg, 0); |
| 2116 | reg->type = SCALAR_VALUE; |
| 2117 | /* all scalars are assumed imprecise initially (unless unprivileged, |
| 2118 | * in which case everything is forced to be precise) |
| 2119 | */ |
| 2120 | reg->precise = !env->bpf_capable; |
| 2121 | } |
| 2122 | |
| 2123 | static void mark_reg_known_zero(struct bpf_verifier_env *env, |
| 2124 | struct bpf_reg_state *regs, u32 regno) |
| 2125 | { |
| 2126 | if (WARN_ON(regno >= MAX_BPF_REG)) { |
| 2127 | verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); |
| 2128 | /* Something bad happened, let's kill all regs */ |
| 2129 | for (regno = 0; regno < MAX_BPF_REG; regno++) |
| 2130 | __mark_reg_not_init(env, regs + regno); |
| 2131 | return; |
| 2132 | } |
| 2133 | __mark_reg_known_zero(regs + regno); |
| 2134 | } |
| 2135 | |
| 2136 | static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type, |
| 2137 | bool first_slot, int dynptr_id) |
| 2138 | { |
| 2139 | /* reg->type has no meaning for STACK_DYNPTR, but when we set reg for |
| 2140 | * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply |
| 2141 | * set it unconditionally as it is ignored for STACK_DYNPTR anyway. |
| 2142 | */ |
| 2143 | __mark_reg_known_zero(reg); |
| 2144 | reg->type = CONST_PTR_TO_DYNPTR; |
| 2145 | /* Give each dynptr a unique id to uniquely associate slices to it. */ |
| 2146 | reg->id = dynptr_id; |
| 2147 | reg->dynptr.type = type; |
| 2148 | reg->dynptr.first_slot = first_slot; |
| 2149 | } |
| 2150 | |
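/* Adjust a register's type once a NULL check has succeeded: strip
 * PTR_MAYBE_NULL and, for values returned by lookups in maps-of-maps,
 * sockmap/sockhash or xskmap, switch to the more precise pointer type.
 */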
| 2151 | static void mark_ptr_not_null_reg(struct bpf_reg_state *reg) |
| 2152 | { |
| 2153 | if (base_type(reg->type) == PTR_TO_MAP_VALUE) { |
| 2154 | const struct bpf_map *map = reg->map_ptr; |
| 2155 | |
| 2156 | if (map->inner_map_meta) { |
| 2157 | reg->type = CONST_PTR_TO_MAP; |
| 2158 | reg->map_ptr = map->inner_map_meta; |
			/* transfer reg's id, which is unique for every
			 * map_lookup_elem, as the UID of the inner map.
| 2161 | */ |
| 2162 | if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER)) |
| 2163 | reg->map_uid = reg->id; |
| 2164 | if (btf_record_has_field(map->inner_map_meta->record, BPF_WORKQUEUE)) |
| 2165 | reg->map_uid = reg->id; |
| 2166 | } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { |
| 2167 | reg->type = PTR_TO_XDP_SOCK; |
| 2168 | } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || |
| 2169 | map->map_type == BPF_MAP_TYPE_SOCKHASH) { |
| 2170 | reg->type = PTR_TO_SOCKET; |
| 2171 | } else { |
| 2172 | reg->type = PTR_TO_MAP_VALUE; |
| 2173 | } |
| 2174 | return; |
| 2175 | } |
| 2176 | |
| 2177 | reg->type &= ~PTR_MAYBE_NULL; |
| 2178 | } |
| 2179 | |
| 2180 | static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno, |
| 2181 | struct btf_field_graph_root *ds_head) |
| 2182 | { |
| 2183 | __mark_reg_known_zero(®s[regno]); |
| 2184 | regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC; |
| 2185 | regs[regno].btf = ds_head->btf; |
| 2186 | regs[regno].btf_id = ds_head->value_btf_id; |
| 2187 | regs[regno].off = ds_head->node_offset; |
| 2188 | } |
| 2189 | |
| 2190 | static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) |
| 2191 | { |
| 2192 | return type_is_pkt_pointer(reg->type); |
| 2193 | } |
| 2194 | |
| 2195 | static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) |
| 2196 | { |
| 2197 | return reg_is_pkt_pointer(reg) || |
| 2198 | reg->type == PTR_TO_PACKET_END; |
| 2199 | } |
| 2200 | |
| 2201 | static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg) |
| 2202 | { |
| 2203 | return base_type(reg->type) == PTR_TO_MEM && |
| 2204 | (reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP); |
| 2205 | } |
| 2206 | |
| 2207 | /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ |
| 2208 | static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, |
| 2209 | enum bpf_reg_type which) |
| 2210 | { |
| 2211 | /* The register can already have a range from prior markings. |
| 2212 | * This is fine as long as it hasn't been advanced from its |
| 2213 | * origin. |
| 2214 | */ |
| 2215 | return reg->type == which && |
| 2216 | reg->id == 0 && |
| 2217 | reg->off == 0 && |
| 2218 | tnum_equals_const(reg->var_off, 0); |
| 2219 | } |
| 2220 | |
| 2221 | /* Reset the min/max bounds of a register */ |
| 2222 | static void __mark_reg_unbounded(struct bpf_reg_state *reg) |
| 2223 | { |
| 2224 | reg->smin_value = S64_MIN; |
| 2225 | reg->smax_value = S64_MAX; |
| 2226 | reg->umin_value = 0; |
| 2227 | reg->umax_value = U64_MAX; |
| 2228 | |
| 2229 | reg->s32_min_value = S32_MIN; |
| 2230 | reg->s32_max_value = S32_MAX; |
| 2231 | reg->u32_min_value = 0; |
| 2232 | reg->u32_max_value = U32_MAX; |
| 2233 | } |
| 2234 | |
| 2235 | static void __mark_reg64_unbounded(struct bpf_reg_state *reg) |
| 2236 | { |
| 2237 | reg->smin_value = S64_MIN; |
| 2238 | reg->smax_value = S64_MAX; |
| 2239 | reg->umin_value = 0; |
| 2240 | reg->umax_value = U64_MAX; |
| 2241 | } |
| 2242 | |
| 2243 | static void __mark_reg32_unbounded(struct bpf_reg_state *reg) |
| 2244 | { |
| 2245 | reg->s32_min_value = S32_MIN; |
| 2246 | reg->s32_max_value = S32_MAX; |
| 2247 | reg->u32_min_value = 0; |
| 2248 | reg->u32_max_value = U32_MAX; |
| 2249 | } |
| 2250 | |
| 2251 | static void __update_reg32_bounds(struct bpf_reg_state *reg) |
| 2252 | { |
| 2253 | struct tnum var32_off = tnum_subreg(reg->var_off); |
| 2254 | |
| 2255 | /* min signed is max(sign bit) | min(other bits) */ |
| 2256 | reg->s32_min_value = max_t(s32, reg->s32_min_value, |
| 2257 | var32_off.value | (var32_off.mask & S32_MIN)); |
| 2258 | /* max signed is min(sign bit) | max(other bits) */ |
| 2259 | reg->s32_max_value = min_t(s32, reg->s32_max_value, |
| 2260 | var32_off.value | (var32_off.mask & S32_MAX)); |
| 2261 | reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); |
| 2262 | reg->u32_max_value = min(reg->u32_max_value, |
| 2263 | (u32)(var32_off.value | var32_off.mask)); |
| 2264 | } |
| 2265 | |
| 2266 | static void __update_reg64_bounds(struct bpf_reg_state *reg) |
| 2267 | { |
| 2268 | /* min signed is max(sign bit) | min(other bits) */ |
| 2269 | reg->smin_value = max_t(s64, reg->smin_value, |
| 2270 | reg->var_off.value | (reg->var_off.mask & S64_MIN)); |
| 2271 | /* max signed is min(sign bit) | max(other bits) */ |
| 2272 | reg->smax_value = min_t(s64, reg->smax_value, |
| 2273 | reg->var_off.value | (reg->var_off.mask & S64_MAX)); |
| 2274 | reg->umin_value = max(reg->umin_value, reg->var_off.value); |
| 2275 | reg->umax_value = min(reg->umax_value, |
| 2276 | reg->var_off.value | reg->var_off.mask); |
| 2277 | } |
| 2278 | |
| 2279 | static void __update_reg_bounds(struct bpf_reg_state *reg) |
| 2280 | { |
| 2281 | __update_reg32_bounds(reg); |
| 2282 | __update_reg64_bounds(reg); |
| 2283 | } |
| 2284 | |
| 2285 | /* Uses signed min/max values to inform unsigned, and vice-versa */ |
| 2286 | static void __reg32_deduce_bounds(struct bpf_reg_state *reg) |
| 2287 | { |
| 2288 | /* If upper 32 bits of u64/s64 range don't change, we can use lower 32 |
| 2289 | * bits to improve our u32/s32 boundaries. |
| 2290 | * |
| 2291 | * E.g., the case where we have upper 32 bits as zero ([10, 20] in |
| 2292 | * u64) is pretty trivial, it's obvious that in u32 we'll also have |
| 2293 | * [10, 20] range. But this property holds for any 64-bit range as |
| 2294 | * long as upper 32 bits in that entire range of values stay the same. |
| 2295 | * |
| 2296 | * E.g., u64 range [0x10000000A, 0x10000000F] ([4294967306, 4294967311] |
| 2297 | * in decimal) has the same upper 32 bits throughout all the values in |
| 2298 | * that range. As such, lower 32 bits form a valid [0xA, 0xF] ([10, 15]) |
| 2299 | * range. |
| 2300 | * |
| 2301 | * Note also, that [0xA, 0xF] is a valid range both in u32 and in s32, |
| 2302 | * following the rules outlined below about u64/s64 correspondence |
| 2303 | * (which equally applies to u32 vs s32 correspondence). In general it |
| 2304 | * depends on actual hexadecimal values of 32-bit range. They can form |
| 2305 | * only valid u32, or only valid s32 ranges in some cases. |
| 2306 | * |
| 2307 | * So we use all these insights to derive bounds for subregisters here. |
| 2308 | */ |
| 2309 | if ((reg->umin_value >> 32) == (reg->umax_value >> 32)) { |
| 2310 | /* u64 to u32 casting preserves validity of low 32 bits as |
| 2311 | * a range, if upper 32 bits are the same |
| 2312 | */ |
| 2313 | reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->umin_value); |
| 2314 | reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->umax_value); |
| 2315 | |
| 2316 | if ((s32)reg->umin_value <= (s32)reg->umax_value) { |
| 2317 | reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value); |
| 2318 | reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value); |
| 2319 | } |
| 2320 | } |
| 2321 | if ((reg->smin_value >> 32) == (reg->smax_value >> 32)) { |
| 2322 | /* low 32 bits should form a proper u32 range */ |
| 2323 | if ((u32)reg->smin_value <= (u32)reg->smax_value) { |
| 2324 | reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)reg->smin_value); |
| 2325 | reg->u32_max_value = min_t(u32, reg->u32_max_value, (u32)reg->smax_value); |
| 2326 | } |
| 2327 | /* low 32 bits should form a proper s32 range */ |
| 2328 | if ((s32)reg->smin_value <= (s32)reg->smax_value) { |
| 2329 | reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value); |
| 2330 | reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value); |
| 2331 | } |
| 2332 | } |
| 2333 | /* Special case where upper bits form a small sequence of two |
| 2334 | * sequential numbers (in 32-bit unsigned space, so 0xffffffff to |
| 2335 | * 0x00000000 is also valid), while lower bits form a proper s32 range |
| 2336 | * going from negative numbers to positive numbers. E.g., let's say we |
| 2337 | * have s64 range [-1, 1] ([0xffffffffffffffff, 0x0000000000000001]). |
| 2338 | * Possible s64 values are {-1, 0, 1} ({0xffffffffffffffff, |
	 * 0x0000000000000000, 0x0000000000000001}). Ignoring upper 32 bits,
| 2340 | * we still get a valid s32 range [-1, 1] ([0xffffffff, 0x00000001]). |
| 2341 | * Note that it doesn't have to be 0xffffffff going to 0x00000000 in |
| 2342 | * upper 32 bits. As a random example, s64 range |
| 2343 | * [0xfffffff0fffffff0; 0xfffffff100000010], forms a valid s32 range |
| 2344 | * [-16, 16] ([0xfffffff0; 0x00000010]) in its 32 bit subregister. |
| 2345 | */ |
| 2346 | if ((u32)(reg->umin_value >> 32) + 1 == (u32)(reg->umax_value >> 32) && |
| 2347 | (s32)reg->umin_value < 0 && (s32)reg->umax_value >= 0) { |
| 2348 | reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->umin_value); |
| 2349 | reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->umax_value); |
| 2350 | } |
| 2351 | if ((u32)(reg->smin_value >> 32) + 1 == (u32)(reg->smax_value >> 32) && |
| 2352 | (s32)reg->smin_value < 0 && (s32)reg->smax_value >= 0) { |
| 2353 | reg->s32_min_value = max_t(s32, reg->s32_min_value, (s32)reg->smin_value); |
| 2354 | reg->s32_max_value = min_t(s32, reg->s32_max_value, (s32)reg->smax_value); |
| 2355 | } |
| 2356 | /* if u32 range forms a valid s32 range (due to matching sign bit), |
| 2357 | * try to learn from that |
| 2358 | */ |
| 2359 | if ((s32)reg->u32_min_value <= (s32)reg->u32_max_value) { |
| 2360 | reg->s32_min_value = max_t(s32, reg->s32_min_value, reg->u32_min_value); |
| 2361 | reg->s32_max_value = min_t(s32, reg->s32_max_value, reg->u32_max_value); |
| 2362 | } |
| 2363 | /* If we cannot cross the sign boundary, then signed and unsigned bounds |
| 2364 | * are the same, so combine. This works even in the negative case, e.g. |
| 2365 | * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. |
| 2366 | */ |
| 2367 | if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) { |
| 2368 | reg->u32_min_value = max_t(u32, reg->s32_min_value, reg->u32_min_value); |
| 2369 | reg->u32_max_value = min_t(u32, reg->s32_max_value, reg->u32_max_value); |
| 2370 | } |
| 2371 | } |
| 2372 | |
| 2373 | static void __reg64_deduce_bounds(struct bpf_reg_state *reg) |
| 2374 | { |
| 2375 | /* If u64 range forms a valid s64 range (due to matching sign bit), |
| 2376 | * try to learn from that. Let's do a bit of ASCII art to see when |
| 2377 | * this is happening. Let's take u64 range first: |
| 2378 | * |
| 2379 | * 0 0x7fffffffffffffff 0x8000000000000000 U64_MAX |
| 2380 | * |-------------------------------|--------------------------------| |
| 2381 | * |
| 2382 | * Valid u64 range is formed when umin and umax are anywhere in the |
| 2383 | * range [0, U64_MAX], and umin <= umax. u64 case is simple and |
| 2384 | * straightforward. Let's see how s64 range maps onto the same range |
| 2385 | * of values, annotated below the line for comparison: |
| 2386 | * |
| 2387 | * 0 0x7fffffffffffffff 0x8000000000000000 U64_MAX |
| 2388 | * |-------------------------------|--------------------------------| |
| 2389 | * 0 S64_MAX S64_MIN -1 |
| 2390 | * |
| 2391 | * So s64 values basically start in the middle and they are logically |
| 2392 | * contiguous to the right of it, wrapping around from -1 to 0, and |
| 2393 | * then finishing as S64_MAX (0x7fffffffffffffff) right before |
| 2394 | * S64_MIN. We can try drawing the continuity of u64 vs s64 values |
| 2395 | * more visually as mapped to sign-agnostic range of hex values. |
| 2396 | * |
| 2397 | * u64 start u64 end |
| 2398 | * _______________________________________________________________ |
| 2399 | * / \ |
| 2400 | * 0 0x7fffffffffffffff 0x8000000000000000 U64_MAX |
| 2401 | * |-------------------------------|--------------------------------| |
| 2402 | * 0 S64_MAX S64_MIN -1 |
| 2403 | * / \ |
| 2404 | * >------------------------------ -------------------------------> |
| 2405 | * s64 continues... s64 end s64 start s64 "midpoint" |
| 2406 | * |
| 2407 | * What this means is that, in general, we can't always derive |
| 2408 | * something new about u64 from any random s64 range, and vice versa. |
| 2409 | * |
| 2410 | * But we can do that in two particular cases. One is when entire |
| 2411 | * u64/s64 range is *entirely* contained within left half of the above |
| 2412 | * diagram or when it is *entirely* contained in the right half. I.e.: |
| 2413 | * |
| 2414 | * |-------------------------------|--------------------------------| |
| 2415 | * ^ ^ ^ ^ |
| 2416 | * A B C D |
| 2417 | * |
| 2418 | * [A, B] and [C, D] are contained entirely in their respective halves |
| 2419 | * and form valid contiguous ranges as both u64 and s64 values. [A, B] |
| 2420 | * will be non-negative both as u64 and s64 (and in fact it will be |
| 2421 | * identical ranges no matter the signedness). [C, D] treated as s64 |
| 2422 | * will be a range of negative values, while in u64 it will be |
| 2423 | * non-negative range of values larger than 0x8000000000000000. |
| 2424 | * |
| 2425 | * Now, any other range here can't be represented in both u64 and s64 |
| 2426 | * simultaneously. E.g., [A, C], [A, D], [B, C], [B, D] are valid |
| 2427 | * contiguous u64 ranges, but they are discontinuous in s64. [B, C] |
	 * in s64 would properly be represented as [S64_MIN, C] and [B, S64_MAX],
| 2429 | * for example. Similarly, valid s64 range [D, A] (going from negative |
| 2430 | * to positive values), would be two separate [D, U64_MAX] and [0, A] |
| 2431 | * ranges as u64. Currently reg_state can't represent two segments per |
| 2432 | * numeric domain, so in such situations we can only derive maximal |
| 2433 | * possible range ([0, U64_MAX] for u64, and [S64_MIN, S64_MAX] for s64). |
| 2434 | * |
| 2435 | * So we use these facts to derive umin/umax from smin/smax and vice |
| 2436 | * versa only if they stay within the same "half". This is equivalent |
| 2437 | * to checking sign bit: lower half will have sign bit as zero, upper |
| 2438 | * half have sign bit 1. Below in code we simplify this by just |
| 2439 | * casting umin/umax as smin/smax and checking if they form valid |
| 2440 | * range, and vice versa. Those are equivalent checks. |
| 2441 | */ |
| 2442 | if ((s64)reg->umin_value <= (s64)reg->umax_value) { |
| 2443 | reg->smin_value = max_t(s64, reg->smin_value, reg->umin_value); |
| 2444 | reg->smax_value = min_t(s64, reg->smax_value, reg->umax_value); |
| 2445 | } |
| 2446 | /* If we cannot cross the sign boundary, then signed and unsigned bounds |
| 2447 | * are the same, so combine. This works even in the negative case, e.g. |
| 2448 | * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. |
| 2449 | */ |
| 2450 | if ((u64)reg->smin_value <= (u64)reg->smax_value) { |
| 2451 | reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); |
| 2452 | reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); |
| 2453 | } |
| 2454 | } |
| 2455 | |
| 2456 | static void __reg_deduce_mixed_bounds(struct bpf_reg_state *reg) |
| 2457 | { |
| 2458 | /* Try to tighten 64-bit bounds from 32-bit knowledge, using 32-bit |
| 2459 | * values on both sides of 64-bit range in hope to have tighter range. |
| 2460 | * E.g., if r1 is [0x1'00000000, 0x3'80000000], and we learn from |
| 2461 | * 32-bit signed > 0 operation that s32 bounds are now [1; 0x7fffffff]. |
| 2462 | * With this, we can substitute 1 as low 32-bits of _low_ 64-bit bound |
| 2463 | * (0x100000000 -> 0x100000001) and 0x7fffffff as low 32-bits of |
| 2464 | * _high_ 64-bit bound (0x380000000 -> 0x37fffffff) and arrive at a |
	 * better overall bounds for r1 as [0x1'00000001; 0x3'7fffffff].
| 2466 | * We just need to make sure that derived bounds we are intersecting |
| 2467 | * with are well-formed ranges in respective s64 or u64 domain, just |
| 2468 | * like we do with similar kinds of 32-to-64 or 64-to-32 adjustments. |
| 2469 | */ |
| 2470 | __u64 new_umin, new_umax; |
| 2471 | __s64 new_smin, new_smax; |
| 2472 | |
| 2473 | /* u32 -> u64 tightening, it's always well-formed */ |
| 2474 | new_umin = (reg->umin_value & ~0xffffffffULL) | reg->u32_min_value; |
| 2475 | new_umax = (reg->umax_value & ~0xffffffffULL) | reg->u32_max_value; |
| 2476 | reg->umin_value = max_t(u64, reg->umin_value, new_umin); |
| 2477 | reg->umax_value = min_t(u64, reg->umax_value, new_umax); |
| 2478 | /* u32 -> s64 tightening, u32 range embedded into s64 preserves range validity */ |
| 2479 | new_smin = (reg->smin_value & ~0xffffffffULL) | reg->u32_min_value; |
| 2480 | new_smax = (reg->smax_value & ~0xffffffffULL) | reg->u32_max_value; |
| 2481 | reg->smin_value = max_t(s64, reg->smin_value, new_smin); |
| 2482 | reg->smax_value = min_t(s64, reg->smax_value, new_smax); |
| 2483 | |
| 2484 | /* if s32 can be treated as valid u32 range, we can use it as well */ |
| 2485 | if ((u32)reg->s32_min_value <= (u32)reg->s32_max_value) { |
| 2486 | /* s32 -> u64 tightening */ |
| 2487 | new_umin = (reg->umin_value & ~0xffffffffULL) | (u32)reg->s32_min_value; |
| 2488 | new_umax = (reg->umax_value & ~0xffffffffULL) | (u32)reg->s32_max_value; |
| 2489 | reg->umin_value = max_t(u64, reg->umin_value, new_umin); |
| 2490 | reg->umax_value = min_t(u64, reg->umax_value, new_umax); |
| 2491 | /* s32 -> s64 tightening */ |
| 2492 | new_smin = (reg->smin_value & ~0xffffffffULL) | (u32)reg->s32_min_value; |
| 2493 | new_smax = (reg->smax_value & ~0xffffffffULL) | (u32)reg->s32_max_value; |
| 2494 | reg->smin_value = max_t(s64, reg->smin_value, new_smin); |
| 2495 | reg->smax_value = min_t(s64, reg->smax_value, new_smax); |
| 2496 | } |
| 2497 | |
| 2498 | /* Here we would like to handle a special case after sign extending load, |
| 2499 | * when upper bits for a 64-bit range are all 1s or all 0s. |
| 2500 | * |
| 2501 | * Upper bits are all 1s when register is in a range: |
| 2502 | * [0xffff_ffff_0000_0000, 0xffff_ffff_ffff_ffff] |
| 2503 | * Upper bits are all 0s when register is in a range: |
| 2504 | * [0x0000_0000_0000_0000, 0x0000_0000_ffff_ffff] |
	 * Together these form one continuous range:
| 2506 | * [0xffff_ffff_0000_0000, 0x0000_0000_ffff_ffff] |
| 2507 | * |
| 2508 | * Now, suppose that register range is in fact tighter: |
| 2509 | * [0xffff_ffff_8000_0000, 0x0000_0000_ffff_ffff] (R) |
	 * Also suppose that its 32-bit range is positive,
| 2511 | * meaning that lower 32-bits of the full 64-bit register |
| 2512 | * are in the range: |
| 2513 | * [0x0000_0000, 0x7fff_ffff] (W) |
| 2514 | * |
| 2515 | * If this happens, then any value in a range: |
| 2516 | * [0xffff_ffff_0000_0000, 0xffff_ffff_7fff_ffff] |
| 2517 | * is smaller than a lowest bound of the range (R): |
| 2518 | * 0xffff_ffff_8000_0000 |
| 2519 | * which means that upper bits of the full 64-bit register |
| 2520 | * can't be all 1s, when lower bits are in range (W). |
| 2521 | * |
| 2522 | * Note that: |
| 2523 | * - 0xffff_ffff_8000_0000 == (s64)S32_MIN |
| 2524 | * - 0x0000_0000_7fff_ffff == (s64)S32_MAX |
| 2525 | * These relations are used in the conditions below. |
| 2526 | */ |
| 2527 | if (reg->s32_min_value >= 0 && reg->smin_value >= S32_MIN && reg->smax_value <= S32_MAX) { |
| 2528 | reg->smin_value = reg->s32_min_value; |
| 2529 | reg->smax_value = reg->s32_max_value; |
| 2530 | reg->umin_value = reg->s32_min_value; |
| 2531 | reg->umax_value = reg->s32_max_value; |
| 2532 | reg->var_off = tnum_intersect(reg->var_off, |
| 2533 | tnum_range(reg->smin_value, reg->smax_value)); |
| 2534 | } |
| 2535 | } |
| 2536 | |
| 2537 | static void __reg_deduce_bounds(struct bpf_reg_state *reg) |
| 2538 | { |
| 2539 | __reg32_deduce_bounds(reg); |
| 2540 | __reg64_deduce_bounds(reg); |
| 2541 | __reg_deduce_mixed_bounds(reg); |
| 2542 | } |
| 2543 | |
| 2544 | /* Attempts to improve var_off based on unsigned min/max information */ |
| 2545 | static void __reg_bound_offset(struct bpf_reg_state *reg) |
| 2546 | { |
| 2547 | struct tnum var64_off = tnum_intersect(reg->var_off, |
| 2548 | tnum_range(reg->umin_value, |
| 2549 | reg->umax_value)); |
| 2550 | struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off), |
| 2551 | tnum_range(reg->u32_min_value, |
| 2552 | reg->u32_max_value)); |
| 2553 | |
| 2554 | reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); |
| 2555 | } |
| 2556 | |
| 2557 | static void reg_bounds_sync(struct bpf_reg_state *reg) |
| 2558 | { |
| 2559 | /* We might have learned new bounds from the var_off. */ |
| 2560 | __update_reg_bounds(reg); |
| 2561 | /* We might have learned something about the sign bit. */ |
| 2562 | __reg_deduce_bounds(reg); |
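	/* Deduce a second time: bounds tightened late in the first pass can
	 * enable further tightening across the 32/64-bit and signed/unsigned
	 * views on the next pass.
	 */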
| 2563 | __reg_deduce_bounds(reg); |
| 2564 | /* We might have learned some bits from the bounds. */ |
| 2565 | __reg_bound_offset(reg); |
| 2566 | /* Intersecting with the old var_off might have improved our bounds |
| 2567 | * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), |
| 2568 | * then new var_off is (0; 0x7f...fc) which improves our umax. |
| 2569 | */ |
| 2570 | __update_reg_bounds(reg); |
| 2571 | } |
| 2572 | |
| 2573 | static int reg_bounds_sanity_check(struct bpf_verifier_env *env, |
| 2574 | struct bpf_reg_state *reg, const char *ctx) |
| 2575 | { |
| 2576 | const char *msg; |
| 2577 | |
| 2578 | if (reg->umin_value > reg->umax_value || |
| 2579 | reg->smin_value > reg->smax_value || |
| 2580 | reg->u32_min_value > reg->u32_max_value || |
| 2581 | reg->s32_min_value > reg->s32_max_value) { |
| 2582 | msg = "range bounds violation"; |
| 2583 | goto out; |
| 2584 | } |
| 2585 | |
| 2586 | if (tnum_is_const(reg->var_off)) { |
| 2587 | u64 uval = reg->var_off.value; |
| 2588 | s64 sval = (s64)uval; |
| 2589 | |
| 2590 | if (reg->umin_value != uval || reg->umax_value != uval || |
| 2591 | reg->smin_value != sval || reg->smax_value != sval) { |
| 2592 | msg = "const tnum out of sync with range bounds"; |
| 2593 | goto out; |
| 2594 | } |
| 2595 | } |
| 2596 | |
| 2597 | if (tnum_subreg_is_const(reg->var_off)) { |
| 2598 | u32 uval32 = tnum_subreg(reg->var_off).value; |
| 2599 | s32 sval32 = (s32)uval32; |
| 2600 | |
| 2601 | if (reg->u32_min_value != uval32 || reg->u32_max_value != uval32 || |
| 2602 | reg->s32_min_value != sval32 || reg->s32_max_value != sval32) { |
| 2603 | msg = "const subreg tnum out of sync with range bounds"; |
| 2604 | goto out; |
| 2605 | } |
| 2606 | } |
| 2607 | |
| 2608 | return 0; |
| 2609 | out: |
| 2610 | verbose(env, "REG INVARIANTS VIOLATION (%s): %s u64=[%#llx, %#llx] " |
| 2611 | "s64=[%#llx, %#llx] u32=[%#x, %#x] s32=[%#x, %#x] var_off=(%#llx, %#llx)\n", |
| 2612 | ctx, msg, reg->umin_value, reg->umax_value, |
| 2613 | reg->smin_value, reg->smax_value, |
| 2614 | reg->u32_min_value, reg->u32_max_value, |
| 2615 | reg->s32_min_value, reg->s32_max_value, |
| 2616 | reg->var_off.value, reg->var_off.mask); |
| 2617 | if (env->test_reg_invariants) |
| 2618 | return -EFAULT; |
| 2619 | __mark_reg_unbounded(reg); |
| 2620 | return 0; |
| 2621 | } |
| 2622 | |
| 2623 | static bool __reg32_bound_s64(s32 a) |
| 2624 | { |
| 2625 | return a >= 0 && a <= S32_MAX; |
| 2626 | } |
| 2627 | |
| 2628 | static void __reg_assign_32_into_64(struct bpf_reg_state *reg) |
| 2629 | { |
| 2630 | reg->umin_value = reg->u32_min_value; |
| 2631 | reg->umax_value = reg->u32_max_value; |
| 2632 | |
	/* Attempt to pull the 32-bit signed bounds into the 64-bit bounds, but
	 * only if they are non-negative; otherwise fall back to worst-case
	 * bounds and refine later from the tnum.
| 2636 | */ |
| 2637 | if (__reg32_bound_s64(reg->s32_min_value) && |
| 2638 | __reg32_bound_s64(reg->s32_max_value)) { |
| 2639 | reg->smin_value = reg->s32_min_value; |
| 2640 | reg->smax_value = reg->s32_max_value; |
| 2641 | } else { |
| 2642 | reg->smin_value = 0; |
| 2643 | reg->smax_value = U32_MAX; |
| 2644 | } |
| 2645 | } |
| 2646 | |
| 2647 | /* Mark a register as having a completely unknown (scalar) value. */ |
| 2648 | static void __mark_reg_unknown_imprecise(struct bpf_reg_state *reg) |
| 2649 | { |
| 2650 | /* |
| 2651 | * Clear type, off, and union(map_ptr, range) and |
| 2652 | * padding between 'type' and union |
| 2653 | */ |
| 2654 | memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); |
| 2655 | reg->type = SCALAR_VALUE; |
| 2656 | reg->id = 0; |
| 2657 | reg->ref_obj_id = 0; |
| 2658 | reg->var_off = tnum_unknown; |
| 2659 | reg->frameno = 0; |
| 2660 | reg->precise = false; |
| 2661 | __mark_reg_unbounded(reg); |
| 2662 | } |
| 2663 | |
| 2664 | /* Mark a register as having a completely unknown (scalar) value, |
| 2665 | * initialize .precise as true when not bpf capable. |
| 2666 | */ |
| 2667 | static void __mark_reg_unknown(const struct bpf_verifier_env *env, |
| 2668 | struct bpf_reg_state *reg) |
| 2669 | { |
| 2670 | __mark_reg_unknown_imprecise(reg); |
| 2671 | reg->precise = !env->bpf_capable; |
| 2672 | } |
| 2673 | |
| 2674 | static void mark_reg_unknown(struct bpf_verifier_env *env, |
| 2675 | struct bpf_reg_state *regs, u32 regno) |
| 2676 | { |
| 2677 | if (WARN_ON(regno >= MAX_BPF_REG)) { |
| 2678 | verbose(env, "mark_reg_unknown(regs, %u)\n", regno); |
| 2679 | /* Something bad happened, let's kill all regs except FP */ |
| 2680 | for (regno = 0; regno < BPF_REG_FP; regno++) |
| 2681 | __mark_reg_not_init(env, regs + regno); |
| 2682 | return; |
| 2683 | } |
| 2684 | __mark_reg_unknown(env, regs + regno); |
| 2685 | } |
| 2686 | |
| 2687 | static int __mark_reg_s32_range(struct bpf_verifier_env *env, |
| 2688 | struct bpf_reg_state *regs, |
| 2689 | u32 regno, |
| 2690 | s32 s32_min, |
| 2691 | s32 s32_max) |
| 2692 | { |
| 2693 | struct bpf_reg_state *reg = regs + regno; |
| 2694 | |
| 2695 | reg->s32_min_value = max_t(s32, reg->s32_min_value, s32_min); |
| 2696 | reg->s32_max_value = min_t(s32, reg->s32_max_value, s32_max); |
| 2697 | |
| 2698 | reg->smin_value = max_t(s64, reg->smin_value, s32_min); |
| 2699 | reg->smax_value = min_t(s64, reg->smax_value, s32_max); |
| 2700 | |
| 2701 | reg_bounds_sync(reg); |
| 2702 | |
| 2703 | return reg_bounds_sanity_check(env, reg, "s32_range"); |
| 2704 | } |
| 2705 | |
| 2706 | static void __mark_reg_not_init(const struct bpf_verifier_env *env, |
| 2707 | struct bpf_reg_state *reg) |
| 2708 | { |
| 2709 | __mark_reg_unknown(env, reg); |
| 2710 | reg->type = NOT_INIT; |
| 2711 | } |
| 2712 | |
| 2713 | static void mark_reg_not_init(struct bpf_verifier_env *env, |
| 2714 | struct bpf_reg_state *regs, u32 regno) |
| 2715 | { |
| 2716 | if (WARN_ON(regno >= MAX_BPF_REG)) { |
| 2717 | verbose(env, "mark_reg_not_init(regs, %u)\n", regno); |
| 2718 | /* Something bad happened, let's kill all regs except FP */ |
| 2719 | for (regno = 0; regno < BPF_REG_FP; regno++) |
| 2720 | __mark_reg_not_init(env, regs + regno); |
| 2721 | return; |
| 2722 | } |
| 2723 | __mark_reg_not_init(env, regs + regno); |
| 2724 | } |
| 2725 | |
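/* Set up the destination register after a BTF-typed load: scalar results are
 * marked unknown, pointer results become PTR_TO_BTF_ID with the given btf,
 * btf_id and type flags, and possibly-NULL pointers get a fresh id.
 */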
| 2726 | static void mark_btf_ld_reg(struct bpf_verifier_env *env, |
| 2727 | struct bpf_reg_state *regs, u32 regno, |
| 2728 | enum bpf_reg_type reg_type, |
| 2729 | struct btf *btf, u32 btf_id, |
| 2730 | enum bpf_type_flag flag) |
| 2731 | { |
| 2732 | if (reg_type == SCALAR_VALUE) { |
| 2733 | mark_reg_unknown(env, regs, regno); |
| 2734 | return; |
| 2735 | } |
| 2736 | mark_reg_known_zero(env, regs, regno); |
| 2737 | regs[regno].type = PTR_TO_BTF_ID | flag; |
| 2738 | regs[regno].btf = btf; |
| 2739 | regs[regno].btf_id = btf_id; |
| 2740 | if (type_may_be_null(flag)) |
| 2741 | regs[regno].id = ++env->id_gen; |
| 2742 | } |
| 2743 | |
| 2744 | #define DEF_NOT_SUBREG (0) |
| 2745 | static void init_reg_state(struct bpf_verifier_env *env, |
| 2746 | struct bpf_func_state *state) |
| 2747 | { |
| 2748 | struct bpf_reg_state *regs = state->regs; |
| 2749 | int i; |
| 2750 | |
| 2751 | for (i = 0; i < MAX_BPF_REG; i++) { |
| 2752 | mark_reg_not_init(env, regs, i); |
| 2753 | regs[i].live = REG_LIVE_NONE; |
| 2754 | regs[i].parent = NULL; |
| 2755 | regs[i].subreg_def = DEF_NOT_SUBREG; |
| 2756 | } |
| 2757 | |
| 2758 | /* frame pointer */ |
| 2759 | regs[BPF_REG_FP].type = PTR_TO_STACK; |
| 2760 | mark_reg_known_zero(env, regs, BPF_REG_FP); |
| 2761 | regs[BPF_REG_FP].frameno = state->frameno; |
| 2762 | } |
| 2763 | |
| 2764 | static struct bpf_retval_range retval_range(s32 minval, s32 maxval) |
| 2765 | { |
| 2766 | return (struct bpf_retval_range){ minval, maxval }; |
| 2767 | } |
| 2768 | |
| 2769 | #define BPF_MAIN_FUNC (-1) |
| 2770 | static void init_func_state(struct bpf_verifier_env *env, |
| 2771 | struct bpf_func_state *state, |
| 2772 | int callsite, int frameno, int subprogno) |
| 2773 | { |
| 2774 | state->callsite = callsite; |
| 2775 | state->frameno = frameno; |
| 2776 | state->subprogno = subprogno; |
| 2777 | state->callback_ret_range = retval_range(0, 0); |
| 2778 | init_reg_state(env, state); |
| 2779 | mark_verifier_state_scratched(env); |
| 2780 | } |
| 2781 | |
| 2782 | /* Similar to push_stack(), but for async callbacks */ |
| 2783 | static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, |
| 2784 | int insn_idx, int prev_insn_idx, |
| 2785 | int subprog, bool is_sleepable) |
| 2786 | { |
| 2787 | struct bpf_verifier_stack_elem *elem; |
| 2788 | struct bpf_func_state *frame; |
| 2789 | |
| 2790 | elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); |
| 2791 | if (!elem) |
| 2792 | goto err; |
| 2793 | |
| 2794 | elem->insn_idx = insn_idx; |
| 2795 | elem->prev_insn_idx = prev_insn_idx; |
| 2796 | elem->next = env->head; |
| 2797 | elem->log_pos = env->log.end_pos; |
| 2798 | env->head = elem; |
| 2799 | env->stack_size++; |
| 2800 | if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { |
| 2801 | verbose(env, |
| 2802 | "The sequence of %d jumps is too complex for async cb.\n", |
| 2803 | env->stack_size); |
| 2804 | goto err; |
| 2805 | } |
| 2806 | /* Unlike push_stack() do not copy_verifier_state(). |
| 2807 | * The caller state doesn't matter. |
| 2808 | * This is async callback. It starts in a fresh stack. |
| 2809 | * Initialize it similar to do_check_common(). |
| 2810 | * But we do need to make sure to not clobber insn_hist, so we keep |
| 2811 | * chaining insn_hist_start/insn_hist_end indices as for a normal |
| 2812 | * child state. |
| 2813 | */ |
| 2814 | elem->st.branches = 1; |
| 2815 | elem->st.in_sleepable = is_sleepable; |
| 2816 | elem->st.insn_hist_start = env->cur_state->insn_hist_end; |
| 2817 | elem->st.insn_hist_end = elem->st.insn_hist_start; |
| 2818 | frame = kzalloc(sizeof(*frame), GFP_KERNEL); |
| 2819 | if (!frame) |
| 2820 | goto err; |
| 2821 | init_func_state(env, frame, |
| 2822 | BPF_MAIN_FUNC /* callsite */, |
| 2823 | 0 /* frameno within this callchain */, |
| 2824 | subprog /* subprog number within this prog */); |
| 2825 | elem->st.frame[0] = frame; |
| 2826 | return &elem->st; |
| 2827 | err: |
| 2828 | free_verifier_state(env->cur_state, true); |
| 2829 | env->cur_state = NULL; |
| 2830 | /* pop all elements and return */ |
| 2831 | while (!pop_stack(env, NULL, NULL, false)); |
| 2832 | return NULL; |
| 2833 | } |
| 2834 | |
| 2836 | enum reg_arg_type { |
| 2837 | SRC_OP, /* register is used as source operand */ |
| 2838 | DST_OP, /* register is used as destination operand */ |
| 2839 | DST_OP_NO_MARK /* same as above, check only, don't mark */ |
| 2840 | }; |
| 2841 | |
| 2842 | static int cmp_subprogs(const void *a, const void *b) |
| 2843 | { |
| 2844 | return ((struct bpf_subprog_info *)a)->start - |
| 2845 | ((struct bpf_subprog_info *)b)->start; |
| 2846 | } |
| 2847 | |
| 2848 | /* Find subprogram that contains instruction at 'off' */ |
| 2849 | static struct bpf_subprog_info *find_containing_subprog(struct bpf_verifier_env *env, int off) |
| 2850 | { |
| 2851 | struct bpf_subprog_info *vals = env->subprog_info; |
| 2852 | int l, r, m; |
| 2853 | |
| 2854 | if (off >= env->prog->len || off < 0 || env->subprog_cnt == 0) |
| 2855 | return NULL; |
| 2856 | |
| 2857 | l = 0; |
| 2858 | r = env->subprog_cnt - 1; |
| 2859 | while (l < r) { |
| 2860 | m = l + (r - l + 1) / 2; |
| 2861 | if (vals[m].start <= off) |
| 2862 | l = m; |
| 2863 | else |
| 2864 | r = m - 1; |
| 2865 | } |
| 2866 | return &vals[l]; |
| 2867 | } |
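| | |
| | /* Illustrative example (not part of the algorithm): with subprog starts |
| | * {0, 5, 12} and off == 7, the loop above narrows [l, r] from [0, 2] to |
| | * [1, 2] to [1, 1] and returns the subprog starting at 5, i.e. the one |
| | * with the greatest start <= off. |
| | */ |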
| 2868 | |
| 2869 | /* Find subprogram that starts exactly at 'off' */ |
| 2870 | static int find_subprog(struct bpf_verifier_env *env, int off) |
| 2871 | { |
| 2872 | struct bpf_subprog_info *p; |
| 2873 | |
| 2874 | p = find_containing_subprog(env, off); |
| 2875 | if (!p || p->start != off) |
| 2876 | return -ENOENT; |
| 2877 | return p - env->subprog_info; |
| 2878 | } |
| 2879 | |
| 2880 | static int add_subprog(struct bpf_verifier_env *env, int off) |
| 2881 | { |
| 2882 | int insn_cnt = env->prog->len; |
| 2883 | int ret; |
| 2884 | |
| 2885 | if (off >= insn_cnt || off < 0) { |
| 2886 | verbose(env, "call to invalid destination\n"); |
| 2887 | return -EINVAL; |
| 2888 | } |
| 2889 | ret = find_subprog(env, off); |
| 2890 | if (ret >= 0) |
| 2891 | return ret; |
| 2892 | if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { |
| 2893 | verbose(env, "too many subprograms\n"); |
| 2894 | return -E2BIG; |
| 2895 | } |
| 2896 | /* determine subprog starts. The end is one before the next starts */ |
| 2897 | env->subprog_info[env->subprog_cnt++].start = off; |
| 2898 | sort(env->subprog_info, env->subprog_cnt, |
| 2899 | sizeof(env->subprog_info[0]), cmp_subprogs, NULL); |
| 2900 | return env->subprog_cnt - 1; |
| 2901 | } |
| 2902 | |
| 2903 | static int bpf_find_exception_callback_insn_off(struct bpf_verifier_env *env) |
| 2904 | { |
| 2905 | struct bpf_prog_aux *aux = env->prog->aux; |
| 2906 | struct btf *btf = aux->btf; |
| 2907 | const struct btf_type *t; |
| 2908 | u32 main_btf_id, id; |
| 2909 | const char *name; |
| 2910 | int ret, i; |
| 2911 | |
| 2912 | /* Non-zero func_info_cnt implies valid btf */ |
| 2913 | if (!aux->func_info_cnt) |
| 2914 | return 0; |
| 2915 | main_btf_id = aux->func_info[0].type_id; |
| 2916 | |
| 2917 | t = btf_type_by_id(btf, main_btf_id); |
| 2918 | if (!t) { |
| 2919 | verbose(env, "invalid btf id for main subprog in func_info\n"); |
| 2920 | return -EINVAL; |
| 2921 | } |
| 2922 | |
| 2923 | name = btf_find_decl_tag_value(btf, t, -1, "exception_callback:"); |
| 2924 | if (IS_ERR(name)) { |
| 2925 | ret = PTR_ERR(name); |
| 2926 | /* If there is no tag present, there is no exception callback */ |
| 2927 | if (ret == -ENOENT) |
| 2928 | ret = 0; |
| 2929 | else if (ret == -EEXIST) |
| 2930 | verbose(env, "multiple exception callback tags for main subprog\n"); |
| 2931 | return ret; |
| 2932 | } |
| 2933 | |
| 2934 | ret = btf_find_by_name_kind(btf, name, BTF_KIND_FUNC); |
| 2935 | if (ret < 0) { |
| 2936 | verbose(env, "exception callback '%s' could not be found in BTF\n", name); |
| 2937 | return ret; |
| 2938 | } |
| 2939 | id = ret; |
| 2940 | t = btf_type_by_id(btf, id); |
| 2941 | if (btf_func_linkage(t) != BTF_FUNC_GLOBAL) { |
| 2942 | verbose(env, "exception callback '%s' must have global linkage\n", name); |
| 2943 | return -EINVAL; |
| 2944 | } |
| 2945 | ret = 0; |
| 2946 | for (i = 0; i < aux->func_info_cnt; i++) { |
| 2947 | if (aux->func_info[i].type_id != id) |
| 2948 | continue; |
| 2949 | ret = aux->func_info[i].insn_off; |
| 2950 | /* Further func_info and subprog checks will also happen |
| 2951 | * later, so assume this is the right insn_off for now. |
| 2952 | */ |
| 2953 | if (!ret) { |
| 2954 | verbose(env, "invalid exception callback insn_off in func_info: 0\n"); |
| 2955 | ret = -EINVAL; |
| 2956 | } |
| 2957 | } |
| 2958 | if (!ret) { |
| 2959 | verbose(env, "exception callback type id not found in func_info\n"); |
| 2960 | ret = -EINVAL; |
| 2961 | } |
| 2962 | return ret; |
| 2963 | } |
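| | |
| | /* Illustrative usage (sketch; the exact attribute/macro spelling on the BPF |
| | * side is an assumption and may differ between libbpf versions): the main |
| | * program carries a BTF decl tag naming its exception callback, e.g. |
| | * |
| | * __attribute__((btf_decl_tag("exception_callback:my_exc_cb"))) |
| | * int prog(void *ctx) { ... } |
| | * |
| | * int my_exc_cb(u64 cookie) { ... } // must have global linkage |
| | * |
| | * The lookup above strips the "exception_callback:" prefix, resolves the |
| | * remaining name as a BTF_KIND_FUNC with BTF_FUNC_GLOBAL linkage and |
| | * returns that function's insn_off from func_info. |
| | */ |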
| 2964 | |
| 2965 | #define MAX_KFUNC_DESCS 256 |
| 2966 | #define MAX_KFUNC_BTFS 256 |
| 2967 | |
| 2968 | struct bpf_kfunc_desc { |
| 2969 | struct btf_func_model func_model; |
| 2970 | u32 func_id; |
| 2971 | s32 imm; |
| 2972 | u16 offset; |
| 2973 | unsigned long addr; |
| 2974 | }; |
| 2975 | |
| 2976 | struct bpf_kfunc_btf { |
| 2977 | struct btf *btf; |
| 2978 | struct module *module; |
| 2979 | u16 offset; |
| 2980 | }; |
| 2981 | |
| 2982 | struct bpf_kfunc_desc_tab { |
| 2983 | /* Sorted by func_id (BTF ID) and offset (fd_array offset) during |
| 2984 | * verification. JITs do lookups by bpf_insn, where func_id may not be |
| 2985 | * available, therefore at the end of verification do_misc_fixups() |
| 2986 | * sorts this by imm and offset. |
| 2987 | */ |
| 2988 | struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS]; |
| 2989 | u32 nr_descs; |
| 2990 | }; |
| 2991 | |
| 2992 | struct bpf_kfunc_btf_tab { |
| 2993 | struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS]; |
| 2994 | u32 nr_descs; |
| 2995 | }; |
| 2996 | |
| 2997 | static int kfunc_desc_cmp_by_id_off(const void *a, const void *b) |
| 2998 | { |
| 2999 | const struct bpf_kfunc_desc *d0 = a; |
| 3000 | const struct bpf_kfunc_desc *d1 = b; |
| 3001 | |
| 3002 | /* func_id is not greater than BTF_MAX_TYPE */ |
| 3003 | return d0->func_id - d1->func_id ?: d0->offset - d1->offset; |
| 3004 | } |
| 3005 | |
| 3006 | static int kfunc_btf_cmp_by_off(const void *a, const void *b) |
| 3007 | { |
| 3008 | const struct bpf_kfunc_btf *d0 = a; |
| 3009 | const struct bpf_kfunc_btf *d1 = b; |
| 3010 | |
| 3011 | return d0->offset - d1->offset; |
| 3012 | } |
| 3013 | |
| 3014 | static const struct bpf_kfunc_desc * |
| 3015 | find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset) |
| 3016 | { |
| 3017 | struct bpf_kfunc_desc desc = { |
| 3018 | .func_id = func_id, |
| 3019 | .offset = offset, |
| 3020 | }; |
| 3021 | struct bpf_kfunc_desc_tab *tab; |
| 3022 | |
| 3023 | tab = prog->aux->kfunc_tab; |
| 3024 | return bsearch(&desc, tab->descs, tab->nr_descs, |
| 3025 | sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off); |
| 3026 | } |
| 3027 | |
| 3028 | int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, |
| 3029 | u16 btf_fd_idx, u8 **func_addr) |
| 3030 | { |
| 3031 | const struct bpf_kfunc_desc *desc; |
| 3032 | |
| 3033 | desc = find_kfunc_desc(prog, func_id, btf_fd_idx); |
| 3034 | if (!desc) |
| 3035 | return -EFAULT; |
| 3036 | |
| 3037 | *func_addr = (u8 *)desc->addr; |
| 3038 | return 0; |
| 3039 | } |
| 3040 | |
| 3041 | static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env, |
| 3042 | s16 offset) |
| 3043 | { |
| 3044 | struct bpf_kfunc_btf kf_btf = { .offset = offset }; |
| 3045 | struct bpf_kfunc_btf_tab *tab; |
| 3046 | struct bpf_kfunc_btf *b; |
| 3047 | struct module *mod; |
| 3048 | struct btf *btf; |
| 3049 | int btf_fd; |
| 3050 | |
| 3051 | tab = env->prog->aux->kfunc_btf_tab; |
| 3052 | b = bsearch(&kf_btf, tab->descs, tab->nr_descs, |
| 3053 | sizeof(tab->descs[0]), kfunc_btf_cmp_by_off); |
| 3054 | if (!b) { |
| 3055 | if (tab->nr_descs == MAX_KFUNC_BTFS) { |
| 3056 | verbose(env, "too many different module BTFs\n"); |
| 3057 | return ERR_PTR(-E2BIG); |
| 3058 | } |
| 3059 | |
| 3060 | if (bpfptr_is_null(env->fd_array)) { |
| 3061 | verbose(env, "kfunc offset > 0 without fd_array is invalid\n"); |
| 3062 | return ERR_PTR(-EPROTO); |
| 3063 | } |
| 3064 | |
| 3065 | if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, |
| 3066 | offset * sizeof(btf_fd), |
| 3067 | sizeof(btf_fd))) |
| 3068 | return ERR_PTR(-EFAULT); |
| 3069 | |
| 3070 | btf = btf_get_by_fd(btf_fd); |
| 3071 | if (IS_ERR(btf)) { |
| 3072 | verbose(env, "invalid module BTF fd specified\n"); |
| 3073 | return btf; |
| 3074 | } |
| 3075 | |
| 3076 | if (!btf_is_module(btf)) { |
| 3077 | verbose(env, "BTF fd for kfunc is not a module BTF\n"); |
| 3078 | btf_put(btf); |
| 3079 | return ERR_PTR(-EINVAL); |
| 3080 | } |
| 3081 | |
| 3082 | mod = btf_try_get_module(btf); |
| 3083 | if (!mod) { |
| 3084 | btf_put(btf); |
| 3085 | return ERR_PTR(-ENXIO); |
| 3086 | } |
| 3087 | |
| 3088 | b = &tab->descs[tab->nr_descs++]; |
| 3089 | b->btf = btf; |
| 3090 | b->module = mod; |
| 3091 | b->offset = offset; |
| 3092 | |
| 3093 | /* sort() reorders entries by value, so b may no longer point |
| 3094 | * to the right entry after this |
| 3095 | */ |
| 3096 | sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), |
| 3097 | kfunc_btf_cmp_by_off, NULL); |
| 3098 | } else { |
| 3099 | btf = b->btf; |
| 3100 | } |
| 3101 | |
| 3102 | return btf; |
| 3103 | } |
| 3104 | |
| 3105 | void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab) |
| 3106 | { |
| 3107 | if (!tab) |
| 3108 | return; |
| 3109 | |
| 3110 | while (tab->nr_descs--) { |
| 3111 | module_put(tab->descs[tab->nr_descs].module); |
| 3112 | btf_put(tab->descs[tab->nr_descs].btf); |
| 3113 | } |
| 3114 | kfree(tab); |
| 3115 | } |
| 3116 | |
| 3117 | static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset) |
| 3118 | { |
| 3119 | if (offset) { |
| 3120 | if (offset < 0) { |
| 3121 | /* In the future, negative offsets could be allowed in order to increase |
| 3122 | * the limit of the fd index into fd_array, by interpreting it as a u16. |
| 3123 | */ |
| 3124 | verbose(env, "negative offset disallowed for kernel module function call\n"); |
| 3125 | return ERR_PTR(-EINVAL); |
| 3126 | } |
| 3127 | |
| 3128 | return __find_kfunc_desc_btf(env, offset); |
| 3129 | } |
| 3130 | return btf_vmlinux ?: ERR_PTR(-ENOENT); |
| 3131 | } |
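| | |
| | /* Illustrative flow (sketch): for a call to a kfunc defined in a kernel |
| | * module, the loader (e.g. libbpf) is expected to place the module BTF fd |
| | * at some index of the fd_array passed at prog load time and encode that |
| | * index in insn->off of the BPF_PSEUDO_KFUNC_CALL instruction; insn->off == 0 |
| | * means the kfunc is resolved against vmlinux BTF. __find_kfunc_desc_btf() |
| | * converts the index back into a struct btf, taking module and BTF refs |
| | * that are dropped later via bpf_free_kfunc_btf_tab(). |
| | */ |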
| 3132 | |
| 3133 | static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) |
| 3134 | { |
| 3135 | const struct btf_type *func, *func_proto; |
| 3136 | struct bpf_kfunc_btf_tab *btf_tab; |
| 3137 | struct bpf_kfunc_desc_tab *tab; |
| 3138 | struct bpf_prog_aux *prog_aux; |
| 3139 | struct bpf_kfunc_desc *desc; |
| 3140 | const char *func_name; |
| 3141 | struct btf *desc_btf; |
| 3142 | unsigned long call_imm; |
| 3143 | unsigned long addr; |
| 3144 | int err; |
| 3145 | |
| 3146 | prog_aux = env->prog->aux; |
| 3147 | tab = prog_aux->kfunc_tab; |
| 3148 | btf_tab = prog_aux->kfunc_btf_tab; |
| 3149 | if (!tab) { |
| 3150 | if (!btf_vmlinux) { |
| 3151 | verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n"); |
| 3152 | return -ENOTSUPP; |
| 3153 | } |
| 3154 | |
| 3155 | if (!env->prog->jit_requested) { |
| 3156 | verbose(env, "JIT is required for calling kernel function\n"); |
| 3157 | return -ENOTSUPP; |
| 3158 | } |
| 3159 | |
| 3160 | if (!bpf_jit_supports_kfunc_call()) { |
| 3161 | verbose(env, "JIT does not support calling kernel function\n"); |
| 3162 | return -ENOTSUPP; |
| 3163 | } |
| 3164 | |
| 3165 | if (!env->prog->gpl_compatible) { |
| 3166 | verbose(env, "cannot call kernel function from non-GPL compatible program\n"); |
| 3167 | return -EINVAL; |
| 3168 | } |
| 3169 | |
| 3170 | tab = kzalloc(sizeof(*tab), GFP_KERNEL); |
| 3171 | if (!tab) |
| 3172 | return -ENOMEM; |
| 3173 | prog_aux->kfunc_tab = tab; |
| 3174 | } |
| 3175 | |
| 3176 | /* func_id == 0 is always invalid, but instead of returning an error, be |
| 3177 | * conservative and wait until the code elimination pass before returning |
| 3178 | * an error, so that BPF programs loaded from userspace may contain invalid |
| 3179 | * calls as long as they get pruned out. It is also required that offset be |
| 3180 | * untouched (zero) for such calls. |
| 3181 | */ |
| 3182 | if (!func_id && !offset) |
| 3183 | return 0; |
| 3184 | |
| 3185 | if (!btf_tab && offset) { |
| 3186 | btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL); |
| 3187 | if (!btf_tab) |
| 3188 | return -ENOMEM; |
| 3189 | prog_aux->kfunc_btf_tab = btf_tab; |
| 3190 | } |
| 3191 | |
| 3192 | desc_btf = find_kfunc_desc_btf(env, offset); |
| 3193 | if (IS_ERR(desc_btf)) { |
| 3194 | verbose(env, "failed to find BTF for kernel function\n"); |
| 3195 | return PTR_ERR(desc_btf); |
| 3196 | } |
| 3197 | |
| 3198 | if (find_kfunc_desc(env->prog, func_id, offset)) |
| 3199 | return 0; |
| 3200 | |
| 3201 | if (tab->nr_descs == MAX_KFUNC_DESCS) { |
| 3202 | verbose(env, "too many different kernel function calls\n"); |
| 3203 | return -E2BIG; |
| 3204 | } |
| 3205 | |
| 3206 | func = btf_type_by_id(desc_btf, func_id); |
| 3207 | if (!func || !btf_type_is_func(func)) { |
| 3208 | verbose(env, "kernel btf_id %u is not a function\n", |
| 3209 | func_id); |
| 3210 | return -EINVAL; |
| 3211 | } |
| 3212 | func_proto = btf_type_by_id(desc_btf, func->type); |
| 3213 | if (!func_proto || !btf_type_is_func_proto(func_proto)) { |
| 3214 | verbose(env, "kernel function btf_id %u does not have a valid func_proto\n", |
| 3215 | func_id); |
| 3216 | return -EINVAL; |
| 3217 | } |
| 3218 | |
| 3219 | func_name = btf_name_by_offset(desc_btf, func->name_off); |
| 3220 | addr = kallsyms_lookup_name(func_name); |
| 3221 | if (!addr) { |
| 3222 | verbose(env, "cannot find address for kernel function %s\n", |
| 3223 | func_name); |
| 3224 | return -EINVAL; |
| 3225 | } |
| 3226 | specialize_kfunc(env, func_id, offset, &addr); |
| 3227 | |
| 3228 | if (bpf_jit_supports_far_kfunc_call()) { |
| 3229 | call_imm = func_id; |
| 3230 | } else { |
| 3231 | call_imm = BPF_CALL_IMM(addr); |
| 3232 | /* Check whether the relative offset overflows desc->imm */ |
| 3233 | if ((unsigned long)(s32)call_imm != call_imm) { |
| 3234 | verbose(env, "address of kernel function %s is out of range\n", |
| 3235 | func_name); |
| 3236 | return -EINVAL; |
| 3237 | } |
| 3238 | } |
| 3239 | |
| 3240 | if (bpf_dev_bound_kfunc_id(func_id)) { |
| 3241 | err = bpf_dev_bound_kfunc_check(&env->log, prog_aux); |
| 3242 | if (err) |
| 3243 | return err; |
| 3244 | } |
| 3245 | |
| 3246 | desc = &tab->descs[tab->nr_descs++]; |
| 3247 | desc->func_id = func_id; |
| 3248 | desc->imm = call_imm; |
| 3249 | desc->offset = offset; |
| 3250 | desc->addr = addr; |
| 3251 | err = btf_distill_func_proto(&env->log, desc_btf, |
| 3252 | func_proto, func_name, |
| 3253 | &desc->func_model); |
| 3254 | if (!err) |
| 3255 | sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), |
| 3256 | kfunc_desc_cmp_by_id_off, NULL); |
| 3257 | return err; |
| 3258 | } |
| 3259 | |
| 3260 | static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b) |
| 3261 | { |
| 3262 | const struct bpf_kfunc_desc *d0 = a; |
| 3263 | const struct bpf_kfunc_desc *d1 = b; |
| 3264 | |
| 3265 | if (d0->imm != d1->imm) |
| 3266 | return d0->imm < d1->imm ? -1 : 1; |
| 3267 | if (d0->offset != d1->offset) |
| 3268 | return d0->offset < d1->offset ? -1 : 1; |
| 3269 | return 0; |
| 3270 | } |
| 3271 | |
| 3272 | static void sort_kfunc_descs_by_imm_off(struct bpf_prog *prog) |
| 3273 | { |
| 3274 | struct bpf_kfunc_desc_tab *tab; |
| 3275 | |
| 3276 | tab = prog->aux->kfunc_tab; |
| 3277 | if (!tab) |
| 3278 | return; |
| 3279 | |
| 3280 | sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), |
| 3281 | kfunc_desc_cmp_by_imm_off, NULL); |
| 3282 | } |
| 3283 | |
| 3284 | bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) |
| 3285 | { |
| 3286 | return !!prog->aux->kfunc_tab; |
| 3287 | } |
| 3288 | |
| 3289 | const struct btf_func_model * |
| 3290 | bpf_jit_find_kfunc_model(const struct bpf_prog *prog, |
| 3291 | const struct bpf_insn *insn) |
| 3292 | { |
| 3293 | const struct bpf_kfunc_desc desc = { |
| 3294 | .imm = insn->imm, |
| 3295 | .offset = insn->off, |
| 3296 | }; |
| 3297 | const struct bpf_kfunc_desc *res; |
| 3298 | struct bpf_kfunc_desc_tab *tab; |
| 3299 | |
| 3300 | tab = prog->aux->kfunc_tab; |
| 3301 | res = bsearch(&desc, tab->descs, tab->nr_descs, |
| 3302 | sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm_off); |
| 3303 | |
| 3304 | return res ? &res->func_model : NULL; |
| 3305 | } |
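| | |
| | /* Sketch of the JIT-side use (arch code; local names are illustrative): |
| | * |
| | * const struct btf_func_model *m; |
| | * |
| | * m = bpf_jit_find_kfunc_model(prog, insn); |
| | * if (!m) |
| | * return -EINVAL; |
| | * // m->nr_args and m->arg_size[] drive argument marshalling |
| | * |
| | * Since the lookup keys on insn->imm/insn->off rather than func_id, it only |
| | * works after sort_kfunc_descs_by_imm_off() has re-sorted the table. |
| | */ |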
| 3306 | |
| 3307 | static int add_kfunc_in_insns(struct bpf_verifier_env *env, |
| 3308 | struct bpf_insn *insn, int cnt) |
| 3309 | { |
| 3310 | int i, ret; |
| 3311 | |
| 3312 | for (i = 0; i < cnt; i++, insn++) { |
| 3313 | if (bpf_pseudo_kfunc_call(insn)) { |
| 3314 | ret = add_kfunc_call(env, insn->imm, insn->off); |
| 3315 | if (ret < 0) |
| 3316 | return ret; |
| 3317 | } |
| 3318 | } |
| 3319 | return 0; |
| 3320 | } |
| 3321 | |
| 3322 | static int add_subprog_and_kfunc(struct bpf_verifier_env *env) |
| 3323 | { |
| 3324 | struct bpf_subprog_info *subprog = env->subprog_info; |
| 3325 | int i, ret, insn_cnt = env->prog->len, ex_cb_insn; |
| 3326 | struct bpf_insn *insn = env->prog->insnsi; |
| 3327 | |
| 3328 | /* Add entry function. */ |
| 3329 | ret = add_subprog(env, 0); |
| 3330 | if (ret) |
| 3331 | return ret; |
| 3332 | |
| 3333 | for (i = 0; i < insn_cnt; i++, insn++) { |
| 3334 | if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) && |
| 3335 | !bpf_pseudo_kfunc_call(insn)) |
| 3336 | continue; |
| 3337 | |
| 3338 | if (!env->bpf_capable) { |
| 3339 | verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n"); |
| 3340 | return -EPERM; |
| 3341 | } |
| 3342 | |
| 3343 | if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn)) |
| 3344 | ret = add_subprog(env, i + insn->imm + 1); |
| 3345 | else |
| 3346 | ret = add_kfunc_call(env, insn->imm, insn->off); |
| 3347 | |
| 3348 | if (ret < 0) |
| 3349 | return ret; |
| 3350 | } |
| 3351 | |
| 3352 | ret = bpf_find_exception_callback_insn_off(env); |
| 3353 | if (ret < 0) |
| 3354 | return ret; |
| 3355 | ex_cb_insn = ret; |
| 3356 | |
| 3357 | /* If ex_cb_insn > 0, this means that the main program has a subprog |
| 3358 | * marked using BTF decl tag to serve as the exception callback. |
| 3359 | */ |
| 3360 | if (ex_cb_insn) { |
| 3361 | ret = add_subprog(env, ex_cb_insn); |
| 3362 | if (ret < 0) |
| 3363 | return ret; |
| 3364 | for (i = 1; i < env->subprog_cnt; i++) { |
| 3365 | if (env->subprog_info[i].start != ex_cb_insn) |
| 3366 | continue; |
| 3367 | env->exception_callback_subprog = i; |
| 3368 | mark_subprog_exc_cb(env, i); |
| 3369 | break; |
| 3370 | } |
| 3371 | } |
| 3372 | |
| 3373 | /* Add a fake 'exit' subprog to simplify subprog iteration |
| 3374 | * logic. 'subprog_cnt' should not be increased. |
| 3375 | */ |
| 3376 | subprog[env->subprog_cnt].start = insn_cnt; |
| 3377 | |
| 3378 | if (env->log.level & BPF_LOG_LEVEL2) |
| 3379 | for (i = 0; i < env->subprog_cnt; i++) |
| 3380 | verbose(env, "func#%d @%d\n", i, subprog[i].start); |
| 3381 | |
| 3382 | return 0; |
| 3383 | } |
| 3384 | |
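| | /* For the 32-bit BPF_JMP32 | BPF_JA ("gotol") instruction the jump |
| | * displacement lives in insn->imm; every other jump uses the 16-bit |
| | * insn->off field. |
| | */ |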
| 3385 | static int jmp_offset(struct bpf_insn *insn) |
| 3386 | { |
| 3387 | u8 code = insn->code; |
| 3388 | |
| 3389 | if (code == (BPF_JMP32 | BPF_JA)) |
| 3390 | return insn->imm; |
| 3391 | return insn->off; |
| 3392 | } |
| 3393 | |
| 3394 | static int check_subprogs(struct bpf_verifier_env *env) |
| 3395 | { |
| 3396 | int i, subprog_start, subprog_end, off, cur_subprog = 0; |
| 3397 | struct bpf_subprog_info *subprog = env->subprog_info; |
| 3398 | struct bpf_insn *insn = env->prog->insnsi; |
| 3399 | int insn_cnt = env->prog->len; |
| 3400 | |
| 3401 | /* now check that all jumps are within the same subprog */ |
| 3402 | subprog_start = subprog[cur_subprog].start; |
| 3403 | subprog_end = subprog[cur_subprog + 1].start; |
| 3404 | for (i = 0; i < insn_cnt; i++) { |
| 3405 | u8 code = insn[i].code; |
| 3406 | |
| 3407 | if (code == (BPF_JMP | BPF_CALL) && |
| 3408 | insn[i].src_reg == 0 && |
| 3409 | insn[i].imm == BPF_FUNC_tail_call) { |
| 3410 | subprog[cur_subprog].has_tail_call = true; |
| 3411 | subprog[cur_subprog].tail_call_reachable = true; |
| 3412 | } |
| 3413 | if (BPF_CLASS(code) == BPF_LD && |
| 3414 | (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND)) |
| 3415 | subprog[cur_subprog].has_ld_abs = true; |
| 3416 | if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) |
| 3417 | goto next; |
| 3418 | if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) |
| 3419 | goto next; |
| 3420 | off = i + jmp_offset(&insn[i]) + 1; |
| 3421 | if (off < subprog_start || off >= subprog_end) { |
| 3422 | verbose(env, "jump out of range from insn %d to %d\n", i, off); |
| 3423 | return -EINVAL; |
| 3424 | } |
| 3425 | next: |
| 3426 | if (i == subprog_end - 1) { |
| 3427 | /* to avoid fall-through from one subprog into another, |
| 3428 | * the last insn of the subprog should be either an exit, |
| 3429 | * an unconditional jump back, or a bpf_throw call |
| 3430 | */ |
| 3431 | if (code != (BPF_JMP | BPF_EXIT) && |
| 3432 | code != (BPF_JMP32 | BPF_JA) && |
| 3433 | code != (BPF_JMP | BPF_JA)) { |
| 3434 | verbose(env, "last insn is not an exit or jmp\n"); |
| 3435 | return -EINVAL; |
| 3436 | } |
| 3437 | subprog_start = subprog_end; |
| 3438 | cur_subprog++; |
| 3439 | if (cur_subprog < env->subprog_cnt) |
| 3440 | subprog_end = subprog[cur_subprog + 1].start; |
| 3441 | } |
| 3442 | } |
| 3443 | return 0; |
| 3444 | } |
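| | |
| | /* Example of what the pass above rejects: with subprog 0 covering insns |
| | * 0..4 and subprog 1 covering insns 5..9, a conditional jump at insn 3 |
| | * targeting insn 6 is "jump out of range", and insn 4 must be an exit or |
| | * an unconditional jump so execution cannot fall through into subprog 1. |
| | */ |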
| 3445 | |
| 3446 | /* Parentage chain of this register (or stack slot) should take care of all |
| 3447 | * issues like callee-saved registers, stack slot allocation time, etc. |
| 3448 | */ |
| 3449 | static int mark_reg_read(struct bpf_verifier_env *env, |
| 3450 | const struct bpf_reg_state *state, |
| 3451 | struct bpf_reg_state *parent, u8 flag) |
| 3452 | { |
| 3453 | bool writes = parent == state->parent; /* Observe write marks */ |
| 3454 | int cnt = 0; |
| 3455 | |
| 3456 | while (parent) { |
| 3457 | /* if read wasn't screened by an earlier write ... */ |
| 3458 | if (writes && state->live & REG_LIVE_WRITTEN) |
| 3459 | break; |
| 3460 | if (verifier_bug_if(parent->live & REG_LIVE_DONE, env, |
| 3461 | "type %s var_off %lld off %d", |
| 3462 | reg_type_str(env, parent->type), |
| 3463 | parent->var_off.value, parent->off)) |
| 3464 | return -EFAULT; |
| 3465 | /* The first condition is more likely to be true than the |
| 3466 | * second, so check it first. |
| 3467 | */ |
| 3468 | if ((parent->live & REG_LIVE_READ) == flag || |
| 3469 | parent->live & REG_LIVE_READ64) |
| 3470 | /* The parentage chain never changes and |
| 3471 | * this parent was already marked as LIVE_READ. |
| 3472 | * There is no need to keep walking the chain again and |
| 3473 | * keep re-marking all parents as LIVE_READ. |
| 3474 | * This case happens when the same register is read |
| 3475 | * multiple times without writes into it in-between. |
| 3476 | * Also, if parent has the stronger REG_LIVE_READ64 set, |
| 3477 | * then no need to set the weak REG_LIVE_READ32. |
| 3478 | */ |
| 3479 | break; |
| 3480 | /* ... then we depend on parent's value */ |
| 3481 | parent->live |= flag; |
| 3482 | /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */ |
| 3483 | if (flag == REG_LIVE_READ64) |
| 3484 | parent->live &= ~REG_LIVE_READ32; |
| 3485 | state = parent; |
| 3486 | parent = state->parent; |
| 3487 | writes = true; |
| 3488 | cnt++; |
| 3489 | } |
| 3490 | |
| 3491 | if (env->longest_mark_read_walk < cnt) |
| 3492 | env->longest_mark_read_walk = cnt; |
| 3493 | return 0; |
| 3494 | } |
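| | |
| | /* Rough example: if state C descends from B, which descends from A, a read |
| | * of r6 in C walks C->B->A via the parent pointers, ORing REG_LIVE_READ32 |
| | * or REG_LIVE_READ64 into each parent's copy of r6 until the walk is |
| | * screened by a REG_LIVE_WRITTEN mark; these liveness marks are what state |
| | * pruning later uses to tell which parent registers actually mattered. |
| | */ |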
| 3495 | |
| 3496 | static int mark_stack_slot_obj_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg, |
| 3497 | int spi, int nr_slots) |
| 3498 | { |
| 3499 | struct bpf_func_state *state = func(env, reg); |
| 3500 | int err, i; |
| 3501 | |
| 3502 | for (i = 0; i < nr_slots; i++) { |
| 3503 | struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr; |
| 3504 | |
| 3505 | err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64); |
| 3506 | if (err) |
| 3507 | return err; |
| 3508 | |
| 3509 | mark_stack_slot_scratched(env, spi - i); |
| 3510 | } |
| 3511 | return 0; |
| 3512 | } |
| 3513 | |
| 3514 | static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 3515 | { |
| 3516 | int spi; |
| 3517 | |
| 3518 | /* For CONST_PTR_TO_DYNPTR, it must have already been done by |
| 3519 | * check_reg_arg in check_helper_call and mark_btf_func_reg_size in |
| 3520 | * check_kfunc_call. |
| 3521 | */ |
| 3522 | if (reg->type == CONST_PTR_TO_DYNPTR) |
| 3523 | return 0; |
| 3524 | spi = dynptr_get_spi(env, reg); |
| 3525 | if (spi < 0) |
| 3526 | return spi; |
| 3527 | /* Caller ensures dynptr is valid and initialized, which means spi is in |
| 3528 | * bounds and spi is the first dynptr slot. Simply mark stack slot as |
| 3529 | * read. |
| 3530 | */ |
| 3531 | return mark_stack_slot_obj_read(env, reg, spi, BPF_DYNPTR_NR_SLOTS); |
| 3532 | } |
| 3533 | |
| 3534 | static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg, |
| 3535 | int spi, int nr_slots) |
| 3536 | { |
| 3537 | return mark_stack_slot_obj_read(env, reg, spi, nr_slots); |
| 3538 | } |
| 3539 | |
| 3540 | static int mark_irq_flag_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 3541 | { |
| 3542 | int spi; |
| 3543 | |
| 3544 | spi = irq_flag_get_spi(env, reg); |
| 3545 | if (spi < 0) |
| 3546 | return spi; |
| 3547 | return mark_stack_slot_obj_read(env, reg, spi, 1); |
| 3548 | } |
| 3549 | |
| 3550 | /* This function is supposed to be used by the following 32-bit optimization |
| 3551 | * code only. It returns TRUE if the source or destination register operates |
| 3552 | * on 64 bits, otherwise it returns FALSE. |
| 3553 | */ |
| 3554 | static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, |
| 3555 | u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) |
| 3556 | { |
| 3557 | u8 code, class, op; |
| 3558 | |
| 3559 | code = insn->code; |
| 3560 | class = BPF_CLASS(code); |
| 3561 | op = BPF_OP(code); |
| 3562 | if (class == BPF_JMP) { |
| 3563 | /* BPF_EXIT for "main" will reach here. Return TRUE |
| 3564 | * conservatively. |
| 3565 | */ |
| 3566 | if (op == BPF_EXIT) |
| 3567 | return true; |
| 3568 | if (op == BPF_CALL) { |
| 3569 | /* BPF-to-BPF calls reach here because caller-saved clobbers |
| 3570 | * are marked with DST_OP_NO_MARK; we don't care about the |
| 3571 | * register def because those registers are already marked |
| 3572 | * as NOT_INIT. |
| 3573 | */ |
| 3574 | if (insn->src_reg == BPF_PSEUDO_CALL) |
| 3575 | return false; |
| 3576 | /* Helper call will reach here because of arg type |
| 3577 | * check, conservatively return TRUE. |
| 3578 | */ |
| 3579 | if (t == SRC_OP) |
| 3580 | return true; |
| 3581 | |
| 3582 | return false; |
| 3583 | } |
| 3584 | } |
| 3585 | |
| 3586 | if (class == BPF_ALU64 && op == BPF_END && (insn->imm == 16 || insn->imm == 32)) |
| 3587 | return false; |
| 3588 | |
| 3589 | if (class == BPF_ALU64 || class == BPF_JMP || |
| 3590 | (class == BPF_ALU && op == BPF_END && insn->imm == 64)) |
| 3591 | return true; |
| 3592 | |
| 3593 | if (class == BPF_ALU || class == BPF_JMP32) |
| 3594 | return false; |
| 3595 | |
| 3596 | if (class == BPF_LDX) { |
| 3597 | if (t != SRC_OP) |
| 3598 | return BPF_SIZE(code) == BPF_DW || BPF_MODE(code) == BPF_MEMSX; |
| 3599 | /* LDX source must be ptr. */ |
| 3600 | return true; |
| 3601 | } |
| 3602 | |
| 3603 | if (class == BPF_STX) { |
| 3604 | /* BPF_STX (including atomic variants) has one or more source |
| 3605 | * operands, one of which is a ptr. Check whether the caller is |
| 3606 | * asking about it. |
| 3607 | */ |
| 3608 | if (t == SRC_OP && reg->type != SCALAR_VALUE) |
| 3609 | return true; |
| 3610 | return BPF_SIZE(code) == BPF_DW; |
| 3611 | } |
| 3612 | |
| 3613 | if (class == BPF_LD) { |
| 3614 | u8 mode = BPF_MODE(code); |
| 3615 | |
| 3616 | /* LD_IMM64 */ |
| 3617 | if (mode == BPF_IMM) |
| 3618 | return true; |
| 3619 | |
| 3620 | /* Both LD_IND and LD_ABS return 32-bit data. */ |
| 3621 | if (t != SRC_OP) |
| 3622 | return false; |
| 3623 | |
| 3624 | /* Implicit ctx ptr. */ |
| 3625 | if (regno == BPF_REG_6) |
| 3626 | return true; |
| 3627 | |
| 3628 | /* Explicit source could be any width. */ |
| 3629 | return true; |
| 3630 | } |
| 3631 | |
| 3632 | if (class == BPF_ST) |
| 3633 | /* The only source register for BPF_ST is a ptr. */ |
| 3634 | return true; |
| 3635 | |
| 3636 | /* Conservatively return true by default. */ |
| 3637 | return true; |
| 3638 | } |
| 3639 | |
| 3640 | /* Return the regno defined by the insn, or -1. */ |
| 3641 | static int insn_def_regno(const struct bpf_insn *insn) |
| 3642 | { |
| 3643 | switch (BPF_CLASS(insn->code)) { |
| 3644 | case BPF_JMP: |
| 3645 | case BPF_JMP32: |
| 3646 | case BPF_ST: |
| 3647 | return -1; |
| 3648 | case BPF_STX: |
| 3649 | if (BPF_MODE(insn->code) == BPF_ATOMIC || |
| 3650 | BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) { |
| 3651 | if (insn->imm == BPF_CMPXCHG) |
| 3652 | return BPF_REG_0; |
| 3653 | else if (insn->imm == BPF_LOAD_ACQ) |
| 3654 | return insn->dst_reg; |
| 3655 | else if (insn->imm & BPF_FETCH) |
| 3656 | return insn->src_reg; |
| 3657 | } |
| 3658 | return -1; |
| 3659 | default: |
| 3660 | return insn->dst_reg; |
| 3661 | } |
| 3662 | } |
| 3663 | |
| 3664 | /* Return TRUE if INSN has defined any 32-bit value explicitly. */ |
| 3665 | static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn) |
| 3666 | { |
| 3667 | int dst_reg = insn_def_regno(insn); |
| 3668 | |
| 3669 | if (dst_reg == -1) |
| 3670 | return false; |
| 3671 | |
| 3672 | return !is_reg64(env, insn, dst_reg, NULL, DST_OP); |
| 3673 | } |
| 3674 | |
| 3675 | static void mark_insn_zext(struct bpf_verifier_env *env, |
| 3676 | struct bpf_reg_state *reg) |
| 3677 | { |
| 3678 | s32 def_idx = reg->subreg_def; |
| 3679 | |
| 3680 | if (def_idx == DEF_NOT_SUBREG) |
| 3681 | return; |
| 3682 | |
| 3683 | env->insn_aux_data[def_idx - 1].zext_dst = true; |
| 3684 | /* The dst will be zero extended, so won't be sub-register anymore. */ |
| 3685 | reg->subreg_def = DEF_NOT_SUBREG; |
| 3686 | } |
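| | |
| | /* Illustrative example: for the sequence |
| | * |
| | * w1 = w2; // 32-bit MOV, defines a subregister |
| | * r3 = r1; // later 64-bit read of r1 |
| | * |
| | * __check_reg_arg() below records (insn_idx + 1) of the 32-bit MOV in r1's |
| | * subreg_def, and the 64-bit read then calls mark_insn_zext(), setting |
| | * zext_dst on that MOV's insn_aux_data so that an explicit zero-extension |
| | * can be patched in for JITs whose 32-bit ops don't clear the upper half. |
| | */ |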
| 3687 | |
| 3688 | static int __check_reg_arg(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno, |
| 3689 | enum reg_arg_type t) |
| 3690 | { |
| 3691 | struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; |
| 3692 | struct bpf_reg_state *reg; |
| 3693 | bool rw64; |
| 3694 | |
| 3695 | if (regno >= MAX_BPF_REG) { |
| 3696 | verbose(env, "R%d is invalid\n", regno); |
| 3697 | return -EINVAL; |
| 3698 | } |
| 3699 | |
| 3700 | mark_reg_scratched(env, regno); |
| 3701 | |
| 3702 | reg = ®s[regno]; |
| 3703 | rw64 = is_reg64(env, insn, regno, reg, t); |
| 3704 | if (t == SRC_OP) { |
| 3705 | /* check whether register used as source operand can be read */ |
| 3706 | if (reg->type == NOT_INIT) { |
| 3707 | verbose(env, "R%d !read_ok\n", regno); |
| 3708 | return -EACCES; |
| 3709 | } |
| 3710 | /* We don't need to worry about FP liveness because it's read-only */ |
| 3711 | if (regno == BPF_REG_FP) |
| 3712 | return 0; |
| 3713 | |
| 3714 | if (rw64) |
| 3715 | mark_insn_zext(env, reg); |
| 3716 | |
| 3717 | return mark_reg_read(env, reg, reg->parent, |
| 3718 | rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32); |
| 3719 | } else { |
| 3720 | /* check whether register used as dest operand can be written to */ |
| 3721 | if (regno == BPF_REG_FP) { |
| 3722 | verbose(env, "frame pointer is read only\n"); |
| 3723 | return -EACCES; |
| 3724 | } |
| 3725 | reg->live |= REG_LIVE_WRITTEN; |
| 3726 | reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; |
| 3727 | if (t == DST_OP) |
| 3728 | mark_reg_unknown(env, regs, regno); |
| 3729 | } |
| 3730 | return 0; |
| 3731 | } |
| 3732 | |
| 3733 | static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, |
| 3734 | enum reg_arg_type t) |
| 3735 | { |
| 3736 | struct bpf_verifier_state *vstate = env->cur_state; |
| 3737 | struct bpf_func_state *state = vstate->frame[vstate->curframe]; |
| 3738 | |
| 3739 | return __check_reg_arg(env, state->regs, regno, t); |
| 3740 | } |
| 3741 | |
| 3742 | static int insn_stack_access_flags(int frameno, int spi) |
| 3743 | { |
| 3744 | return INSN_F_STACK_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno; |
| 3745 | } |
| 3746 | |
| 3747 | static int insn_stack_access_spi(int insn_flags) |
| 3748 | { |
| 3749 | return (insn_flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK; |
| 3750 | } |
| 3751 | |
| 3752 | static int insn_stack_access_frameno(int insn_flags) |
| 3753 | { |
| 3754 | return insn_flags & INSN_F_FRAMENO_MASK; |
| 3755 | } |
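| | |
| | /* Round-trip example (using the INSN_F_* layout from bpf_verifier.h): |
| | * |
| | * int flags = insn_stack_access_flags(1, 3); |
| | * |
| | * insn_stack_access_spi(flags) == 3 |
| | * insn_stack_access_frameno(flags) == 1 |
| | * |
| | * i.e. the frame number sits in the low bits, the spi above it, and |
| | * INSN_F_STACK_ACCESS flags that the history entry describes a stack access. |
| | */ |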
| 3756 | |
| 3757 | static void mark_jmp_point(struct bpf_verifier_env *env, int idx) |
| 3758 | { |
| 3759 | env->insn_aux_data[idx].jmp_point = true; |
| 3760 | } |
| 3761 | |
| 3762 | static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx) |
| 3763 | { |
| 3764 | return env->insn_aux_data[insn_idx].jmp_point; |
| 3765 | } |
| 3766 | |
| 3767 | #define LR_FRAMENO_BITS 3 |
| 3768 | #define LR_SPI_BITS 6 |
| 3769 | #define LR_ENTRY_BITS (LR_SPI_BITS + LR_FRAMENO_BITS + 1) |
| 3770 | #define LR_SIZE_BITS 4 |
| 3771 | #define LR_FRAMENO_MASK ((1ull << LR_FRAMENO_BITS) - 1) |
| 3772 | #define LR_SPI_MASK ((1ull << LR_SPI_BITS) - 1) |
| 3773 | #define LR_SIZE_MASK ((1ull << LR_SIZE_BITS) - 1) |
| 3774 | #define LR_SPI_OFF LR_FRAMENO_BITS |
| 3775 | #define LR_IS_REG_OFF (LR_SPI_BITS + LR_FRAMENO_BITS) |
| 3776 | #define LINKED_REGS_MAX 6 |
| 3777 | |
| 3778 | struct linked_reg { |
| 3779 | u8 frameno; |
| 3780 | union { |
| 3781 | u8 spi; |
| 3782 | u8 regno; |
| 3783 | }; |
| 3784 | bool is_reg; |
| 3785 | }; |
| 3786 | |
| 3787 | struct linked_regs { |
| 3788 | int cnt; |
| 3789 | struct linked_reg entries[LINKED_REGS_MAX]; |
| 3790 | }; |
| 3791 | |
| 3792 | static struct linked_reg *linked_regs_push(struct linked_regs *s) |
| 3793 | { |
| 3794 | if (s->cnt < LINKED_REGS_MAX) |
| 3795 | return &s->entries[s->cnt++]; |
| 3796 | |
| 3797 | return NULL; |
| 3798 | } |
| 3799 | |
| 3800 | /* Use u64 as a vector of 6 10-bit values, with the lowest 4 bits tracking |
| 3801 | * the number of elements currently on the stack. |
| 3802 | * Pack one history entry for linked registers as 10 bits in the following format: |
| 3803 | * - 3-bits frameno |
| 3804 | * - 6-bits spi_or_reg |
| 3805 | * - 1-bit is_reg |
| 3806 | */ |
| 3807 | static u64 linked_regs_pack(struct linked_regs *s) |
| 3808 | { |
| 3809 | u64 val = 0; |
| 3810 | int i; |
| 3811 | |
| 3812 | for (i = 0; i < s->cnt; ++i) { |
| 3813 | struct linked_reg *e = &s->entries[i]; |
| 3814 | u64 tmp = 0; |
| 3815 | |
| 3816 | tmp |= e->frameno; |
| 3817 | tmp |= e->spi << LR_SPI_OFF; |
| 3818 | tmp |= (e->is_reg ? 1 : 0) << LR_IS_REG_OFF; |
| 3819 | |
| 3820 | val <<= LR_ENTRY_BITS; |
| 3821 | val |= tmp; |
| 3822 | } |
| 3823 | val <<= LR_SIZE_BITS; |
| 3824 | val |= s->cnt; |
| 3825 | return val; |
| 3826 | } |
| 3827 | |
| 3828 | static void linked_regs_unpack(u64 val, struct linked_regs *s) |
| 3829 | { |
| 3830 | int i; |
| 3831 | |
| 3832 | s->cnt = val & LR_SIZE_MASK; |
| 3833 | val >>= LR_SIZE_BITS; |
| 3834 | |
| 3835 | for (i = 0; i < s->cnt; ++i) { |
| 3836 | struct linked_reg *e = &s->entries[i]; |
| 3837 | |
| 3838 | e->frameno = val & LR_FRAMENO_MASK; |
| 3839 | e->spi = (val >> LR_SPI_OFF) & LR_SPI_MASK; |
| 3840 | e->is_reg = (val >> LR_IS_REG_OFF) & 0x1; |
| 3841 | val >>= LR_ENTRY_BITS; |
| 3842 | } |
| 3843 | } |
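| | |
| | /* Worked example: a single entry { frameno = 1, regno = 5, is_reg = true } |
| | * packs as tmp = 1 | (5 << LR_SPI_OFF) | (1 << LR_IS_REG_OFF) = 0x229, and |
| | * with cnt == 1 the result is (0x229 << LR_SIZE_BITS) | 1 = 0x2291; |
| | * linked_regs_unpack(0x2291, &s) restores the same count and entry. |
| | */ |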
| 3844 | |
| 3845 | /* for any branch, call, exit record the history of jmps in the given state */ |
| 3846 | static int push_insn_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur, |
| 3847 | int insn_flags, u64 linked_regs) |
| 3848 | { |
| 3849 | struct bpf_insn_hist_entry *p; |
| 3850 | size_t alloc_size; |
| 3851 | |
| 3852 | /* combine instruction flags if we already recorded this instruction */ |
| 3853 | if (env->cur_hist_ent) { |
| 3854 | /* atomic instructions push insn_flags twice, for READ and |
| 3855 | * WRITE sides, but they should agree on stack slot |
| 3856 | */ |
| 3857 | verifier_bug_if((env->cur_hist_ent->flags & insn_flags) && |
| 3858 | (env->cur_hist_ent->flags & insn_flags) != insn_flags, |
| 3859 | env, "insn history: insn_idx %d cur flags %x new flags %x", |
| 3860 | env->insn_idx, env->cur_hist_ent->flags, insn_flags); |
| 3861 | env->cur_hist_ent->flags |= insn_flags; |
| 3862 | verifier_bug_if(env->cur_hist_ent->linked_regs != 0, env, |
| 3863 | "insn history: insn_idx %d linked_regs: %#llx", |
| 3864 | env->insn_idx, env->cur_hist_ent->linked_regs); |
| 3865 | env->cur_hist_ent->linked_regs = linked_regs; |
| 3866 | return 0; |
| 3867 | } |
| 3868 | |
| 3869 | if (cur->insn_hist_end + 1 > env->insn_hist_cap) { |
| 3870 | alloc_size = size_mul(cur->insn_hist_end + 1, sizeof(*p)); |
| 3871 | p = kvrealloc(env->insn_hist, alloc_size, GFP_USER); |
| 3872 | if (!p) |
| 3873 | return -ENOMEM; |
| 3874 | env->insn_hist = p; |
| 3875 | env->insn_hist_cap = alloc_size / sizeof(*p); |
| 3876 | } |
| 3877 | |
| 3878 | p = &env->insn_hist[cur->insn_hist_end]; |
| 3879 | p->idx = env->insn_idx; |
| 3880 | p->prev_idx = env->prev_insn_idx; |
| 3881 | p->flags = insn_flags; |
| 3882 | p->linked_regs = linked_regs; |
| 3883 | |
| 3884 | cur->insn_hist_end++; |
| 3885 | env->cur_hist_ent = p; |
| 3886 | |
| 3887 | return 0; |
| 3888 | } |
| 3889 | |
| 3890 | static struct bpf_insn_hist_entry *get_insn_hist_entry(struct bpf_verifier_env *env, |
| 3891 | u32 hist_start, u32 hist_end, int insn_idx) |
| 3892 | { |
| 3893 | if (hist_end > hist_start && env->insn_hist[hist_end - 1].idx == insn_idx) |
| 3894 | return &env->insn_hist[hist_end - 1]; |
| 3895 | return NULL; |
| 3896 | } |
| 3897 | |
| 3898 | /* Backtrack one insn at a time. If idx is not at the top of recorded |
| 3899 | * history then previous instruction came from straight line execution. |
| 3900 | * Return -ENOENT if we exhausted all instructions within given state. |
| 3901 | * |
| 3902 | * It's legal to have a bit of looping with the same starting and ending |
| 3903 | * insn index within the same state, e.g.: 3->4->5->3, so just because current |
| 3904 | * instruction index is the same as state's first_idx doesn't mean we are |
| 3905 | * done. If there is still some jump history left, we should keep going. We |
| 3906 | * need to take into account that we might have a jump history between given |
| 3907 | * state's parent and itself, due to checkpointing. In this case, we'll have |
| 3908 | * history entry recording a jump from last instruction of parent state and |
| 3909 | * first instruction of given state. |
| 3910 | */ |
| 3911 | static int get_prev_insn_idx(const struct bpf_verifier_env *env, |
| 3912 | struct bpf_verifier_state *st, |
| 3913 | int insn_idx, u32 hist_start, u32 *hist_endp) |
| 3914 | { |
| 3915 | u32 hist_end = *hist_endp; |
| 3916 | u32 cnt = hist_end - hist_start; |
| 3917 | |
| 3918 | if (insn_idx == st->first_insn_idx) { |
| 3919 | if (cnt == 0) |
| 3920 | return -ENOENT; |
| 3921 | if (cnt == 1 && env->insn_hist[hist_start].idx == insn_idx) |
| 3922 | return -ENOENT; |
| 3923 | } |
| 3924 | |
| 3925 | if (cnt && env->insn_hist[hist_end - 1].idx == insn_idx) { |
| 3926 | (*hist_endp)--; |
| 3927 | return env->insn_hist[hist_end - 1].prev_idx; |
| 3928 | } else { |
| 3929 | return insn_idx - 1; |
| 3930 | } |
| 3931 | } |
| 3932 | |
| 3933 | static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn) |
| 3934 | { |
| 3935 | const struct btf_type *func; |
| 3936 | struct btf *desc_btf; |
| 3937 | |
| 3938 | if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) |
| 3939 | return NULL; |
| 3940 | |
| 3941 | desc_btf = find_kfunc_desc_btf(data, insn->off); |
| 3942 | if (IS_ERR(desc_btf)) |
| 3943 | return "<error>"; |
| 3944 | |
| 3945 | func = btf_type_by_id(desc_btf, insn->imm); |
| 3946 | return btf_name_by_offset(desc_btf, func->name_off); |
| 3947 | } |
| 3948 | |
| 3949 | static void verbose_insn(struct bpf_verifier_env *env, struct bpf_insn *insn) |
| 3950 | { |
| 3951 | const struct bpf_insn_cbs cbs = { |
| 3952 | .cb_call = disasm_kfunc_name, |
| 3953 | .cb_print = verbose, |
| 3954 | .private_data = env, |
| 3955 | }; |
| 3956 | |
| 3957 | print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); |
| 3958 | } |
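| | |
| | /* The bt_* helpers below operate on struct backtrack_state: one 32-bit |
| | * register mask and one 64-bit stack-slot mask per call frame, recording |
| | * which registers and spilled slots still need a precision mark while |
| | * backtrack_insn() walks instructions in reverse. |
| | */ |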
| 3959 | |
| 3960 | static inline void bt_init(struct backtrack_state *bt, u32 frame) |
| 3961 | { |
| 3962 | bt->frame = frame; |
| 3963 | } |
| 3964 | |
| 3965 | static inline void bt_reset(struct backtrack_state *bt) |
| 3966 | { |
| 3967 | struct bpf_verifier_env *env = bt->env; |
| 3968 | |
| 3969 | memset(bt, 0, sizeof(*bt)); |
| 3970 | bt->env = env; |
| 3971 | } |
| 3972 | |
| 3973 | static inline u32 bt_empty(struct backtrack_state *bt) |
| 3974 | { |
| 3975 | u64 mask = 0; |
| 3976 | int i; |
| 3977 | |
| 3978 | for (i = 0; i <= bt->frame; i++) |
| 3979 | mask |= bt->reg_masks[i] | bt->stack_masks[i]; |
| 3980 | |
| 3981 | return mask == 0; |
| 3982 | } |
| 3983 | |
| 3984 | static inline int bt_subprog_enter(struct backtrack_state *bt) |
| 3985 | { |
| 3986 | if (bt->frame == MAX_CALL_FRAMES - 1) { |
| 3987 | verifier_bug(bt->env, "subprog enter from frame %d", bt->frame); |
| 3988 | return -EFAULT; |
| 3989 | } |
| 3990 | bt->frame++; |
| 3991 | return 0; |
| 3992 | } |
| 3993 | |
| 3994 | static inline int bt_subprog_exit(struct backtrack_state *bt) |
| 3995 | { |
| 3996 | if (bt->frame == 0) { |
| 3997 | verifier_bug(bt->env, "subprog exit from frame 0"); |
| 3998 | return -EFAULT; |
| 3999 | } |
| 4000 | bt->frame--; |
| 4001 | return 0; |
| 4002 | } |
| 4003 | |
| 4004 | static inline void bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg) |
| 4005 | { |
| 4006 | bt->reg_masks[frame] |= 1 << reg; |
| 4007 | } |
| 4008 | |
| 4009 | static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg) |
| 4010 | { |
| 4011 | bt->reg_masks[frame] &= ~(1 << reg); |
| 4012 | } |
| 4013 | |
| 4014 | static inline void bt_set_reg(struct backtrack_state *bt, u32 reg) |
| 4015 | { |
| 4016 | bt_set_frame_reg(bt, bt->frame, reg); |
| 4017 | } |
| 4018 | |
| 4019 | static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg) |
| 4020 | { |
| 4021 | bt_clear_frame_reg(bt, bt->frame, reg); |
| 4022 | } |
| 4023 | |
| 4024 | static inline void bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot) |
| 4025 | { |
| 4026 | bt->stack_masks[frame] |= 1ull << slot; |
| 4027 | } |
| 4028 | |
| 4029 | static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot) |
| 4030 | { |
| 4031 | bt->stack_masks[frame] &= ~(1ull << slot); |
| 4032 | } |
| 4033 | |
| 4034 | static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame) |
| 4035 | { |
| 4036 | return bt->reg_masks[frame]; |
| 4037 | } |
| 4038 | |
| 4039 | static inline u32 bt_reg_mask(struct backtrack_state *bt) |
| 4040 | { |
| 4041 | return bt->reg_masks[bt->frame]; |
| 4042 | } |
| 4043 | |
| 4044 | static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame) |
| 4045 | { |
| 4046 | return bt->stack_masks[frame]; |
| 4047 | } |
| 4048 | |
| 4049 | static inline u64 bt_stack_mask(struct backtrack_state *bt) |
| 4050 | { |
| 4051 | return bt->stack_masks[bt->frame]; |
| 4052 | } |
| 4053 | |
| 4054 | static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg) |
| 4055 | { |
| 4056 | return bt->reg_masks[bt->frame] & (1 << reg); |
| 4057 | } |
| 4058 | |
| 4059 | static inline bool bt_is_frame_reg_set(struct backtrack_state *bt, u32 frame, u32 reg) |
| 4060 | { |
| 4061 | return bt->reg_masks[frame] & (1 << reg); |
| 4062 | } |
| 4063 | |
| 4064 | static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot) |
| 4065 | { |
| 4066 | return bt->stack_masks[frame] & (1ull << slot); |
| 4067 | } |
| 4068 | |
| 4069 | /* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */ |
| 4070 | static void fmt_reg_mask(char *buf, ssize_t buf_sz, u32 reg_mask) |
| 4071 | { |
| 4072 | DECLARE_BITMAP(mask, 64); |
| 4073 | bool first = true; |
| 4074 | int i, n; |
| 4075 | |
| 4076 | buf[0] = '\0'; |
| 4077 | |
| 4078 | bitmap_from_u64(mask, reg_mask); |
| 4079 | for_each_set_bit(i, mask, 32) { |
| 4080 | n = snprintf(buf, buf_sz, "%sr%d", first ? "" : ",", i); |
| 4081 | first = false; |
| 4082 | buf += n; |
| 4083 | buf_sz -= n; |
| 4084 | if (buf_sz < 0) |
| 4085 | break; |
| 4086 | } |
| 4087 | } |
| 4088 | /* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */ |
| 4089 | static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask) |
| 4090 | { |
| 4091 | DECLARE_BITMAP(mask, 64); |
| 4092 | bool first = true; |
| 4093 | int i, n; |
| 4094 | |
| 4095 | buf[0] = '\0'; |
| 4096 | |
| 4097 | bitmap_from_u64(mask, stack_mask); |
| 4098 | for_each_set_bit(i, mask, 64) { |
| 4099 | n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8); |
| 4100 | first = false; |
| 4101 | buf += n; |
| 4102 | buf_sz -= n; |
| 4103 | if (buf_sz < 0) |
| 4104 | break; |
| 4105 | } |
| 4106 | } |
| 4107 | |
| 4108 | /* If any register R in hist->linked_regs is marked as precise in bt, |
| 4109 | * do bt_set_frame_{reg,slot}(bt, R) for all registers in hist->linked_regs. |
| 4110 | */ |
| 4111 | static void bt_sync_linked_regs(struct backtrack_state *bt, struct bpf_insn_hist_entry *hist) |
| 4112 | { |
| 4113 | struct linked_regs linked_regs; |
| 4114 | bool some_precise = false; |
| 4115 | int i; |
| 4116 | |
| 4117 | if (!hist || hist->linked_regs == 0) |
| 4118 | return; |
| 4119 | |
| 4120 | linked_regs_unpack(hist->linked_regs, &linked_regs); |
| 4121 | for (i = 0; i < linked_regs.cnt; ++i) { |
| 4122 | struct linked_reg *e = &linked_regs.entries[i]; |
| 4123 | |
| 4124 | if ((e->is_reg && bt_is_frame_reg_set(bt, e->frameno, e->regno)) || |
| 4125 | (!e->is_reg && bt_is_frame_slot_set(bt, e->frameno, e->spi))) { |
| 4126 | some_precise = true; |
| 4127 | break; |
| 4128 | } |
| 4129 | } |
| 4130 | |
| 4131 | if (!some_precise) |
| 4132 | return; |
| 4133 | |
| 4134 | for (i = 0; i < linked_regs.cnt; ++i) { |
| 4135 | struct linked_reg *e = &linked_regs.entries[i]; |
| 4136 | |
| 4137 | if (e->is_reg) |
| 4138 | bt_set_frame_reg(bt, e->frameno, e->regno); |
| 4139 | else |
| 4140 | bt_set_frame_slot(bt, e->frameno, e->spi); |
| 4141 | } |
| 4142 | } |
| 4143 | |
| 4144 | static bool calls_callback(struct bpf_verifier_env *env, int insn_idx); |
| 4145 | |
| 4146 | /* For given verifier state backtrack_insn() is called from the last insn to |
| 4147 | * the first insn. Its purpose is to compute a bitmask of registers and |
| 4148 | * stack slots that need precision in the parent verifier state. |
| 4149 | * |
| 4150 | * @idx is an index of the instruction we are currently processing; |
| 4151 | * @subseq_idx is an index of the subsequent instruction that: |
| 4152 | * - *would be* executed next, if jump history is viewed in forward order; |
| 4153 | * - *was* processed previously during backtracking. |
| 4154 | */ |
| 4155 | static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, |
| 4156 | struct bpf_insn_hist_entry *hist, struct backtrack_state *bt) |
| 4157 | { |
| 4158 | struct bpf_insn *insn = env->prog->insnsi + idx; |
| 4159 | u8 class = BPF_CLASS(insn->code); |
| 4160 | u8 opcode = BPF_OP(insn->code); |
| 4161 | u8 mode = BPF_MODE(insn->code); |
| 4162 | u32 dreg = insn->dst_reg; |
| 4163 | u32 sreg = insn->src_reg; |
| 4164 | u32 spi, i, fr; |
| 4165 | |
| 4166 | if (insn->code == 0) |
| 4167 | return 0; |
| 4168 | if (env->log.level & BPF_LOG_LEVEL2) { |
| 4169 | fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt)); |
| 4170 | verbose(env, "mark_precise: frame%d: regs=%s ", |
| 4171 | bt->frame, env->tmp_str_buf); |
| 4172 | fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt)); |
| 4173 | verbose(env, "stack=%s before ", env->tmp_str_buf); |
| 4174 | verbose(env, "%d: ", idx); |
| 4175 | verbose_insn(env, insn); |
| 4176 | } |
| 4177 | |
| 4178 | /* If there is a history record that some registers gained range at this insn, |
| 4179 | * propagate precision marks to those registers, so that bt_is_reg_set() |
| 4180 | * accounts for these registers. |
| 4181 | */ |
| 4182 | bt_sync_linked_regs(bt, hist); |
| 4183 | |
| 4184 | if (class == BPF_ALU || class == BPF_ALU64) { |
| 4185 | if (!bt_is_reg_set(bt, dreg)) |
| 4186 | return 0; |
| 4187 | if (opcode == BPF_END || opcode == BPF_NEG) { |
| 4188 | /* sreg is reserved and unused |
| 4189 | * dreg still needs precision before this insn |
| 4190 | */ |
| 4191 | return 0; |
| 4192 | } else if (opcode == BPF_MOV) { |
| 4193 | if (BPF_SRC(insn->code) == BPF_X) { |
| 4194 | /* dreg = sreg or dreg = (s8, s16, s32)sreg |
| 4195 | * dreg needs precision after this insn |
| 4196 | * sreg needs precision before this insn |
| 4197 | */ |
| 4198 | bt_clear_reg(bt, dreg); |
| 4199 | if (sreg != BPF_REG_FP) |
| 4200 | bt_set_reg(bt, sreg); |
| 4201 | } else { |
| 4202 | /* dreg = K |
| 4203 | * dreg needs precision after this insn. |
| 4204 | * Corresponding register is already marked |
| 4205 | * as precise=true in this verifier state. |
| 4206 | * No further markings in parent are necessary |
| 4207 | */ |
| 4208 | bt_clear_reg(bt, dreg); |
| 4209 | } |
| 4210 | } else { |
| 4211 | if (BPF_SRC(insn->code) == BPF_X) { |
| 4212 | /* dreg += sreg |
| 4213 | * both dreg and sreg need precision |
| 4214 | * before this insn |
| 4215 | */ |
| 4216 | if (sreg != BPF_REG_FP) |
| 4217 | bt_set_reg(bt, sreg); |
| 4218 | } /* else dreg += K |
| 4219 | * dreg still needs precision before this insn |
| 4220 | */ |
| 4221 | } |
| 4222 | } else if (class == BPF_LDX || is_atomic_load_insn(insn)) { |
| 4223 | if (!bt_is_reg_set(bt, dreg)) |
| 4224 | return 0; |
| 4225 | bt_clear_reg(bt, dreg); |
| 4226 | |
| 4227 | /* scalars can only be spilled into stack w/o losing precision. |
| 4228 | * Load from any other memory can be zero extended. |
| 4229 | * The desire to keep that precision is already indicated |
| 4230 | * by 'precise' mark in corresponding register of this state. |
| 4231 | * No further tracking necessary. |
| 4232 | */ |
| 4233 | if (!hist || !(hist->flags & INSN_F_STACK_ACCESS)) |
| 4234 | return 0; |
| 4235 | /* dreg = *(u64 *)[fp - off] was a fill from the stack. |
| 4236 | * that [fp - off] slot contains scalar that needs to be |
| 4237 | * tracked with precision |
| 4238 | */ |
| 4239 | spi = insn_stack_access_spi(hist->flags); |
| 4240 | fr = insn_stack_access_frameno(hist->flags); |
| 4241 | bt_set_frame_slot(bt, fr, spi); |
| 4242 | } else if (class == BPF_STX || class == BPF_ST) { |
| 4243 | if (bt_is_reg_set(bt, dreg)) |
| 4244 | /* stx & st shouldn't be using _scalar_ dst_reg |
| 4245 | * to access memory. It means backtracking |
| 4246 | * encountered a case of pointer subtraction. |
| 4247 | */ |
| 4248 | return -ENOTSUPP; |
| 4249 | /* scalars can only be spilled into stack */ |
| 4250 | if (!hist || !(hist->flags & INSN_F_STACK_ACCESS)) |
| 4251 | return 0; |
| 4252 | spi = insn_stack_access_spi(hist->flags); |
| 4253 | fr = insn_stack_access_frameno(hist->flags); |
| 4254 | if (!bt_is_frame_slot_set(bt, fr, spi)) |
| 4255 | return 0; |
| 4256 | bt_clear_frame_slot(bt, fr, spi); |
| 4257 | if (class == BPF_STX) |
| 4258 | bt_set_reg(bt, sreg); |
| 4259 | } else if (class == BPF_JMP || class == BPF_JMP32) { |
| 4260 | if (bpf_pseudo_call(insn)) { |
| 4261 | int subprog_insn_idx, subprog; |
| 4262 | |
| 4263 | subprog_insn_idx = idx + insn->imm + 1; |
| 4264 | subprog = find_subprog(env, subprog_insn_idx); |
| 4265 | if (subprog < 0) |
| 4266 | return -EFAULT; |
| 4267 | |
| 4268 | if (subprog_is_global(env, subprog)) { |
| 4269 | /* check that jump history doesn't have any |
| 4270 | * extra instructions from subprog; the next |
| 4271 | * instruction after call to global subprog |
| 4272 | * should be literally next instruction in |
| 4273 | * caller program |
| 4274 | */ |
| 4275 | verifier_bug_if(idx + 1 != subseq_idx, env, |
| 4276 | "extra insn from subprog"); |
| 4277 | /* r1-r5 are invalidated after subprog call, |
| 4278 | * so for global func call they shouldn't be set |
| 4279 | * anymore |
| 4280 | */ |
| 4281 | if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { |
| 4282 | verifier_bug(env, "global subprog unexpected regs %x", |
| 4283 | bt_reg_mask(bt)); |
| 4284 | return -EFAULT; |
| 4285 | } |
| 4286 | /* global subprog always sets R0 */ |
| 4287 | bt_clear_reg(bt, BPF_REG_0); |
| 4288 | return 0; |
| 4289 | } else { |
| 4290 | /* static subprog call instruction, which |
| 4291 | * means that we are exiting current subprog, |
| 4292 | * so only r1-r5 could be still requested as |
| 4293 | * precise, r0 and r6-r10 or any stack slot in |
| 4294 | * the current frame should be zero by now |
| 4295 | */ |
| 4296 | if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) { |
| 4297 | verifier_bug(env, "static subprog unexpected regs %x", |
| 4298 | bt_reg_mask(bt)); |
| 4299 | return -EFAULT; |
| 4300 | } |
| 4301 | /* we are now tracking register spills correctly, |
| 4302 | * so any instance of leftover slots is a bug |
| 4303 | */ |
| 4304 | if (bt_stack_mask(bt) != 0) { |
| 4305 | verifier_bug(env, |
| 4306 | "static subprog leftover stack slots %llx", |
| 4307 | bt_stack_mask(bt)); |
| 4308 | return -EFAULT; |
| 4309 | } |
| 4310 | /* propagate r1-r5 to the caller */ |
| 4311 | for (i = BPF_REG_1; i <= BPF_REG_5; i++) { |
| 4312 | if (bt_is_reg_set(bt, i)) { |
| 4313 | bt_clear_reg(bt, i); |
| 4314 | bt_set_frame_reg(bt, bt->frame - 1, i); |
| 4315 | } |
| 4316 | } |
| 4317 | if (bt_subprog_exit(bt)) |
| 4318 | return -EFAULT; |
| 4319 | return 0; |
| 4320 | } |
| 4321 | } else if (is_sync_callback_calling_insn(insn) && idx != subseq_idx - 1) { |
| 4322 | /* exit from callback subprog to callback-calling helper or |
| 4323 | * kfunc call. Use idx/subseq_idx check to discern it from |
| 4324 | * straight line code backtracking. |
| 4325 | * Unlike the subprog call handling above, we shouldn't |
| 4326 | * propagate precision of r1-r5 (if any requested), as they are |
| 4327 | * not actually arguments passed directly to callback subprogs |
| 4328 | */ |
| 4329 | if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) { |
| 4330 | verifier_bug(env, "callback unexpected regs %x", |
| 4331 | bt_reg_mask(bt)); |
| 4332 | return -EFAULT; |
| 4333 | } |
| 4334 | if (bt_stack_mask(bt) != 0) { |
| 4335 | verifier_bug(env, "callback leftover stack slots %llx", |
| 4336 | bt_stack_mask(bt)); |
| 4337 | return -EFAULT; |
| 4338 | } |
| 4339 | /* clear r1-r5 in callback subprog's mask */ |
| 4340 | for (i = BPF_REG_1; i <= BPF_REG_5; i++) |
| 4341 | bt_clear_reg(bt, i); |
| 4342 | if (bt_subprog_exit(bt)) |
| 4343 | return -EFAULT; |
| 4344 | return 0; |
| 4345 | } else if (opcode == BPF_CALL) { |
| 4346 | /* kfunc with imm==0 is invalid and fixup_kfunc_call will |
| 4347 | * catch this error later. Make backtracking conservative |
| 4348 | * with ENOTSUPP. |
| 4349 | */ |
| 4350 | if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0) |
| 4351 | return -ENOTSUPP; |
| 4352 | /* regular helper call sets R0 */ |
| 4353 | bt_clear_reg(bt, BPF_REG_0); |
| 4354 | if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { |
| 4355 | /* if backtracking was looking for registers R1-R5 |
| 4356 | * they should have been found already. |
| 4357 | */ |
| 4358 | verifier_bug(env, "backtracking call unexpected regs %x", |
| 4359 | bt_reg_mask(bt)); |
| 4360 | return -EFAULT; |
| 4361 | } |
| 4362 | } else if (opcode == BPF_EXIT) { |
| 4363 | bool r0_precise; |
| 4364 | |
| 4365 | /* Backtracking to a nested function call: 'idx' is a part of |
| 4366 | * the inner frame, 'subseq_idx' is a part of the outer frame. |
| 4367 | * In case of a regular function call, instructions giving |
| 4368 | * precision to registers R1-R5 should have been found already. |
| 4369 | * In case of a callback, it is ok to have R1-R5 marked for |
| 4370 | * backtracking, as these registers are set by the function |
| 4371 | * invoking callback. |
| 4372 | */ |
| 4373 | if (subseq_idx >= 0 && calls_callback(env, subseq_idx)) |
| 4374 | for (i = BPF_REG_1; i <= BPF_REG_5; i++) |
| 4375 | bt_clear_reg(bt, i); |
| 4376 | if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { |
| 4377 | verifier_bug(env, "backtracking exit unexpected regs %x", |
| 4378 | bt_reg_mask(bt)); |
| 4379 | return -EFAULT; |
| 4380 | } |
| 4381 | |
| 4382 | /* BPF_EXIT in subprog or callback always returns |
| 4383 | * right after the call instruction, so by checking |
| 4384 | * whether the instruction at subseq_idx-1 is subprog |
| 4385 | * call or not we can distinguish actual exit from |
| 4386 | * *subprog* from exit from *callback*. In the former |
| 4387 | * case, we need to propagate r0 precision, if |
| 4388 | * necessary. In the latter we never do that. |
| 4389 | */ |
| 4390 | r0_precise = subseq_idx - 1 >= 0 && |
| 4391 | bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) && |
| 4392 | bt_is_reg_set(bt, BPF_REG_0); |
| 4393 | |
| 4394 | bt_clear_reg(bt, BPF_REG_0); |
| 4395 | if (bt_subprog_enter(bt)) |
| 4396 | return -EFAULT; |
| 4397 | |
| 4398 | if (r0_precise) |
| 4399 | bt_set_reg(bt, BPF_REG_0); |
| 4400 | /* r6-r9 and stack slots will stay set in caller frame |
| 4401 | * bitmasks until we return back from callee(s) |
| 4402 | */ |
| 4403 | return 0; |
| 4404 | } else if (BPF_SRC(insn->code) == BPF_X) { |
| 4405 | if (!bt_is_reg_set(bt, dreg) && !bt_is_reg_set(bt, sreg)) |
| 4406 | return 0; |
| 4407 | /* dreg <cond> sreg |
| 4408 | * Both dreg and sreg need precision before |
| 4409 | * this insn. If only sreg was marked precise |
| 4410 | * before it would be equally necessary to |
| 4411 | * propagate it to dreg. |
| 4412 | */ |
| 4413 | if (!hist || !(hist->flags & INSN_F_SRC_REG_STACK)) |
| 4414 | bt_set_reg(bt, sreg); |
| 4415 | if (!hist || !(hist->flags & INSN_F_DST_REG_STACK)) |
| 4416 | bt_set_reg(bt, dreg); |
| 4417 | } else if (BPF_SRC(insn->code) == BPF_K) { |
| 4418 | /* dreg <cond> K |
| 4419 | * Only dreg still needs precision before |
| 4420 | * this insn, so for the K-based conditional |
| 4421 | * there is nothing new to be marked. |
| 4422 | */ |
| 4423 | } |
| 4424 | } else if (class == BPF_LD) { |
| 4425 | if (!bt_is_reg_set(bt, dreg)) |
| 4426 | return 0; |
| 4427 | bt_clear_reg(bt, dreg); |
| 4428 | /* It's ld_imm64 or ld_abs or ld_ind. |
| 4429 | * For ld_imm64 no further tracking of precision |
| 4430 | * into parent is necessary |
| 4431 | */ |
| 4432 | if (mode == BPF_IND || mode == BPF_ABS) |
| 4433 | /* to be analyzed */ |
| 4434 | return -ENOTSUPP; |
| 4435 | } |
| 4436 | /* Propagate precision marks to linked registers, to account for |
| 4437 | * registers marked as precise in this function. |
| 4438 | */ |
| 4439 | bt_sync_linked_regs(bt, hist); |
| 4440 | return 0; |
| 4441 | } |
| 4442 | |
| 4443 | /* the scalar precision tracking algorithm: |
| 4444 | * . at the start all registers have precise=false. |
| 4445 | * . scalar ranges are tracked as normal through alu and jmp insns. |
| 4446 | * . once precise value of the scalar register is used in: |
| 4447 | * . ptr + scalar alu |
| 4448 | * . if (scalar cond K|scalar) |
| 4449 | * . helper_call(.., scalar, ...) where ARG_CONST is expected |
| 4450 | * backtrack through the verifier states and mark all registers and |
| 4451 | * stack slots with spilled constants that these scalar registers |
| 4452 | * should be precise. |
| 4453 | * . during state pruning two registers (or spilled stack slots) |
| 4454 | * are equivalent if both are not precise. |
| 4455 | * |
| 4456 | * Note the verifier cannot simply walk register parentage chain, |
| 4457 | * since many different registers and stack slots could have been |
| 4458 | * used to compute a single precise scalar. |
| 4459 | * |
| 4460 | * The approach of starting with precise=true for all registers and then |
| 4461 | * backtracking to mark a register as not precise when the verifier detects |
| 4462 | * that the program doesn't care about the specific value (e.g., when a helper |
| 4463 | * takes the register as an ARG_ANYTHING parameter) is not safe. |
| 4464 | * |
| 4465 | * It's ok to walk a single parentage chain of the verifier states. |
| 4466 | * It's possible that this backtracking will go all the way till 1st insn. |
| 4467 | * All other branches will be explored for needing precision later. |
| 4468 | * |
| 4469 | * The backtracking needs to deal with cases like: |
| 4470 | * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0) |
| 4471 | * r9 -= r8 |
| 4472 | * r5 = r9 |
| 4473 | * if r5 > 0x79f goto pc+7 |
| 4474 | * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff)) |
| 4475 | * r5 += 1 |
| 4476 | * ... |
| 4477 | * call bpf_perf_event_output#25 |
| 4478 | * where .arg5_type = ARG_CONST_SIZE_OR_ZERO |
| 4479 | * |
| 4480 | * and this case: |
| 4481 | * r6 = 1 |
| 4482 | * call foo // uses callee's r6 inside to compute r0 |
| 4483 | * r0 += r6 |
| 4484 | * if r0 == 0 goto |
| 4485 | * |
| 4486 | * To track the above, reg_mask/stack_mask need to be independent for each frame. |
| 4487 | * |
| 4488 | * Also if parent's curframe > frame where backtracking started, |
| 4489 | * the verifier needs to mark registers in both frames, otherwise callees |
| 4490 | * may incorrectly prune callers. This is similar to |
| 4491 | * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences") |
| 4492 | * |
| 4493 | * For now backtracking falls back into conservative marking. |
| 4494 | */ |
| 4495 | static void mark_all_scalars_precise(struct bpf_verifier_env *env, |
| 4496 | struct bpf_verifier_state *st) |
| 4497 | { |
| 4498 | struct bpf_func_state *func; |
| 4499 | struct bpf_reg_state *reg; |
| 4500 | int i, j; |
| 4501 | |
| 4502 | if (env->log.level & BPF_LOG_LEVEL2) { |
| 4503 | verbose(env, "mark_precise: frame%d: falling back to forcing all scalars precise\n", |
| 4504 | st->curframe); |
| 4505 | } |
| 4506 | |
| 4507 | /* big hammer: mark all scalars precise in this path. |
| 4508 | * pop_stack may still get !precise scalars. |
| 4509 | * We also skip current state and go straight to first parent state, |
| 4510 | * because precision markings in current non-checkpointed state are |
| 4511 | * not needed. See why in the comment in __mark_chain_precision below. |
| 4512 | */ |
| 4513 | for (st = st->parent; st; st = st->parent) { |
| 4514 | for (i = 0; i <= st->curframe; i++) { |
| 4515 | func = st->frame[i]; |
| 4516 | for (j = 0; j < BPF_REG_FP; j++) { |
| 4517 | reg = &func->regs[j]; |
| 4518 | if (reg->type != SCALAR_VALUE || reg->precise) |
| 4519 | continue; |
| 4520 | reg->precise = true; |
| 4521 | if (env->log.level & BPF_LOG_LEVEL2) { |
| 4522 | verbose(env, "force_precise: frame%d: forcing r%d to be precise\n", |
| 4523 | i, j); |
| 4524 | } |
| 4525 | } |
| 4526 | for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { |
| 4527 | if (!is_spilled_reg(&func->stack[j])) |
| 4528 | continue; |
| 4529 | reg = &func->stack[j].spilled_ptr; |
| 4530 | if (reg->type != SCALAR_VALUE || reg->precise) |
| 4531 | continue; |
| 4532 | reg->precise = true; |
| 4533 | if (env->log.level & BPF_LOG_LEVEL2) { |
| 4534 | verbose(env, "force_precise: frame%d: forcing fp%d to be precise\n", |
| 4535 | i, -(j + 1) * 8); |
| 4536 | } |
| 4537 | } |
| 4538 | } |
| 4539 | } |
| 4540 | } |
| 4541 | |
| 4542 | static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st) |
| 4543 | { |
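| | /* Counterpart to the "big hammer" above: proactively drop all precise |
| | * markings in the given state right before it is checkpointed; see the |
| | * comment for __mark_chain_precision() for why this is safe. |
| | */ |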
| 4544 | struct bpf_func_state *func; |
| 4545 | struct bpf_reg_state *reg; |
| 4546 | int i, j; |
| 4547 | |
| 4548 | for (i = 0; i <= st->curframe; i++) { |
| 4549 | func = st->frame[i]; |
| 4550 | for (j = 0; j < BPF_REG_FP; j++) { |
| 4551 | reg = &func->regs[j]; |
| 4552 | if (reg->type != SCALAR_VALUE) |
| 4553 | continue; |
| 4554 | reg->precise = false; |
| 4555 | } |
| 4556 | for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { |
| 4557 | if (!is_spilled_reg(&func->stack[j])) |
| 4558 | continue; |
| 4559 | reg = &func->stack[j].spilled_ptr; |
| 4560 | if (reg->type != SCALAR_VALUE) |
| 4561 | continue; |
| 4562 | reg->precise = false; |
| 4563 | } |
| 4564 | } |
| 4565 | } |
| 4566 | |
| 4567 | /* |
| 4568 | * __mark_chain_precision() backtracks BPF program instruction sequence and |
| 4569 | * chain of verifier states making sure that register *regno* (if regno >= 0) |
| 4570 | * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked |
| 4571 | * SCALARS, as well as any other registers and slots that contribute to |
| 4572 | * a tracked state of given registers/stack slots, depending on specific BPF |
| 4573 | * assembly instructions (see backtrack_insns() for exact instruction handling |
| 4574 | * logic). This backtracking relies on recorded insn_hist and is able to |
| 4575 | * traverse entire chain of parent states. This process ends only when all the |
| 4576 | * necessary registers/slots and their transitive dependencies are marked as |
| 4577 | * precise. |
| 4578 | * |
| 4579 | * One important and subtle aspect is that precise marks *do not matter* in |
| 4580 | * the currently verified state (current state). It is important to understand |
| 4581 | * why this is the case. |
| 4582 | * |
| 4583 | * First, note that current state is the state that is not yet "checkpointed", |
| 4584 | * i.e., it is not yet put into env->explored_states, and it has no children |
| 4585 | * states as well. It's ephemeral, and can end up either a) being discarded if |
| 4586 | * compatible explored state is found at some point or BPF_EXIT instruction is |
| 4587 | * reached or b) checkpointed and put into env->explored_states, branching out |
| 4588 | * into one or more children states. |
| 4589 | * |
| 4590 | * In the former case, precise markings in current state are completely |
| 4591 | * ignored by state comparison code (see regsafe() for details). Only |
| 4592 | * checkpointed ("old") state precise markings are important, and if old |
| 4593 | * state's register/slot is precise, regsafe() assumes current state's |
| 4594 | * register/slot as precise and checks value ranges exactly and precisely. If |
| 4595 | * states turn out to be compatible, current state's necessary precise |
| 4596 | * markings and any required parent states' precise markings are enforced |
| 4597 | * after the fact with propagate_precision() logic. But it's |
| 4598 | * important to realize that in this case, even after marking current state |
| 4599 | * registers/slots as precise, we immediately discard current state. So what |
| 4600 | * actually matters is any of the precise markings propagated into current |
| 4601 | * state's parent states, which are always checkpointed (due to b) case above). |
| 4602 | * As such, for scenario a) it doesn't matter if current state has precise |
| 4603 | * markings set or not. |
| 4604 | * |
| 4605 | * Now, for the scenario b), checkpointing and forking into child(ren) |
| 4606 | * state(s). Note that before current state gets to checkpointing step, any |
| 4607 | * processed instruction always assumes precise SCALAR register/slot |
| 4608 | * knowledge: if precise value or range is useful to prune jump branch, BPF |
| 4609 | * verifier takes this opportunity enthusiastically. Similarly, when |
| 4610 | * register's value is used to calculate offset or memory address, exact |
| 4611 | * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to |
| 4612 | * what we mentioned above about state comparison ignoring precise |
| 4613 | * markings, the BPF verifier ignores and also assumes precise |
| 4614 | * markings *at will* during instruction verification process. But as verifier |
| 4615 | * assumes precision, it also propagates any precision dependencies across |
| 4616 | * parent states, which are not yet finalized, so can be further restricted |
| 4617 | * based on new knowledge gained from restrictions enforced by their children |
| 4618 | * states. This is so that once those parent states are finalized, i.e., when |
| 4619 | * they have no more active children state, state comparison logic in |
| 4620 | * is_state_visited() would enforce strict and precise SCALAR ranges, if |
| 4621 | * required for correctness. |
| 4622 | * |
| 4623 | * To build a bit more intuition, note also that once a state is checkpointed, |
| 4624 | * the path we took to get to that state is not important. This is crucial |
| 4625 | * property for state pruning. When state is checkpointed and finalized at |
| 4626 | * some instruction index, it can be correctly and safely used to "short |
| 4627 | * circuit" any *compatible* state that reaches exactly the same instruction |
| 4628 | * index. I.e., if we jumped to that instruction from a completely different |
| 4629 | * code path than original finalized state was derived from, it doesn't |
| 4630 | * matter, current state can be discarded because from that instruction |
| 4631 | * forward having a compatible state will ensure we will safely reach the |
| 4632 | * exit. States describe preconditions for further exploration, but completely |
| 4633 | * forget the history of how we got here. |
| 4634 | * |
| 4635 | * This also means that even if we needed precise SCALAR range to get to |
| 4636 | * finalized state, but from that point forward *that same* SCALAR register is |
| 4637 | * never used in a precise context (i.e., its precise value is not needed for |
| 4638 | * correctness), it's correct and safe to mark such register as "imprecise" |
| 4639 | * (i.e., precise marking set to false). This is what we rely on when we do |
| 4640 | * not set precise marking in current state. If no child state requires |
| 4641 | * precision for any given SCALAR register, it's safe to dictate that it can |
| 4642 | * be imprecise. If any child state does require this register to be precise, |
| 4643 | * we'll mark it precise later retroactively during precise markings |
| 4644 | * propagation from child state to parent states. |
| 4645 | * |
| 4646 | * Skipping precise marking setting in current state is a mild version of |
| 4647 | * relying on the above observation. But we can utilize this property even |
| 4648 | * more aggressively by proactively forgetting any precise marking in the |
| 4649 | * current state (which we inherited from the parent state), right before we |
| 4650 | * checkpoint it and branch off into new child state. This is done by |
| 4651 | * mark_all_scalars_imprecise() to hopefully get more permissive and generic |
| 4652 | * finalized states which help in short circuiting more future states. |
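| | * |
| | * A purely illustrative sketch (hypothetical program fragment): |
| | * |
| | *   r6 = *(u32 *)(r1 + 0)   // r6 starts out as an imprecise SCALAR |
| | *   r2 = r10 |
| | *   r2 += -8 |
| | *   r2 += r6                // ptr += scalar: r6's exact range now matters |
| | * |
| | * The pointer arithmetic requests precision for r6, and backtracking walks |
| | * the recorded instruction history and the chain of parent states up to the |
| | * instruction that defined r6, setting the precise flag only in those |
| | * checkpointed parent states. |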
| 4653 | */ |
| 4654 | static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) |
| 4655 | { |
| 4656 | struct backtrack_state *bt = &env->bt; |
| 4657 | struct bpf_verifier_state *st = env->cur_state; |
| 4658 | int first_idx = st->first_insn_idx; |
| 4659 | int last_idx = env->insn_idx; |
| 4660 | int subseq_idx = -1; |
| 4661 | struct bpf_func_state *func; |
| 4662 | struct bpf_reg_state *reg; |
| 4663 | bool skip_first = true; |
| 4664 | int i, fr, err; |
| 4665 | |
| 4666 | if (!env->bpf_capable) |
| 4667 | return 0; |
| 4668 | |
| 4669 | /* set frame number from which we are starting to backtrack */ |
| 4670 | bt_init(bt, env->cur_state->curframe); |
| 4671 | |
| 4672 | /* Do sanity checks against current state of register and/or stack |
| 4673 | * slot, but don't set precise flag in current state, as precision |
| 4674 | * tracking in the current state is unnecessary. |
| 4675 | */ |
| 4676 | func = st->frame[bt->frame]; |
| 4677 | if (regno >= 0) { |
| 4678 | reg = &func->regs[regno]; |
| 4679 | if (reg->type != SCALAR_VALUE) { |
| 4680 | WARN_ONCE(1, "backtracking misuse"); |
| 4681 | return -EFAULT; |
| 4682 | } |
| 4683 | bt_set_reg(bt, regno); |
| 4684 | } |
| 4685 | |
| 4686 | if (bt_empty(bt)) |
| 4687 | return 0; |
| 4688 | |
| 4689 | for (;;) { |
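| | /* Process one verifier state: scan its recorded instruction history |
| | * backwards from last_idx down to first_idx, then move on to st->parent. |
| | */ |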
| 4690 | DECLARE_BITMAP(mask, 64); |
| 4691 | u32 hist_start = st->insn_hist_start; |
| 4692 | u32 hist_end = st->insn_hist_end; |
| 4693 | struct bpf_insn_hist_entry *hist; |
| 4694 | |
| 4695 | if (env->log.level & BPF_LOG_LEVEL2) { |
| 4696 | verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n", |
| 4697 | bt->frame, last_idx, first_idx, subseq_idx); |
| 4698 | } |
| 4699 | |
| 4700 | if (last_idx < 0) { |
| 4701 | /* we are at the entry into subprog, which |
| 4702 | * is expected for global funcs, but only if |
| 4703 | * requested precise registers are R1-R5 |
| 4704 | * (which are global func's input arguments) |
| 4705 | */ |
| 4706 | if (st->curframe == 0 && |
| 4707 | st->frame[0]->subprogno > 0 && |
| 4708 | st->frame[0]->callsite == BPF_MAIN_FUNC && |
| 4709 | bt_stack_mask(bt) == 0 && |
| 4710 | (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) == 0) { |
| 4711 | bitmap_from_u64(mask, bt_reg_mask(bt)); |
| 4712 | for_each_set_bit(i, mask, 32) { |
| 4713 | reg = &st->frame[0]->regs[i]; |
| 4714 | bt_clear_reg(bt, i); |
| 4715 | if (reg->type == SCALAR_VALUE) |
| 4716 | reg->precise = true; |
| 4717 | } |
| 4718 | return 0; |
| 4719 | } |
| 4720 | |
| 4721 | verifier_bug(env, "backtracking func entry subprog %d reg_mask %x stack_mask %llx", |
| 4722 | st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt)); |
| 4723 | return -EFAULT; |
| 4724 | } |
| 4725 | |
| 4726 | for (i = last_idx;;) { |
| 4727 | if (skip_first) { |
| 4728 | err = 0; |
| 4729 | skip_first = false; |
| 4730 | } else { |
| 4731 | hist = get_insn_hist_entry(env, hist_start, hist_end, i); |
| 4732 | err = backtrack_insn(env, i, subseq_idx, hist, bt); |
| 4733 | } |
| 4734 | if (err == -ENOTSUPP) { |
| 4735 | mark_all_scalars_precise(env, env->cur_state); |
| 4736 | bt_reset(bt); |
| 4737 | return 0; |
| 4738 | } else if (err) { |
| 4739 | return err; |
| 4740 | } |
| 4741 | if (bt_empty(bt)) |
| 4742 | /* Found assignment(s) into tracked register in this state. |
| 4743 | * Since this state is already marked, just return. |
| 4744 | * Nothing to be tracked further in the parent state. |
| 4745 | */ |
| 4746 | return 0; |
| 4747 | subseq_idx = i; |
| 4748 | i = get_prev_insn_idx(env, st, i, hist_start, &hist_end); |
| 4749 | if (i == -ENOENT) |
| 4750 | break; |
| 4751 | if (i >= env->prog->len) { |
| 4752 | /* This can happen if backtracking reached insn 0 |
| 4753 | * and there are still reg_mask or stack_mask |
| 4754 | * to backtrack. |
| 4755 | * It means the backtracking missed the spot where |
| 4756 | * a particular register was initialized with a constant. |
| 4757 | */ |
| 4758 | verifier_bug(env, "backtracking idx %d", i); |
| 4759 | return -EFAULT; |
| 4760 | } |
| 4761 | } |
| 4762 | st = st->parent; |
| 4763 | if (!st) |
| 4764 | break; |
| 4765 | |
| 4766 | for (fr = bt->frame; fr >= 0; fr--) { |
| 4767 | func = st->frame[fr]; |
| 4768 | bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr)); |
| 4769 | for_each_set_bit(i, mask, 32) { |
| 4770 | reg = &func->regs[i]; |
| 4771 | if (reg->type != SCALAR_VALUE) { |
| 4772 | bt_clear_frame_reg(bt, fr, i); |
| 4773 | continue; |
| 4774 | } |
| 4775 | if (reg->precise) |
| 4776 | bt_clear_frame_reg(bt, fr, i); |
| 4777 | else |
| 4778 | reg->precise = true; |
| 4779 | } |
| 4780 | |
| 4781 | bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr)); |
| 4782 | for_each_set_bit(i, mask, 64) { |
| 4783 | if (verifier_bug_if(i >= func->allocated_stack / BPF_REG_SIZE, |
| 4784 | env, "stack slot %d, total slots %d", |
| 4785 | i, func->allocated_stack / BPF_REG_SIZE)) |
| 4786 | return -EFAULT; |
| 4787 | |
| 4788 | if (!is_spilled_scalar_reg(&func->stack[i])) { |
| 4789 | bt_clear_frame_slot(bt, fr, i); |
| 4790 | continue; |
| 4791 | } |
| 4792 | reg = &func->stack[i].spilled_ptr; |
| 4793 | if (reg->precise) |
| 4794 | bt_clear_frame_slot(bt, fr, i); |
| 4795 | else |
| 4796 | reg->precise = true; |
| 4797 | } |
| 4798 | if (env->log.level & BPF_LOG_LEVEL2) { |
| 4799 | fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, |
| 4800 | bt_frame_reg_mask(bt, fr)); |
| 4801 | verbose(env, "mark_precise: frame%d: parent state regs=%s ", |
| 4802 | fr, env->tmp_str_buf); |
| 4803 | fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, |
| 4804 | bt_frame_stack_mask(bt, fr)); |
| 4805 | verbose(env, "stack=%s: ", env->tmp_str_buf); |
| 4806 | print_verifier_state(env, st, fr, true); |
| 4807 | } |
| 4808 | } |
| 4809 | |
| 4810 | if (bt_empty(bt)) |
| 4811 | return 0; |
| 4812 | |
| 4813 | subseq_idx = first_idx; |
| 4814 | last_idx = st->last_insn_idx; |
| 4815 | first_idx = st->first_insn_idx; |
| 4816 | } |
| 4817 | |
| 4818 | /* if we still have requested precise regs or slots, we missed |
| 4819 | * something (e.g., stack access through non-r10 register), so |
| 4820 | * fall back to marking all precise |
| 4821 | */ |
| 4822 | if (!bt_empty(bt)) { |
| 4823 | mark_all_scalars_precise(env, env->cur_state); |
| 4824 | bt_reset(bt); |
| 4825 | } |
| 4826 | |
| 4827 | return 0; |
| 4828 | } |
| 4829 | |
| 4830 | int mark_chain_precision(struct bpf_verifier_env *env, int regno) |
| 4831 | { |
| 4832 | return __mark_chain_precision(env, regno); |
| 4833 | } |
| 4834 | |
| 4835 | /* mark_chain_precision_batch() assumes that env->bt is set in the caller to |
| 4836 | * desired reg and stack masks across all relevant frames |
| 4837 | */ |
| 4838 | static int mark_chain_precision_batch(struct bpf_verifier_env *env) |
| 4839 | { |
| 4840 | return __mark_chain_precision(env, -1); |
| 4841 | } |
| 4842 | |
| 4843 | static bool is_spillable_regtype(enum bpf_reg_type type) |
| 4844 | { |
| 4845 | switch (base_type(type)) { |
| 4846 | case PTR_TO_MAP_VALUE: |
| 4847 | case PTR_TO_STACK: |
| 4848 | case PTR_TO_CTX: |
| 4849 | case PTR_TO_PACKET: |
| 4850 | case PTR_TO_PACKET_META: |
| 4851 | case PTR_TO_PACKET_END: |
| 4852 | case PTR_TO_FLOW_KEYS: |
| 4853 | case CONST_PTR_TO_MAP: |
| 4854 | case PTR_TO_SOCKET: |
| 4855 | case PTR_TO_SOCK_COMMON: |
| 4856 | case PTR_TO_TCP_SOCK: |
| 4857 | case PTR_TO_XDP_SOCK: |
| 4858 | case PTR_TO_BTF_ID: |
| 4859 | case PTR_TO_BUF: |
| 4860 | case PTR_TO_MEM: |
| 4861 | case PTR_TO_FUNC: |
| 4862 | case PTR_TO_MAP_KEY: |
| 4863 | case PTR_TO_ARENA: |
| 4864 | return true; |
| 4865 | default: |
| 4866 | return false; |
| 4867 | } |
| 4868 | } |
| 4869 | |
| 4870 | /* Does this register contain a constant zero? */ |
| 4871 | static bool register_is_null(struct bpf_reg_state *reg) |
| 4872 | { |
| 4873 | return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); |
| 4874 | } |
| 4875 | |
| 4876 | /* check if register is a constant scalar value */ |
| 4877 | static bool is_reg_const(struct bpf_reg_state *reg, bool subreg32) |
| 4878 | { |
| 4879 | return reg->type == SCALAR_VALUE && |
| 4880 | tnum_is_const(subreg32 ? tnum_subreg(reg->var_off) : reg->var_off); |
| 4881 | } |
| 4882 | |
| 4883 | /* assuming is_reg_const() is true, return constant value of a register */ |
| 4884 | static u64 reg_const_value(struct bpf_reg_state *reg, bool subreg32) |
| 4885 | { |
| 4886 | return subreg32 ? tnum_subreg(reg->var_off).value : reg->var_off.value; |
| 4887 | } |
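| | /* Typical usage of the two helpers above: |
| | * |
| | *   if (is_reg_const(reg, false)) |
| | *           val = reg_const_value(reg, false); |
| | */ |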
| 4888 | |
| 4889 | static bool __is_pointer_value(bool allow_ptr_leaks, |
| 4890 | const struct bpf_reg_state *reg) |
| 4891 | { |
| 4892 | if (allow_ptr_leaks) |
| 4893 | return false; |
| 4894 | |
| 4895 | return reg->type != SCALAR_VALUE; |
| 4896 | } |
| 4897 | |
| 4898 | static void assign_scalar_id_before_mov(struct bpf_verifier_env *env, |
| 4899 | struct bpf_reg_state *src_reg) |
| 4900 | { |
| 4901 | if (src_reg->type != SCALAR_VALUE) |
| 4902 | return; |
| 4903 | |
| 4904 | if (src_reg->id & BPF_ADD_CONST) { |
| 4905 | /* |
| 4906 | * The verifier is processing rX = rY insn and |
| 4907 | * rY->id has special linked register already. |
| 4908 | * Clear it, since multiple rX += const are not supported. |
| 4909 | */ |
| 4910 | src_reg->id = 0; |
| 4911 | src_reg->off = 0; |
| 4912 | } |
| 4913 | |
| 4914 | if (!src_reg->id && !tnum_is_const(src_reg->var_off)) |
| 4915 | /* Ensure that src_reg has a valid ID that will be copied to |
| 4916 | * dst_reg and then will be used by sync_linked_regs() to |
| 4917 | * propagate min/max range. |
| 4918 | */ |
| 4919 | src_reg->id = ++env->id_gen; |
| 4920 | } |
| 4921 | |
| 4922 | /* Copy src state preserving dst->parent and dst->live fields */ |
| 4923 | static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src) |
| 4924 | { |
| 4925 | struct bpf_reg_state *parent = dst->parent; |
| 4926 | enum bpf_reg_liveness live = dst->live; |
| 4927 | |
| 4928 | *dst = *src; |
| 4929 | dst->parent = parent; |
| 4930 | dst->live = live; |
| 4931 | } |
| 4932 | |
| 4933 | static void save_register_state(struct bpf_verifier_env *env, |
| 4934 | struct bpf_func_state *state, |
| 4935 | int spi, struct bpf_reg_state *reg, |
| 4936 | int size) |
| 4937 | { |
| 4938 | int i; |
| 4939 | |
| 4940 | copy_register_state(&state->stack[spi].spilled_ptr, reg); |
| 4941 | if (size == BPF_REG_SIZE) |
| 4942 | state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; |
| 4943 | |
| 4944 | for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) |
| 4945 | state->stack[spi].slot_type[i - 1] = STACK_SPILL; |
| 4946 | |
| 4947 | /* size < 8 bytes spill: only the top 'size' slot_type bytes were marked |
| | * STACK_SPILL above (e.g. slot_type[4..7] for a 4-byte spill); scrub the |
| | * remaining low bytes |
| | */ |
| 4948 | for (; i; i--) |
| 4949 | mark_stack_slot_misc(env, &state->stack[spi].slot_type[i - 1]); |
| 4950 | } |
| 4951 | |
| 4952 | static bool is_bpf_st_mem(struct bpf_insn *insn) |
| 4953 | { |
| 4954 | return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM; |
| 4955 | } |
| 4956 | |
| 4957 | static int get_reg_width(struct bpf_reg_state *reg) |
| 4958 | { |
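| | /* Number of significant bits in the register's umax_value, e.g. |
| | * umax_value == 255 -> 8. Used to judge whether a scalar's known |
| | * range fits into a narrower spill/fill. |
| | */ |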
| 4959 | return fls64(reg->umax_value); |
| 4960 | } |
| 4961 | |
| 4962 | /* See comment for mark_fastcall_pattern_for_call() */ |
| 4963 | static void check_fastcall_stack_contract(struct bpf_verifier_env *env, |
| 4964 | struct bpf_func_state *state, int insn_idx, int off) |
| 4965 | { |
| 4966 | struct bpf_subprog_info *subprog = &env->subprog_info[state->subprogno]; |
| 4967 | struct bpf_insn_aux_data *aux = env->insn_aux_data; |
| 4968 | int i; |
| 4969 | |
| 4970 | if (subprog->fastcall_stack_off <= off || aux[insn_idx].fastcall_pattern) |
| 4971 | return; |
| 4972 | /* The region [max_stack_depth .. fastcall_stack_off) was accessed by |
| 4973 | * something that is not a part of the fastcall pattern, so disable |
| 4974 | * fastcall rewrites for the current subprogram by setting |
| 4975 | * fastcall_stack_off to a value smaller than any possible offset. |
| 4976 | */ |
| 4977 | subprog->fastcall_stack_off = S16_MIN; |
| 4978 | /* reset fastcall aux flags within subprogram, |
| 4979 | * happens at most once per subprogram |
| 4980 | */ |
| 4981 | for (i = subprog->start; i < (subprog + 1)->start; ++i) { |
| 4982 | aux[i].fastcall_spills_num = 0; |
| 4983 | aux[i].fastcall_pattern = 0; |
| 4984 | } |
| 4985 | } |
| 4986 | |
| 4987 | /* check_stack_{read,write}_fixed_off functions track spill/fill of registers; |
| 4988 | * stack boundary and alignment are checked in check_mem_access() |
| 4989 | */ |
| 4990 | static int check_stack_write_fixed_off(struct bpf_verifier_env *env, |
| 4991 | /* stack frame we're writing to */ |
| 4992 | struct bpf_func_state *state, |
| 4993 | int off, int size, int value_regno, |
| 4994 | int insn_idx) |
| 4995 | { |
| 4996 | struct bpf_func_state *cur; /* state of the current function */ |
| 4997 | int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; |
| 4998 | struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; |
| 4999 | struct bpf_reg_state *reg = NULL; |
| 5000 | int insn_flags = insn_stack_access_flags(state->frameno, spi); |
| 5001 | |
| 5002 | /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, |
| 5003 | * so it's an aligned access and [off, off + size) is within stack limits |
| 5004 | */ |
| 5005 | if (!env->allow_ptr_leaks && |
| 5006 | is_spilled_reg(&state->stack[spi]) && |
| 5007 | !is_spilled_scalar_reg(&state->stack[spi]) && |
| 5008 | size != BPF_REG_SIZE) { |
| 5009 | verbose(env, "attempt to corrupt spilled pointer on stack\n"); |
| 5010 | return -EACCES; |
| 5011 | } |
| 5012 | |
| 5013 | cur = env->cur_state->frame[env->cur_state->curframe]; |
| 5014 | if (value_regno >= 0) |
| 5015 | reg = &cur->regs[value_regno]; |
| 5016 | if (!env->bypass_spec_v4) { |
| 5017 | bool sanitize = reg && is_spillable_regtype(reg->type); |
| 5018 | |
| 5019 | for (i = 0; i < size; i++) { |
| 5020 | u8 type = state->stack[spi].slot_type[i]; |
| 5021 | |
| 5022 | if (type != STACK_MISC && type != STACK_ZERO) { |
| 5023 | sanitize = true; |
| 5024 | break; |
| 5025 | } |
| 5026 | } |
| 5027 | |
| 5028 | if (sanitize) |
| 5029 | env->insn_aux_data[insn_idx].sanitize_stack_spill = true; |
| 5030 | } |
| 5031 | |
| 5032 | err = destroy_if_dynptr_stack_slot(env, state, spi); |
| 5033 | if (err) |
| 5034 | return err; |
| 5035 | |
| 5036 | check_fastcall_stack_contract(env, state, insn_idx, off); |
| 5037 | mark_stack_slot_scratched(env, spi); |
| 5038 | if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) { |
| 5039 | bool reg_value_fits; |
| 5040 | |
| 5041 | reg_value_fits = get_reg_width(reg) <= BITS_PER_BYTE * size; |
| 5042 | /* Make sure that reg had an ID to build a relation on spill. */ |
| 5043 | if (reg_value_fits) |
| 5044 | assign_scalar_id_before_mov(env, reg); |
| 5045 | save_register_state(env, state, spi, reg, size); |
| 5046 | /* Break the relation on a narrowing spill. */ |
| 5047 | if (!reg_value_fits) |
| 5048 | state->stack[spi].spilled_ptr.id = 0; |
| 5049 | } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) && |
| 5050 | env->bpf_capable) { |
| 5051 | struct bpf_reg_state *tmp_reg = &env->fake_reg[0]; |
| 5052 | |
| 5053 | memset(tmp_reg, 0, sizeof(*tmp_reg)); |
| 5054 | __mark_reg_known(tmp_reg, insn->imm); |
| 5055 | tmp_reg->type = SCALAR_VALUE; |
| 5056 | save_register_state(env, state, spi, tmp_reg, size); |
| 5057 | } else if (reg && is_spillable_regtype(reg->type)) { |
| 5058 | /* register containing pointer is being spilled into stack */ |
| 5059 | if (size != BPF_REG_SIZE) { |
| 5060 | verbose_linfo(env, insn_idx, "; "); |
| 5061 | verbose(env, "invalid size of register spill\n"); |
| 5062 | return -EACCES; |
| 5063 | } |
| 5064 | if (state != cur && reg->type == PTR_TO_STACK) { |
| 5065 | verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); |
| 5066 | return -EINVAL; |
| 5067 | } |
| 5068 | save_register_state(env, state, spi, reg, size); |
| 5069 | } else { |
| 5070 | u8 type = STACK_MISC; |
| 5071 | |
| 5072 | /* regular write of data into stack destroys any spilled ptr */ |
| 5073 | state->stack[spi].spilled_ptr.type = NOT_INIT; |
| 5074 | /* Mark slots as STACK_MISC if they belonged to spilled ptr/dynptr/iter. */ |
| 5075 | if (is_stack_slot_special(&state->stack[spi])) |
| 5076 | for (i = 0; i < BPF_REG_SIZE; i++) |
| 5077 | scrub_spilled_slot(&state->stack[spi].slot_type[i]); |
| 5078 | |
| 5079 | /* only mark the slot as written if all 8 bytes were written; |
| 5080 | * otherwise read propagation may incorrectly stop too soon |
| 5081 | * when stack slots are partially written. |
| 5082 | * This heuristic means that read propagation will be |
| 5083 | * conservative, since it will add reg_live_read marks |
| 5084 | * to stack slots all the way to the first state when a program |
| 5085 | * writes+reads less than 8 bytes. |
| 5086 | */ |
| 5087 | if (size == BPF_REG_SIZE) |
| 5088 | state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; |
| 5089 | |
| 5090 | /* when we zero initialize stack slots mark them as such */ |
| 5091 | if ((reg && register_is_null(reg)) || |
| 5092 | (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) { |
| 5093 | /* STACK_ZERO case happened because register spill |
| 5094 | * wasn't properly aligned at the stack slot boundary, |
| 5095 | * so it's not a register spill anymore; force |
| 5096 | * originating register to be precise to make |
| 5097 | * STACK_ZERO correct for subsequent states |
| 5098 | */ |
| 5099 | err = mark_chain_precision(env, value_regno); |
| 5100 | if (err) |
| 5101 | return err; |
| 5102 | type = STACK_ZERO; |
| 5103 | } |
| 5104 | |
| 5105 | /* Mark slots affected by this stack write. */ |
| 5106 | for (i = 0; i < size; i++) |
| 5107 | state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type; |
| 5108 | insn_flags = 0; /* not a register spill */ |
| 5109 | } |
| 5110 | |
| 5111 | if (insn_flags) |
| 5112 | return push_insn_history(env, env->cur_state, insn_flags, 0); |
| 5113 | return 0; |
| 5114 | } |
| 5115 | |
| 5116 | /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is |
| 5117 | * known to contain a variable offset. |
| 5118 | * This function checks whether the write is permitted and conservatively |
| 5119 | * tracks the effects of the write, considering that each stack slot in the |
| 5120 | * dynamic range is potentially written to. |
| 5121 | * |
| 5122 | * 'off' includes 'regno->off'. |
| 5123 | * 'value_regno' can be -1, meaning that an unknown value is being written to |
| 5124 | * the stack. |
| 5125 | * |
| 5126 | * Spilled pointers in range are not marked as written because we don't know |
| 5127 | * what's going to be actually written. This means that read propagation for |
| 5128 | * future reads cannot be terminated by this write. |
| 5129 | * |
| 5130 | * For privileged programs, uninitialized stack slots are considered |
| 5131 | * initialized by this write (even though we don't know exactly what offsets |
| 5132 | * are going to be written to). The idea is that we don't want the verifier to |
| 5133 | * reject future reads that access slots written to through variable offsets. |
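| | * |
| | * A hypothetical example (privileged program), with r6 known to be in [0, 8]: |
| | * |
| | *   r1 = r10 |
| | *   r1 += -16 |
| | *   r1 += r6 |
| | *   *(u64 *)(r1 + 0) = r2 |
| | * |
| | * Any byte in fp[-16, 0) may be written, so every stack slot in that range |
| | * is conservatively treated as potentially written and spilled pointers in |
| | * the range are invalidated. |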
| 5134 | */ |
| 5135 | static int check_stack_write_var_off(struct bpf_verifier_env *env, |
| 5136 | /* func where register points to */ |
| 5137 | struct bpf_func_state *state, |
| 5138 | int ptr_regno, int off, int size, |
| 5139 | int value_regno, int insn_idx) |
| 5140 | { |
| 5141 | struct bpf_func_state *cur; /* state of the current function */ |
| 5142 | int min_off, max_off; |
| 5143 | int i, err; |
| 5144 | struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL; |
| 5145 | struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; |
| 5146 | bool writing_zero = false; |
| 5147 | /* set if the fact that we're writing a zero is used to let any |
| 5148 | * stack slots remain STACK_ZERO |
| 5149 | */ |
| 5150 | bool zero_used = false; |
| 5151 | |
| 5152 | cur = env->cur_state->frame[env->cur_state->curframe]; |
| 5153 | ptr_reg = &cur->regs[ptr_regno]; |
| 5154 | min_off = ptr_reg->smin_value + off; |
| 5155 | max_off = ptr_reg->smax_value + off + size; |
| 5156 | if (value_regno >= 0) |
| 5157 | value_reg = &cur->regs[value_regno]; |
| 5158 | if ((value_reg && register_is_null(value_reg)) || |
| 5159 | (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0)) |
| 5160 | writing_zero = true; |
| 5161 | |
| 5162 | for (i = min_off; i < max_off; i++) { |
| 5163 | int spi; |
| 5164 | |
| 5165 | spi = __get_spi(i); |
| 5166 | err = destroy_if_dynptr_stack_slot(env, state, spi); |
| 5167 | if (err) |
| 5168 | return err; |
| 5169 | } |
| 5170 | |
| 5171 | check_fastcall_stack_contract(env, state, insn_idx, min_off); |
| 5172 | /* Variable offset writes destroy any spilled pointers in range. */ |
| 5173 | for (i = min_off; i < max_off; i++) { |
| 5174 | u8 new_type, *stype; |
| 5175 | int slot, spi; |
| 5176 | |
| 5177 | slot = -i - 1; |
| 5178 | spi = slot / BPF_REG_SIZE; |
| 5179 | stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; |
| 5180 | mark_stack_slot_scratched(env, spi); |
| 5181 | |
| 5182 | if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) { |
| 5183 | /* Reject the write if the range we may write to has not |
| 5184 | * been initialized beforehand. If we didn't reject |
| 5185 | * here, the ptr status would be erased below (even |
| 5186 | * though not all slots are actually overwritten), |
| 5187 | * possibly opening the door to leaks. |
| 5188 | * |
| 5189 | * We do however catch STACK_INVALID case below, and |
| 5190 | * only allow reading possibly uninitialized memory |
| 5191 | * later for CAP_PERFMON, as the write may not happen to |
| 5192 | * that slot. |
| 5193 | */ |
| 5194 | verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", |
| 5195 | insn_idx, i); |
| 5196 | return -EINVAL; |
| 5197 | } |
| 5198 | |
| 5199 | /* If writing_zero and the spi slot contains a spill of value 0, |
| 5200 | * maintain the spill type. |
| 5201 | */ |
| 5202 | if (writing_zero && *stype == STACK_SPILL && |
| 5203 | is_spilled_scalar_reg(&state->stack[spi])) { |
| 5204 | struct bpf_reg_state *spill_reg = &state->stack[spi].spilled_ptr; |
| 5205 | |
| 5206 | if (tnum_is_const(spill_reg->var_off) && spill_reg->var_off.value == 0) { |
| 5207 | zero_used = true; |
| 5208 | continue; |
| 5209 | } |
| 5210 | } |
| 5211 | |
| 5212 | /* Erase all other spilled pointers. */ |
| 5213 | state->stack[spi].spilled_ptr.type = NOT_INIT; |
| 5214 | |
| 5215 | /* Update the slot type. */ |
| 5216 | new_type = STACK_MISC; |
| 5217 | if (writing_zero && *stype == STACK_ZERO) { |
| 5218 | new_type = STACK_ZERO; |
| 5219 | zero_used = true; |
| 5220 | } |
| 5221 | /* If the slot is STACK_INVALID, we check whether it's OK to |
| 5222 | * pretend that it will be initialized by this write. The slot |
| 5223 | * might not actually be written to, and so if we mark it as |
| 5224 | * initialized, future reads might leak uninitialized memory. |
| 5225 | * For privileged programs, we will accept such reads to slots |
| 5226 | * that may or may not be written because, if we rejected |
| 5227 | * them, the error would be too confusing. |
| 5228 | */ |
| 5229 | if (*stype == STACK_INVALID && !env->allow_uninit_stack) { |
| 5230 | verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d", |
| 5231 | insn_idx, i); |
| 5232 | return -EINVAL; |
| 5233 | } |
| 5234 | *stype = new_type; |
| 5235 | } |
| 5236 | if (zero_used) { |
| 5237 | /* backtracking doesn't work for STACK_ZERO yet. */ |
| 5238 | err = mark_chain_precision(env, value_regno); |
| 5239 | if (err) |
| 5240 | return err; |
| 5241 | } |
| 5242 | return 0; |
| 5243 | } |
| 5244 | |
| 5245 | /* When register 'dst_regno' is assigned some values from stack[min_off, |
| 5246 | * max_off), we set the register's type according to the types of the |
| 5247 | * respective stack slots. If all the stack values are known to be zeros, then |
| 5248 | * so is the destination reg. Otherwise, the register is considered to be |
| 5249 | * SCALAR. This function does not deal with register filling; the caller must |
| 5250 | * ensure that all spilled registers in the stack range have been marked as |
| 5251 | * read. |
| 5252 | */ |
| 5253 | static void mark_reg_stack_read(struct bpf_verifier_env *env, |
| 5254 | /* func where src register points to */ |
| 5255 | struct bpf_func_state *ptr_state, |
| 5256 | int min_off, int max_off, int dst_regno) |
| 5257 | { |
| 5258 | struct bpf_verifier_state *vstate = env->cur_state; |
| 5259 | struct bpf_func_state *state = vstate->frame[vstate->curframe]; |
| 5260 | int i, slot, spi; |
| 5261 | u8 *stype; |
| 5262 | int zeros = 0; |
| 5263 | |
| 5264 | for (i = min_off; i < max_off; i++) { |
| 5265 | slot = -i - 1; |
| 5266 | spi = slot / BPF_REG_SIZE; |
| 5267 | mark_stack_slot_scratched(env, spi); |
| 5268 | stype = ptr_state->stack[spi].slot_type; |
| 5269 | if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) |
| 5270 | break; |
| 5271 | zeros++; |
| 5272 | } |
| 5273 | if (zeros == max_off - min_off) { |
| 5274 | /* Any access_size read into register is zero extended, |
| 5275 | * so the whole register == const_zero. |
| 5276 | */ |
| 5277 | __mark_reg_const_zero(env, &state->regs[dst_regno]); |
| 5278 | } else { |
| 5279 | /* have read misc data from the stack */ |
| 5280 | mark_reg_unknown(env, state->regs, dst_regno); |
| 5281 | } |
| 5282 | state->regs[dst_regno].live |= REG_LIVE_WRITTEN; |
| 5283 | } |
| 5284 | |
| 5285 | /* Read the stack at 'off' and put the results into the register indicated by |
| 5286 | * 'dst_regno'. It handles reg filling if the addressed stack slot is a |
| 5287 | * spilled reg. |
| 5288 | * |
| 5289 | * 'dst_regno' can be -1, meaning that the read value is not going to a |
| 5290 | * register. |
| 5291 | * |
| 5292 | * The access is assumed to be within the current stack bounds. |
| 5293 | */ |
| 5294 | static int check_stack_read_fixed_off(struct bpf_verifier_env *env, |
| 5295 | /* func where src register points to */ |
| 5296 | struct bpf_func_state *reg_state, |
| 5297 | int off, int size, int dst_regno) |
| 5298 | { |
| 5299 | struct bpf_verifier_state *vstate = env->cur_state; |
| 5300 | struct bpf_func_state *state = vstate->frame[vstate->curframe]; |
| 5301 | int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; |
| 5302 | struct bpf_reg_state *reg; |
| 5303 | u8 *stype, type; |
| 5304 | int insn_flags = insn_stack_access_flags(reg_state->frameno, spi); |
| 5305 | |
| 5306 | stype = reg_state->stack[spi].slot_type; |
| 5307 | reg = ®_state->stack[spi].spilled_ptr; |
| 5308 | |
| 5309 | mark_stack_slot_scratched(env, spi); |
| 5310 | check_fastcall_stack_contract(env, state, env->insn_idx, off); |
| 5311 | |
| 5312 | if (is_spilled_reg(®_state->stack[spi])) { |
| 5313 | u8 spill_size = 1; |
| 5314 | |
| 5315 | for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) |
| 5316 | spill_size++; |
| 5317 | |
| 5318 | if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) { |
| 5319 | if (reg->type != SCALAR_VALUE) { |
| 5320 | verbose_linfo(env, env->insn_idx, "; "); |
| 5321 | verbose(env, "invalid size of register fill\n"); |
| 5322 | return -EACCES; |
| 5323 | } |
| 5324 | |
| 5325 | mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); |
| 5326 | if (dst_regno < 0) |
| 5327 | return 0; |
| 5328 | |
| 5329 | if (size <= spill_size && |
| 5330 | bpf_stack_narrow_access_ok(off, size, spill_size)) { |
| 5331 | /* The earlier check_reg_arg() has decided the |
| 5332 | * subreg_def for this insn. Save it first. |
| 5333 | */ |
| 5334 | s32 subreg_def = state->regs[dst_regno].subreg_def; |
| 5335 | |
| 5336 | copy_register_state(&state->regs[dst_regno], reg); |
| 5337 | state->regs[dst_regno].subreg_def = subreg_def; |
| 5338 | |
| 5339 | /* Break the relation on a narrowing fill. |
| 5340 | * coerce_reg_to_size will adjust the boundaries. |
| 5341 | */ |
| 5342 | if (get_reg_width(reg) > size * BITS_PER_BYTE) |
| 5343 | state->regs[dst_regno].id = 0; |
| 5344 | } else { |
| 5345 | int spill_cnt = 0, zero_cnt = 0; |
| 5346 | |
| 5347 | for (i = 0; i < size; i++) { |
| 5348 | type = stype[(slot - i) % BPF_REG_SIZE]; |
| 5349 | if (type == STACK_SPILL) { |
| 5350 | spill_cnt++; |
| 5351 | continue; |
| 5352 | } |
| 5353 | if (type == STACK_MISC) |
| 5354 | continue; |
| 5355 | if (type == STACK_ZERO) { |
| 5356 | zero_cnt++; |
| 5357 | continue; |
| 5358 | } |
| 5359 | if (type == STACK_INVALID && env->allow_uninit_stack) |
| 5360 | continue; |
| 5361 | verbose(env, "invalid read from stack off %d+%d size %d\n", |
| 5362 | off, i, size); |
| 5363 | return -EACCES; |
| 5364 | } |
| 5365 | |
| 5366 | if (spill_cnt == size && |
| 5367 | tnum_is_const(reg->var_off) && reg->var_off.value == 0) { |
| 5368 | __mark_reg_const_zero(env, &state->regs[dst_regno]); |
| 5369 | /* this IS register fill, so keep insn_flags */ |
| 5370 | } else if (zero_cnt == size) { |
| 5371 | /* similarly to mark_reg_stack_read(), preserve zeroes */ |
| 5372 | __mark_reg_const_zero(env, &state->regs[dst_regno]); |
| 5373 | insn_flags = 0; /* not restoring original register state */ |
| 5374 | } else { |
| 5375 | mark_reg_unknown(env, state->regs, dst_regno); |
| 5376 | insn_flags = 0; /* not restoring original register state */ |
| 5377 | } |
| 5378 | } |
| 5379 | state->regs[dst_regno].live |= REG_LIVE_WRITTEN; |
| 5380 | } else if (dst_regno >= 0) { |
| 5381 | /* restore register state from stack */ |
| 5382 | copy_register_state(&state->regs[dst_regno], reg); |
| 5383 | /* mark reg as written since spilled pointer state likely |
| 5384 | * has its liveness marks cleared by is_state_visited() |
| 5385 | * which resets stack/reg liveness for state transitions |
| 5386 | */ |
| 5387 | state->regs[dst_regno].live |= REG_LIVE_WRITTEN; |
| 5388 | } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { |
| 5389 | /* If dst_regno==-1, the caller is asking us whether |
| 5390 | * it is acceptable to use this value as a SCALAR_VALUE |
| 5391 | * (e.g. for XADD). |
| 5392 | * We must not allow unprivileged callers to do that |
| 5393 | * with spilled pointers. |
| 5394 | */ |
| 5395 | verbose(env, "leaking pointer from stack off %d\n", |
| 5396 | off); |
| 5397 | return -EACCES; |
| 5398 | } |
| 5399 | mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); |
| 5400 | } else { |
| 5401 | for (i = 0; i < size; i++) { |
| 5402 | type = stype[(slot - i) % BPF_REG_SIZE]; |
| 5403 | if (type == STACK_MISC) |
| 5404 | continue; |
| 5405 | if (type == STACK_ZERO) |
| 5406 | continue; |
| 5407 | if (type == STACK_INVALID && env->allow_uninit_stack) |
| 5408 | continue; |
| 5409 | verbose(env, "invalid read from stack off %d+%d size %d\n", |
| 5410 | off, i, size); |
| 5411 | return -EACCES; |
| 5412 | } |
| 5413 | mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); |
| 5414 | if (dst_regno >= 0) |
| 5415 | mark_reg_stack_read(env, reg_state, off, off + size, dst_regno); |
| 5416 | insn_flags = 0; /* we are not restoring spilled register */ |
| 5417 | } |
| 5418 | if (insn_flags) |
| 5419 | return push_insn_history(env, env->cur_state, insn_flags, 0); |
| 5420 | return 0; |
| 5421 | } |
| 5422 | |
| 5423 | enum bpf_access_src { |
| 5424 | ACCESS_DIRECT = 1, /* the access is performed by an instruction */ |
| 5425 | ACCESS_HELPER = 2, /* the access is performed by a helper */ |
| 5426 | }; |
| 5427 | |
| 5428 | static int check_stack_range_initialized(struct bpf_verifier_env *env, |
| 5429 | int regno, int off, int access_size, |
| 5430 | bool zero_size_allowed, |
| 5431 | enum bpf_access_type type, |
| 5432 | struct bpf_call_arg_meta *meta); |
| 5433 | |
| 5434 | static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) |
| 5435 | { |
| 5436 | return cur_regs(env) + regno; |
| 5437 | } |
| 5438 | |
| 5439 | /* Read the stack at 'ptr_regno + off' and put the result into the register |
| 5440 | * 'dst_regno'. |
| 5441 | * 'off' includes the pointer register's fixed offset (i.e. 'ptr_regno.off'), |
| 5442 | * but not its variable offset. |
| 5443 | * 'size' is assumed to be <= reg size and the access is assumed to be aligned. |
| 5444 | * |
| 5445 | * As opposed to check_stack_read_fixed_off, this function doesn't deal with |
| 5446 | * filling registers (i.e. reads of spilled register cannot be detected when |
| 5447 | * the offset is not fixed). We conservatively mark 'dst_regno' as containing |
| 5448 | * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable |
| 5449 | * offset; for a fixed offset check_stack_read_fixed_off should be used |
| 5450 | * instead. |
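| | * |
| | * A hypothetical example, with r6 known to be in [0, 8]: |
| | * |
| | *   r2 = r10 |
| | *   r2 += -16 |
| | *   r2 += r6 |
| | *   r3 = *(u64 *)(r2 + 0) |
| | * |
| | * After checking that the range that may be read is initialized, r3 becomes |
| | * an unknown SCALAR (or a known zero if every byte that may be read is |
| | * STACK_ZERO), see mark_reg_stack_read(). |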
| 5451 | */ |
| 5452 | static int check_stack_read_var_off(struct bpf_verifier_env *env, |
| 5453 | int ptr_regno, int off, int size, int dst_regno) |
| 5454 | { |
| 5455 | /* The state of the source register. */ |
| 5456 | struct bpf_reg_state *reg = reg_state(env, ptr_regno); |
| 5457 | struct bpf_func_state *ptr_state = func(env, reg); |
| 5458 | int err; |
| 5459 | int min_off, max_off; |
| 5460 | |
| 5461 | /* Note that we pass a NULL meta, so raw access will not be permitted. |
| 5462 | */ |
| 5463 | err = check_stack_range_initialized(env, ptr_regno, off, size, |
| 5464 | false, BPF_READ, NULL); |
| 5465 | if (err) |
| 5466 | return err; |
| 5467 | |
| 5468 | min_off = reg->smin_value + off; |
| 5469 | max_off = reg->smax_value + off; |
| 5470 | mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno); |
| 5471 | check_fastcall_stack_contract(env, ptr_state, env->insn_idx, min_off); |
| 5472 | return 0; |
| 5473 | } |
| 5474 | |
| 5475 | /* check_stack_read dispatches to check_stack_read_fixed_off or |
| 5476 | * check_stack_read_var_off. |
| 5477 | * |
| 5478 | * The caller must ensure that the offset falls within the allocated stack |
| 5479 | * bounds. |
| 5480 | * |
| 5481 | * 'dst_regno' is a register which will receive the value from the stack. It |
| 5482 | * can be -1, meaning that the read value is not going to a register. |
| 5483 | */ |
| 5484 | static int check_stack_read(struct bpf_verifier_env *env, |
| 5485 | int ptr_regno, int off, int size, |
| 5486 | int dst_regno) |
| 5487 | { |
| 5488 | struct bpf_reg_state *reg = reg_state(env, ptr_regno); |
| 5489 | struct bpf_func_state *state = func(env, reg); |
| 5490 | int err; |
| 5491 | /* Some accesses are only permitted with a static offset. */ |
| 5492 | bool var_off = !tnum_is_const(reg->var_off); |
| 5493 | |
| 5494 | /* The offset is required to be static when reads don't go to a |
| 5495 | * register, in order to not leak pointers (see |
| 5496 | * check_stack_read_fixed_off). |
| 5497 | */ |
| 5498 | if (dst_regno < 0 && var_off) { |
| 5499 | char tn_buf[48]; |
| 5500 | |
| 5501 | tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); |
| 5502 | verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n", |
| 5503 | tn_buf, off, size); |
| 5504 | return -EACCES; |
| 5505 | } |
| 5506 | /* Variable offset is prohibited for unprivileged mode for simplicity |
| 5507 | * since it requires corresponding support in Spectre masking for stack |
| 5508 | * ALU. See also retrieve_ptr_limit(). The check in |
| 5509 | * check_stack_access_for_ptr_arithmetic() called by |
| 5510 | * adjust_ptr_min_max_vals() prevents users from creating stack pointers |
| 5511 | * with variable offsets, therefore no check is required here. Further, |
| 5512 | * just checking it here would be insufficient as speculative stack |
| 5513 | * writes could still lead to unsafe speculative behaviour. |
| 5514 | */ |
| 5515 | if (!var_off) { |
| 5516 | off += reg->var_off.value; |
| 5517 | err = check_stack_read_fixed_off(env, state, off, size, |
| 5518 | dst_regno); |
| 5519 | } else { |
| 5520 | /* Variable offset stack reads need more conservative handling |
| 5521 | * than fixed offset ones. Note that dst_regno >= 0 on this |
| 5522 | * branch. |
| 5523 | */ |
| 5524 | err = check_stack_read_var_off(env, ptr_regno, off, size, |
| 5525 | dst_regno); |
| 5526 | } |
| 5527 | return err; |
| 5528 | } |
| 5529 | |
| 5530 | |
| 5531 | /* check_stack_write dispatches to check_stack_write_fixed_off or |
| 5532 | * check_stack_write_var_off. |
| 5533 | * |
| 5534 | * 'ptr_regno' is the register used as a pointer into the stack. |
| 5535 | * 'off' includes 'ptr_regno->off', but not its variable offset (if any). |
| 5536 | * 'value_regno' is the register whose value we're writing to the stack. It can |
| 5537 | * be -1, meaning that we're not writing from a register. |
| 5538 | * |
| 5539 | * The caller must ensure that the offset falls within the maximum stack size. |
| 5540 | */ |
| 5541 | static int check_stack_write(struct bpf_verifier_env *env, |
| 5542 | int ptr_regno, int off, int size, |
| 5543 | int value_regno, int insn_idx) |
| 5544 | { |
| 5545 | struct bpf_reg_state *reg = reg_state(env, ptr_regno); |
| 5546 | struct bpf_func_state *state = func(env, reg); |
| 5547 | int err; |
| 5548 | |
| 5549 | if (tnum_is_const(reg->var_off)) { |
| 5550 | off += reg->var_off.value; |
| 5551 | err = check_stack_write_fixed_off(env, state, off, size, |
| 5552 | value_regno, insn_idx); |
| 5553 | } else { |
| 5554 | /* Variable offset stack writes need more conservative handling |
| 5555 | * than fixed offset ones. |
| 5556 | */ |
| 5557 | err = check_stack_write_var_off(env, state, |
| 5558 | ptr_regno, off, size, |
| 5559 | value_regno, insn_idx); |
| 5560 | } |
| 5561 | return err; |
| 5562 | } |
| 5563 | |
| 5564 | static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, |
| 5565 | int off, int size, enum bpf_access_type type) |
| 5566 | { |
| 5567 | struct bpf_reg_state *regs = cur_regs(env); |
| 5568 | struct bpf_map *map = regs[regno].map_ptr; |
| 5569 | u32 cap = bpf_map_flags_to_cap(map); |
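| | /* e.g. a map created with BPF_F_RDONLY_PROG lacks BPF_MAP_CAN_WRITE, |
| | * so any program store into its values is rejected below |
| | */ |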
| 5570 | |
| 5571 | if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { |
| 5572 | verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", |
| 5573 | map->value_size, off, size); |
| 5574 | return -EACCES; |
| 5575 | } |
| 5576 | |
| 5577 | if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { |
| 5578 | verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", |
| 5579 | map->value_size, off, size); |
| 5580 | return -EACCES; |
| 5581 | } |
| 5582 | |
| 5583 | return 0; |
| 5584 | } |
| 5585 | |
| 5586 | /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */ |
| 5587 | static int __check_mem_access(struct bpf_verifier_env *env, int regno, |
| 5588 | int off, int size, u32 mem_size, |
| 5589 | bool zero_size_allowed) |
| 5590 | { |
| 5591 | bool size_ok = size > 0 || (size == 0 && zero_size_allowed); |
| 5592 | struct bpf_reg_state *reg; |
| 5593 | |
| 5594 | if (off >= 0 && size_ok && (u64)off + size <= mem_size) |
| 5595 | return 0; |
| 5596 | |
| 5597 | reg = &cur_regs(env)[regno]; |
| 5598 | switch (reg->type) { |
| 5599 | case PTR_TO_MAP_KEY: |
| 5600 | verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n", |
| 5601 | mem_size, off, size); |
| 5602 | break; |
| 5603 | case PTR_TO_MAP_VALUE: |
| 5604 | verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", |
| 5605 | mem_size, off, size); |
| 5606 | break; |
| 5607 | case PTR_TO_PACKET: |
| 5608 | case PTR_TO_PACKET_META: |
| 5609 | case PTR_TO_PACKET_END: |
| 5610 | verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", |
| 5611 | off, size, regno, reg->id, off, mem_size); |
| 5612 | break; |
| 5613 | case PTR_TO_MEM: |
| 5614 | default: |
| 5615 | verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n", |
| 5616 | mem_size, off, size); |
| 5617 | } |
| 5618 | |
| 5619 | return -EACCES; |
| 5620 | } |
| 5621 | |
| 5622 | /* check read/write into a memory region with possible variable offset */ |
| 5623 | static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, |
| 5624 | int off, int size, u32 mem_size, |
| 5625 | bool zero_size_allowed) |
| 5626 | { |
| 5627 | struct bpf_verifier_state *vstate = env->cur_state; |
| 5628 | struct bpf_func_state *state = vstate->frame[vstate->curframe]; |
| 5629 | struct bpf_reg_state *reg = &state->regs[regno]; |
| 5630 | int err; |
| 5631 | |
| 5632 | /* We may have adjusted the register pointing to memory region, so we |
| 5633 | * need to try adding each of min_value and max_value to off |
| 5634 | * to make sure our theoretical access will be safe. |
| 5635 | * |
| 5636 | * The minimum value is only important with signed |
| 5637 | * comparisons where we can't assume the floor of a |
| 5638 | * value is 0. If we are using signed variables for our |
| 5639 | * indexes, we need to make sure that whatever we use |
| 5640 | * will have a set floor within our range. |
| 5641 | */ |
| 5642 | if (reg->smin_value < 0 && |
| 5643 | (reg->smin_value == S64_MIN || |
| 5644 | (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || |
| 5645 | reg->smin_value + off < 0)) { |
| 5646 | verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", |
| 5647 | regno); |
| 5648 | return -EACCES; |
| 5649 | } |
| 5650 | err = __check_mem_access(env, regno, reg->smin_value + off, size, |
| 5651 | mem_size, zero_size_allowed); |
| 5652 | if (err) { |
| 5653 | verbose(env, "R%d min value is outside of the allowed memory range\n", |
| 5654 | regno); |
| 5655 | return err; |
| 5656 | } |
| 5657 | |
| 5658 | /* If we haven't set a max value then we need to bail since we can't be |
| 5659 | * sure we won't do bad things. |
| 5660 | * If reg->umax_value + off could overflow, treat that as unbounded too. |
| 5661 | */ |
| 5662 | if (reg->umax_value >= BPF_MAX_VAR_OFF) { |
| 5663 | verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n", |
| 5664 | regno); |
| 5665 | return -EACCES; |
| 5666 | } |
| 5667 | err = __check_mem_access(env, regno, reg->umax_value + off, size, |
| 5668 | mem_size, zero_size_allowed); |
| 5669 | if (err) { |
| 5670 | verbose(env, "R%d max value is outside of the allowed memory range\n", |
| 5671 | regno); |
| 5672 | return err; |
| 5673 | } |
| 5674 | |
| 5675 | return 0; |
| 5676 | } |
| 5677 | |
| 5678 | static int __check_ptr_off_reg(struct bpf_verifier_env *env, |
| 5679 | const struct bpf_reg_state *reg, int regno, |
| 5680 | bool fixed_off_ok) |
| 5681 | { |
| 5682 | /* Access to this pointer-typed register or passing it to a helper |
| 5683 | * is only allowed in its original, unmodified form. |
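| | * |
| | * E.g. a PTR_TO_CTX that was advanced with 'r1 += 8' is rejected here |
| | * ("dereference of modified ctx ptr ... disallowed") unless the caller |
| | * passes fixed_off_ok. |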
| 5684 | */ |
| 5685 | |
| 5686 | if (reg->off < 0) { |
| 5687 | verbose(env, "negative offset %s ptr R%d off=%d disallowed\n", |
| 5688 | reg_type_str(env, reg->type), regno, reg->off); |
| 5689 | return -EACCES; |
| 5690 | } |
| 5691 | |
| 5692 | if (!fixed_off_ok && reg->off) { |
| 5693 | verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n", |
| 5694 | reg_type_str(env, reg->type), regno, reg->off); |
| 5695 | return -EACCES; |
| 5696 | } |
| 5697 | |
| 5698 | if (!tnum_is_const(reg->var_off) || reg->var_off.value) { |
| 5699 | char tn_buf[48]; |
| 5700 | |
| 5701 | tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); |
| 5702 | verbose(env, "variable %s access var_off=%s disallowed\n", |
| 5703 | reg_type_str(env, reg->type), tn_buf); |
| 5704 | return -EACCES; |
| 5705 | } |
| 5706 | |
| 5707 | return 0; |
| 5708 | } |
| 5709 | |
| 5710 | static int check_ptr_off_reg(struct bpf_verifier_env *env, |
| 5711 | const struct bpf_reg_state *reg, int regno) |
| 5712 | { |
| 5713 | return __check_ptr_off_reg(env, reg, regno, false); |
| 5714 | } |
| 5715 | |
| 5716 | static int map_kptr_match_type(struct bpf_verifier_env *env, |
| 5717 | struct btf_field *kptr_field, |
| 5718 | struct bpf_reg_state *reg, u32 regno) |
| 5719 | { |
| 5720 | const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id); |
| 5721 | int perm_flags; |
| 5722 | const char *reg_name = ""; |
| 5723 | |
| 5724 | if (btf_is_kernel(reg->btf)) { |
| 5725 | perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU; |
| 5726 | |
| 5727 | /* Only unreferenced case accepts untrusted pointers */ |
| 5728 | if (kptr_field->type == BPF_KPTR_UNREF) |
| 5729 | perm_flags |= PTR_UNTRUSTED; |
| 5730 | } else { |
| 5731 | perm_flags = PTR_MAYBE_NULL | MEM_ALLOC; |
| 5732 | if (kptr_field->type == BPF_KPTR_PERCPU) |
| 5733 | perm_flags |= MEM_PERCPU; |
| 5734 | } |
| 5735 | |
| 5736 | if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags)) |
| 5737 | goto bad_type; |
| 5738 | |
| 5739 | /* We need to verify reg->type and reg->btf, before accessing reg->btf */ |
| 5740 | reg_name = btf_type_name(reg->btf, reg->btf_id); |
| 5741 | |
| 5742 | /* For ref_ptr case, release function check should ensure we get one |
| 5743 | * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the |
| 5744 | * normal store of unreferenced kptr, we must ensure var_off is zero. |
| 5745 | * Since ref_ptr cannot be accessed directly by BPF insns, checks for |
| 5746 | * reg->off and reg->ref_obj_id are not needed here. |
| 5747 | */ |
| 5748 | if (__check_ptr_off_reg(env, reg, regno, true)) |
| 5749 | return -EACCES; |
| 5750 | |
| 5751 | /* A full type match is needed, as BTF can be vmlinux, module or prog BTF, and |
| 5752 | * we also need to take into account the reg->off. |
| 5753 | * |
| 5754 | * We want to support cases like: |
| 5755 | * |
| 5756 | * struct foo { |
| 5757 | * struct bar br; |
| 5758 | * struct baz bz; |
| 5759 | * }; |
| 5760 | * |
| 5761 | * struct foo *v; |
| 5762 | * v = func(); // PTR_TO_BTF_ID |
| 5763 | * val->foo = v; // reg->off is zero, btf and btf_id match type |
| 5764 | * val->bar = &v->br; // reg->off is still zero, but we need to retry with |
| 5765 | * // first member type of struct after comparison fails |
| 5766 | * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked |
| 5767 | * // to match type |
| 5768 | * |
| 5769 | * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off |
| 5770 | * is zero. We must also ensure that btf_struct_ids_match does not walk |
| 5771 | * the struct to match type against first member of struct, i.e. reject |
| 5772 | * second case from above. Hence, when type is BPF_KPTR_REF, we set |
| 5773 | * strict mode to true for type match. |
| 5774 | */ |
| 5775 | if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, |
| 5776 | kptr_field->kptr.btf, kptr_field->kptr.btf_id, |
| 5777 | kptr_field->type != BPF_KPTR_UNREF)) |
| 5778 | goto bad_type; |
| 5779 | return 0; |
| 5780 | bad_type: |
| 5781 | verbose(env, "invalid kptr access, R%d type=%s%s ", regno, |
| 5782 | reg_type_str(env, reg->type), reg_name); |
| 5783 | verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name); |
| 5784 | if (kptr_field->type == BPF_KPTR_UNREF) |
| 5785 | verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED), |
| 5786 | targ_name); |
| 5787 | else |
| 5788 | verbose(env, "\n"); |
| 5789 | return -EINVAL; |
| 5790 | } |
| 5791 | |
| 5792 | static bool in_sleepable(struct bpf_verifier_env *env) |
| 5793 | { |
| 5794 | return env->prog->sleepable || |
| 5795 | (env->cur_state && env->cur_state->in_sleepable); |
| 5796 | } |
| 5797 | |
/* Non-sleepable programs, and sleepable programs within an explicit
 * bpf_rcu_read_lock() section, can dereference RCU-protected pointers;
 * the result is PTR_TRUSTED.
 */
| 5801 | static bool in_rcu_cs(struct bpf_verifier_env *env) |
| 5802 | { |
| 5803 | return env->cur_state->active_rcu_lock || |
| 5804 | env->cur_state->active_locks || |
| 5805 | !in_sleepable(env); |
| 5806 | } |
| 5807 | |
| 5808 | /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */ |
| 5809 | BTF_SET_START(rcu_protected_types) |
| 5810 | #ifdef CONFIG_NET |
| 5811 | BTF_ID(struct, prog_test_ref_kfunc) |
| 5812 | #endif |
| 5813 | #ifdef CONFIG_CGROUPS |
| 5814 | BTF_ID(struct, cgroup) |
| 5815 | #endif |
| 5816 | #ifdef CONFIG_BPF_JIT |
| 5817 | BTF_ID(struct, bpf_cpumask) |
| 5818 | #endif |
| 5819 | BTF_ID(struct, task_struct) |
| 5820 | #ifdef CONFIG_CRYPTO |
| 5821 | BTF_ID(struct, bpf_crypto_ctx) |
| 5822 | #endif |
| 5823 | BTF_SET_END(rcu_protected_types) |
| 5824 | |
| 5825 | static bool rcu_protected_object(const struct btf *btf, u32 btf_id) |
| 5826 | { |
| 5827 | if (!btf_is_kernel(btf)) |
| 5828 | return true; |
| 5829 | return btf_id_set_contains(&rcu_protected_types, btf_id); |
| 5830 | } |
| 5831 | |
| 5832 | static struct btf_record *kptr_pointee_btf_record(struct btf_field *kptr_field) |
| 5833 | { |
| 5834 | struct btf_struct_meta *meta; |
| 5835 | |
| 5836 | if (btf_is_kernel(kptr_field->kptr.btf)) |
| 5837 | return NULL; |
| 5838 | |
| 5839 | meta = btf_find_struct_meta(kptr_field->kptr.btf, |
| 5840 | kptr_field->kptr.btf_id); |
| 5841 | |
| 5842 | return meta ? meta->record : NULL; |
| 5843 | } |
| 5844 | |
| 5845 | static bool rcu_safe_kptr(const struct btf_field *field) |
| 5846 | { |
| 5847 | const struct btf_field_kptr *kptr = &field->kptr; |
| 5848 | |
| 5849 | return field->type == BPF_KPTR_PERCPU || |
| 5850 | (field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id)); |
| 5851 | } |
| 5852 | |
| 5853 | static u32 btf_ld_kptr_type(struct bpf_verifier_env *env, struct btf_field *kptr_field) |
| 5854 | { |
| 5855 | struct btf_record *rec; |
| 5856 | u32 ret; |
| 5857 | |
| 5858 | ret = PTR_MAYBE_NULL; |
| 5859 | if (rcu_safe_kptr(kptr_field) && in_rcu_cs(env)) { |
| 5860 | ret |= MEM_RCU; |
| 5861 | if (kptr_field->type == BPF_KPTR_PERCPU) |
| 5862 | ret |= MEM_PERCPU; |
| 5863 | else if (!btf_is_kernel(kptr_field->kptr.btf)) |
| 5864 | ret |= MEM_ALLOC; |
| 5865 | |
| 5866 | rec = kptr_pointee_btf_record(kptr_field); |
| 5867 | if (rec && btf_record_has_field(rec, BPF_GRAPH_NODE)) |
| 5868 | ret |= NON_OWN_REF; |
| 5869 | } else { |
| 5870 | ret |= PTR_UNTRUSTED; |
| 5871 | } |
| 5872 | |
| 5873 | return ret; |
| 5874 | } |
| 5875 | |
| 5876 | static int mark_uptr_ld_reg(struct bpf_verifier_env *env, u32 regno, |
| 5877 | struct btf_field *field) |
| 5878 | { |
| 5879 | struct bpf_reg_state *reg; |
| 5880 | const struct btf_type *t; |
| 5881 | |
| 5882 | t = btf_type_by_id(field->kptr.btf, field->kptr.btf_id); |
| 5883 | mark_reg_known_zero(env, cur_regs(env), regno); |
| 5884 | reg = reg_state(env, regno); |
| 5885 | reg->type = PTR_TO_MEM | PTR_MAYBE_NULL; |
| 5886 | reg->mem_size = t->size; |
| 5887 | reg->id = ++env->id_gen; |
| 5888 | |
| 5889 | return 0; |
| 5890 | } |
| 5891 | |
| 5892 | static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno, |
| 5893 | int value_regno, int insn_idx, |
| 5894 | struct btf_field *kptr_field) |
| 5895 | { |
| 5896 | struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; |
| 5897 | int class = BPF_CLASS(insn->code); |
| 5898 | struct bpf_reg_state *val_reg; |
| 5899 | |
| 5900 | /* Things we already checked for in check_map_access and caller: |
| 5901 | * - Reject cases where variable offset may touch kptr |
| 5902 | * - size of access (must be BPF_DW) |
| 5903 | * - tnum_is_const(reg->var_off) |
| 5904 | * - kptr_field->offset == off + reg->var_off.value |
| 5905 | */ |
| 5906 | /* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */ |
| 5907 | if (BPF_MODE(insn->code) != BPF_MEM) { |
| 5908 | verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n"); |
| 5909 | return -EACCES; |
| 5910 | } |
| 5911 | |
| 5912 | /* We only allow loading referenced kptr, since it will be marked as |
| 5913 | * untrusted, similar to unreferenced kptr. |
| 5914 | */ |
| 5915 | if (class != BPF_LDX && |
| 5916 | (kptr_field->type == BPF_KPTR_REF || kptr_field->type == BPF_KPTR_PERCPU)) { |
| 5917 | verbose(env, "store to referenced kptr disallowed\n"); |
| 5918 | return -EACCES; |
| 5919 | } |
| 5920 | if (class != BPF_LDX && kptr_field->type == BPF_UPTR) { |
| 5921 | verbose(env, "store to uptr disallowed\n"); |
| 5922 | return -EACCES; |
| 5923 | } |
| 5924 | |
| 5925 | if (class == BPF_LDX) { |
| 5926 | if (kptr_field->type == BPF_UPTR) |
| 5927 | return mark_uptr_ld_reg(env, value_regno, kptr_field); |
| 5928 | |
| 5929 | /* We can simply mark the value_regno receiving the pointer |
| 5930 | * value from map as PTR_TO_BTF_ID, with the correct type. |
| 5931 | */ |
| 5932 | mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf, |
| 5933 | kptr_field->kptr.btf_id, btf_ld_kptr_type(env, kptr_field)); |
| 5934 | } else if (class == BPF_STX) { |
| 5935 | val_reg = reg_state(env, value_regno); |
| 5936 | if (!register_is_null(val_reg) && |
| 5937 | map_kptr_match_type(env, kptr_field, val_reg, value_regno)) |
| 5938 | return -EACCES; |
| 5939 | } else if (class == BPF_ST) { |
| 5940 | if (insn->imm) { |
| 5941 | verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n", |
| 5942 | kptr_field->offset); |
| 5943 | return -EACCES; |
| 5944 | } |
| 5945 | } else { |
| 5946 | verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n"); |
| 5947 | return -EACCES; |
| 5948 | } |
| 5949 | return 0; |
| 5950 | } |
| 5951 | |
| 5952 | /* check read/write into a map element with possible variable offset */ |
| 5953 | static int check_map_access(struct bpf_verifier_env *env, u32 regno, |
| 5954 | int off, int size, bool zero_size_allowed, |
| 5955 | enum bpf_access_src src) |
| 5956 | { |
| 5957 | struct bpf_verifier_state *vstate = env->cur_state; |
| 5958 | struct bpf_func_state *state = vstate->frame[vstate->curframe]; |
| 5959 | struct bpf_reg_state *reg = &state->regs[regno]; |
| 5960 | struct bpf_map *map = reg->map_ptr; |
| 5961 | struct btf_record *rec; |
| 5962 | int err, i; |
| 5963 | |
| 5964 | err = check_mem_region_access(env, regno, off, size, map->value_size, |
| 5965 | zero_size_allowed); |
| 5966 | if (err) |
| 5967 | return err; |
| 5968 | |
| 5969 | if (IS_ERR_OR_NULL(map->record)) |
| 5970 | return 0; |
| 5971 | rec = map->record; |
| 5972 | for (i = 0; i < rec->cnt; i++) { |
| 5973 | struct btf_field *field = &rec->fields[i]; |
| 5974 | u32 p = field->offset; |
| 5975 | |
| 5976 | /* If any part of a field can be touched by load/store, reject |
| 5977 | * this program. To check that [x1, x2) overlaps with [y1, y2), |
| 5978 | * it is sufficient to check x1 < y2 && y1 < x2. |
| 5979 | */ |
| 5980 | if (reg->smin_value + off < p + field->size && |
| 5981 | p < reg->umax_value + off + size) { |
| 5982 | switch (field->type) { |
| 5983 | case BPF_KPTR_UNREF: |
| 5984 | case BPF_KPTR_REF: |
| 5985 | case BPF_KPTR_PERCPU: |
| 5986 | case BPF_UPTR: |
| 5987 | if (src != ACCESS_DIRECT) { |
| 5988 | verbose(env, "%s cannot be accessed indirectly by helper\n", |
| 5989 | btf_field_type_name(field->type)); |
| 5990 | return -EACCES; |
| 5991 | } |
| 5992 | if (!tnum_is_const(reg->var_off)) { |
| 5993 | verbose(env, "%s access cannot have variable offset\n", |
| 5994 | btf_field_type_name(field->type)); |
| 5995 | return -EACCES; |
| 5996 | } |
| 5997 | if (p != off + reg->var_off.value) { |
| 5998 | verbose(env, "%s access misaligned expected=%u off=%llu\n", |
| 5999 | btf_field_type_name(field->type), |
| 6000 | p, off + reg->var_off.value); |
| 6001 | return -EACCES; |
| 6002 | } |
| 6003 | if (size != bpf_size_to_bytes(BPF_DW)) { |
| 6004 | verbose(env, "%s access size must be BPF_DW\n", |
| 6005 | btf_field_type_name(field->type)); |
| 6006 | return -EACCES; |
| 6007 | } |
| 6008 | break; |
| 6009 | default: |
| 6010 | verbose(env, "%s cannot be accessed directly by load/store\n", |
| 6011 | btf_field_type_name(field->type)); |
| 6012 | return -EACCES; |
| 6013 | } |
| 6014 | } |
| 6015 | } |
| 6016 | return 0; |
| 6017 | } |
| 6018 | |
| 6019 | #define MAX_PACKET_OFF 0xffff |
| 6020 | |
| 6021 | static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, |
| 6022 | const struct bpf_call_arg_meta *meta, |
| 6023 | enum bpf_access_type t) |
| 6024 | { |
| 6025 | enum bpf_prog_type prog_type = resolve_prog_type(env->prog); |
| 6026 | |
| 6027 | switch (prog_type) { |
	/* Program types with direct read access only go here! */
| 6029 | case BPF_PROG_TYPE_LWT_IN: |
| 6030 | case BPF_PROG_TYPE_LWT_OUT: |
| 6031 | case BPF_PROG_TYPE_LWT_SEG6LOCAL: |
| 6032 | case BPF_PROG_TYPE_SK_REUSEPORT: |
| 6033 | case BPF_PROG_TYPE_FLOW_DISSECTOR: |
| 6034 | case BPF_PROG_TYPE_CGROUP_SKB: |
| 6035 | if (t == BPF_WRITE) |
| 6036 | return false; |
| 6037 | fallthrough; |
| 6038 | |
| 6039 | /* Program types with direct read + write access go here! */ |
| 6040 | case BPF_PROG_TYPE_SCHED_CLS: |
| 6041 | case BPF_PROG_TYPE_SCHED_ACT: |
| 6042 | case BPF_PROG_TYPE_XDP: |
| 6043 | case BPF_PROG_TYPE_LWT_XMIT: |
| 6044 | case BPF_PROG_TYPE_SK_SKB: |
| 6045 | case BPF_PROG_TYPE_SK_MSG: |
| 6046 | if (meta) |
| 6047 | return meta->pkt_access; |
| 6048 | |
| 6049 | env->seen_direct_write = true; |
| 6050 | return true; |
| 6051 | |
| 6052 | case BPF_PROG_TYPE_CGROUP_SOCKOPT: |
| 6053 | if (t == BPF_WRITE) |
| 6054 | env->seen_direct_write = true; |
| 6055 | |
| 6056 | return true; |
| 6057 | |
| 6058 | default: |
| 6059 | return false; |
| 6060 | } |
| 6061 | } |
| 6062 | |
| 6063 | static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, |
| 6064 | int size, bool zero_size_allowed) |
| 6065 | { |
| 6066 | struct bpf_reg_state *regs = cur_regs(env); |
| 6067 | struct bpf_reg_state *reg = ®s[regno]; |
| 6068 | int err; |
| 6069 | |
| 6070 | /* We may have added a variable offset to the packet pointer; but any |
| 6071 | * reg->range we have comes after that. We are only checking the fixed |
| 6072 | * offset. |
| 6073 | */ |
| 6074 | |
| 6075 | /* We don't allow negative numbers, because we aren't tracking enough |
| 6076 | * detail to prove they're safe. |
| 6077 | */ |
| 6078 | if (reg->smin_value < 0) { |
| 6079 | verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", |
| 6080 | regno); |
| 6081 | return -EACCES; |
| 6082 | } |
| 6083 | |
| 6084 | err = reg->range < 0 ? -EINVAL : |
| 6085 | __check_mem_access(env, regno, off, size, reg->range, |
| 6086 | zero_size_allowed); |
| 6087 | if (err) { |
| 6088 | verbose(env, "R%d offset is outside of the packet\n", regno); |
| 6089 | return err; |
| 6090 | } |
| 6091 | |
| 6092 | /* __check_mem_access has made sure "off + size - 1" is within u16. |
 * reg->umax_value can't be bigger than MAX_PACKET_OFF, which is 0xffff,
 * otherwise find_good_pkt_pointers would have refused to set the range
 * info and __check_mem_access would have rejected this pkt access.
| 6096 | * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. |
| 6097 | */ |
| 6098 | env->prog->aux->max_pkt_offset = |
| 6099 | max_t(u32, env->prog->aux->max_pkt_offset, |
| 6100 | off + reg->umax_value + size - 1); |
| 6101 | |
| 6102 | return err; |
| 6103 | } |
| 6104 | |
| 6105 | /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ |
| 6106 | static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, |
| 6107 | enum bpf_access_type t, struct bpf_insn_access_aux *info) |
| 6108 | { |
| 6109 | if (env->ops->is_valid_access && |
| 6110 | env->ops->is_valid_access(off, size, t, env->prog, info)) { |
		/* A non-zero info.ctx_field_size indicates that this field is a
| 6112 | * candidate for later verifier transformation to load the whole |
| 6113 | * field and then apply a mask when accessed with a narrower |
| 6114 | * access than actual ctx access size. A zero info.ctx_field_size |
| 6115 | * will only allow for whole field access and rejects any other |
| 6116 | * type of narrower access. |
| 6117 | */ |
| 6118 | if (base_type(info->reg_type) == PTR_TO_BTF_ID) { |
| 6119 | if (info->ref_obj_id && |
| 6120 | !find_reference_state(env->cur_state, info->ref_obj_id)) { |
| 6121 | verbose(env, "invalid bpf_context access off=%d. Reference may already be released\n", |
| 6122 | off); |
| 6123 | return -EACCES; |
| 6124 | } |
| 6125 | } else { |
| 6126 | env->insn_aux_data[insn_idx].ctx_field_size = info->ctx_field_size; |
| 6127 | } |
| 6128 | /* remember the offset of last byte accessed in ctx */ |
| 6129 | if (env->prog->aux->max_ctx_offset < off + size) |
| 6130 | env->prog->aux->max_ctx_offset = off + size; |
| 6131 | return 0; |
| 6132 | } |
| 6133 | |
| 6134 | verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); |
| 6135 | return -EACCES; |
| 6136 | } |
| 6137 | |
| 6138 | static int check_flow_keys_access(struct bpf_verifier_env *env, int off, |
| 6139 | int size) |
| 6140 | { |
| 6141 | if (size < 0 || off < 0 || |
| 6142 | (u64)off + size > sizeof(struct bpf_flow_keys)) { |
| 6143 | verbose(env, "invalid access to flow keys off=%d size=%d\n", |
| 6144 | off, size); |
| 6145 | return -EACCES; |
| 6146 | } |
| 6147 | return 0; |
| 6148 | } |
| 6149 | |
| 6150 | static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, |
| 6151 | u32 regno, int off, int size, |
| 6152 | enum bpf_access_type t) |
| 6153 | { |
| 6154 | struct bpf_reg_state *regs = cur_regs(env); |
| 6155 | struct bpf_reg_state *reg = ®s[regno]; |
| 6156 | struct bpf_insn_access_aux info = {}; |
| 6157 | bool valid; |
| 6158 | |
| 6159 | if (reg->smin_value < 0) { |
| 6160 | verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", |
| 6161 | regno); |
| 6162 | return -EACCES; |
| 6163 | } |
| 6164 | |
| 6165 | switch (reg->type) { |
| 6166 | case PTR_TO_SOCK_COMMON: |
| 6167 | valid = bpf_sock_common_is_valid_access(off, size, t, &info); |
| 6168 | break; |
| 6169 | case PTR_TO_SOCKET: |
| 6170 | valid = bpf_sock_is_valid_access(off, size, t, &info); |
| 6171 | break; |
| 6172 | case PTR_TO_TCP_SOCK: |
| 6173 | valid = bpf_tcp_sock_is_valid_access(off, size, t, &info); |
| 6174 | break; |
| 6175 | case PTR_TO_XDP_SOCK: |
| 6176 | valid = bpf_xdp_sock_is_valid_access(off, size, t, &info); |
| 6177 | break; |
| 6178 | default: |
| 6179 | valid = false; |
| 6180 | } |
| 6181 | |
| 6182 | |
| 6183 | if (valid) { |
| 6184 | env->insn_aux_data[insn_idx].ctx_field_size = |
| 6185 | info.ctx_field_size; |
| 6186 | return 0; |
| 6187 | } |
| 6188 | |
| 6189 | verbose(env, "R%d invalid %s access off=%d size=%d\n", |
| 6190 | regno, reg_type_str(env, reg->type), off, size); |
| 6191 | |
| 6192 | return -EACCES; |
| 6193 | } |
| 6194 | |
| 6195 | static bool is_pointer_value(struct bpf_verifier_env *env, int regno) |
| 6196 | { |
| 6197 | return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); |
| 6198 | } |
| 6199 | |
| 6200 | static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) |
| 6201 | { |
| 6202 | const struct bpf_reg_state *reg = reg_state(env, regno); |
| 6203 | |
| 6204 | return reg->type == PTR_TO_CTX; |
| 6205 | } |
| 6206 | |
| 6207 | static bool is_sk_reg(struct bpf_verifier_env *env, int regno) |
| 6208 | { |
| 6209 | const struct bpf_reg_state *reg = reg_state(env, regno); |
| 6210 | |
| 6211 | return type_is_sk_pointer(reg->type); |
| 6212 | } |
| 6213 | |
| 6214 | static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) |
| 6215 | { |
| 6216 | const struct bpf_reg_state *reg = reg_state(env, regno); |
| 6217 | |
| 6218 | return type_is_pkt_pointer(reg->type); |
| 6219 | } |
| 6220 | |
| 6221 | static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) |
| 6222 | { |
| 6223 | const struct bpf_reg_state *reg = reg_state(env, regno); |
| 6224 | |
| 6225 | /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */ |
| 6226 | return reg->type == PTR_TO_FLOW_KEYS; |
| 6227 | } |
| 6228 | |
| 6229 | static bool is_arena_reg(struct bpf_verifier_env *env, int regno) |
| 6230 | { |
| 6231 | const struct bpf_reg_state *reg = reg_state(env, regno); |
| 6232 | |
| 6233 | return reg->type == PTR_TO_ARENA; |
| 6234 | } |
| 6235 | |
| 6236 | /* Return false if @regno contains a pointer whose type isn't supported for |
| 6237 | * atomic instruction @insn. |
| 6238 | */ |
| 6239 | static bool atomic_ptr_type_ok(struct bpf_verifier_env *env, int regno, |
| 6240 | struct bpf_insn *insn) |
| 6241 | { |
| 6242 | if (is_ctx_reg(env, regno)) |
| 6243 | return false; |
| 6244 | if (is_pkt_reg(env, regno)) |
| 6245 | return false; |
| 6246 | if (is_flow_key_reg(env, regno)) |
| 6247 | return false; |
| 6248 | if (is_sk_reg(env, regno)) |
| 6249 | return false; |
| 6250 | if (is_arena_reg(env, regno)) |
| 6251 | return bpf_jit_supports_insn(insn, true); |
| 6252 | |
| 6253 | return true; |
| 6254 | } |
| 6255 | |
| 6256 | static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = { |
| 6257 | #ifdef CONFIG_NET |
| 6258 | [PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK], |
| 6259 | [PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], |
| 6260 | [PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP], |
| 6261 | #endif |
| 6262 | [CONST_PTR_TO_MAP] = btf_bpf_map_id, |
| 6263 | }; |
| 6264 | |
| 6265 | static bool is_trusted_reg(const struct bpf_reg_state *reg) |
| 6266 | { |
| 6267 | /* A referenced register is always trusted. */ |
| 6268 | if (reg->ref_obj_id) |
| 6269 | return true; |
| 6270 | |
| 6271 | /* Types listed in the reg2btf_ids are always trusted */ |
| 6272 | if (reg2btf_ids[base_type(reg->type)] && |
| 6273 | !bpf_type_has_unsafe_modifiers(reg->type)) |
| 6274 | return true; |
| 6275 | |
| 6276 | /* If a register is not referenced, it is trusted if it has the |
| 6277 | * MEM_ALLOC or PTR_TRUSTED type modifiers, and no others. Some of the |
| 6278 | * other type modifiers may be safe, but we elect to take an opt-in |
| 6279 | * approach here as some (e.g. PTR_UNTRUSTED and PTR_MAYBE_NULL) are |
| 6280 | * not. |
| 6281 | * |
| 6282 | * Eventually, we should make PTR_TRUSTED the single source of truth |
| 6283 | * for whether a register is trusted. |
| 6284 | */ |
| 6285 | return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS && |
| 6286 | !bpf_type_has_unsafe_modifiers(reg->type); |
| 6287 | } |
| 6288 | |
| 6289 | static bool is_rcu_reg(const struct bpf_reg_state *reg) |
| 6290 | { |
| 6291 | return reg->type & MEM_RCU; |
| 6292 | } |
| 6293 | |
| 6294 | static void clear_trusted_flags(enum bpf_type_flag *flag) |
| 6295 | { |
| 6296 | *flag &= ~(BPF_REG_TRUSTED_MODIFIERS | MEM_RCU); |
| 6297 | } |
| 6298 | |
| 6299 | static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, |
| 6300 | const struct bpf_reg_state *reg, |
| 6301 | int off, int size, bool strict) |
| 6302 | { |
| 6303 | struct tnum reg_off; |
| 6304 | int ip_align; |
| 6305 | |
| 6306 | /* Byte size accesses are always allowed. */ |
| 6307 | if (!strict || size == 1) |
| 6308 | return 0; |
| 6309 | |
| 6310 | /* For platforms that do not have a Kconfig enabling |
| 6311 | * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of |
| 6312 | * NET_IP_ALIGN is universally set to '2'. And on platforms |
| 6313 | * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get |
| 6314 | * to this code only in strict mode where we want to emulate |
| 6315 | * the NET_IP_ALIGN==2 checking. Therefore use an |
| 6316 | * unconditional IP align value of '2'. |
| 6317 | */ |
| 6318 | ip_align = 2; |
| 6319 | |
| 6320 | reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); |
| 6321 | if (!tnum_is_aligned(reg_off, size)) { |
| 6322 | char tn_buf[48]; |
| 6323 | |
| 6324 | tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); |
| 6325 | verbose(env, |
| 6326 | "misaligned packet access off %d+%s+%d+%d size %d\n", |
| 6327 | ip_align, tn_buf, reg->off, off, size); |
| 6328 | return -EACCES; |
| 6329 | } |
| 6330 | |
| 6331 | return 0; |
| 6332 | } |
| 6333 | |
| 6334 | static int check_generic_ptr_alignment(struct bpf_verifier_env *env, |
| 6335 | const struct bpf_reg_state *reg, |
| 6336 | const char *pointer_desc, |
| 6337 | int off, int size, bool strict) |
| 6338 | { |
| 6339 | struct tnum reg_off; |
| 6340 | |
| 6341 | /* Byte size accesses are always allowed. */ |
| 6342 | if (!strict || size == 1) |
| 6343 | return 0; |
| 6344 | |
| 6345 | reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); |
| 6346 | if (!tnum_is_aligned(reg_off, size)) { |
| 6347 | char tn_buf[48]; |
| 6348 | |
| 6349 | tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); |
| 6350 | verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", |
| 6351 | pointer_desc, tn_buf, reg->off, off, size); |
| 6352 | return -EACCES; |
| 6353 | } |
| 6354 | |
| 6355 | return 0; |
| 6356 | } |
| 6357 | |
| 6358 | static int check_ptr_alignment(struct bpf_verifier_env *env, |
| 6359 | const struct bpf_reg_state *reg, int off, |
| 6360 | int size, bool strict_alignment_once) |
| 6361 | { |
| 6362 | bool strict = env->strict_alignment || strict_alignment_once; |
| 6363 | const char *pointer_desc = ""; |
| 6364 | |
| 6365 | switch (reg->type) { |
| 6366 | case PTR_TO_PACKET: |
| 6367 | case PTR_TO_PACKET_META: |
| 6368 | /* Special case, because of NET_IP_ALIGN. Given metadata sits |
| 6369 | * right in front, treat it the very same way. |
| 6370 | */ |
| 6371 | return check_pkt_ptr_alignment(env, reg, off, size, strict); |
| 6372 | case PTR_TO_FLOW_KEYS: |
| 6373 | pointer_desc = "flow keys "; |
| 6374 | break; |
| 6375 | case PTR_TO_MAP_KEY: |
| 6376 | pointer_desc = "key "; |
| 6377 | break; |
| 6378 | case PTR_TO_MAP_VALUE: |
| 6379 | pointer_desc = "value "; |
| 6380 | break; |
| 6381 | case PTR_TO_CTX: |
| 6382 | pointer_desc = "context "; |
| 6383 | break; |
| 6384 | case PTR_TO_STACK: |
| 6385 | pointer_desc = "stack "; |
| 6386 | /* The stack spill tracking logic in check_stack_write_fixed_off() |
| 6387 | * and check_stack_read_fixed_off() relies on stack accesses being |
| 6388 | * aligned. |
| 6389 | */ |
| 6390 | strict = true; |
| 6391 | break; |
| 6392 | case PTR_TO_SOCKET: |
| 6393 | pointer_desc = "sock "; |
| 6394 | break; |
| 6395 | case PTR_TO_SOCK_COMMON: |
| 6396 | pointer_desc = "sock_common "; |
| 6397 | break; |
| 6398 | case PTR_TO_TCP_SOCK: |
| 6399 | pointer_desc = "tcp_sock "; |
| 6400 | break; |
| 6401 | case PTR_TO_XDP_SOCK: |
| 6402 | pointer_desc = "xdp_sock "; |
| 6403 | break; |
| 6404 | case PTR_TO_ARENA: |
| 6405 | return 0; |
| 6406 | default: |
| 6407 | break; |
| 6408 | } |
| 6409 | return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, |
| 6410 | strict); |
| 6411 | } |
| 6412 | |
| 6413 | static enum priv_stack_mode bpf_enable_priv_stack(struct bpf_prog *prog) |
| 6414 | { |
| 6415 | if (!bpf_jit_supports_private_stack()) |
| 6416 | return NO_PRIV_STACK; |
| 6417 | |
	/* bpf_prog_check_recur() checks all prog types that use a bpf trampoline,
	 * while kprobe/tp/perf_event/raw_tp don't use a trampoline and hence are
	 * checked explicitly.
| 6421 | */ |
| 6422 | switch (prog->type) { |
| 6423 | case BPF_PROG_TYPE_KPROBE: |
| 6424 | case BPF_PROG_TYPE_TRACEPOINT: |
| 6425 | case BPF_PROG_TYPE_PERF_EVENT: |
| 6426 | case BPF_PROG_TYPE_RAW_TRACEPOINT: |
| 6427 | return PRIV_STACK_ADAPTIVE; |
| 6428 | case BPF_PROG_TYPE_TRACING: |
| 6429 | case BPF_PROG_TYPE_LSM: |
| 6430 | case BPF_PROG_TYPE_STRUCT_OPS: |
| 6431 | if (prog->aux->priv_stack_requested || bpf_prog_check_recur(prog)) |
| 6432 | return PRIV_STACK_ADAPTIVE; |
| 6433 | fallthrough; |
| 6434 | default: |
| 6435 | break; |
| 6436 | } |
| 6437 | |
| 6438 | return NO_PRIV_STACK; |
| 6439 | } |
| 6440 | |
| 6441 | static int round_up_stack_depth(struct bpf_verifier_env *env, int stack_depth) |
| 6442 | { |
| 6443 | if (env->prog->jit_requested) |
| 6444 | return round_up(stack_depth, 16); |
| 6445 | |
	/* round up to 32 bytes, since this is the granularity
	 * of the interpreter stack size
| 6448 | */ |
| 6449 | return round_up(max_t(u32, stack_depth, 1), 32); |
| 6450 | } |
| 6451 | |
/* Starting from the main bpf function, walk all instructions of the
 * function and recursively walk all callees that the given function can
 * call. Ignore jump and exit insns.
 * Since recursion is prevented by check_cfg(), this algorithm
 * only needs a local stack of MAX_CALL_FRAMES to remember callsites.
| 6457 | */ |
| 6458 | static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx, |
| 6459 | bool priv_stack_supported) |
| 6460 | { |
| 6461 | struct bpf_subprog_info *subprog = env->subprog_info; |
| 6462 | struct bpf_insn *insn = env->prog->insnsi; |
| 6463 | int depth = 0, frame = 0, i, subprog_end, subprog_depth; |
| 6464 | bool tail_call_reachable = false; |
| 6465 | int ret_insn[MAX_CALL_FRAMES]; |
| 6466 | int ret_prog[MAX_CALL_FRAMES]; |
| 6467 | int j; |
| 6468 | |
| 6469 | i = subprog[idx].start; |
| 6470 | if (!priv_stack_supported) |
| 6471 | subprog[idx].priv_stack_mode = NO_PRIV_STACK; |
| 6472 | process_func: |
| 6473 | /* protect against potential stack overflow that might happen when |
| 6474 | * bpf2bpf calls get combined with tailcalls. Limit the caller's stack |
| 6475 | * depth for such case down to 256 so that the worst case scenario |
| 6476 | * would result in 8k stack size (32 which is tailcall limit * 256 = |
| 6477 | * 8k). |
| 6478 | * |
| 6479 | * To get the idea what might happen, see an example: |
| 6480 | * func1 -> sub rsp, 128 |
| 6481 | * subfunc1 -> sub rsp, 256 |
| 6482 | * tailcall1 -> add rsp, 256 |
| 6483 | * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) |
| 6484 | * subfunc2 -> sub rsp, 64 |
| 6485 | * subfunc22 -> sub rsp, 128 |
| 6486 | * tailcall2 -> add rsp, 128 |
| 6487 | * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) |
| 6488 | * |
| 6489 | * tailcall will unwind the current stack frame but it will not get rid |
| 6490 | * of caller's stack as shown on the example above. |
| 6491 | */ |
| 6492 | if (idx && subprog[idx].has_tail_call && depth >= 256) { |
| 6493 | verbose(env, |
| 6494 | "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n", |
| 6495 | depth); |
| 6496 | return -EACCES; |
| 6497 | } |
| 6498 | |
| 6499 | subprog_depth = round_up_stack_depth(env, subprog[idx].stack_depth); |
| 6500 | if (priv_stack_supported) { |
| 6501 | /* Request private stack support only if the subprog stack |
| 6502 | * depth is no less than BPF_PRIV_STACK_MIN_SIZE. This is to |
| 6503 | * avoid jit penalty if the stack usage is small. |
| 6504 | */ |
| 6505 | if (subprog[idx].priv_stack_mode == PRIV_STACK_UNKNOWN && |
| 6506 | subprog_depth >= BPF_PRIV_STACK_MIN_SIZE) |
| 6507 | subprog[idx].priv_stack_mode = PRIV_STACK_ADAPTIVE; |
| 6508 | } |
| 6509 | |
| 6510 | if (subprog[idx].priv_stack_mode == PRIV_STACK_ADAPTIVE) { |
| 6511 | if (subprog_depth > MAX_BPF_STACK) { |
| 6512 | verbose(env, "stack size of subprog %d is %d. Too large\n", |
| 6513 | idx, subprog_depth); |
| 6514 | return -EACCES; |
| 6515 | } |
| 6516 | } else { |
| 6517 | depth += subprog_depth; |
| 6518 | if (depth > MAX_BPF_STACK) { |
| 6519 | verbose(env, "combined stack size of %d calls is %d. Too large\n", |
| 6520 | frame + 1, depth); |
| 6521 | return -EACCES; |
| 6522 | } |
| 6523 | } |
| 6524 | continue_func: |
| 6525 | subprog_end = subprog[idx + 1].start; |
| 6526 | for (; i < subprog_end; i++) { |
| 6527 | int next_insn, sidx; |
| 6528 | |
| 6529 | if (bpf_pseudo_kfunc_call(insn + i) && !insn[i].off) { |
| 6530 | bool err = false; |
| 6531 | |
| 6532 | if (!is_bpf_throw_kfunc(insn + i)) |
| 6533 | continue; |
| 6534 | if (subprog[idx].is_cb) |
| 6535 | err = true; |
| 6536 | for (int c = 0; c < frame && !err; c++) { |
| 6537 | if (subprog[ret_prog[c]].is_cb) { |
| 6538 | err = true; |
| 6539 | break; |
| 6540 | } |
| 6541 | } |
| 6542 | if (!err) |
| 6543 | continue; |
| 6544 | verbose(env, |
| 6545 | "bpf_throw kfunc (insn %d) cannot be called from callback subprog %d\n", |
| 6546 | i, idx); |
| 6547 | return -EINVAL; |
| 6548 | } |
| 6549 | |
| 6550 | if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i)) |
| 6551 | continue; |
| 6552 | /* remember insn and function to return to */ |
| 6553 | ret_insn[frame] = i + 1; |
| 6554 | ret_prog[frame] = idx; |
| 6555 | |
| 6556 | /* find the callee */ |
| 6557 | next_insn = i + insn[i].imm + 1; |
| 6558 | sidx = find_subprog(env, next_insn); |
| 6559 | if (verifier_bug_if(sidx < 0, env, "callee not found at insn %d", next_insn)) |
| 6560 | return -EFAULT; |
| 6561 | if (subprog[sidx].is_async_cb) { |
| 6562 | if (subprog[sidx].has_tail_call) { |
| 6563 | verifier_bug(env, "subprog has tail_call and async cb"); |
| 6564 | return -EFAULT; |
| 6565 | } |
| 6566 | /* async callbacks don't increase bpf prog stack size unless called directly */ |
| 6567 | if (!bpf_pseudo_call(insn + i)) |
| 6568 | continue; |
| 6569 | if (subprog[sidx].is_exception_cb) { |
| 6570 | verbose(env, "insn %d cannot call exception cb directly", i); |
| 6571 | return -EINVAL; |
| 6572 | } |
| 6573 | } |
| 6574 | i = next_insn; |
| 6575 | idx = sidx; |
| 6576 | if (!priv_stack_supported) |
| 6577 | subprog[idx].priv_stack_mode = NO_PRIV_STACK; |
| 6578 | |
| 6579 | if (subprog[idx].has_tail_call) |
| 6580 | tail_call_reachable = true; |
| 6581 | |
| 6582 | frame++; |
| 6583 | if (frame >= MAX_CALL_FRAMES) { |
| 6584 | verbose(env, "the call stack of %d frames is too deep !\n", |
| 6585 | frame); |
| 6586 | return -E2BIG; |
| 6587 | } |
| 6588 | goto process_func; |
| 6589 | } |
	/* If a tail call was detected across bpf2bpf calls, then mark each of the
	 * currently present subprog frames as tail call reachable subprogs;
	 * this info will be utilized by the JIT so that the tail call counter is
	 * preserved throughout bpf2bpf calls combined with tailcalls.
| 6594 | */ |
| 6595 | if (tail_call_reachable) |
| 6596 | for (j = 0; j < frame; j++) { |
| 6597 | if (subprog[ret_prog[j]].is_exception_cb) { |
| 6598 | verbose(env, "cannot tail call within exception cb\n"); |
| 6599 | return -EINVAL; |
| 6600 | } |
| 6601 | subprog[ret_prog[j]].tail_call_reachable = true; |
| 6602 | } |
| 6603 | if (subprog[0].tail_call_reachable) |
| 6604 | env->prog->aux->tail_call_reachable = true; |
| 6605 | |
| 6606 | /* end of for() loop means the last insn of the 'subprog' |
| 6607 | * was reached. Doesn't matter whether it was JA or EXIT |
| 6608 | */ |
| 6609 | if (frame == 0) |
| 6610 | return 0; |
| 6611 | if (subprog[idx].priv_stack_mode != PRIV_STACK_ADAPTIVE) |
| 6612 | depth -= round_up_stack_depth(env, subprog[idx].stack_depth); |
| 6613 | frame--; |
| 6614 | i = ret_insn[frame]; |
| 6615 | idx = ret_prog[frame]; |
| 6616 | goto continue_func; |
| 6617 | } |
| 6618 | |
| 6619 | static int check_max_stack_depth(struct bpf_verifier_env *env) |
| 6620 | { |
| 6621 | enum priv_stack_mode priv_stack_mode = PRIV_STACK_UNKNOWN; |
| 6622 | struct bpf_subprog_info *si = env->subprog_info; |
| 6623 | bool priv_stack_supported; |
| 6624 | int ret; |
| 6625 | |
| 6626 | for (int i = 0; i < env->subprog_cnt; i++) { |
| 6627 | if (si[i].has_tail_call) { |
| 6628 | priv_stack_mode = NO_PRIV_STACK; |
| 6629 | break; |
| 6630 | } |
| 6631 | } |
| 6632 | |
| 6633 | if (priv_stack_mode == PRIV_STACK_UNKNOWN) |
| 6634 | priv_stack_mode = bpf_enable_priv_stack(env->prog); |
| 6635 | |
| 6636 | /* All async_cb subprogs use normal kernel stack. If a particular |
| 6637 | * subprog appears in both main prog and async_cb subtree, that |
| 6638 | * subprog will use normal kernel stack to avoid potential nesting. |
| 6639 | * The reverse subprog traversal ensures when main prog subtree is |
| 6640 | * checked, the subprogs appearing in async_cb subtrees are already |
| 6641 | * marked as using normal kernel stack, so stack size checking can |
| 6642 | * be done properly. |
| 6643 | */ |
| 6644 | for (int i = env->subprog_cnt - 1; i >= 0; i--) { |
| 6645 | if (!i || si[i].is_async_cb) { |
| 6646 | priv_stack_supported = !i && priv_stack_mode == PRIV_STACK_ADAPTIVE; |
| 6647 | ret = check_max_stack_depth_subprog(env, i, priv_stack_supported); |
| 6648 | if (ret < 0) |
| 6649 | return ret; |
| 6650 | } |
| 6651 | } |
| 6652 | |
| 6653 | for (int i = 0; i < env->subprog_cnt; i++) { |
| 6654 | if (si[i].priv_stack_mode == PRIV_STACK_ADAPTIVE) { |
| 6655 | env->prog->aux->jits_use_priv_stack = true; |
| 6656 | break; |
| 6657 | } |
| 6658 | } |
| 6659 | |
| 6660 | return 0; |
| 6661 | } |
| 6662 | |
| 6663 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON |
| 6664 | static int get_callee_stack_depth(struct bpf_verifier_env *env, |
| 6665 | const struct bpf_insn *insn, int idx) |
| 6666 | { |
| 6667 | int start = idx + insn->imm + 1, subprog; |
| 6668 | |
| 6669 | subprog = find_subprog(env, start); |
| 6670 | if (verifier_bug_if(subprog < 0, env, "get stack depth: no program at insn %d", start)) |
| 6671 | return -EFAULT; |
| 6672 | return env->subprog_info[subprog].stack_depth; |
| 6673 | } |
| 6674 | #endif |
| 6675 | |
| 6676 | static int __check_buffer_access(struct bpf_verifier_env *env, |
| 6677 | const char *buf_info, |
| 6678 | const struct bpf_reg_state *reg, |
| 6679 | int regno, int off, int size) |
| 6680 | { |
| 6681 | if (off < 0) { |
| 6682 | verbose(env, |
| 6683 | "R%d invalid %s buffer access: off=%d, size=%d\n", |
| 6684 | regno, buf_info, off, size); |
| 6685 | return -EACCES; |
| 6686 | } |
| 6687 | if (!tnum_is_const(reg->var_off) || reg->var_off.value) { |
| 6688 | char tn_buf[48]; |
| 6689 | |
| 6690 | tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); |
| 6691 | verbose(env, |
| 6692 | "R%d invalid variable buffer offset: off=%d, var_off=%s\n", |
| 6693 | regno, off, tn_buf); |
| 6694 | return -EACCES; |
| 6695 | } |
| 6696 | |
| 6697 | return 0; |
| 6698 | } |
| 6699 | |
| 6700 | static int check_tp_buffer_access(struct bpf_verifier_env *env, |
| 6701 | const struct bpf_reg_state *reg, |
| 6702 | int regno, int off, int size) |
| 6703 | { |
| 6704 | int err; |
| 6705 | |
| 6706 | err = __check_buffer_access(env, "tracepoint", reg, regno, off, size); |
| 6707 | if (err) |
| 6708 | return err; |
| 6709 | |
| 6710 | if (off + size > env->prog->aux->max_tp_access) |
| 6711 | env->prog->aux->max_tp_access = off + size; |
| 6712 | |
| 6713 | return 0; |
| 6714 | } |
| 6715 | |
| 6716 | static int check_buffer_access(struct bpf_verifier_env *env, |
| 6717 | const struct bpf_reg_state *reg, |
| 6718 | int regno, int off, int size, |
| 6719 | bool zero_size_allowed, |
| 6720 | u32 *max_access) |
| 6721 | { |
| 6722 | const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr"; |
| 6723 | int err; |
| 6724 | |
| 6725 | err = __check_buffer_access(env, buf_info, reg, regno, off, size); |
| 6726 | if (err) |
| 6727 | return err; |
| 6728 | |
| 6729 | if (off + size > *max_access) |
| 6730 | *max_access = off + size; |
| 6731 | |
| 6732 | return 0; |
| 6733 | } |
| 6734 | |
/* BPF architecture zero extends alu32 ops into 64-bit registers */
| 6736 | static void zext_32_to_64(struct bpf_reg_state *reg) |
| 6737 | { |
| 6738 | reg->var_off = tnum_subreg(reg->var_off); |
| 6739 | __reg_assign_32_into_64(reg); |
| 6740 | } |
| 6741 | |
| 6742 | /* truncate register to smaller size (in bytes) |
| 6743 | * must be called with size < BPF_REG_SIZE |
| 6744 | */ |
| 6745 | static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) |
| 6746 | { |
| 6747 | u64 mask; |
| 6748 | |
| 6749 | /* clear high bits in bit representation */ |
| 6750 | reg->var_off = tnum_cast(reg->var_off, size); |
| 6751 | |
| 6752 | /* fix arithmetic bounds */ |
| 6753 | mask = ((u64)1 << (size * 8)) - 1; |
| 6754 | if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { |
| 6755 | reg->umin_value &= mask; |
| 6756 | reg->umax_value &= mask; |
| 6757 | } else { |
| 6758 | reg->umin_value = 0; |
| 6759 | reg->umax_value = mask; |
| 6760 | } |
| 6761 | reg->smin_value = reg->umin_value; |
| 6762 | reg->smax_value = reg->umax_value; |
| 6763 | |
	/* If size is smaller than a 32-bit register, the 32-bit register
	 * values are also truncated, so we push the 64-bit bounds into the
	 * 32-bit bounds. The bounds above were already truncated to < 32 bits.
| 6767 | */ |
| 6768 | if (size < 4) |
| 6769 | __mark_reg32_unbounded(reg); |
| 6770 | |
| 6771 | reg_bounds_sync(reg); |
| 6772 | } |
| 6773 | |
| 6774 | static void set_sext64_default_val(struct bpf_reg_state *reg, int size) |
| 6775 | { |
| 6776 | if (size == 1) { |
| 6777 | reg->smin_value = reg->s32_min_value = S8_MIN; |
| 6778 | reg->smax_value = reg->s32_max_value = S8_MAX; |
| 6779 | } else if (size == 2) { |
| 6780 | reg->smin_value = reg->s32_min_value = S16_MIN; |
| 6781 | reg->smax_value = reg->s32_max_value = S16_MAX; |
| 6782 | } else { |
| 6783 | /* size == 4 */ |
| 6784 | reg->smin_value = reg->s32_min_value = S32_MIN; |
| 6785 | reg->smax_value = reg->s32_max_value = S32_MAX; |
| 6786 | } |
| 6787 | reg->umin_value = reg->u32_min_value = 0; |
| 6788 | reg->umax_value = U64_MAX; |
| 6789 | reg->u32_max_value = U32_MAX; |
| 6790 | reg->var_off = tnum_unknown; |
| 6791 | } |
| 6792 | |
| 6793 | static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size) |
| 6794 | { |
| 6795 | s64 init_s64_max, init_s64_min, s64_max, s64_min, u64_cval; |
| 6796 | u64 top_smax_value, top_smin_value; |
| 6797 | u64 num_bits = size * 8; |
| 6798 | |
| 6799 | if (tnum_is_const(reg->var_off)) { |
| 6800 | u64_cval = reg->var_off.value; |
| 6801 | if (size == 1) |
| 6802 | reg->var_off = tnum_const((s8)u64_cval); |
| 6803 | else if (size == 2) |
| 6804 | reg->var_off = tnum_const((s16)u64_cval); |
| 6805 | else |
| 6806 | /* size == 4 */ |
| 6807 | reg->var_off = tnum_const((s32)u64_cval); |
| 6808 | |
| 6809 | u64_cval = reg->var_off.value; |
| 6810 | reg->smax_value = reg->smin_value = u64_cval; |
| 6811 | reg->umax_value = reg->umin_value = u64_cval; |
| 6812 | reg->s32_max_value = reg->s32_min_value = u64_cval; |
| 6813 | reg->u32_max_value = reg->u32_min_value = u64_cval; |
| 6814 | return; |
| 6815 | } |
| 6816 | |
| 6817 | top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits; |
| 6818 | top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits; |
| 6819 | |
| 6820 | if (top_smax_value != top_smin_value) |
| 6821 | goto out; |
| 6822 | |
	/* find the s64_min and s64_max after sign extension */
| 6824 | if (size == 1) { |
| 6825 | init_s64_max = (s8)reg->smax_value; |
| 6826 | init_s64_min = (s8)reg->smin_value; |
| 6827 | } else if (size == 2) { |
| 6828 | init_s64_max = (s16)reg->smax_value; |
| 6829 | init_s64_min = (s16)reg->smin_value; |
| 6830 | } else { |
| 6831 | init_s64_max = (s32)reg->smax_value; |
| 6832 | init_s64_min = (s32)reg->smin_value; |
| 6833 | } |
| 6834 | |
| 6835 | s64_max = max(init_s64_max, init_s64_min); |
| 6836 | s64_min = min(init_s64_max, init_s64_min); |
| 6837 | |
| 6838 | /* both of s64_max/s64_min positive or negative */ |
| 6839 | if ((s64_max >= 0) == (s64_min >= 0)) { |
| 6840 | reg->s32_min_value = reg->smin_value = s64_min; |
| 6841 | reg->s32_max_value = reg->smax_value = s64_max; |
| 6842 | reg->u32_min_value = reg->umin_value = s64_min; |
| 6843 | reg->u32_max_value = reg->umax_value = s64_max; |
| 6844 | reg->var_off = tnum_range(s64_min, s64_max); |
| 6845 | return; |
| 6846 | } |
| 6847 | |
| 6848 | out: |
| 6849 | set_sext64_default_val(reg, size); |
| 6850 | } |
| 6851 | |
| 6852 | static void set_sext32_default_val(struct bpf_reg_state *reg, int size) |
| 6853 | { |
| 6854 | if (size == 1) { |
| 6855 | reg->s32_min_value = S8_MIN; |
| 6856 | reg->s32_max_value = S8_MAX; |
| 6857 | } else { |
| 6858 | /* size == 2 */ |
| 6859 | reg->s32_min_value = S16_MIN; |
| 6860 | reg->s32_max_value = S16_MAX; |
| 6861 | } |
| 6862 | reg->u32_min_value = 0; |
| 6863 | reg->u32_max_value = U32_MAX; |
| 6864 | reg->var_off = tnum_subreg(tnum_unknown); |
| 6865 | } |
| 6866 | |
| 6867 | static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size) |
| 6868 | { |
| 6869 | s32 init_s32_max, init_s32_min, s32_max, s32_min, u32_val; |
| 6870 | u32 top_smax_value, top_smin_value; |
| 6871 | u32 num_bits = size * 8; |
| 6872 | |
| 6873 | if (tnum_is_const(reg->var_off)) { |
| 6874 | u32_val = reg->var_off.value; |
| 6875 | if (size == 1) |
| 6876 | reg->var_off = tnum_const((s8)u32_val); |
| 6877 | else |
| 6878 | reg->var_off = tnum_const((s16)u32_val); |
| 6879 | |
| 6880 | u32_val = reg->var_off.value; |
| 6881 | reg->s32_min_value = reg->s32_max_value = u32_val; |
| 6882 | reg->u32_min_value = reg->u32_max_value = u32_val; |
| 6883 | return; |
| 6884 | } |
| 6885 | |
| 6886 | top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits; |
| 6887 | top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits; |
| 6888 | |
| 6889 | if (top_smax_value != top_smin_value) |
| 6890 | goto out; |
| 6891 | |
	/* find the s32_min and s32_max after sign extension */
| 6893 | if (size == 1) { |
| 6894 | init_s32_max = (s8)reg->s32_max_value; |
| 6895 | init_s32_min = (s8)reg->s32_min_value; |
| 6896 | } else { |
| 6897 | /* size == 2 */ |
| 6898 | init_s32_max = (s16)reg->s32_max_value; |
| 6899 | init_s32_min = (s16)reg->s32_min_value; |
| 6900 | } |
| 6901 | s32_max = max(init_s32_max, init_s32_min); |
| 6902 | s32_min = min(init_s32_max, init_s32_min); |
| 6903 | |
| 6904 | if ((s32_min >= 0) == (s32_max >= 0)) { |
| 6905 | reg->s32_min_value = s32_min; |
| 6906 | reg->s32_max_value = s32_max; |
| 6907 | reg->u32_min_value = (u32)s32_min; |
| 6908 | reg->u32_max_value = (u32)s32_max; |
| 6909 | reg->var_off = tnum_subreg(tnum_range(s32_min, s32_max)); |
| 6910 | return; |
| 6911 | } |
| 6912 | |
| 6913 | out: |
| 6914 | set_sext32_default_val(reg, size); |
| 6915 | } |
| 6916 | |
| 6917 | static bool bpf_map_is_rdonly(const struct bpf_map *map) |
| 6918 | { |
	/* A map is considered read-only if the following conditions are true:
	 *
	 * 1) BPF program side cannot change any of the map content. The
	 *    BPF_F_RDONLY_PROG flag is set at map creation time and stays
	 *    in effect throughout the lifetime of the map.
| 6924 | * 2) The map value(s) have been initialized from user space by a |
| 6925 | * loader and then "frozen", such that no new map update/delete |
| 6926 | * operations from syscall side are possible for the rest of |
| 6927 | * the map's lifetime from that point onwards. |
| 6928 | * 3) Any parallel/pending map update/delete operations from syscall |
| 6929 | * side have been completed. Only after that point, it's safe to |
| 6930 | * assume that map value(s) are immutable. |
| 6931 | */ |
| 6932 | return (map->map_flags & BPF_F_RDONLY_PROG) && |
| 6933 | READ_ONCE(map->frozen) && |
| 6934 | !bpf_map_write_active(map); |
| 6935 | } |
| 6936 | |
| 6937 | static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val, |
| 6938 | bool is_ldsx) |
| 6939 | { |
| 6940 | void *ptr; |
| 6941 | u64 addr; |
| 6942 | int err; |
| 6943 | |
| 6944 | err = map->ops->map_direct_value_addr(map, &addr, off); |
| 6945 | if (err) |
| 6946 | return err; |
| 6947 | ptr = (void *)(long)addr + off; |
| 6948 | |
| 6949 | switch (size) { |
| 6950 | case sizeof(u8): |
| 6951 | *val = is_ldsx ? (s64)*(s8 *)ptr : (u64)*(u8 *)ptr; |
| 6952 | break; |
| 6953 | case sizeof(u16): |
| 6954 | *val = is_ldsx ? (s64)*(s16 *)ptr : (u64)*(u16 *)ptr; |
| 6955 | break; |
| 6956 | case sizeof(u32): |
| 6957 | *val = is_ldsx ? (s64)*(s32 *)ptr : (u64)*(u32 *)ptr; |
| 6958 | break; |
| 6959 | case sizeof(u64): |
| 6960 | *val = *(u64 *)ptr; |
| 6961 | break; |
| 6962 | default: |
| 6963 | return -EINVAL; |
| 6964 | } |
| 6965 | return 0; |
| 6966 | } |
| 6967 | |
| 6968 | #define BTF_TYPE_SAFE_RCU(__type) __PASTE(__type, __safe_rcu) |
| 6969 | #define BTF_TYPE_SAFE_RCU_OR_NULL(__type) __PASTE(__type, __safe_rcu_or_null) |
| 6970 | #define BTF_TYPE_SAFE_TRUSTED(__type) __PASTE(__type, __safe_trusted) |
| 6971 | #define BTF_TYPE_SAFE_TRUSTED_OR_NULL(__type) __PASTE(__type, __safe_trusted_or_null) |
| 6972 | |
| 6973 | /* |
 * Allow-list a few fields as RCU trusted or fully trusted.
 * This logic doesn't allow mixed tagging and will be removed once GCC supports
| 6976 | * btf_type_tag. |
| 6977 | */ |
| 6978 | |
| 6979 | /* RCU trusted: these fields are trusted in RCU CS and never NULL */ |
| 6980 | BTF_TYPE_SAFE_RCU(struct task_struct) { |
| 6981 | const cpumask_t *cpus_ptr; |
| 6982 | struct css_set __rcu *cgroups; |
| 6983 | struct task_struct __rcu *real_parent; |
| 6984 | struct task_struct *group_leader; |
| 6985 | }; |
| 6986 | |
| 6987 | BTF_TYPE_SAFE_RCU(struct cgroup) { |
| 6988 | /* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */ |
| 6989 | struct kernfs_node *kn; |
| 6990 | }; |
| 6991 | |
| 6992 | BTF_TYPE_SAFE_RCU(struct css_set) { |
| 6993 | struct cgroup *dfl_cgrp; |
| 6994 | }; |
| 6995 | |
| 6996 | /* RCU trusted: these fields are trusted in RCU CS and can be NULL */ |
| 6997 | BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct) { |
| 6998 | struct file __rcu *exe_file; |
| 6999 | }; |
| 7000 | |
| 7001 | /* skb->sk, req->sk are not RCU protected, but we mark them as such |
| 7002 | * because bpf prog accessible sockets are SOCK_RCU_FREE. |
| 7003 | */ |
| 7004 | BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff) { |
| 7005 | struct sock *sk; |
| 7006 | }; |
| 7007 | |
| 7008 | BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock) { |
| 7009 | struct sock *sk; |
| 7010 | }; |
| 7011 | |
| 7012 | /* full trusted: these fields are trusted even outside of RCU CS and never NULL */ |
| 7013 | BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) { |
| 7014 | struct seq_file *seq; |
| 7015 | }; |
| 7016 | |
| 7017 | BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) { |
| 7018 | struct bpf_iter_meta *meta; |
| 7019 | struct task_struct *task; |
| 7020 | }; |
| 7021 | |
| 7022 | BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) { |
| 7023 | struct file *file; |
| 7024 | }; |
| 7025 | |
| 7026 | BTF_TYPE_SAFE_TRUSTED(struct file) { |
| 7027 | struct inode *f_inode; |
| 7028 | }; |
| 7029 | |
| 7030 | BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct dentry) { |
| 7031 | struct inode *d_inode; |
| 7032 | }; |
| 7033 | |
| 7034 | BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket) { |
| 7035 | struct sock *sk; |
| 7036 | }; |
| 7037 | |
| 7038 | static bool type_is_rcu(struct bpf_verifier_env *env, |
| 7039 | struct bpf_reg_state *reg, |
| 7040 | const char *field_name, u32 btf_id) |
| 7041 | { |
| 7042 | BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct task_struct)); |
| 7043 | BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct cgroup)); |
| 7044 | BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct css_set)); |
| 7045 | |
| 7046 | return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu"); |
| 7047 | } |
| 7048 | |
| 7049 | static bool type_is_rcu_or_null(struct bpf_verifier_env *env, |
| 7050 | struct bpf_reg_state *reg, |
| 7051 | const char *field_name, u32 btf_id) |
| 7052 | { |
| 7053 | BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct)); |
| 7054 | BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff)); |
| 7055 | BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock)); |
| 7056 | |
| 7057 | return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null"); |
| 7058 | } |
| 7059 | |
| 7060 | static bool type_is_trusted(struct bpf_verifier_env *env, |
| 7061 | struct bpf_reg_state *reg, |
| 7062 | const char *field_name, u32 btf_id) |
| 7063 | { |
| 7064 | BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta)); |
| 7065 | BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task)); |
| 7066 | BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm)); |
| 7067 | BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file)); |
| 7068 | |
| 7069 | return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted"); |
| 7070 | } |
| 7071 | |
| 7072 | static bool type_is_trusted_or_null(struct bpf_verifier_env *env, |
| 7073 | struct bpf_reg_state *reg, |
| 7074 | const char *field_name, u32 btf_id) |
| 7075 | { |
| 7076 | BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket)); |
| 7077 | BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct dentry)); |
| 7078 | |
| 7079 | return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, |
| 7080 | "__safe_trusted_or_null"); |
| 7081 | } |
| 7082 | |
| 7083 | static int check_ptr_to_btf_access(struct bpf_verifier_env *env, |
| 7084 | struct bpf_reg_state *regs, |
| 7085 | int regno, int off, int size, |
| 7086 | enum bpf_access_type atype, |
| 7087 | int value_regno) |
| 7088 | { |
| 7089 | struct bpf_reg_state *reg = regs + regno; |
| 7090 | const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); |
| 7091 | const char *tname = btf_name_by_offset(reg->btf, t->name_off); |
| 7092 | const char *field_name = NULL; |
| 7093 | enum bpf_type_flag flag = 0; |
| 7094 | u32 btf_id = 0; |
| 7095 | int ret; |
| 7096 | |
| 7097 | if (!env->allow_ptr_leaks) { |
| 7098 | verbose(env, |
| 7099 | "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", |
| 7100 | tname); |
| 7101 | return -EPERM; |
| 7102 | } |
| 7103 | if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) { |
| 7104 | verbose(env, |
| 7105 | "Cannot access kernel 'struct %s' from non-GPL compatible program\n", |
| 7106 | tname); |
| 7107 | return -EINVAL; |
| 7108 | } |
| 7109 | if (off < 0) { |
| 7110 | verbose(env, |
| 7111 | "R%d is ptr_%s invalid negative access: off=%d\n", |
| 7112 | regno, tname, off); |
| 7113 | return -EACCES; |
| 7114 | } |
| 7115 | if (!tnum_is_const(reg->var_off) || reg->var_off.value) { |
| 7116 | char tn_buf[48]; |
| 7117 | |
| 7118 | tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); |
| 7119 | verbose(env, |
| 7120 | "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", |
| 7121 | regno, tname, off, tn_buf); |
| 7122 | return -EACCES; |
| 7123 | } |
| 7124 | |
| 7125 | if (reg->type & MEM_USER) { |
| 7126 | verbose(env, |
| 7127 | "R%d is ptr_%s access user memory: off=%d\n", |
| 7128 | regno, tname, off); |
| 7129 | return -EACCES; |
| 7130 | } |
| 7131 | |
| 7132 | if (reg->type & MEM_PERCPU) { |
| 7133 | verbose(env, |
| 7134 | "R%d is ptr_%s access percpu memory: off=%d\n", |
| 7135 | regno, tname, off); |
| 7136 | return -EACCES; |
| 7137 | } |
| 7138 | |
| 7139 | if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) { |
| 7140 | if (!btf_is_kernel(reg->btf)) { |
| 7141 | verbose(env, "verifier internal error: reg->btf must be kernel btf\n"); |
| 7142 | return -EFAULT; |
| 7143 | } |
| 7144 | ret = env->ops->btf_struct_access(&env->log, reg, off, size); |
| 7145 | } else { |
| 7146 | /* Writes are permitted with default btf_struct_access for |
| 7147 | * program allocated objects (which always have ref_obj_id > 0), |
| 7148 | * but not for untrusted PTR_TO_BTF_ID | MEM_ALLOC. |
| 7149 | */ |
| 7150 | if (atype != BPF_READ && !type_is_ptr_alloc_obj(reg->type)) { |
| 7151 | verbose(env, "only read is supported\n"); |
| 7152 | return -EACCES; |
| 7153 | } |
| 7154 | |
| 7155 | if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) && |
| 7156 | !(reg->type & MEM_RCU) && !reg->ref_obj_id) { |
| 7157 | verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n"); |
| 7158 | return -EFAULT; |
| 7159 | } |
| 7160 | |
| 7161 | ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name); |
| 7162 | } |
| 7163 | |
| 7164 | if (ret < 0) |
| 7165 | return ret; |
| 7166 | |
| 7167 | if (ret != PTR_TO_BTF_ID) { |
| 7168 | /* just mark; */ |
| 7169 | |
| 7170 | } else if (type_flag(reg->type) & PTR_UNTRUSTED) { |
| 7171 | /* If this is an untrusted pointer, all pointers formed by walking it |
| 7172 | * also inherit the untrusted flag. |
| 7173 | */ |
| 7174 | flag = PTR_UNTRUSTED; |
| 7175 | |
| 7176 | } else if (is_trusted_reg(reg) || is_rcu_reg(reg)) { |
| 7177 | /* By default any pointer obtained from walking a trusted pointer is no |
| 7178 | * longer trusted, unless the field being accessed has explicitly been |
| 7179 | * marked as inheriting its parent's state of trust (either full or RCU). |
| 7180 | * For example: |
| 7181 | * 'cgroups' pointer is untrusted if task->cgroups dereference |
| 7182 | * happened in a sleepable program outside of bpf_rcu_read_lock() |
| 7183 | * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU). |
| 7184 | * Note bpf_rcu_read_unlock() converts MEM_RCU pointers to PTR_UNTRUSTED. |
| 7185 | * |
| 7186 | * A regular RCU-protected pointer with __rcu tag can also be deemed |
| 7187 | * trusted if we are in an RCU CS. Such pointer can be NULL. |
| 7188 | */ |
| 7189 | if (type_is_trusted(env, reg, field_name, btf_id)) { |
| 7190 | flag |= PTR_TRUSTED; |
| 7191 | } else if (type_is_trusted_or_null(env, reg, field_name, btf_id)) { |
| 7192 | flag |= PTR_TRUSTED | PTR_MAYBE_NULL; |
| 7193 | } else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) { |
| 7194 | if (type_is_rcu(env, reg, field_name, btf_id)) { |
| 7195 | /* ignore __rcu tag and mark it MEM_RCU */ |
| 7196 | flag |= MEM_RCU; |
| 7197 | } else if (flag & MEM_RCU || |
| 7198 | type_is_rcu_or_null(env, reg, field_name, btf_id)) { |
| 7199 | /* __rcu tagged pointers can be NULL */ |
| 7200 | flag |= MEM_RCU | PTR_MAYBE_NULL; |
| 7201 | |
| 7202 | /* We always trust them */ |
| 7203 | if (type_is_rcu_or_null(env, reg, field_name, btf_id) && |
| 7204 | flag & PTR_UNTRUSTED) |
| 7205 | flag &= ~PTR_UNTRUSTED; |
| 7206 | } else if (flag & (MEM_PERCPU | MEM_USER)) { |
| 7207 | /* keep as-is */ |
| 7208 | } else { |
| 7209 | /* walking unknown pointers yields old deprecated PTR_TO_BTF_ID */ |
| 7210 | clear_trusted_flags(&flag); |
| 7211 | } |
| 7212 | } else { |
| 7213 | /* |
* If we are not in an RCU CS, or the MEM_RCU pointer can be NULL,
* aggressively mark the pointer as untrusted. Otherwise such
* pointers would be plain PTR_TO_BTF_ID without flags and would
* be allowed to be passed into helpers for compat reasons.
| 7219 | */ |
| 7220 | flag = PTR_UNTRUSTED; |
| 7221 | } |
| 7222 | } else { |
| 7223 | /* Old compat. Deprecated */ |
| 7224 | clear_trusted_flags(&flag); |
| 7225 | } |
| 7226 | |
| 7227 | if (atype == BPF_READ && value_regno >= 0) |
| 7228 | mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); |
| 7229 | |
| 7230 | return 0; |
| 7231 | } |
| 7232 | |
| 7233 | static int check_ptr_to_map_access(struct bpf_verifier_env *env, |
| 7234 | struct bpf_reg_state *regs, |
| 7235 | int regno, int off, int size, |
| 7236 | enum bpf_access_type atype, |
| 7237 | int value_regno) |
| 7238 | { |
| 7239 | struct bpf_reg_state *reg = regs + regno; |
| 7240 | struct bpf_map *map = reg->map_ptr; |
| 7241 | struct bpf_reg_state map_reg; |
| 7242 | enum bpf_type_flag flag = 0; |
| 7243 | const struct btf_type *t; |
| 7244 | const char *tname; |
| 7245 | u32 btf_id; |
| 7246 | int ret; |
| 7247 | |
| 7248 | if (!btf_vmlinux) { |
| 7249 | verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); |
| 7250 | return -ENOTSUPP; |
| 7251 | } |
| 7252 | |
| 7253 | if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { |
| 7254 | verbose(env, "map_ptr access not supported for map type %d\n", |
| 7255 | map->map_type); |
| 7256 | return -ENOTSUPP; |
| 7257 | } |
| 7258 | |
| 7259 | t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); |
| 7260 | tname = btf_name_by_offset(btf_vmlinux, t->name_off); |
| 7261 | |
| 7262 | if (!env->allow_ptr_leaks) { |
| 7263 | verbose(env, |
| 7264 | "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", |
| 7265 | tname); |
| 7266 | return -EPERM; |
| 7267 | } |
| 7268 | |
| 7269 | if (off < 0) { |
| 7270 | verbose(env, "R%d is %s invalid negative access: off=%d\n", |
| 7271 | regno, tname, off); |
| 7272 | return -EACCES; |
| 7273 | } |
| 7274 | |
| 7275 | if (atype != BPF_READ) { |
| 7276 | verbose(env, "only read from %s is supported\n", tname); |
| 7277 | return -EACCES; |
| 7278 | } |
| 7279 | |
| 7280 | /* Simulate access to a PTR_TO_BTF_ID */ |
| 7281 | memset(&map_reg, 0, sizeof(map_reg)); |
| 7282 | mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0); |
| 7283 | ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL); |
| 7284 | if (ret < 0) |
| 7285 | return ret; |
| 7286 | |
| 7287 | if (value_regno >= 0) |
| 7288 | mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag); |
| 7289 | |
| 7290 | return 0; |
| 7291 | } |
| 7292 | |
| 7293 | /* Check that the stack access at the given offset is within bounds. The |
| 7294 | * maximum valid offset is -1. |
| 7295 | * |
* The minimum valid offset is -MAX_BPF_STACK for writes (and for reads
* when uninitialized stack reads are allowed), and -state->allocated_stack
* for other reads.
| 7298 | */ |
| 7299 | static int check_stack_slot_within_bounds(struct bpf_verifier_env *env, |
| 7300 | s64 off, |
| 7301 | struct bpf_func_state *state, |
| 7302 | enum bpf_access_type t) |
| 7303 | { |
| 7304 | int min_valid_off; |
| 7305 | |
| 7306 | if (t == BPF_WRITE || env->allow_uninit_stack) |
| 7307 | min_valid_off = -MAX_BPF_STACK; |
| 7308 | else |
| 7309 | min_valid_off = -state->allocated_stack; |
| 7310 | |
| 7311 | if (off < min_valid_off || off > -1) |
| 7312 | return -EACCES; |
| 7313 | return 0; |
| 7314 | } |
| 7315 | |
| 7316 | /* Check that the stack access at 'regno + off' falls within the maximum stack |
| 7317 | * bounds. |
| 7318 | * |
* 'off' includes the register's fixed offset (reg->off), but not its
* dynamic part (if any).
| 7320 | */ |
| 7321 | static int check_stack_access_within_bounds( |
| 7322 | struct bpf_verifier_env *env, |
| 7323 | int regno, int off, int access_size, |
| 7324 | enum bpf_access_type type) |
| 7325 | { |
| 7326 | struct bpf_reg_state *regs = cur_regs(env); |
| 7327 | struct bpf_reg_state *reg = regs + regno; |
| 7328 | struct bpf_func_state *state = func(env, reg); |
| 7329 | s64 min_off, max_off; |
| 7330 | int err; |
| 7331 | char *err_extra; |
| 7332 | |
| 7333 | if (type == BPF_READ) |
| 7334 | err_extra = " read from"; |
| 7335 | else |
| 7336 | err_extra = " write to"; |
| 7337 | |
| 7338 | if (tnum_is_const(reg->var_off)) { |
| 7339 | min_off = (s64)reg->var_off.value + off; |
| 7340 | max_off = min_off + access_size; |
| 7341 | } else { |
| 7342 | if (reg->smax_value >= BPF_MAX_VAR_OFF || |
| 7343 | reg->smin_value <= -BPF_MAX_VAR_OFF) { |
| 7344 | verbose(env, "invalid unbounded variable-offset%s stack R%d\n", |
| 7345 | err_extra, regno); |
| 7346 | return -EACCES; |
| 7347 | } |
| 7348 | min_off = reg->smin_value + off; |
| 7349 | max_off = reg->smax_value + off + access_size; |
| 7350 | } |
| 7351 | |
| 7352 | err = check_stack_slot_within_bounds(env, min_off, state, type); |
| 7353 | if (!err && max_off > 0) |
| 7354 | err = -EINVAL; /* out of stack access into non-negative offsets */ |
| 7355 | if (!err && access_size < 0) |
/* access_size should not be negative (or overflow an int); other checks
* along the way should have prevented such an access.
| 7358 | */ |
| 7359 | err = -EFAULT; /* invalid negative access size; integer overflow? */ |
| 7360 | |
| 7361 | if (err) { |
| 7362 | if (tnum_is_const(reg->var_off)) { |
| 7363 | verbose(env, "invalid%s stack R%d off=%d size=%d\n", |
| 7364 | err_extra, regno, off, access_size); |
| 7365 | } else { |
| 7366 | char tn_buf[48]; |
| 7367 | |
| 7368 | tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); |
| 7369 | verbose(env, "invalid variable-offset%s stack R%d var_off=%s off=%d size=%d\n", |
| 7370 | err_extra, regno, tn_buf, off, access_size); |
| 7371 | } |
| 7372 | return err; |
| 7373 | } |
| 7374 | |
| 7375 | /* Note that there is no stack access with offset zero, so the needed stack |
| 7376 | * size is -min_off, not -min_off+1. |
| 7377 | */ |
| 7378 | return grow_stack_state(env, state, -min_off /* size */); |
| 7379 | } |
| 7380 | |
| 7381 | static bool get_func_retval_range(struct bpf_prog *prog, |
| 7382 | struct bpf_retval_range *range) |
| 7383 | { |
| 7384 | if (prog->type == BPF_PROG_TYPE_LSM && |
| 7385 | prog->expected_attach_type == BPF_LSM_MAC && |
| 7386 | !bpf_lsm_get_retval_range(prog, range)) { |
| 7387 | return true; |
| 7388 | } |
| 7389 | return false; |
| 7390 | } |
| 7391 | |
| 7392 | /* check whether memory at (regno + off) is accessible for t = (read | write) |
* if t==write, value_regno is a register whose value is stored into memory
| 7394 | * if t==read, value_regno is a register which will receive the value from memory |
| 7395 | * if t==write && value_regno==-1, some unknown value is stored into memory |
| 7396 | * if t==read && value_regno==-1, don't care what we read from memory |
| 7397 | */ |
| 7398 | static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, |
| 7399 | int off, int bpf_size, enum bpf_access_type t, |
| 7400 | int value_regno, bool strict_alignment_once, bool is_ldsx) |
| 7401 | { |
| 7402 | struct bpf_reg_state *regs = cur_regs(env); |
| 7403 | struct bpf_reg_state *reg = regs + regno; |
| 7404 | int size, err = 0; |
| 7405 | |
| 7406 | size = bpf_size_to_bytes(bpf_size); |
| 7407 | if (size < 0) |
| 7408 | return size; |
| 7409 | |
| 7410 | /* alignment checks will add in reg->off themselves */ |
| 7411 | err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); |
| 7412 | if (err) |
| 7413 | return err; |
| 7414 | |
| 7415 | /* for access checks, reg->off is just part of off */ |
| 7416 | off += reg->off; |
| 7417 | |
| 7418 | if (reg->type == PTR_TO_MAP_KEY) { |
| 7419 | if (t == BPF_WRITE) { |
| 7420 | verbose(env, "write to change key R%d not allowed\n", regno); |
| 7421 | return -EACCES; |
| 7422 | } |
| 7423 | |
| 7424 | err = check_mem_region_access(env, regno, off, size, |
| 7425 | reg->map_ptr->key_size, false); |
| 7426 | if (err) |
| 7427 | return err; |
| 7428 | if (value_regno >= 0) |
| 7429 | mark_reg_unknown(env, regs, value_regno); |
| 7430 | } else if (reg->type == PTR_TO_MAP_VALUE) { |
| 7431 | struct btf_field *kptr_field = NULL; |
| 7432 | |
| 7433 | if (t == BPF_WRITE && value_regno >= 0 && |
| 7434 | is_pointer_value(env, value_regno)) { |
| 7435 | verbose(env, "R%d leaks addr into map\n", value_regno); |
| 7436 | return -EACCES; |
| 7437 | } |
| 7438 | err = check_map_access_type(env, regno, off, size, t); |
| 7439 | if (err) |
| 7440 | return err; |
| 7441 | err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT); |
| 7442 | if (err) |
| 7443 | return err; |
| 7444 | if (tnum_is_const(reg->var_off)) |
| 7445 | kptr_field = btf_record_find(reg->map_ptr->record, |
| 7446 | off + reg->var_off.value, BPF_KPTR | BPF_UPTR); |
| 7447 | if (kptr_field) { |
| 7448 | err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field); |
| 7449 | } else if (t == BPF_READ && value_regno >= 0) { |
| 7450 | struct bpf_map *map = reg->map_ptr; |
| 7451 | |
| 7452 | /* if map is read-only, track its contents as scalars */ |
| 7453 | if (tnum_is_const(reg->var_off) && |
| 7454 | bpf_map_is_rdonly(map) && |
| 7455 | map->ops->map_direct_value_addr) { |
| 7456 | int map_off = off + reg->var_off.value; |
| 7457 | u64 val = 0; |
| 7458 | |
| 7459 | err = bpf_map_direct_read(map, map_off, size, |
| 7460 | &val, is_ldsx); |
| 7461 | if (err) |
| 7462 | return err; |
| 7463 | |
| 7464 | regs[value_regno].type = SCALAR_VALUE; |
| 7465 | __mark_reg_known(®s[value_regno], val); |
| 7466 | } else { |
| 7467 | mark_reg_unknown(env, regs, value_regno); |
| 7468 | } |
| 7469 | } |
| 7470 | } else if (base_type(reg->type) == PTR_TO_MEM) { |
| 7471 | bool rdonly_mem = type_is_rdonly_mem(reg->type); |
| 7472 | |
| 7473 | if (type_may_be_null(reg->type)) { |
| 7474 | verbose(env, "R%d invalid mem access '%s'\n", regno, |
| 7475 | reg_type_str(env, reg->type)); |
| 7476 | return -EACCES; |
| 7477 | } |
| 7478 | |
| 7479 | if (t == BPF_WRITE && rdonly_mem) { |
| 7480 | verbose(env, "R%d cannot write into %s\n", |
| 7481 | regno, reg_type_str(env, reg->type)); |
| 7482 | return -EACCES; |
| 7483 | } |
| 7484 | |
| 7485 | if (t == BPF_WRITE && value_regno >= 0 && |
| 7486 | is_pointer_value(env, value_regno)) { |
| 7487 | verbose(env, "R%d leaks addr into mem\n", value_regno); |
| 7488 | return -EACCES; |
| 7489 | } |
| 7490 | |
| 7491 | err = check_mem_region_access(env, regno, off, size, |
| 7492 | reg->mem_size, false); |
| 7493 | if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem)) |
| 7494 | mark_reg_unknown(env, regs, value_regno); |
| 7495 | } else if (reg->type == PTR_TO_CTX) { |
| 7496 | struct bpf_retval_range range; |
| 7497 | struct bpf_insn_access_aux info = { |
| 7498 | .reg_type = SCALAR_VALUE, |
| 7499 | .is_ldsx = is_ldsx, |
| 7500 | .log = &env->log, |
| 7501 | }; |
| 7502 | |
| 7503 | if (t == BPF_WRITE && value_regno >= 0 && |
| 7504 | is_pointer_value(env, value_regno)) { |
| 7505 | verbose(env, "R%d leaks addr into ctx\n", value_regno); |
| 7506 | return -EACCES; |
| 7507 | } |
| 7508 | |
| 7509 | err = check_ptr_off_reg(env, reg, regno); |
| 7510 | if (err < 0) |
| 7511 | return err; |
| 7512 | |
| 7513 | err = check_ctx_access(env, insn_idx, off, size, t, &info); |
| 7514 | if (err) |
| 7515 | verbose_linfo(env, insn_idx, "; "); |
| 7516 | if (!err && t == BPF_READ && value_regno >= 0) { |
| 7517 | /* ctx access returns either a scalar, or a |
| 7518 | * PTR_TO_PACKET[_META,_END]. In the latter |
| 7519 | * case, we know the offset is zero. |
| 7520 | */ |
| 7521 | if (info.reg_type == SCALAR_VALUE) { |
| 7522 | if (info.is_retval && get_func_retval_range(env->prog, &range)) { |
| 7523 | err = __mark_reg_s32_range(env, regs, value_regno, |
| 7524 | range.minval, range.maxval); |
| 7525 | if (err) |
| 7526 | return err; |
| 7527 | } else { |
| 7528 | mark_reg_unknown(env, regs, value_regno); |
| 7529 | } |
| 7530 | } else { |
| 7531 | mark_reg_known_zero(env, regs, |
| 7532 | value_regno); |
| 7533 | if (type_may_be_null(info.reg_type)) |
| 7534 | regs[value_regno].id = ++env->id_gen; |
/* A load of a ctx field could have an actual load
* size different from the one encoded in the insn.
* When the dst is a PTR, it is for sure not a
* sub-register.
| 7539 | */ |
| 7540 | regs[value_regno].subreg_def = DEF_NOT_SUBREG; |
| 7541 | if (base_type(info.reg_type) == PTR_TO_BTF_ID) { |
| 7542 | regs[value_regno].btf = info.btf; |
| 7543 | regs[value_regno].btf_id = info.btf_id; |
| 7544 | regs[value_regno].ref_obj_id = info.ref_obj_id; |
| 7545 | } |
| 7546 | } |
| 7547 | regs[value_regno].type = info.reg_type; |
| 7548 | } |
| 7549 | |
| 7550 | } else if (reg->type == PTR_TO_STACK) { |
| 7551 | /* Basic bounds checks. */ |
| 7552 | err = check_stack_access_within_bounds(env, regno, off, size, t); |
| 7553 | if (err) |
| 7554 | return err; |
| 7555 | |
| 7556 | if (t == BPF_READ) |
| 7557 | err = check_stack_read(env, regno, off, size, |
| 7558 | value_regno); |
| 7559 | else |
| 7560 | err = check_stack_write(env, regno, off, size, |
| 7561 | value_regno, insn_idx); |
| 7562 | } else if (reg_is_pkt_pointer(reg)) { |
| 7563 | if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { |
| 7564 | verbose(env, "cannot write into packet\n"); |
| 7565 | return -EACCES; |
| 7566 | } |
| 7567 | if (t == BPF_WRITE && value_regno >= 0 && |
| 7568 | is_pointer_value(env, value_regno)) { |
| 7569 | verbose(env, "R%d leaks addr into packet\n", |
| 7570 | value_regno); |
| 7571 | return -EACCES; |
| 7572 | } |
| 7573 | err = check_packet_access(env, regno, off, size, false); |
| 7574 | if (!err && t == BPF_READ && value_regno >= 0) |
| 7575 | mark_reg_unknown(env, regs, value_regno); |
| 7576 | } else if (reg->type == PTR_TO_FLOW_KEYS) { |
| 7577 | if (t == BPF_WRITE && value_regno >= 0 && |
| 7578 | is_pointer_value(env, value_regno)) { |
| 7579 | verbose(env, "R%d leaks addr into flow keys\n", |
| 7580 | value_regno); |
| 7581 | return -EACCES; |
| 7582 | } |
| 7583 | |
| 7584 | err = check_flow_keys_access(env, off, size); |
| 7585 | if (!err && t == BPF_READ && value_regno >= 0) |
| 7586 | mark_reg_unknown(env, regs, value_regno); |
| 7587 | } else if (type_is_sk_pointer(reg->type)) { |
| 7588 | if (t == BPF_WRITE) { |
| 7589 | verbose(env, "R%d cannot write into %s\n", |
| 7590 | regno, reg_type_str(env, reg->type)); |
| 7591 | return -EACCES; |
| 7592 | } |
| 7593 | err = check_sock_access(env, insn_idx, regno, off, size, t); |
| 7594 | if (!err && value_regno >= 0) |
| 7595 | mark_reg_unknown(env, regs, value_regno); |
| 7596 | } else if (reg->type == PTR_TO_TP_BUFFER) { |
| 7597 | err = check_tp_buffer_access(env, reg, regno, off, size); |
| 7598 | if (!err && t == BPF_READ && value_regno >= 0) |
| 7599 | mark_reg_unknown(env, regs, value_regno); |
| 7600 | } else if (base_type(reg->type) == PTR_TO_BTF_ID && |
| 7601 | !type_may_be_null(reg->type)) { |
| 7602 | err = check_ptr_to_btf_access(env, regs, regno, off, size, t, |
| 7603 | value_regno); |
| 7604 | } else if (reg->type == CONST_PTR_TO_MAP) { |
| 7605 | err = check_ptr_to_map_access(env, regs, regno, off, size, t, |
| 7606 | value_regno); |
| 7607 | } else if (base_type(reg->type) == PTR_TO_BUF) { |
| 7608 | bool rdonly_mem = type_is_rdonly_mem(reg->type); |
| 7609 | u32 *max_access; |
| 7610 | |
| 7611 | if (rdonly_mem) { |
| 7612 | if (t == BPF_WRITE) { |
| 7613 | verbose(env, "R%d cannot write into %s\n", |
| 7614 | regno, reg_type_str(env, reg->type)); |
| 7615 | return -EACCES; |
| 7616 | } |
| 7617 | max_access = &env->prog->aux->max_rdonly_access; |
| 7618 | } else { |
| 7619 | max_access = &env->prog->aux->max_rdwr_access; |
| 7620 | } |
| 7621 | |
| 7622 | err = check_buffer_access(env, reg, regno, off, size, false, |
| 7623 | max_access); |
| 7624 | |
| 7625 | if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ)) |
| 7626 | mark_reg_unknown(env, regs, value_regno); |
| 7627 | } else if (reg->type == PTR_TO_ARENA) { |
| 7628 | if (t == BPF_READ && value_regno >= 0) |
| 7629 | mark_reg_unknown(env, regs, value_regno); |
| 7630 | } else { |
| 7631 | verbose(env, "R%d invalid mem access '%s'\n", regno, |
| 7632 | reg_type_str(env, reg->type)); |
| 7633 | return -EACCES; |
| 7634 | } |
| 7635 | |
| 7636 | if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && |
| 7637 | regs[value_regno].type == SCALAR_VALUE) { |
| 7638 | if (!is_ldsx) |
| 7639 | /* b/h/w load zero-extends, mark upper bits as known 0 */ |
| 7640 | coerce_reg_to_size(®s[value_regno], size); |
| 7641 | else |
| 7642 | coerce_reg_to_size_sx(®s[value_regno], size); |
| 7643 | } |
| 7644 | return err; |
| 7645 | } |
| 7646 | |
| 7647 | static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type, |
| 7648 | bool allow_trust_mismatch); |
| 7649 | |
| 7650 | static int check_load_mem(struct bpf_verifier_env *env, struct bpf_insn *insn, |
| 7651 | bool strict_alignment_once, bool is_ldsx, |
| 7652 | bool allow_trust_mismatch, const char *ctx) |
| 7653 | { |
| 7654 | struct bpf_reg_state *regs = cur_regs(env); |
| 7655 | enum bpf_reg_type src_reg_type; |
| 7656 | int err; |
| 7657 | |
| 7658 | /* check src operand */ |
| 7659 | err = check_reg_arg(env, insn->src_reg, SRC_OP); |
| 7660 | if (err) |
| 7661 | return err; |
| 7662 | |
| 7663 | /* check dst operand */ |
| 7664 | err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); |
| 7665 | if (err) |
| 7666 | return err; |
| 7667 | |
| 7668 | src_reg_type = regs[insn->src_reg].type; |
| 7669 | |
| 7670 | /* Check if (src_reg + off) is readable. The state of dst_reg will be |
| 7671 | * updated by this call. |
| 7672 | */ |
| 7673 | err = check_mem_access(env, env->insn_idx, insn->src_reg, insn->off, |
| 7674 | BPF_SIZE(insn->code), BPF_READ, insn->dst_reg, |
| 7675 | strict_alignment_once, is_ldsx); |
| 7676 | err = err ?: save_aux_ptr_type(env, src_reg_type, |
| 7677 | allow_trust_mismatch); |
| 7678 | err = err ?: reg_bounds_sanity_check(env, ®s[insn->dst_reg], ctx); |
| 7679 | |
| 7680 | return err; |
| 7681 | } |
| 7682 | |
| 7683 | static int check_store_reg(struct bpf_verifier_env *env, struct bpf_insn *insn, |
| 7684 | bool strict_alignment_once) |
| 7685 | { |
| 7686 | struct bpf_reg_state *regs = cur_regs(env); |
| 7687 | enum bpf_reg_type dst_reg_type; |
| 7688 | int err; |
| 7689 | |
| 7690 | /* check src1 operand */ |
| 7691 | err = check_reg_arg(env, insn->src_reg, SRC_OP); |
| 7692 | if (err) |
| 7693 | return err; |
| 7694 | |
| 7695 | /* check src2 operand */ |
| 7696 | err = check_reg_arg(env, insn->dst_reg, SRC_OP); |
| 7697 | if (err) |
| 7698 | return err; |
| 7699 | |
| 7700 | dst_reg_type = regs[insn->dst_reg].type; |
| 7701 | |
| 7702 | /* Check if (dst_reg + off) is writeable. */ |
| 7703 | err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, |
| 7704 | BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg, |
| 7705 | strict_alignment_once, false); |
| 7706 | err = err ?: save_aux_ptr_type(env, dst_reg_type, false); |
| 7707 | |
| 7708 | return err; |
| 7709 | } |
| 7710 | |
| 7711 | static int check_atomic_rmw(struct bpf_verifier_env *env, |
| 7712 | struct bpf_insn *insn) |
| 7713 | { |
| 7714 | int load_reg; |
| 7715 | int err; |
| 7716 | |
| 7717 | if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) { |
| 7718 | verbose(env, "invalid atomic operand size\n"); |
| 7719 | return -EINVAL; |
| 7720 | } |
| 7721 | |
| 7722 | /* check src1 operand */ |
| 7723 | err = check_reg_arg(env, insn->src_reg, SRC_OP); |
| 7724 | if (err) |
| 7725 | return err; |
| 7726 | |
| 7727 | /* check src2 operand */ |
| 7728 | err = check_reg_arg(env, insn->dst_reg, SRC_OP); |
| 7729 | if (err) |
| 7730 | return err; |
| 7731 | |
| 7732 | if (insn->imm == BPF_CMPXCHG) { |
| 7733 | /* Check comparison of R0 with memory location */ |
| 7734 | const u32 aux_reg = BPF_REG_0; |
| 7735 | |
| 7736 | err = check_reg_arg(env, aux_reg, SRC_OP); |
| 7737 | if (err) |
| 7738 | return err; |
| 7739 | |
| 7740 | if (is_pointer_value(env, aux_reg)) { |
| 7741 | verbose(env, "R%d leaks addr into mem\n", aux_reg); |
| 7742 | return -EACCES; |
| 7743 | } |
| 7744 | } |
| 7745 | |
| 7746 | if (is_pointer_value(env, insn->src_reg)) { |
| 7747 | verbose(env, "R%d leaks addr into mem\n", insn->src_reg); |
| 7748 | return -EACCES; |
| 7749 | } |
| 7750 | |
| 7751 | if (!atomic_ptr_type_ok(env, insn->dst_reg, insn)) { |
| 7752 | verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n", |
| 7753 | insn->dst_reg, |
| 7754 | reg_type_str(env, reg_state(env, insn->dst_reg)->type)); |
| 7755 | return -EACCES; |
| 7756 | } |
| 7757 | |
| 7758 | if (insn->imm & BPF_FETCH) { |
| 7759 | if (insn->imm == BPF_CMPXCHG) |
| 7760 | load_reg = BPF_REG_0; |
| 7761 | else |
| 7762 | load_reg = insn->src_reg; |
| 7763 | |
| 7764 | /* check and record load of old value */ |
| 7765 | err = check_reg_arg(env, load_reg, DST_OP); |
| 7766 | if (err) |
| 7767 | return err; |
| 7768 | } else { |
| 7769 | /* This instruction accesses a memory location but doesn't |
| 7770 | * actually load it into a register. |
| 7771 | */ |
| 7772 | load_reg = -1; |
| 7773 | } |
| 7774 | |
| 7775 | /* Check whether we can read the memory, with second call for fetch |
| 7776 | * case to simulate the register fill. |
| 7777 | */ |
| 7778 | err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, |
| 7779 | BPF_SIZE(insn->code), BPF_READ, -1, true, false); |
| 7780 | if (!err && load_reg >= 0) |
| 7781 | err = check_mem_access(env, env->insn_idx, insn->dst_reg, |
| 7782 | insn->off, BPF_SIZE(insn->code), |
| 7783 | BPF_READ, load_reg, true, false); |
| 7784 | if (err) |
| 7785 | return err; |
| 7786 | |
| 7787 | if (is_arena_reg(env, insn->dst_reg)) { |
| 7788 | err = save_aux_ptr_type(env, PTR_TO_ARENA, false); |
| 7789 | if (err) |
| 7790 | return err; |
| 7791 | } |
| 7792 | /* Check whether we can write into the same memory. */ |
| 7793 | err = check_mem_access(env, env->insn_idx, insn->dst_reg, insn->off, |
| 7794 | BPF_SIZE(insn->code), BPF_WRITE, -1, true, false); |
| 7795 | if (err) |
| 7796 | return err; |
| 7797 | return 0; |
| 7798 | } |
| 7799 | |
| 7800 | static int check_atomic_load(struct bpf_verifier_env *env, |
| 7801 | struct bpf_insn *insn) |
| 7802 | { |
| 7803 | int err; |
| 7804 | |
| 7805 | err = check_load_mem(env, insn, true, false, false, "atomic_load"); |
| 7806 | if (err) |
| 7807 | return err; |
| 7808 | |
| 7809 | if (!atomic_ptr_type_ok(env, insn->src_reg, insn)) { |
| 7810 | verbose(env, "BPF_ATOMIC loads from R%d %s is not allowed\n", |
| 7811 | insn->src_reg, |
| 7812 | reg_type_str(env, reg_state(env, insn->src_reg)->type)); |
| 7813 | return -EACCES; |
| 7814 | } |
| 7815 | |
| 7816 | return 0; |
| 7817 | } |
| 7818 | |
| 7819 | static int check_atomic_store(struct bpf_verifier_env *env, |
| 7820 | struct bpf_insn *insn) |
| 7821 | { |
| 7822 | int err; |
| 7823 | |
| 7824 | err = check_store_reg(env, insn, true); |
| 7825 | if (err) |
| 7826 | return err; |
| 7827 | |
| 7828 | if (!atomic_ptr_type_ok(env, insn->dst_reg, insn)) { |
| 7829 | verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n", |
| 7830 | insn->dst_reg, |
| 7831 | reg_type_str(env, reg_state(env, insn->dst_reg)->type)); |
| 7832 | return -EACCES; |
| 7833 | } |
| 7834 | |
| 7835 | return 0; |
| 7836 | } |
| 7837 | |
| 7838 | static int check_atomic(struct bpf_verifier_env *env, struct bpf_insn *insn) |
| 7839 | { |
| 7840 | switch (insn->imm) { |
| 7841 | case BPF_ADD: |
| 7842 | case BPF_ADD | BPF_FETCH: |
| 7843 | case BPF_AND: |
| 7844 | case BPF_AND | BPF_FETCH: |
| 7845 | case BPF_OR: |
| 7846 | case BPF_OR | BPF_FETCH: |
| 7847 | case BPF_XOR: |
| 7848 | case BPF_XOR | BPF_FETCH: |
| 7849 | case BPF_XCHG: |
| 7850 | case BPF_CMPXCHG: |
| 7851 | return check_atomic_rmw(env, insn); |
| 7852 | case BPF_LOAD_ACQ: |
| 7853 | if (BPF_SIZE(insn->code) == BPF_DW && BITS_PER_LONG != 64) { |
| 7854 | verbose(env, |
| 7855 | "64-bit load-acquires are only supported on 64-bit arches\n"); |
| 7856 | return -EOPNOTSUPP; |
| 7857 | } |
| 7858 | return check_atomic_load(env, insn); |
| 7859 | case BPF_STORE_REL: |
| 7860 | if (BPF_SIZE(insn->code) == BPF_DW && BITS_PER_LONG != 64) { |
| 7861 | verbose(env, |
| 7862 | "64-bit store-releases are only supported on 64-bit arches\n"); |
| 7863 | return -EOPNOTSUPP; |
| 7864 | } |
| 7865 | return check_atomic_store(env, insn); |
| 7866 | default: |
| 7867 | verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", |
| 7868 | insn->imm); |
| 7869 | return -EINVAL; |
| 7870 | } |
| 7871 | } |
| 7872 | |
| 7873 | /* When register 'regno' is used to read the stack (either directly or through |
* a helper function) make sure that it's within the stack boundary and,
* depending on the access type and privileges, that all elements of the
* stack are initialized.
*
* 'off' includes the register's fixed offset (reg->off), but not its
* dynamic part (if any).
| 7879 | * |
| 7880 | * All registers that have been spilled on the stack in the slots within the |
| 7881 | * read offsets are marked as read. |
| 7882 | */ |
| 7883 | static int check_stack_range_initialized( |
| 7884 | struct bpf_verifier_env *env, int regno, int off, |
| 7885 | int access_size, bool zero_size_allowed, |
| 7886 | enum bpf_access_type type, struct bpf_call_arg_meta *meta) |
| 7887 | { |
| 7888 | struct bpf_reg_state *reg = reg_state(env, regno); |
| 7889 | struct bpf_func_state *state = func(env, reg); |
| 7890 | int err, min_off, max_off, i, j, slot, spi; |
| 7891 | /* Some accesses can write anything into the stack, others are |
| 7892 | * read-only. |
| 7893 | */ |
| 7894 | bool clobber = false; |
| 7895 | |
| 7896 | if (access_size == 0 && !zero_size_allowed) { |
| 7897 | verbose(env, "invalid zero-sized read\n"); |
| 7898 | return -EACCES; |
| 7899 | } |
| 7900 | |
| 7901 | if (type == BPF_WRITE) |
| 7902 | clobber = true; |
| 7903 | |
| 7904 | err = check_stack_access_within_bounds(env, regno, off, access_size, type); |
| 7905 | if (err) |
return err;

| 7909 | if (tnum_is_const(reg->var_off)) { |
| 7910 | min_off = max_off = reg->var_off.value + off; |
| 7911 | } else { |
| 7912 | /* Variable offset is prohibited for unprivileged mode for |
| 7913 | * simplicity since it requires corresponding support in |
| 7914 | * Spectre masking for stack ALU. |
| 7915 | * See also retrieve_ptr_limit(). |
| 7916 | */ |
| 7917 | if (!env->bypass_spec_v1) { |
| 7918 | char tn_buf[48]; |
| 7919 | |
| 7920 | tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); |
| 7921 | verbose(env, "R%d variable offset stack access prohibited for !root, var_off=%s\n", |
| 7922 | regno, tn_buf); |
| 7923 | return -EACCES; |
| 7924 | } |
/* Only an initialized buffer on the stack may be accessed with a
* variable offset. With an uninitialized buffer it's hard to
* guarantee that the whole memory range is marked as initialized on
* helper return, since the exact bounds are unknown, which may cause
* an uninitialized stack leak.
| 7930 | */ |
| 7931 | if (meta && meta->raw_mode) |
| 7932 | meta = NULL; |
| 7933 | |
| 7934 | min_off = reg->smin_value + off; |
| 7935 | max_off = reg->smax_value + off; |
| 7936 | } |
| 7937 | |
| 7938 | if (meta && meta->raw_mode) { |
| 7939 | /* Ensure we won't be overwriting dynptrs when simulating byte |
| 7940 | * by byte access in check_helper_call using meta.access_size. |
| 7941 | * This would be a problem if we have a helper in the future |
| 7942 | * which takes: |
| 7943 | * |
| 7944 | * helper(uninit_mem, len, dynptr) |
| 7945 | * |
* Now, uninit_mem may overlap with the dynptr pointer. Hence, it
* may end up writing to the dynptr itself when touching memory from
* arg 1. This can be relaxed on a case-by-case basis for known safe
* cases, but reject by default due to the possibility of aliasing.
| 7951 | */ |
| 7952 | for (i = min_off; i < max_off + access_size; i++) { |
| 7953 | int stack_off = -i - 1; |
| 7954 | |
| 7955 | spi = __get_spi(i); |
| 7956 | /* raw_mode may write past allocated_stack */ |
| 7957 | if (state->allocated_stack <= stack_off) |
| 7958 | continue; |
| 7959 | if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) { |
| 7960 | verbose(env, "potential write to dynptr at off=%d disallowed\n", i); |
| 7961 | return -EACCES; |
| 7962 | } |
| 7963 | } |
| 7964 | meta->access_size = access_size; |
| 7965 | meta->regno = regno; |
| 7966 | return 0; |
| 7967 | } |
| 7968 | |
| 7969 | for (i = min_off; i < max_off + access_size; i++) { |
| 7970 | u8 *stype; |
| 7971 | |
| 7972 | slot = -i - 1; |
| 7973 | spi = slot / BPF_REG_SIZE; |
| 7974 | if (state->allocated_stack <= slot) { |
| 7975 | verbose(env, "allocated_stack too small\n"); |
| 7976 | return -EFAULT; |
| 7977 | } |
| 7978 | |
| 7979 | stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; |
| 7980 | if (*stype == STACK_MISC) |
| 7981 | goto mark; |
| 7982 | if ((*stype == STACK_ZERO) || |
| 7983 | (*stype == STACK_INVALID && env->allow_uninit_stack)) { |
| 7984 | if (clobber) { |
| 7985 | /* helper can write anything into the stack */ |
| 7986 | *stype = STACK_MISC; |
| 7987 | } |
| 7988 | goto mark; |
| 7989 | } |
| 7990 | |
| 7991 | if (is_spilled_reg(&state->stack[spi]) && |
| 7992 | (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || |
| 7993 | env->allow_ptr_leaks)) { |
| 7994 | if (clobber) { |
| 7995 | __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); |
| 7996 | for (j = 0; j < BPF_REG_SIZE; j++) |
| 7997 | scrub_spilled_slot(&state->stack[spi].slot_type[j]); |
| 7998 | } |
| 7999 | goto mark; |
| 8000 | } |
| 8001 | |
| 8002 | if (tnum_is_const(reg->var_off)) { |
| 8003 | verbose(env, "invalid read from stack R%d off %d+%d size %d\n", |
| 8004 | regno, min_off, i - min_off, access_size); |
| 8005 | } else { |
| 8006 | char tn_buf[48]; |
| 8007 | |
| 8008 | tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); |
| 8009 | verbose(env, "invalid read from stack R%d var_off %s+%d size %d\n", |
| 8010 | regno, tn_buf, i - min_off, access_size); |
| 8011 | } |
| 8012 | return -EACCES; |
| 8013 | mark: |
| 8014 | /* reading any byte out of 8-byte 'spill_slot' will cause |
| 8015 | * the whole slot to be marked as 'read' |
| 8016 | */ |
| 8017 | mark_reg_read(env, &state->stack[spi].spilled_ptr, |
| 8018 | state->stack[spi].spilled_ptr.parent, |
| 8019 | REG_LIVE_READ64); |
/* We do not set REG_LIVE_WRITTEN for the stack slot, as we cannot
* be sure whether the stack slot is written to or not. Hence, we
* must still conservatively propagate reads upwards even if the
* helper may write to the entire memory range.
| 8024 | */ |
| 8025 | } |
| 8026 | return 0; |
| 8027 | } |
| 8028 | |
| 8029 | static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, |
| 8030 | int access_size, enum bpf_access_type access_type, |
| 8031 | bool zero_size_allowed, |
| 8032 | struct bpf_call_arg_meta *meta) |
| 8033 | { |
| 8034 | struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; |
| 8035 | u32 *max_access; |
| 8036 | |
| 8037 | switch (base_type(reg->type)) { |
| 8038 | case PTR_TO_PACKET: |
| 8039 | case PTR_TO_PACKET_META: |
| 8040 | return check_packet_access(env, regno, reg->off, access_size, |
| 8041 | zero_size_allowed); |
| 8042 | case PTR_TO_MAP_KEY: |
| 8043 | if (access_type == BPF_WRITE) { |
| 8044 | verbose(env, "R%d cannot write into %s\n", regno, |
| 8045 | reg_type_str(env, reg->type)); |
| 8046 | return -EACCES; |
| 8047 | } |
| 8048 | return check_mem_region_access(env, regno, reg->off, access_size, |
| 8049 | reg->map_ptr->key_size, false); |
| 8050 | case PTR_TO_MAP_VALUE: |
| 8051 | if (check_map_access_type(env, regno, reg->off, access_size, access_type)) |
| 8052 | return -EACCES; |
| 8053 | return check_map_access(env, regno, reg->off, access_size, |
| 8054 | zero_size_allowed, ACCESS_HELPER); |
| 8055 | case PTR_TO_MEM: |
| 8056 | if (type_is_rdonly_mem(reg->type)) { |
| 8057 | if (access_type == BPF_WRITE) { |
| 8058 | verbose(env, "R%d cannot write into %s\n", regno, |
| 8059 | reg_type_str(env, reg->type)); |
| 8060 | return -EACCES; |
| 8061 | } |
| 8062 | } |
| 8063 | return check_mem_region_access(env, regno, reg->off, |
| 8064 | access_size, reg->mem_size, |
| 8065 | zero_size_allowed); |
| 8066 | case PTR_TO_BUF: |
| 8067 | if (type_is_rdonly_mem(reg->type)) { |
| 8068 | if (access_type == BPF_WRITE) { |
| 8069 | verbose(env, "R%d cannot write into %s\n", regno, |
| 8070 | reg_type_str(env, reg->type)); |
| 8071 | return -EACCES; |
| 8072 | } |
| 8073 | |
| 8074 | max_access = &env->prog->aux->max_rdonly_access; |
| 8075 | } else { |
| 8076 | max_access = &env->prog->aux->max_rdwr_access; |
| 8077 | } |
| 8078 | return check_buffer_access(env, reg, regno, reg->off, |
| 8079 | access_size, zero_size_allowed, |
| 8080 | max_access); |
| 8081 | case PTR_TO_STACK: |
| 8082 | return check_stack_range_initialized( |
| 8083 | env, |
| 8084 | regno, reg->off, access_size, |
| 8085 | zero_size_allowed, access_type, meta); |
| 8086 | case PTR_TO_BTF_ID: |
| 8087 | return check_ptr_to_btf_access(env, regs, regno, reg->off, |
| 8088 | access_size, BPF_READ, -1); |
| 8089 | case PTR_TO_CTX: |
/* in case the function doesn't know how to access the context
* (because we are in a program of type SYSCALL, for example), we
* cannot statically check its size.
* Dynamically check it now.
| 8094 | */ |
| 8095 | if (!env->ops->convert_ctx_access) { |
| 8096 | int offset = access_size - 1; |
| 8097 | |
| 8098 | /* Allow zero-byte read from PTR_TO_CTX */ |
| 8099 | if (access_size == 0) |
| 8100 | return zero_size_allowed ? 0 : -EACCES; |
| 8101 | |
| 8102 | return check_mem_access(env, env->insn_idx, regno, offset, BPF_B, |
| 8103 | access_type, -1, false, false); |
| 8104 | } |
| 8105 | |
| 8106 | fallthrough; |
| 8107 | default: /* scalar_value or invalid ptr */ |
| 8108 | /* Allow zero-byte read from NULL, regardless of pointer type */ |
| 8109 | if (zero_size_allowed && access_size == 0 && |
| 8110 | register_is_null(reg)) |
| 8111 | return 0; |
| 8112 | |
| 8113 | verbose(env, "R%d type=%s ", regno, |
| 8114 | reg_type_str(env, reg->type)); |
| 8115 | verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK)); |
| 8116 | return -EACCES; |
| 8117 | } |
| 8118 | } |
| 8119 | |
| 8120 | /* verify arguments to helpers or kfuncs consisting of a pointer and an access |
| 8121 | * size. |
| 8122 | * |
| 8123 | * @regno is the register containing the access size. regno-1 is the register |
| 8124 | * containing the pointer. |
| 8125 | */ |
| 8126 | static int check_mem_size_reg(struct bpf_verifier_env *env, |
| 8127 | struct bpf_reg_state *reg, u32 regno, |
| 8128 | enum bpf_access_type access_type, |
| 8129 | bool zero_size_allowed, |
| 8130 | struct bpf_call_arg_meta *meta) |
| 8131 | { |
| 8132 | int err; |
| 8133 | |
| 8134 | /* This is used to refine r0 return value bounds for helpers |
| 8135 | * that enforce this value as an upper bound on return values. |
| 8136 | * See do_refine_retval_range() for helpers that can refine |
* the return value. The C type of the size argument is u32, so we
* pull the register bound from umax_value; however, if it can be
* negative the verifier errors out. Only upper bounds can be
* learned because the retval is an int type and negative retvals
* are allowed.
| 8141 | */ |
| 8142 | meta->msize_max_value = reg->umax_value; |
| 8143 | |
| 8144 | /* The register is SCALAR_VALUE; the access check happens using |
| 8145 | * its boundaries. For unprivileged variable accesses, disable |
| 8146 | * raw mode so that the program is required to initialize all |
| 8147 | * the memory that the helper could just partially fill up. |
| 8148 | */ |
| 8149 | if (!tnum_is_const(reg->var_off)) |
| 8150 | meta = NULL; |
| 8151 | |
| 8152 | if (reg->smin_value < 0) { |
| 8153 | verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", |
| 8154 | regno); |
| 8155 | return -EACCES; |
| 8156 | } |
| 8157 | |
| 8158 | if (reg->umin_value == 0 && !zero_size_allowed) { |
| 8159 | verbose(env, "R%d invalid zero-sized read: u64=[%lld,%lld]\n", |
| 8160 | regno, reg->umin_value, reg->umax_value); |
| 8161 | return -EACCES; |
| 8162 | } |
| 8163 | |
| 8164 | if (reg->umax_value >= BPF_MAX_VAR_SIZ) { |
| 8165 | verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", |
| 8166 | regno); |
| 8167 | return -EACCES; |
| 8168 | } |
| 8169 | err = check_helper_mem_access(env, regno - 1, reg->umax_value, |
| 8170 | access_type, zero_size_allowed, meta); |
| 8171 | if (!err) |
| 8172 | err = mark_chain_precision(env, regno); |
| 8173 | return err; |
| 8174 | } |
| 8175 | |
| 8176 | static int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, |
| 8177 | u32 regno, u32 mem_size) |
| 8178 | { |
| 8179 | bool may_be_null = type_may_be_null(reg->type); |
| 8180 | struct bpf_reg_state saved_reg; |
| 8181 | int err; |
| 8182 | |
| 8183 | if (register_is_null(reg)) |
| 8184 | return 0; |
| 8185 | |
/* Assuming that the register contains a value, check whether the memory
* access is safe. Temporarily save and restore the register's state as
| 8188 | * the conversion shouldn't be visible to a caller. |
| 8189 | */ |
| 8190 | if (may_be_null) { |
| 8191 | saved_reg = *reg; |
| 8192 | mark_ptr_not_null_reg(reg); |
| 8193 | } |
| 8194 | |
| 8195 | err = check_helper_mem_access(env, regno, mem_size, BPF_READ, true, NULL); |
| 8196 | err = err ?: check_helper_mem_access(env, regno, mem_size, BPF_WRITE, true, NULL); |
| 8197 | |
| 8198 | if (may_be_null) |
| 8199 | *reg = saved_reg; |
| 8200 | |
| 8201 | return err; |
| 8202 | } |
| 8203 | |
| 8204 | static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, |
| 8205 | u32 regno) |
| 8206 | { |
| 8207 | struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1]; |
| 8208 | bool may_be_null = type_may_be_null(mem_reg->type); |
| 8209 | struct bpf_reg_state saved_reg; |
| 8210 | struct bpf_call_arg_meta meta; |
| 8211 | int err; |
| 8212 | |
| 8213 | WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5); |
| 8214 | |
| 8215 | memset(&meta, 0, sizeof(meta)); |
| 8216 | |
| 8217 | if (may_be_null) { |
| 8218 | saved_reg = *mem_reg; |
| 8219 | mark_ptr_not_null_reg(mem_reg); |
| 8220 | } |
| 8221 | |
| 8222 | err = check_mem_size_reg(env, reg, regno, BPF_READ, true, &meta); |
| 8223 | err = err ?: check_mem_size_reg(env, reg, regno, BPF_WRITE, true, &meta); |
| 8224 | |
| 8225 | if (may_be_null) |
| 8226 | *mem_reg = saved_reg; |
| 8227 | |
| 8228 | return err; |
| 8229 | } |
| 8230 | |
| 8231 | enum { |
| 8232 | PROCESS_SPIN_LOCK = (1 << 0), |
| 8233 | PROCESS_RES_LOCK = (1 << 1), |
| 8234 | PROCESS_LOCK_IRQ = (1 << 2), |
| 8235 | }; |
| 8236 | |
| 8237 | /* Implementation details: |
| 8238 | * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL. |
| 8239 | * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL. |
| 8240 | * Two bpf_map_lookups (even with the same key) will have different reg->id. |
| 8241 | * Two separate bpf_obj_new will also have different reg->id. |
| 8242 | * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier |
| 8243 | * clears reg->id after value_or_null->value transition, since the verifier only |
| 8244 | * cares about the range of access to valid map value pointer and doesn't care |
| 8245 | * about actual address of the map element. |
| 8246 | * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps |
| 8247 | * reg->id > 0 after value_or_null->value transition. By doing so |
| 8248 | * two bpf_map_lookups will be considered two different pointers that |
| 8249 | * point to different bpf_spin_locks. Likewise for pointers to allocated objects |
| 8250 | * returned from bpf_obj_new. |
* The verifier allows taking only one bpf_spin_lock at a time to avoid
* deadlocks.
| 8253 | * Since only one bpf_spin_lock is allowed the checks are simpler than |
| 8254 | * reg_is_refcounted() logic. The verifier needs to remember only |
| 8255 | * one spin_lock instead of array of acquired_refs. |
| 8256 | * env->cur_state->active_locks remembers which map value element or allocated |
| 8257 | * object got locked and clears it after bpf_spin_unlock. |
| 8258 | */ |
| 8259 | static int process_spin_lock(struct bpf_verifier_env *env, int regno, int flags) |
| 8260 | { |
| 8261 | bool is_lock = flags & PROCESS_SPIN_LOCK, is_res_lock = flags & PROCESS_RES_LOCK; |
| 8262 | const char *lock_str = is_res_lock ? "bpf_res_spin" : "bpf_spin"; |
| 8263 | struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; |
| 8264 | struct bpf_verifier_state *cur = env->cur_state; |
| 8265 | bool is_const = tnum_is_const(reg->var_off); |
| 8266 | bool is_irq = flags & PROCESS_LOCK_IRQ; |
| 8267 | u64 val = reg->var_off.value; |
| 8268 | struct bpf_map *map = NULL; |
| 8269 | struct btf *btf = NULL; |
| 8270 | struct btf_record *rec; |
| 8271 | u32 spin_lock_off; |
| 8272 | int err; |
| 8273 | |
| 8274 | if (!is_const) { |
| 8275 | verbose(env, |
| 8276 | "R%d doesn't have constant offset. %s_lock has to be at the constant offset\n", |
| 8277 | regno, lock_str); |
| 8278 | return -EINVAL; |
| 8279 | } |
| 8280 | if (reg->type == PTR_TO_MAP_VALUE) { |
| 8281 | map = reg->map_ptr; |
| 8282 | if (!map->btf) { |
| 8283 | verbose(env, |
| 8284 | "map '%s' has to have BTF in order to use %s_lock\n", |
| 8285 | map->name, lock_str); |
| 8286 | return -EINVAL; |
| 8287 | } |
| 8288 | } else { |
| 8289 | btf = reg->btf; |
| 8290 | } |
| 8291 | |
| 8292 | rec = reg_btf_record(reg); |
| 8293 | if (!btf_record_has_field(rec, is_res_lock ? BPF_RES_SPIN_LOCK : BPF_SPIN_LOCK)) { |
| 8294 | verbose(env, "%s '%s' has no valid %s_lock\n", map ? "map" : "local", |
| 8295 | map ? map->name : "kptr", lock_str); |
| 8296 | return -EINVAL; |
| 8297 | } |
| 8298 | spin_lock_off = is_res_lock ? rec->res_spin_lock_off : rec->spin_lock_off; |
| 8299 | if (spin_lock_off != val + reg->off) { |
| 8300 | verbose(env, "off %lld doesn't point to 'struct %s_lock' that is at %d\n", |
| 8301 | val + reg->off, lock_str, spin_lock_off); |
| 8302 | return -EINVAL; |
| 8303 | } |
| 8304 | if (is_lock) { |
| 8305 | void *ptr; |
| 8306 | int type; |
| 8307 | |
| 8308 | if (map) |
| 8309 | ptr = map; |
| 8310 | else |
| 8311 | ptr = btf; |
| 8312 | |
| 8313 | if (!is_res_lock && cur->active_locks) { |
| 8314 | if (find_lock_state(env->cur_state, REF_TYPE_LOCK, 0, NULL)) { |
| 8315 | verbose(env, |
| 8316 | "Locking two bpf_spin_locks are not allowed\n"); |
| 8317 | return -EINVAL; |
| 8318 | } |
| 8319 | } else if (is_res_lock && cur->active_locks) { |
| 8320 | if (find_lock_state(env->cur_state, REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ, reg->id, ptr)) { |
| 8321 | verbose(env, "Acquiring the same lock again, AA deadlock detected\n"); |
| 8322 | return -EINVAL; |
| 8323 | } |
| 8324 | } |
| 8325 | |
| 8326 | if (is_res_lock && is_irq) |
| 8327 | type = REF_TYPE_RES_LOCK_IRQ; |
| 8328 | else if (is_res_lock) |
| 8329 | type = REF_TYPE_RES_LOCK; |
| 8330 | else |
| 8331 | type = REF_TYPE_LOCK; |
| 8332 | err = acquire_lock_state(env, env->insn_idx, type, reg->id, ptr); |
| 8333 | if (err < 0) { |
| 8334 | verbose(env, "Failed to acquire lock state\n"); |
| 8335 | return err; |
| 8336 | } |
| 8337 | } else { |
| 8338 | void *ptr; |
| 8339 | int type; |
| 8340 | |
| 8341 | if (map) |
| 8342 | ptr = map; |
| 8343 | else |
| 8344 | ptr = btf; |
| 8345 | |
| 8346 | if (!cur->active_locks) { |
| 8347 | verbose(env, "%s_unlock without taking a lock\n", lock_str); |
| 8348 | return -EINVAL; |
| 8349 | } |
| 8350 | |
| 8351 | if (is_res_lock && is_irq) |
| 8352 | type = REF_TYPE_RES_LOCK_IRQ; |
| 8353 | else if (is_res_lock) |
| 8354 | type = REF_TYPE_RES_LOCK; |
| 8355 | else |
| 8356 | type = REF_TYPE_LOCK; |
| 8357 | if (!find_lock_state(cur, type, reg->id, ptr)) { |
| 8358 | verbose(env, "%s_unlock of different lock\n", lock_str); |
| 8359 | return -EINVAL; |
| 8360 | } |
| 8361 | if (reg->id != cur->active_lock_id || ptr != cur->active_lock_ptr) { |
| 8362 | verbose(env, "%s_unlock cannot be out of order\n", lock_str); |
| 8363 | return -EINVAL; |
| 8364 | } |
| 8365 | if (release_lock_state(cur, type, reg->id, ptr)) { |
| 8366 | verbose(env, "%s_unlock of different lock\n", lock_str); |
| 8367 | return -EINVAL; |
| 8368 | } |
| 8369 | |
| 8370 | invalidate_non_owning_refs(env); |
| 8371 | } |
| 8372 | return 0; |
| 8373 | } |
| 8374 | |
| 8375 | static int process_timer_func(struct bpf_verifier_env *env, int regno, |
| 8376 | struct bpf_call_arg_meta *meta) |
| 8377 | { |
| 8378 | struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; |
| 8379 | bool is_const = tnum_is_const(reg->var_off); |
| 8380 | struct bpf_map *map = reg->map_ptr; |
| 8381 | u64 val = reg->var_off.value; |
| 8382 | |
| 8383 | if (!is_const) { |
| 8384 | verbose(env, |
| 8385 | "R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n", |
| 8386 | regno); |
| 8387 | return -EINVAL; |
| 8388 | } |
| 8389 | if (!map->btf) { |
| 8390 | verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n", |
| 8391 | map->name); |
| 8392 | return -EINVAL; |
| 8393 | } |
| 8394 | if (!btf_record_has_field(map->record, BPF_TIMER)) { |
| 8395 | verbose(env, "map '%s' has no valid bpf_timer\n", map->name); |
| 8396 | return -EINVAL; |
| 8397 | } |
| 8398 | if (map->record->timer_off != val + reg->off) { |
| 8399 | verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n", |
| 8400 | val + reg->off, map->record->timer_off); |
| 8401 | return -EINVAL; |
| 8402 | } |
| 8403 | if (meta->map_ptr) { |
| 8404 | verifier_bug(env, "Two map pointers in a timer helper"); |
| 8405 | return -EFAULT; |
| 8406 | } |
| 8407 | meta->map_uid = reg->map_uid; |
| 8408 | meta->map_ptr = map; |
| 8409 | return 0; |
| 8410 | } |
| 8411 | |
| 8412 | static int process_wq_func(struct bpf_verifier_env *env, int regno, |
| 8413 | struct bpf_kfunc_call_arg_meta *meta) |
| 8414 | { |
| 8415 | struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; |
| 8416 | struct bpf_map *map = reg->map_ptr; |
| 8417 | u64 val = reg->var_off.value; |
| 8418 | |
| 8419 | if (map->record->wq_off != val + reg->off) { |
| 8420 | verbose(env, "off %lld doesn't point to 'struct bpf_wq' that is at %d\n", |
| 8421 | val + reg->off, map->record->wq_off); |
| 8422 | return -EINVAL; |
| 8423 | } |
| 8424 | meta->map.uid = reg->map_uid; |
| 8425 | meta->map.ptr = map; |
| 8426 | return 0; |
| 8427 | } |
| 8428 | |
| 8429 | static int process_kptr_func(struct bpf_verifier_env *env, int regno, |
| 8430 | struct bpf_call_arg_meta *meta) |
| 8431 | { |
| 8432 | struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; |
| 8433 | struct btf_field *kptr_field; |
| 8434 | struct bpf_map *map_ptr; |
| 8435 | struct btf_record *rec; |
| 8436 | u32 kptr_off; |
| 8437 | |
| 8438 | if (type_is_ptr_alloc_obj(reg->type)) { |
| 8439 | rec = reg_btf_record(reg); |
| 8440 | } else { /* PTR_TO_MAP_VALUE */ |
| 8441 | map_ptr = reg->map_ptr; |
| 8442 | if (!map_ptr->btf) { |
| 8443 | verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n", |
| 8444 | map_ptr->name); |
| 8445 | return -EINVAL; |
| 8446 | } |
| 8447 | rec = map_ptr->record; |
| 8448 | meta->map_ptr = map_ptr; |
| 8449 | } |
| 8450 | |
| 8451 | if (!tnum_is_const(reg->var_off)) { |
| 8452 | verbose(env, |
| 8453 | "R%d doesn't have constant offset. kptr has to be at the constant offset\n", |
| 8454 | regno); |
| 8455 | return -EINVAL; |
| 8456 | } |
| 8457 | |
| 8458 | if (!btf_record_has_field(rec, BPF_KPTR)) { |
| 8459 | verbose(env, "R%d has no valid kptr\n", regno); |
| 8460 | return -EINVAL; |
| 8461 | } |
| 8462 | |
| 8463 | kptr_off = reg->off + reg->var_off.value; |
| 8464 | kptr_field = btf_record_find(rec, kptr_off, BPF_KPTR); |
| 8465 | if (!kptr_field) { |
| 8466 | verbose(env, "off=%d doesn't point to kptr\n", kptr_off); |
| 8467 | return -EACCES; |
| 8468 | } |
| 8469 | if (kptr_field->type != BPF_KPTR_REF && kptr_field->type != BPF_KPTR_PERCPU) { |
| 8470 | verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off); |
| 8471 | return -EACCES; |
| 8472 | } |
| 8473 | meta->kptr_field = kptr_field; |
| 8474 | return 0; |
| 8475 | } |
| 8476 | |
| 8477 | /* There are two register types representing a bpf_dynptr, one is PTR_TO_STACK |
| 8478 | * which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR. |
| 8479 | * |
| 8480 | * In both cases we deal with the first 8 bytes, but need to mark the next 8 |
| 8481 | * bytes as STACK_DYNPTR in case of PTR_TO_STACK. In case of |
| 8482 | * CONST_PTR_TO_DYNPTR, we are guaranteed to get the beginning of the object. |
| 8483 | * |
| 8484 | * Mutability of bpf_dynptr is at two levels, one is at the level of struct |
| 8485 | * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct |
| 8486 | * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can |
| 8487 | * mutate the view of the dynptr and also possibly destroy it. In the latter |
| 8488 | * case, it cannot mutate the bpf_dynptr itself but it can still mutate the |
| 8489 | * memory that dynptr points to. |
| 8490 | * |
| 8491 | * The verifier will keep track both levels of mutation (bpf_dynptr's in |
| 8492 | * reg->type and the memory's in reg->dynptr.type), but there is no support for |
| 8493 | * readonly dynptr view yet, hence only the first case is tracked and checked. |
| 8494 | * |
| 8495 | * This is consistent with how C applies the const modifier to a struct object, |
| 8496 | * where the pointer itself inside bpf_dynptr becomes const but not what it |
| 8497 | * points to. |
| 8498 | * |
| 8499 | * Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument |
| 8500 | * type, and declare it as 'const struct bpf_dynptr *' in their prototype. |
| 8501 | */ |
| 8502 | static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx, |
| 8503 | enum bpf_arg_type arg_type, int clone_ref_obj_id) |
| 8504 | { |
| 8505 | struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; |
| 8506 | int err; |
| 8507 | |
| 8508 | if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) { |
| 8509 | verbose(env, |
| 8510 | "arg#%d expected pointer to stack or const struct bpf_dynptr\n", |
| 8511 | regno - 1); |
| 8512 | return -EINVAL; |
| 8513 | } |
| 8514 | |
| 8515 | /* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an |
| 8516 | * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*): |
| 8517 | */ |
| 8518 | if ((arg_type & (MEM_UNINIT | MEM_RDONLY)) == (MEM_UNINIT | MEM_RDONLY)) { |
| 8519 | verbose(env, "verifier internal error: misconfigured dynptr helper type flags\n"); |
| 8520 | return -EFAULT; |
| 8521 | } |
| 8522 | |
| 8523 | /* MEM_UNINIT - Points to memory that is an appropriate candidate for |
| 8524 | * constructing a mutable bpf_dynptr object. |
| 8525 | * |
| 8526 | * Currently, this is only possible with PTR_TO_STACK |
| 8527 | * pointing to a region of at least 16 bytes which doesn't |
| 8528 | * contain an existing bpf_dynptr. |
| 8529 | * |
* MEM_RDONLY - Points to an initialized bpf_dynptr that will not be
| 8531 | * mutated or destroyed. However, the memory it points to |
| 8532 | * may be mutated. |
| 8533 | * |
* None - Points to an initialized dynptr that can be mutated and
| 8535 | * destroyed, including mutation of the memory it points |
| 8536 | * to. |
| 8537 | */ |
| 8538 | if (arg_type & MEM_UNINIT) { |
| 8539 | int i; |
| 8540 | |
| 8541 | if (!is_dynptr_reg_valid_uninit(env, reg)) { |
| 8542 | verbose(env, "Dynptr has to be an uninitialized dynptr\n"); |
| 8543 | return -EINVAL; |
| 8544 | } |
| 8545 | |
| 8546 | /* we write BPF_DW bits (8 bytes) at a time */ |
| 8547 | for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) { |
| 8548 | err = check_mem_access(env, insn_idx, regno, |
| 8549 | i, BPF_DW, BPF_WRITE, -1, false, false); |
| 8550 | if (err) |
| 8551 | return err; |
| 8552 | } |
| 8553 | |
| 8554 | err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, clone_ref_obj_id); |
| 8555 | } else /* MEM_RDONLY and None case from above */ { |
| 8556 | /* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */ |
| 8557 | if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) { |
| 8558 | verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n"); |
| 8559 | return -EINVAL; |
| 8560 | } |
| 8561 | |
| 8562 | if (!is_dynptr_reg_valid_init(env, reg)) { |
| 8563 | verbose(env, |
| 8564 | "Expected an initialized dynptr as arg #%d\n", |
| 8565 | regno - 1); |
| 8566 | return -EINVAL; |
| 8567 | } |
| 8568 | |
| 8569 | /* Fold modifiers (in this case, MEM_RDONLY) when checking expected type */ |
| 8570 | if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) { |
| 8571 | verbose(env, |
| 8572 | "Expected a dynptr of type %s as arg #%d\n", |
| 8573 | dynptr_type_str(arg_to_dynptr_type(arg_type)), regno - 1); |
| 8574 | return -EINVAL; |
| 8575 | } |
| 8576 | |
| 8577 | err = mark_dynptr_read(env, reg); |
| 8578 | } |
| 8579 | return err; |
| 8580 | } |
| 8581 | |
| 8582 | static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi) |
| 8583 | { |
| 8584 | struct bpf_func_state *state = func(env, reg); |
| 8585 | |
| 8586 | return state->stack[spi].spilled_ptr.ref_obj_id; |
| 8587 | } |
| 8588 | |
| 8589 | static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta) |
| 8590 | { |
| 8591 | return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY); |
| 8592 | } |
| 8593 | |
| 8594 | static bool is_iter_new_kfunc(struct bpf_kfunc_call_arg_meta *meta) |
| 8595 | { |
| 8596 | return meta->kfunc_flags & KF_ITER_NEW; |
| 8597 | } |
| 8598 | |
| 8599 | static bool is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta) |
| 8600 | { |
| 8601 | return meta->kfunc_flags & KF_ITER_NEXT; |
| 8602 | } |
| 8603 | |
| 8604 | static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta) |
| 8605 | { |
| 8606 | return meta->kfunc_flags & KF_ITER_DESTROY; |
| 8607 | } |
| 8608 | |
| 8609 | static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg_idx, |
| 8610 | const struct btf_param *arg) |
| 8611 | { |
/* btf_check_iter_kfuncs() guarantees that the first argument of any iter
* kfunc is an iter state pointer
| 8614 | */ |
| 8615 | if (is_iter_kfunc(meta)) |
| 8616 | return arg_idx == 0; |
| 8617 | |
| 8618 | /* iter passed as an argument to a generic kfunc */ |
| 8619 | return btf_param_match_suffix(meta->btf, arg, "__iter"); |
| 8620 | } |
| 8621 | |
| 8622 | static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx, |
| 8623 | struct bpf_kfunc_call_arg_meta *meta) |
| 8624 | { |
| 8625 | struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; |
| 8626 | const struct btf_type *t; |
| 8627 | int spi, err, i, nr_slots, btf_id; |
| 8628 | |
| 8629 | if (reg->type != PTR_TO_STACK) { |
| 8630 | verbose(env, "arg#%d expected pointer to an iterator on stack\n", regno - 1); |
| 8631 | return -EINVAL; |
| 8632 | } |
| 8633 | |
| 8634 | /* For iter_{new,next,destroy} functions, btf_check_iter_kfuncs() |
	 * ensures struct convention, so we don't need to do any BTF
| 8636 | * validation here. But given iter state can be passed as a parameter |
| 8637 | * to any kfunc, if arg has "__iter" suffix, we need to be a bit more |
| 8638 | * conservative here. |
| 8639 | */ |
| 8640 | btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, regno - 1); |
| 8641 | if (btf_id < 0) { |
| 8642 | verbose(env, "expected valid iter pointer as arg #%d\n", regno - 1); |
| 8643 | return -EINVAL; |
| 8644 | } |
| 8645 | t = btf_type_by_id(meta->btf, btf_id); |
| 8646 | nr_slots = t->size / BPF_REG_SIZE; |
| 8647 | |
| 8648 | if (is_iter_new_kfunc(meta)) { |
| 8649 | /* bpf_iter_<type>_new() expects pointer to uninit iter state */ |
| 8650 | if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) { |
| 8651 | verbose(env, "expected uninitialized iter_%s as arg #%d\n", |
| 8652 | iter_type_str(meta->btf, btf_id), regno - 1); |
| 8653 | return -EINVAL; |
| 8654 | } |
| 8655 | |
		for (i = 0; i < nr_slots * BPF_REG_SIZE; i += BPF_REG_SIZE) {
| 8657 | err = check_mem_access(env, insn_idx, regno, |
| 8658 | i, BPF_DW, BPF_WRITE, -1, false, false); |
| 8659 | if (err) |
| 8660 | return err; |
| 8661 | } |
| 8662 | |
| 8663 | err = mark_stack_slots_iter(env, meta, reg, insn_idx, meta->btf, btf_id, nr_slots); |
| 8664 | if (err) |
| 8665 | return err; |
| 8666 | } else { |
| 8667 | /* iter_next() or iter_destroy(), as well as any kfunc |
| 8668 | * accepting iter argument, expect initialized iter state |
| 8669 | */ |
| 8670 | err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots); |
| 8671 | switch (err) { |
| 8672 | case 0: |
| 8673 | break; |
| 8674 | case -EINVAL: |
| 8675 | verbose(env, "expected an initialized iter_%s as arg #%d\n", |
| 8676 | iter_type_str(meta->btf, btf_id), regno - 1); |
| 8677 | return err; |
| 8678 | case -EPROTO: |
| 8679 | verbose(env, "expected an RCU CS when using %s\n", meta->func_name); |
| 8680 | return err; |
| 8681 | default: |
| 8682 | return err; |
| 8683 | } |
| 8684 | |
| 8685 | spi = iter_get_spi(env, reg, nr_slots); |
| 8686 | if (spi < 0) |
| 8687 | return spi; |
| 8688 | |
| 8689 | err = mark_iter_read(env, reg, spi, nr_slots); |
| 8690 | if (err) |
| 8691 | return err; |
| 8692 | |
| 8693 | /* remember meta->iter info for process_iter_next_call() */ |
| 8694 | meta->iter.spi = spi; |
| 8695 | meta->iter.frameno = reg->frameno; |
| 8696 | meta->ref_obj_id = iter_ref_obj_id(env, reg, spi); |
| 8697 | |
| 8698 | if (is_iter_destroy_kfunc(meta)) { |
| 8699 | err = unmark_stack_slots_iter(env, reg, nr_slots); |
| 8700 | if (err) |
| 8701 | return err; |
| 8702 | } |
| 8703 | } |
| 8704 | |
| 8705 | return 0; |
| 8706 | } |
| 8707 | |
| 8708 | /* Look for a previous loop entry at insn_idx: nearest parent state |
| 8709 | * stopped at insn_idx with callsites matching those in cur->frame. |
| 8710 | */ |
| 8711 | static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env, |
| 8712 | struct bpf_verifier_state *cur, |
| 8713 | int insn_idx) |
| 8714 | { |
| 8715 | struct bpf_verifier_state_list *sl; |
| 8716 | struct bpf_verifier_state *st; |
| 8717 | struct list_head *pos, *head; |
| 8718 | |
| 8719 | /* Explored states are pushed in stack order, most recent states come first */ |
| 8720 | head = explored_state(env, insn_idx); |
| 8721 | list_for_each(pos, head) { |
| 8722 | sl = container_of(pos, struct bpf_verifier_state_list, node); |
| 8723 | /* If st->branches != 0 state is a part of current DFS verification path, |
		 * hence cur and st form a loop.
| 8725 | */ |
| 8726 | st = &sl->state; |
| 8727 | if (st->insn_idx == insn_idx && st->branches && same_callsites(st, cur) && |
| 8728 | st->dfs_depth < cur->dfs_depth) |
| 8729 | return st; |
| 8730 | } |
| 8731 | |
| 8732 | return NULL; |
| 8733 | } |
| 8734 | |
| 8735 | static void reset_idmap_scratch(struct bpf_verifier_env *env); |
| 8736 | static bool regs_exact(const struct bpf_reg_state *rold, |
| 8737 | const struct bpf_reg_state *rcur, |
| 8738 | struct bpf_idmap *idmap); |
| 8739 | |
| 8740 | static void maybe_widen_reg(struct bpf_verifier_env *env, |
| 8741 | struct bpf_reg_state *rold, struct bpf_reg_state *rcur, |
| 8742 | struct bpf_idmap *idmap) |
| 8743 | { |
| 8744 | if (rold->type != SCALAR_VALUE) |
| 8745 | return; |
| 8746 | if (rold->type != rcur->type) |
| 8747 | return; |
| 8748 | if (rold->precise || rcur->precise || regs_exact(rold, rcur, idmap)) |
| 8749 | return; |
| 8750 | __mark_reg_unknown(env, rcur); |
| 8751 | } |
| 8752 | |
| 8753 | static int widen_imprecise_scalars(struct bpf_verifier_env *env, |
| 8754 | struct bpf_verifier_state *old, |
| 8755 | struct bpf_verifier_state *cur) |
| 8756 | { |
| 8757 | struct bpf_func_state *fold, *fcur; |
| 8758 | int i, fr; |
| 8759 | |
| 8760 | reset_idmap_scratch(env); |
| 8761 | for (fr = old->curframe; fr >= 0; fr--) { |
| 8762 | fold = old->frame[fr]; |
| 8763 | fcur = cur->frame[fr]; |
| 8764 | |
| 8765 | for (i = 0; i < MAX_BPF_REG; i++) |
| 8766 | maybe_widen_reg(env, |
| 8767 | &fold->regs[i], |
| 8768 | &fcur->regs[i], |
| 8769 | &env->idmap_scratch); |
| 8770 | |
| 8771 | for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) { |
| 8772 | if (!is_spilled_reg(&fold->stack[i]) || |
| 8773 | !is_spilled_reg(&fcur->stack[i])) |
| 8774 | continue; |
| 8775 | |
| 8776 | maybe_widen_reg(env, |
| 8777 | &fold->stack[i].spilled_ptr, |
| 8778 | &fcur->stack[i].spilled_ptr, |
| 8779 | &env->idmap_scratch); |
| 8780 | } |
| 8781 | } |
| 8782 | return 0; |
| 8783 | } |
| 8784 | |
| 8785 | static struct bpf_reg_state *get_iter_from_state(struct bpf_verifier_state *cur_st, |
| 8786 | struct bpf_kfunc_call_arg_meta *meta) |
| 8787 | { |
| 8788 | int iter_frameno = meta->iter.frameno; |
| 8789 | int iter_spi = meta->iter.spi; |
| 8790 | |
| 8791 | return &cur_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr; |
| 8792 | } |
| 8793 | |
| 8794 | /* process_iter_next_call() is called when verifier gets to iterator's next |
| 8795 | * "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer |
| 8796 | * to it as just "iter_next()" in comments below. |
| 8797 | * |
| 8798 | * BPF verifier relies on a crucial contract for any iter_next() |
| 8799 | * implementation: it should *eventually* return NULL, and once that happens |
| 8800 | * it should keep returning NULL. That is, once iterator exhausts elements to |
| 8801 | * iterate, it should never reset or spuriously return new elements. |
| 8802 | * |
 * With the assumption of such a contract, process_iter_next_call() simulates
 * a fork in the verifier state to validate loop logic correctness and safety
 * without having to simulate an infinite number of iterations.
| 8806 | * |
| 8807 | * In current state, we first assume that iter_next() returned NULL and |
| 8808 | * iterator state is set to DRAINED (BPF_ITER_STATE_DRAINED). In such |
| 8809 | * conditions we should not form an infinite loop and should eventually reach |
| 8810 | * exit. |
| 8811 | * |
| 8812 | * Besides that, we also fork current state and enqueue it for later |
| 8813 | * verification. In a forked state we keep iterator state as ACTIVE |
| 8814 | * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next(). We |
| 8815 | * also bump iteration depth to prevent erroneous infinite loop detection |
| 8816 | * later on (see iter_active_depths_differ() comment for details). In this |
 * state we assume that we'll eventually loop back to another iter_next()
 * call (it could be at exactly the same location or at some other instruction,
 * it doesn't matter; we don't make any unnecessary assumptions about this,
 * everything revolves around the iterator state in a stack slot, not which
 * instruction is calling iter_next()). When that happens, we either will come
| 8822 | * to iter_next() with equivalent state and can conclude that next iteration |
| 8823 | * will proceed in exactly the same way as we just verified, so it's safe to |
| 8824 | * assume that loop converges. If not, we'll go on another iteration |
| 8825 | * simulation with a different input state, until all possible starting states |
| 8826 | * are validated or we reach maximum number of instructions limit. |
| 8827 | * |
 * This way, we will either exhaustively discover all possible input states
 * that the iterator loop can start with and eventually converge, or we'll
 * effectively regress into bounded loop simulation logic and either reach the
 * maximum number of instructions if the loop is not provably convergent, or
 * converge anyway because there is some statically known limit on the number
 * of iterations (e.g., an explicit `if n > 100 then break;` statement
 * somewhere in the loop).
| 8834 | * |
| 8835 | * Iteration convergence logic in is_state_visited() relies on exact |
| 8836 | * states comparison, which ignores read and precision marks. |
| 8837 | * This is necessary because read and precision marks are not finalized |
| 8838 | * while in the loop. Exact comparison might preclude convergence for |
| 8839 | * simple programs like below: |
| 8840 | * |
| 8841 | * i = 0; |
| 8842 | * while(iter_next(&it)) |
| 8843 | * i++; |
| 8844 | * |
| 8845 | * At each iteration step i++ would produce a new distinct state and |
| 8846 | * eventually instruction processing limit would be reached. |
| 8847 | * |
 * To avoid such behavior, speculatively forget (widen) the range of
 * imprecise scalar registers if those registers were not precise at the
 * end of the previous iteration and do not match exactly.
 *
 * This is a conservative heuristic that allows verifying a wide range of
 * programs, however it precludes verification of programs that conjure an
 * imprecise value on the first loop iteration and use it as precise on the
 * second.
| 8855 | * For example, the following safe program would fail to verify: |
| 8856 | * |
| 8857 | * struct bpf_num_iter it; |
| 8858 | * int arr[10]; |
| 8859 | * int i = 0, a = 0; |
| 8860 | * bpf_iter_num_new(&it, 0, 10); |
| 8861 | * while (bpf_iter_num_next(&it)) { |
| 8862 | * if (a == 0) { |
| 8863 | * a = 1; |
 *         i = 7; // Because i changed, the verifier would forget
 *                // its range on second loop entry.
| 8866 | * } else { |
| 8867 | * arr[i] = 42; // This would fail to verify. |
| 8868 | * } |
| 8869 | * } |
| 8870 | * bpf_iter_num_destroy(&it); |
| 8871 | */ |
| 8872 | static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx, |
| 8873 | struct bpf_kfunc_call_arg_meta *meta) |
| 8874 | { |
| 8875 | struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; |
| 8876 | struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr; |
| 8877 | struct bpf_reg_state *cur_iter, *queued_iter; |
| 8878 | |
| 8879 | BTF_TYPE_EMIT(struct bpf_iter); |
| 8880 | |
| 8881 | cur_iter = get_iter_from_state(cur_st, meta); |
| 8882 | |
| 8883 | if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE && |
| 8884 | cur_iter->iter.state != BPF_ITER_STATE_DRAINED) { |
| 8885 | verbose(env, "verifier internal error: unexpected iterator state %d (%s)\n", |
| 8886 | cur_iter->iter.state, iter_state_str(cur_iter->iter.state)); |
| 8887 | return -EFAULT; |
| 8888 | } |
| 8889 | |
| 8890 | if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) { |
		/* Because an iter_next() call is a checkpoint, is_state_visited()
		 * should guarantee a parent state with the same call sites and
		 * insn_idx.
		 */
| 8894 | if (!cur_st->parent || cur_st->parent->insn_idx != insn_idx || |
| 8895 | !same_callsites(cur_st->parent, cur_st)) { |
			verbose(env, "bug: bad parent state for iter next call\n");
| 8897 | return -EFAULT; |
| 8898 | } |
| 8899 | /* Note cur_st->parent in the call below, it is necessary to skip |
| 8900 | * checkpoint created for cur_st by is_state_visited() |
| 8901 | * right at this instruction. |
| 8902 | */ |
| 8903 | prev_st = find_prev_entry(env, cur_st->parent, insn_idx); |
| 8904 | /* branch out active iter state */ |
| 8905 | queued_st = push_stack(env, insn_idx + 1, insn_idx, false); |
| 8906 | if (!queued_st) |
| 8907 | return -ENOMEM; |
| 8908 | |
| 8909 | queued_iter = get_iter_from_state(queued_st, meta); |
| 8910 | queued_iter->iter.state = BPF_ITER_STATE_ACTIVE; |
| 8911 | queued_iter->iter.depth++; |
| 8912 | if (prev_st) |
| 8913 | widen_imprecise_scalars(env, prev_st, queued_st); |
| 8914 | |
| 8915 | queued_fr = queued_st->frame[queued_st->curframe]; |
| 8916 | mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]); |
| 8917 | } |
| 8918 | |
	/* mark current iter state as drained (iter_next() assumed to have
	 * returned NULL), but keep the depth unchanged
	 */
| 8921 | cur_iter->iter.state = BPF_ITER_STATE_DRAINED; |
| 8922 | __mark_reg_const_zero(env, &cur_fr->regs[BPF_REG_0]); |
| 8923 | |
| 8924 | return 0; |
| 8925 | } |
| 8926 | |
| 8927 | static bool arg_type_is_mem_size(enum bpf_arg_type type) |
| 8928 | { |
| 8929 | return type == ARG_CONST_SIZE || |
| 8930 | type == ARG_CONST_SIZE_OR_ZERO; |
| 8931 | } |
| 8932 | |
| 8933 | static bool arg_type_is_raw_mem(enum bpf_arg_type type) |
| 8934 | { |
| 8935 | return base_type(type) == ARG_PTR_TO_MEM && |
| 8936 | type & MEM_UNINIT; |
| 8937 | } |
| 8938 | |
| 8939 | static bool arg_type_is_release(enum bpf_arg_type type) |
| 8940 | { |
| 8941 | return type & OBJ_RELEASE; |
| 8942 | } |
| 8943 | |
| 8944 | static bool arg_type_is_dynptr(enum bpf_arg_type type) |
| 8945 | { |
| 8946 | return base_type(type) == ARG_PTR_TO_DYNPTR; |
| 8947 | } |
| 8948 | |
| 8949 | static int resolve_map_arg_type(struct bpf_verifier_env *env, |
| 8950 | const struct bpf_call_arg_meta *meta, |
| 8951 | enum bpf_arg_type *arg_type) |
| 8952 | { |
| 8953 | if (!meta->map_ptr) { |
| 8954 | /* kernel subsystem misconfigured verifier */ |
| 8955 | verbose(env, "invalid map_ptr to access map->type\n"); |
| 8956 | return -EACCES; |
| 8957 | } |
| 8958 | |
| 8959 | switch (meta->map_ptr->map_type) { |
| 8960 | case BPF_MAP_TYPE_SOCKMAP: |
| 8961 | case BPF_MAP_TYPE_SOCKHASH: |
| 8962 | if (*arg_type == ARG_PTR_TO_MAP_VALUE) { |
| 8963 | *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON; |
| 8964 | } else { |
| 8965 | verbose(env, "invalid arg_type for sockmap/sockhash\n"); |
| 8966 | return -EINVAL; |
| 8967 | } |
| 8968 | break; |
| 8969 | case BPF_MAP_TYPE_BLOOM_FILTER: |
| 8970 | if (meta->func_id == BPF_FUNC_map_peek_elem) |
| 8971 | *arg_type = ARG_PTR_TO_MAP_VALUE; |
| 8972 | break; |
| 8973 | default: |
| 8974 | break; |
| 8975 | } |
| 8976 | return 0; |
| 8977 | } |
| 8978 | |
| 8979 | struct bpf_reg_types { |
| 8980 | const enum bpf_reg_type types[10]; |
| 8981 | u32 *btf_id; |
| 8982 | }; |
| 8983 | |
| 8984 | static const struct bpf_reg_types sock_types = { |
| 8985 | .types = { |
| 8986 | PTR_TO_SOCK_COMMON, |
| 8987 | PTR_TO_SOCKET, |
| 8988 | PTR_TO_TCP_SOCK, |
| 8989 | PTR_TO_XDP_SOCK, |
| 8990 | }, |
| 8991 | }; |
| 8992 | |
| 8993 | #ifdef CONFIG_NET |
| 8994 | static const struct bpf_reg_types btf_id_sock_common_types = { |
| 8995 | .types = { |
| 8996 | PTR_TO_SOCK_COMMON, |
| 8997 | PTR_TO_SOCKET, |
| 8998 | PTR_TO_TCP_SOCK, |
| 8999 | PTR_TO_XDP_SOCK, |
| 9000 | PTR_TO_BTF_ID, |
| 9001 | PTR_TO_BTF_ID | PTR_TRUSTED, |
| 9002 | }, |
| 9003 | .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], |
| 9004 | }; |
| 9005 | #endif |
| 9006 | |
| 9007 | static const struct bpf_reg_types mem_types = { |
| 9008 | .types = { |
| 9009 | PTR_TO_STACK, |
| 9010 | PTR_TO_PACKET, |
| 9011 | PTR_TO_PACKET_META, |
| 9012 | PTR_TO_MAP_KEY, |
| 9013 | PTR_TO_MAP_VALUE, |
| 9014 | PTR_TO_MEM, |
| 9015 | PTR_TO_MEM | MEM_RINGBUF, |
| 9016 | PTR_TO_BUF, |
| 9017 | PTR_TO_BTF_ID | PTR_TRUSTED, |
| 9018 | }, |
| 9019 | }; |
| 9020 | |
| 9021 | static const struct bpf_reg_types spin_lock_types = { |
| 9022 | .types = { |
| 9023 | PTR_TO_MAP_VALUE, |
| 9024 | PTR_TO_BTF_ID | MEM_ALLOC, |
| 9025 | } |
| 9026 | }; |
| 9027 | |
| 9028 | static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } }; |
| 9029 | static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } }; |
| 9030 | static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } }; |
| 9031 | static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } }; |
| 9032 | static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } }; |
| 9033 | static const struct bpf_reg_types btf_ptr_types = { |
| 9034 | .types = { |
| 9035 | PTR_TO_BTF_ID, |
| 9036 | PTR_TO_BTF_ID | PTR_TRUSTED, |
| 9037 | PTR_TO_BTF_ID | MEM_RCU, |
| 9038 | }, |
| 9039 | }; |
| 9040 | static const struct bpf_reg_types percpu_btf_ptr_types = { |
| 9041 | .types = { |
| 9042 | PTR_TO_BTF_ID | MEM_PERCPU, |
| 9043 | PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU, |
| 9044 | PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED, |
| 9045 | } |
| 9046 | }; |
| 9047 | static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; |
| 9048 | static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; |
| 9049 | static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; |
| 9050 | static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } }; |
| 9051 | static const struct bpf_reg_types kptr_xchg_dest_types = { |
| 9052 | .types = { |
| 9053 | PTR_TO_MAP_VALUE, |
| 9054 | PTR_TO_BTF_ID | MEM_ALLOC |
| 9055 | } |
| 9056 | }; |
| 9057 | static const struct bpf_reg_types dynptr_types = { |
| 9058 | .types = { |
| 9059 | PTR_TO_STACK, |
| 9060 | CONST_PTR_TO_DYNPTR, |
| 9061 | } |
| 9062 | }; |
| 9063 | |
| 9064 | static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { |
| 9065 | [ARG_PTR_TO_MAP_KEY] = &mem_types, |
| 9066 | [ARG_PTR_TO_MAP_VALUE] = &mem_types, |
| 9067 | [ARG_CONST_SIZE] = &scalar_types, |
| 9068 | [ARG_CONST_SIZE_OR_ZERO] = &scalar_types, |
| 9069 | [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types, |
| 9070 | [ARG_CONST_MAP_PTR] = &const_map_ptr_types, |
| 9071 | [ARG_PTR_TO_CTX] = &context_types, |
| 9072 | [ARG_PTR_TO_SOCK_COMMON] = &sock_types, |
| 9073 | #ifdef CONFIG_NET |
| 9074 | [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types, |
| 9075 | #endif |
| 9076 | [ARG_PTR_TO_SOCKET] = &fullsock_types, |
| 9077 | [ARG_PTR_TO_BTF_ID] = &btf_ptr_types, |
| 9078 | [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, |
| 9079 | [ARG_PTR_TO_MEM] = &mem_types, |
| 9080 | [ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types, |
| 9081 | [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, |
| 9082 | [ARG_PTR_TO_FUNC] = &func_ptr_types, |
| 9083 | [ARG_PTR_TO_STACK] = &stack_ptr_types, |
| 9084 | [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, |
| 9085 | [ARG_PTR_TO_TIMER] = &timer_types, |
| 9086 | [ARG_KPTR_XCHG_DEST] = &kptr_xchg_dest_types, |
| 9087 | [ARG_PTR_TO_DYNPTR] = &dynptr_types, |
| 9088 | }; |
| 9089 | |
| 9090 | static int check_reg_type(struct bpf_verifier_env *env, u32 regno, |
| 9091 | enum bpf_arg_type arg_type, |
| 9092 | const u32 *arg_btf_id, |
| 9093 | struct bpf_call_arg_meta *meta) |
| 9094 | { |
| 9095 | struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; |
| 9096 | enum bpf_reg_type expected, type = reg->type; |
| 9097 | const struct bpf_reg_types *compatible; |
| 9098 | int i, j; |
| 9099 | |
| 9100 | compatible = compatible_reg_types[base_type(arg_type)]; |
| 9101 | if (!compatible) { |
| 9102 | verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type); |
| 9103 | return -EFAULT; |
| 9104 | } |
| 9105 | |
| 9106 | /* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY, |
| 9107 | * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY |
| 9108 | * |
| 9109 | * Same for MAYBE_NULL: |
| 9110 | * |
| 9111 | * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL, |
| 9112 | * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL |
| 9113 | * |
| 9114 | * ARG_PTR_TO_MEM is compatible with PTR_TO_MEM that is tagged with a dynptr type. |
| 9115 | * |
| 9116 | * Therefore we fold these flags depending on the arg_type before comparison. |
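	 *
	 * For example, with arg_type == ARG_PTR_TO_MEM | MEM_RDONLY a register
	 * of type PTR_TO_MEM | MEM_RDONLY is folded to plain PTR_TO_MEM before
	 * the comparison loop below, so it matches the PTR_TO_MEM entry in
	 * mem_types.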
| 9117 | */ |
| 9118 | if (arg_type & MEM_RDONLY) |
| 9119 | type &= ~MEM_RDONLY; |
| 9120 | if (arg_type & PTR_MAYBE_NULL) |
| 9121 | type &= ~PTR_MAYBE_NULL; |
| 9122 | if (base_type(arg_type) == ARG_PTR_TO_MEM) |
| 9123 | type &= ~DYNPTR_TYPE_FLAG_MASK; |
| 9124 | |
| 9125 | /* Local kptr types are allowed as the source argument of bpf_kptr_xchg */ |
| 9126 | if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type) && regno == BPF_REG_2) { |
| 9127 | type &= ~MEM_ALLOC; |
| 9128 | type &= ~MEM_PERCPU; |
| 9129 | } |
| 9130 | |
| 9131 | for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { |
| 9132 | expected = compatible->types[i]; |
| 9133 | if (expected == NOT_INIT) |
| 9134 | break; |
| 9135 | |
| 9136 | if (type == expected) |
| 9137 | goto found; |
| 9138 | } |
| 9139 | |
| 9140 | verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type)); |
| 9141 | for (j = 0; j + 1 < i; j++) |
| 9142 | verbose(env, "%s, ", reg_type_str(env, compatible->types[j])); |
| 9143 | verbose(env, "%s\n", reg_type_str(env, compatible->types[j])); |
| 9144 | return -EACCES; |
| 9145 | |
| 9146 | found: |
| 9147 | if (base_type(reg->type) != PTR_TO_BTF_ID) |
| 9148 | return 0; |
| 9149 | |
| 9150 | if (compatible == &mem_types) { |
| 9151 | if (!(arg_type & MEM_RDONLY)) { |
| 9152 | verbose(env, |
| 9153 | "%s() may write into memory pointed by R%d type=%s\n", |
| 9154 | func_id_name(meta->func_id), |
| 9155 | regno, reg_type_str(env, reg->type)); |
| 9156 | return -EACCES; |
| 9157 | } |
| 9158 | return 0; |
| 9159 | } |
| 9160 | |
| 9161 | switch ((int)reg->type) { |
| 9162 | case PTR_TO_BTF_ID: |
| 9163 | case PTR_TO_BTF_ID | PTR_TRUSTED: |
| 9164 | case PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL: |
| 9165 | case PTR_TO_BTF_ID | MEM_RCU: |
| 9166 | case PTR_TO_BTF_ID | PTR_MAYBE_NULL: |
| 9167 | case PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU: |
| 9168 | { |
| 9169 | /* For bpf_sk_release, it needs to match against first member |
| 9170 | * 'struct sock_common', hence make an exception for it. This |
| 9171 | * allows bpf_sk_release to work for multiple socket types. |
| 9172 | */ |
| 9173 | bool strict_type_match = arg_type_is_release(arg_type) && |
| 9174 | meta->func_id != BPF_FUNC_sk_release; |
| 9175 | |
| 9176 | if (type_may_be_null(reg->type) && |
| 9177 | (!type_may_be_null(arg_type) || arg_type_is_release(arg_type))) { |
| 9178 | verbose(env, "Possibly NULL pointer passed to helper arg%d\n", regno); |
| 9179 | return -EACCES; |
| 9180 | } |
| 9181 | |
| 9182 | if (!arg_btf_id) { |
| 9183 | if (!compatible->btf_id) { |
| 9184 | verbose(env, "verifier internal error: missing arg compatible BTF ID\n"); |
| 9185 | return -EFAULT; |
| 9186 | } |
| 9187 | arg_btf_id = compatible->btf_id; |
| 9188 | } |
| 9189 | |
| 9190 | if (meta->func_id == BPF_FUNC_kptr_xchg) { |
| 9191 | if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) |
| 9192 | return -EACCES; |
| 9193 | } else { |
| 9194 | if (arg_btf_id == BPF_PTR_POISON) { |
| 9195 | verbose(env, "verifier internal error:"); |
| 9196 | verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n", |
| 9197 | regno); |
| 9198 | return -EACCES; |
| 9199 | } |
| 9200 | |
| 9201 | if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, |
| 9202 | btf_vmlinux, *arg_btf_id, |
| 9203 | strict_type_match)) { |
| 9204 | verbose(env, "R%d is of type %s but %s is expected\n", |
| 9205 | regno, btf_type_name(reg->btf, reg->btf_id), |
| 9206 | btf_type_name(btf_vmlinux, *arg_btf_id)); |
| 9207 | return -EACCES; |
| 9208 | } |
| 9209 | } |
| 9210 | break; |
| 9211 | } |
| 9212 | case PTR_TO_BTF_ID | MEM_ALLOC: |
| 9213 | case PTR_TO_BTF_ID | MEM_PERCPU | MEM_ALLOC: |
| 9214 | if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock && |
| 9215 | meta->func_id != BPF_FUNC_kptr_xchg) { |
| 9216 | verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n"); |
| 9217 | return -EFAULT; |
| 9218 | } |
| 9219 | /* Check if local kptr in src arg matches kptr in dst arg */ |
| 9220 | if (meta->func_id == BPF_FUNC_kptr_xchg && regno == BPF_REG_2) { |
| 9221 | if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) |
| 9222 | return -EACCES; |
| 9223 | } |
| 9224 | break; |
| 9225 | case PTR_TO_BTF_ID | MEM_PERCPU: |
| 9226 | case PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU: |
| 9227 | case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED: |
| 9228 | /* Handled by helper specific checks */ |
| 9229 | break; |
| 9230 | default: |
| 9231 | verbose(env, "verifier internal error: invalid PTR_TO_BTF_ID register for type match\n"); |
| 9232 | return -EFAULT; |
| 9233 | } |
| 9234 | return 0; |
| 9235 | } |
| 9236 | |
| 9237 | static struct btf_field * |
| 9238 | reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields) |
| 9239 | { |
| 9240 | struct btf_field *field; |
| 9241 | struct btf_record *rec; |
| 9242 | |
| 9243 | rec = reg_btf_record(reg); |
| 9244 | if (!rec) |
| 9245 | return NULL; |
| 9246 | |
| 9247 | field = btf_record_find(rec, off, fields); |
| 9248 | if (!field) |
| 9249 | return NULL; |
| 9250 | |
| 9251 | return field; |
| 9252 | } |
| 9253 | |
| 9254 | static int check_func_arg_reg_off(struct bpf_verifier_env *env, |
| 9255 | const struct bpf_reg_state *reg, int regno, |
| 9256 | enum bpf_arg_type arg_type) |
| 9257 | { |
| 9258 | u32 type = reg->type; |
| 9259 | |
| 9260 | /* When referenced register is passed to release function, its fixed |
| 9261 | * offset must be 0. |
| 9262 | * |
| 9263 | * We will check arg_type_is_release reg has ref_obj_id when storing |
| 9264 | * meta->release_regno. |
| 9265 | */ |
| 9266 | if (arg_type_is_release(arg_type)) { |
| 9267 | /* ARG_PTR_TO_DYNPTR with OBJ_RELEASE is a bit special, as it |
		 * may not directly point to the object being released, but to
		 * a dynptr pointing to such an object, which might be at some
		 * offset on the stack. In that case, we simply fall back to
		 * the default handling.
| 9272 | */ |
| 9273 | if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK) |
| 9274 | return 0; |
| 9275 | |
| 9276 | /* Doing check_ptr_off_reg check for the offset will catch this |
| 9277 | * because fixed_off_ok is false, but checking here allows us |
| 9278 | * to give the user a better error message. |
| 9279 | */ |
| 9280 | if (reg->off) { |
| 9281 | verbose(env, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n", |
| 9282 | regno); |
| 9283 | return -EINVAL; |
| 9284 | } |
| 9285 | return __check_ptr_off_reg(env, reg, regno, false); |
| 9286 | } |
| 9287 | |
| 9288 | switch (type) { |
| 9289 | /* Pointer types where both fixed and variable offset is explicitly allowed: */ |
| 9290 | case PTR_TO_STACK: |
| 9291 | case PTR_TO_PACKET: |
| 9292 | case PTR_TO_PACKET_META: |
| 9293 | case PTR_TO_MAP_KEY: |
| 9294 | case PTR_TO_MAP_VALUE: |
| 9295 | case PTR_TO_MEM: |
| 9296 | case PTR_TO_MEM | MEM_RDONLY: |
| 9297 | case PTR_TO_MEM | MEM_RINGBUF: |
| 9298 | case PTR_TO_BUF: |
| 9299 | case PTR_TO_BUF | MEM_RDONLY: |
| 9300 | case PTR_TO_ARENA: |
| 9301 | case SCALAR_VALUE: |
| 9302 | return 0; |
| 9303 | /* All the rest must be rejected, except PTR_TO_BTF_ID which allows |
| 9304 | * fixed offset. |
| 9305 | */ |
| 9306 | case PTR_TO_BTF_ID: |
| 9307 | case PTR_TO_BTF_ID | MEM_ALLOC: |
| 9308 | case PTR_TO_BTF_ID | PTR_TRUSTED: |
| 9309 | case PTR_TO_BTF_ID | MEM_RCU: |
| 9310 | case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF: |
| 9311 | case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF | MEM_RCU: |
| 9312 | /* When referenced PTR_TO_BTF_ID is passed to release function, |
| 9313 | * its fixed offset must be 0. In the other cases, fixed offset |
| 9314 | * can be non-zero. This was already checked above. So pass |
| 9315 | * fixed_off_ok as true to allow fixed offset for all other |
		 * cases. var_off must always be 0 for PTR_TO_BTF_ID, hence we
		 * still need to do the checks instead of returning.
| 9318 | */ |
| 9319 | return __check_ptr_off_reg(env, reg, regno, true); |
| 9320 | default: |
| 9321 | return __check_ptr_off_reg(env, reg, regno, false); |
| 9322 | } |
| 9323 | } |
| 9324 | |
| 9325 | static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env, |
| 9326 | const struct bpf_func_proto *fn, |
| 9327 | struct bpf_reg_state *regs) |
| 9328 | { |
| 9329 | struct bpf_reg_state *state = NULL; |
| 9330 | int i; |
| 9331 | |
| 9332 | for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) |
| 9333 | if (arg_type_is_dynptr(fn->arg_type[i])) { |
| 9334 | if (state) { |
| 9335 | verbose(env, "verifier internal error: multiple dynptr args\n"); |
| 9336 | return NULL; |
| 9337 | } |
| 9338 | state = ®s[BPF_REG_1 + i]; |
| 9339 | } |
| 9340 | |
| 9341 | if (!state) |
| 9342 | verbose(env, "verifier internal error: no dynptr arg found\n"); |
| 9343 | |
| 9344 | return state; |
| 9345 | } |
| 9346 | |
| 9347 | static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 9348 | { |
| 9349 | struct bpf_func_state *state = func(env, reg); |
| 9350 | int spi; |
| 9351 | |
| 9352 | if (reg->type == CONST_PTR_TO_DYNPTR) |
| 9353 | return reg->id; |
| 9354 | spi = dynptr_get_spi(env, reg); |
| 9355 | if (spi < 0) |
| 9356 | return spi; |
| 9357 | return state->stack[spi].spilled_ptr.id; |
| 9358 | } |
| 9359 | |
| 9360 | static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 9361 | { |
| 9362 | struct bpf_func_state *state = func(env, reg); |
| 9363 | int spi; |
| 9364 | |
| 9365 | if (reg->type == CONST_PTR_TO_DYNPTR) |
| 9366 | return reg->ref_obj_id; |
| 9367 | spi = dynptr_get_spi(env, reg); |
| 9368 | if (spi < 0) |
| 9369 | return spi; |
| 9370 | return state->stack[spi].spilled_ptr.ref_obj_id; |
| 9371 | } |
| 9372 | |
| 9373 | static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env, |
| 9374 | struct bpf_reg_state *reg) |
| 9375 | { |
| 9376 | struct bpf_func_state *state = func(env, reg); |
| 9377 | int spi; |
| 9378 | |
| 9379 | if (reg->type == CONST_PTR_TO_DYNPTR) |
| 9380 | return reg->dynptr.type; |
| 9381 | |
| 9382 | spi = __get_spi(reg->off); |
| 9383 | if (spi < 0) { |
| 9384 | verbose(env, "verifier internal error: invalid spi when querying dynptr type\n"); |
| 9385 | return BPF_DYNPTR_TYPE_INVALID; |
| 9386 | } |
| 9387 | |
| 9388 | return state->stack[spi].spilled_ptr.dynptr.type; |
| 9389 | } |
| 9390 | |
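/* ARG_PTR_TO_CONST_STR arguments typically come from string constants in the
 * BPF program source, e.g. (sketch, names are illustrative):
 *
 *	static const char fmt[] = "pid=%d";
 *
 *	bpf_snprintf(buf, sizeof(buf), fmt, args, sizeof(args));
 *
 * libbpf places 'fmt' into a read-only array map, so the register checked
 * below is expected to be a PTR_TO_MAP_VALUE at a constant offset into an
 * rdonly map, pointing at a NUL-terminated string.
 */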
| 9391 | static int check_reg_const_str(struct bpf_verifier_env *env, |
| 9392 | struct bpf_reg_state *reg, u32 regno) |
| 9393 | { |
| 9394 | struct bpf_map *map = reg->map_ptr; |
| 9395 | int err; |
| 9396 | int map_off; |
| 9397 | u64 map_addr; |
| 9398 | char *str_ptr; |
| 9399 | |
| 9400 | if (reg->type != PTR_TO_MAP_VALUE) |
| 9401 | return -EINVAL; |
| 9402 | |
| 9403 | if (!bpf_map_is_rdonly(map)) { |
		verbose(env, "R%d does not point to a readonly map\n", regno);
| 9405 | return -EACCES; |
| 9406 | } |
| 9407 | |
| 9408 | if (!tnum_is_const(reg->var_off)) { |
		verbose(env, "R%d is not a constant address\n", regno);
| 9410 | return -EACCES; |
| 9411 | } |
| 9412 | |
| 9413 | if (!map->ops->map_direct_value_addr) { |
| 9414 | verbose(env, "no direct value access support for this map type\n"); |
| 9415 | return -EACCES; |
| 9416 | } |
| 9417 | |
| 9418 | err = check_map_access(env, regno, reg->off, |
| 9419 | map->value_size - reg->off, false, |
| 9420 | ACCESS_HELPER); |
| 9421 | if (err) |
| 9422 | return err; |
| 9423 | |
| 9424 | map_off = reg->off + reg->var_off.value; |
| 9425 | err = map->ops->map_direct_value_addr(map, &map_addr, map_off); |
| 9426 | if (err) { |
| 9427 | verbose(env, "direct value access on string failed\n"); |
| 9428 | return err; |
| 9429 | } |
| 9430 | |
| 9431 | str_ptr = (char *)(long)(map_addr); |
| 9432 | if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) { |
| 9433 | verbose(env, "string is not zero-terminated\n"); |
| 9434 | return -EINVAL; |
| 9435 | } |
| 9436 | return 0; |
| 9437 | } |
| 9438 | |
| 9439 | /* Returns constant key value in `value` if possible, else negative error */ |
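/* For example (sketch), with
 *
 *	u32 key = 1;
 *	val = bpf_map_lookup_elem(&arr, &key);
 *
 * 'key' lives on the stack either as precisely tracked STACK_ZERO bytes or as
 * a spilled constant scalar. The constant 1 is recovered here so that callers
 * which may elide value nullness for the map type (see
 * can_elide_value_nullness()) have a constant key to work with.
 */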
| 9440 | static int get_constant_map_key(struct bpf_verifier_env *env, |
| 9441 | struct bpf_reg_state *key, |
| 9442 | u32 key_size, |
| 9443 | s64 *value) |
| 9444 | { |
| 9445 | struct bpf_func_state *state = func(env, key); |
| 9446 | struct bpf_reg_state *reg; |
| 9447 | int slot, spi, off; |
| 9448 | int spill_size = 0; |
| 9449 | int zero_size = 0; |
| 9450 | int stack_off; |
| 9451 | int i, err; |
| 9452 | u8 *stype; |
| 9453 | |
| 9454 | if (!env->bpf_capable) |
| 9455 | return -EOPNOTSUPP; |
| 9456 | if (key->type != PTR_TO_STACK) |
| 9457 | return -EOPNOTSUPP; |
| 9458 | if (!tnum_is_const(key->var_off)) |
| 9459 | return -EOPNOTSUPP; |
| 9460 | |
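	/* Translate the key's (negative) stack offset into a stack slot index
	 * (spi) and a byte index within that 8-byte slot.
	 */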
| 9461 | stack_off = key->off + key->var_off.value; |
| 9462 | slot = -stack_off - 1; |
| 9463 | spi = slot / BPF_REG_SIZE; |
| 9464 | off = slot % BPF_REG_SIZE; |
| 9465 | stype = state->stack[spi].slot_type; |
| 9466 | |
| 9467 | /* First handle precisely tracked STACK_ZERO */ |
| 9468 | for (i = off; i >= 0 && stype[i] == STACK_ZERO; i--) |
| 9469 | zero_size++; |
| 9470 | if (zero_size >= key_size) { |
| 9471 | *value = 0; |
| 9472 | return 0; |
| 9473 | } |
| 9474 | |
| 9475 | /* Check that stack contains a scalar spill of expected size */ |
| 9476 | if (!is_spilled_scalar_reg(&state->stack[spi])) |
| 9477 | return -EOPNOTSUPP; |
| 9478 | for (i = off; i >= 0 && stype[i] == STACK_SPILL; i--) |
| 9479 | spill_size++; |
| 9480 | if (spill_size != key_size) |
| 9481 | return -EOPNOTSUPP; |
| 9482 | |
| 9483 | reg = &state->stack[spi].spilled_ptr; |
| 9484 | if (!tnum_is_const(reg->var_off)) |
| 9485 | /* Stack value not statically known */ |
| 9486 | return -EOPNOTSUPP; |
| 9487 | |
| 9488 | /* We are relying on a constant value. So mark as precise |
| 9489 | * to prevent pruning on it. |
| 9490 | */ |
| 9491 | bt_set_frame_slot(&env->bt, key->frameno, spi); |
| 9492 | err = mark_chain_precision_batch(env); |
| 9493 | if (err < 0) |
| 9494 | return err; |
| 9495 | |
| 9496 | *value = reg->var_off.value; |
| 9497 | return 0; |
| 9498 | } |
| 9499 | |
| 9500 | static bool can_elide_value_nullness(enum bpf_map_type type); |
| 9501 | |
| 9502 | static int check_func_arg(struct bpf_verifier_env *env, u32 arg, |
| 9503 | struct bpf_call_arg_meta *meta, |
| 9504 | const struct bpf_func_proto *fn, |
| 9505 | int insn_idx) |
| 9506 | { |
| 9507 | u32 regno = BPF_REG_1 + arg; |
| 9508 | struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; |
| 9509 | enum bpf_arg_type arg_type = fn->arg_type[arg]; |
| 9510 | enum bpf_reg_type type = reg->type; |
| 9511 | u32 *arg_btf_id = NULL; |
| 9512 | u32 key_size; |
| 9513 | int err = 0; |
| 9514 | |
| 9515 | if (arg_type == ARG_DONTCARE) |
| 9516 | return 0; |
| 9517 | |
| 9518 | err = check_reg_arg(env, regno, SRC_OP); |
| 9519 | if (err) |
| 9520 | return err; |
| 9521 | |
| 9522 | if (arg_type == ARG_ANYTHING) { |
| 9523 | if (is_pointer_value(env, regno)) { |
| 9524 | verbose(env, "R%d leaks addr into helper function\n", |
| 9525 | regno); |
| 9526 | return -EACCES; |
| 9527 | } |
| 9528 | return 0; |
| 9529 | } |
| 9530 | |
| 9531 | if (type_is_pkt_pointer(type) && |
| 9532 | !may_access_direct_pkt_data(env, meta, BPF_READ)) { |
| 9533 | verbose(env, "helper access to the packet is not allowed\n"); |
| 9534 | return -EACCES; |
| 9535 | } |
| 9536 | |
| 9537 | if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) { |
| 9538 | err = resolve_map_arg_type(env, meta, &arg_type); |
| 9539 | if (err) |
| 9540 | return err; |
| 9541 | } |
| 9542 | |
| 9543 | if (register_is_null(reg) && type_may_be_null(arg_type)) |
| 9544 | /* A NULL register has a SCALAR_VALUE type, so skip |
| 9545 | * type checking. |
| 9546 | */ |
| 9547 | goto skip_type_check; |
| 9548 | |
| 9549 | /* arg_btf_id and arg_size are in a union. */ |
| 9550 | if (base_type(arg_type) == ARG_PTR_TO_BTF_ID || |
| 9551 | base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK) |
| 9552 | arg_btf_id = fn->arg_btf_id[arg]; |
| 9553 | |
| 9554 | err = check_reg_type(env, regno, arg_type, arg_btf_id, meta); |
| 9555 | if (err) |
| 9556 | return err; |
| 9557 | |
| 9558 | err = check_func_arg_reg_off(env, reg, regno, arg_type); |
| 9559 | if (err) |
| 9560 | return err; |
| 9561 | |
| 9562 | skip_type_check: |
| 9563 | if (arg_type_is_release(arg_type)) { |
| 9564 | if (arg_type_is_dynptr(arg_type)) { |
| 9565 | struct bpf_func_state *state = func(env, reg); |
| 9566 | int spi; |
| 9567 | |
| 9568 | /* Only dynptr created on stack can be released, thus |
| 9569 | * the get_spi and stack state checks for spilled_ptr |
| 9570 | * should only be done before process_dynptr_func for |
| 9571 | * PTR_TO_STACK. |
| 9572 | */ |
| 9573 | if (reg->type == PTR_TO_STACK) { |
| 9574 | spi = dynptr_get_spi(env, reg); |
| 9575 | if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) { |
| 9576 | verbose(env, "arg %d is an unacquired reference\n", regno); |
| 9577 | return -EINVAL; |
| 9578 | } |
| 9579 | } else { |
| 9580 | verbose(env, "cannot release unowned const bpf_dynptr\n"); |
| 9581 | return -EINVAL; |
| 9582 | } |
| 9583 | } else if (!reg->ref_obj_id && !register_is_null(reg)) { |
| 9584 | verbose(env, "R%d must be referenced when passed to release function\n", |
| 9585 | regno); |
| 9586 | return -EINVAL; |
| 9587 | } |
| 9588 | if (meta->release_regno) { |
| 9589 | verbose(env, "verifier internal error: more than one release argument\n"); |
| 9590 | return -EFAULT; |
| 9591 | } |
| 9592 | meta->release_regno = regno; |
| 9593 | } |
| 9594 | |
| 9595 | if (reg->ref_obj_id && base_type(arg_type) != ARG_KPTR_XCHG_DEST) { |
| 9596 | if (meta->ref_obj_id) { |
| 9597 | verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", |
| 9598 | regno, reg->ref_obj_id, |
| 9599 | meta->ref_obj_id); |
| 9600 | return -EFAULT; |
| 9601 | } |
| 9602 | meta->ref_obj_id = reg->ref_obj_id; |
| 9603 | } |
| 9604 | |
| 9605 | switch (base_type(arg_type)) { |
| 9606 | case ARG_CONST_MAP_PTR: |
| 9607 | /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ |
| 9608 | if (meta->map_ptr) { |
| 9609 | /* Use map_uid (which is unique id of inner map) to reject: |
| 9610 | * inner_map1 = bpf_map_lookup_elem(outer_map, key1) |
| 9611 | * inner_map2 = bpf_map_lookup_elem(outer_map, key2) |
| 9612 | * if (inner_map1 && inner_map2) { |
| 9613 | * timer = bpf_map_lookup_elem(inner_map1); |
| 9614 | * if (timer) |
| 9615 | * // mismatch would have been allowed |
| 9616 | * bpf_timer_init(timer, inner_map2); |
| 9617 | * } |
| 9618 | * |
| 9619 | * Comparing map_ptr is enough to distinguish normal and outer maps. |
| 9620 | */ |
| 9621 | if (meta->map_ptr != reg->map_ptr || |
| 9622 | meta->map_uid != reg->map_uid) { |
| 9623 | verbose(env, |
| 9624 | "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n", |
| 9625 | meta->map_uid, reg->map_uid); |
| 9626 | return -EINVAL; |
| 9627 | } |
| 9628 | } |
| 9629 | meta->map_ptr = reg->map_ptr; |
| 9630 | meta->map_uid = reg->map_uid; |
| 9631 | break; |
| 9632 | case ARG_PTR_TO_MAP_KEY: |
| 9633 | /* bpf_map_xxx(..., map_ptr, ..., key) call: |
| 9634 | * check that [key, key + map->key_size) are within |
| 9635 | * stack limits and initialized |
| 9636 | */ |
| 9637 | if (!meta->map_ptr) { |
			/* In the function declaration map_ptr must come before
			 * map_key, so that it's verified and known before
			 * we have to check map_key here. Otherwise it means
			 * that the kernel subsystem misconfigured the verifier.
| 9642 | */ |
| 9643 | verbose(env, "invalid map_ptr to access map->key\n"); |
| 9644 | return -EACCES; |
| 9645 | } |
| 9646 | key_size = meta->map_ptr->key_size; |
| 9647 | err = check_helper_mem_access(env, regno, key_size, BPF_READ, false, NULL); |
| 9648 | if (err) |
| 9649 | return err; |
| 9650 | if (can_elide_value_nullness(meta->map_ptr->map_type)) { |
| 9651 | err = get_constant_map_key(env, reg, key_size, &meta->const_map_key); |
| 9652 | if (err < 0) { |
| 9653 | meta->const_map_key = -1; |
| 9654 | if (err == -EOPNOTSUPP) |
| 9655 | err = 0; |
| 9656 | else |
| 9657 | return err; |
| 9658 | } |
| 9659 | } |
| 9660 | break; |
| 9661 | case ARG_PTR_TO_MAP_VALUE: |
| 9662 | if (type_may_be_null(arg_type) && register_is_null(reg)) |
| 9663 | return 0; |
| 9664 | |
| 9665 | /* bpf_map_xxx(..., map_ptr, ..., value) call: |
| 9666 | * check [value, value + map->value_size) validity |
| 9667 | */ |
| 9668 | if (!meta->map_ptr) { |
| 9669 | /* kernel subsystem misconfigured verifier */ |
| 9670 | verbose(env, "invalid map_ptr to access map->value\n"); |
| 9671 | return -EACCES; |
| 9672 | } |
| 9673 | meta->raw_mode = arg_type & MEM_UNINIT; |
| 9674 | err = check_helper_mem_access(env, regno, meta->map_ptr->value_size, |
| 9675 | arg_type & MEM_WRITE ? BPF_WRITE : BPF_READ, |
| 9676 | false, meta); |
| 9677 | break; |
| 9678 | case ARG_PTR_TO_PERCPU_BTF_ID: |
| 9679 | if (!reg->btf_id) { |
| 9680 | verbose(env, "Helper has invalid btf_id in R%d\n", regno); |
| 9681 | return -EACCES; |
| 9682 | } |
| 9683 | meta->ret_btf = reg->btf; |
| 9684 | meta->ret_btf_id = reg->btf_id; |
| 9685 | break; |
| 9686 | case ARG_PTR_TO_SPIN_LOCK: |
| 9687 | if (in_rbtree_lock_required_cb(env)) { |
| 9688 | verbose(env, "can't spin_{lock,unlock} in rbtree cb\n"); |
| 9689 | return -EACCES; |
| 9690 | } |
| 9691 | if (meta->func_id == BPF_FUNC_spin_lock) { |
| 9692 | err = process_spin_lock(env, regno, PROCESS_SPIN_LOCK); |
| 9693 | if (err) |
| 9694 | return err; |
| 9695 | } else if (meta->func_id == BPF_FUNC_spin_unlock) { |
| 9696 | err = process_spin_lock(env, regno, 0); |
| 9697 | if (err) |
| 9698 | return err; |
| 9699 | } else { |
| 9700 | verbose(env, "verifier internal error\n"); |
| 9701 | return -EFAULT; |
| 9702 | } |
| 9703 | break; |
| 9704 | case ARG_PTR_TO_TIMER: |
| 9705 | err = process_timer_func(env, regno, meta); |
| 9706 | if (err) |
| 9707 | return err; |
| 9708 | break; |
| 9709 | case ARG_PTR_TO_FUNC: |
| 9710 | meta->subprogno = reg->subprogno; |
| 9711 | break; |
| 9712 | case ARG_PTR_TO_MEM: |
| 9713 | /* The access to this pointer is only checked when we hit the |
| 9714 | * next is_mem_size argument below. |
| 9715 | */ |
| 9716 | meta->raw_mode = arg_type & MEM_UNINIT; |
| 9717 | if (arg_type & MEM_FIXED_SIZE) { |
| 9718 | err = check_helper_mem_access(env, regno, fn->arg_size[arg], |
| 9719 | arg_type & MEM_WRITE ? BPF_WRITE : BPF_READ, |
| 9720 | false, meta); |
| 9721 | if (err) |
| 9722 | return err; |
| 9723 | if (arg_type & MEM_ALIGNED) |
| 9724 | err = check_ptr_alignment(env, reg, 0, fn->arg_size[arg], true); |
| 9725 | } |
| 9726 | break; |
| 9727 | case ARG_CONST_SIZE: |
| 9728 | err = check_mem_size_reg(env, reg, regno, |
| 9729 | fn->arg_type[arg - 1] & MEM_WRITE ? |
| 9730 | BPF_WRITE : BPF_READ, |
| 9731 | false, meta); |
| 9732 | break; |
| 9733 | case ARG_CONST_SIZE_OR_ZERO: |
| 9734 | err = check_mem_size_reg(env, reg, regno, |
| 9735 | fn->arg_type[arg - 1] & MEM_WRITE ? |
| 9736 | BPF_WRITE : BPF_READ, |
| 9737 | true, meta); |
| 9738 | break; |
| 9739 | case ARG_PTR_TO_DYNPTR: |
| 9740 | err = process_dynptr_func(env, regno, insn_idx, arg_type, 0); |
| 9741 | if (err) |
| 9742 | return err; |
| 9743 | break; |
| 9744 | case ARG_CONST_ALLOC_SIZE_OR_ZERO: |
| 9745 | if (!tnum_is_const(reg->var_off)) { |
			verbose(env, "R%d is not a known constant\n",
| 9747 | regno); |
| 9748 | return -EACCES; |
| 9749 | } |
| 9750 | meta->mem_size = reg->var_off.value; |
| 9751 | err = mark_chain_precision(env, regno); |
| 9752 | if (err) |
| 9753 | return err; |
| 9754 | break; |
| 9755 | case ARG_PTR_TO_CONST_STR: |
| 9756 | { |
| 9757 | err = check_reg_const_str(env, reg, regno); |
| 9758 | if (err) |
| 9759 | return err; |
| 9760 | break; |
| 9761 | } |
| 9762 | case ARG_KPTR_XCHG_DEST: |
| 9763 | err = process_kptr_func(env, regno, meta); |
| 9764 | if (err) |
| 9765 | return err; |
| 9766 | break; |
| 9767 | } |
| 9768 | |
| 9769 | return err; |
| 9770 | } |
| 9771 | |
| 9772 | static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id) |
| 9773 | { |
| 9774 | enum bpf_attach_type eatype = env->prog->expected_attach_type; |
| 9775 | enum bpf_prog_type type = resolve_prog_type(env->prog); |
| 9776 | |
| 9777 | if (func_id != BPF_FUNC_map_update_elem && |
| 9778 | func_id != BPF_FUNC_map_delete_elem) |
| 9779 | return false; |
| 9780 | |
| 9781 | /* It's not possible to get access to a locked struct sock in these |
| 9782 | * contexts, so updating is safe. |
| 9783 | */ |
| 9784 | switch (type) { |
| 9785 | case BPF_PROG_TYPE_TRACING: |
| 9786 | if (eatype == BPF_TRACE_ITER) |
| 9787 | return true; |
| 9788 | break; |
| 9789 | case BPF_PROG_TYPE_SOCK_OPS: |
| 9790 | /* map_update allowed only via dedicated helpers with event type checks */ |
| 9791 | if (func_id == BPF_FUNC_map_delete_elem) |
| 9792 | return true; |
| 9793 | break; |
| 9794 | case BPF_PROG_TYPE_SOCKET_FILTER: |
| 9795 | case BPF_PROG_TYPE_SCHED_CLS: |
| 9796 | case BPF_PROG_TYPE_SCHED_ACT: |
| 9797 | case BPF_PROG_TYPE_XDP: |
| 9798 | case BPF_PROG_TYPE_SK_REUSEPORT: |
| 9799 | case BPF_PROG_TYPE_FLOW_DISSECTOR: |
| 9800 | case BPF_PROG_TYPE_SK_LOOKUP: |
| 9801 | return true; |
| 9802 | default: |
| 9803 | break; |
| 9804 | } |
| 9805 | |
| 9806 | verbose(env, "cannot update sockmap in this context\n"); |
| 9807 | return false; |
| 9808 | } |
| 9809 | |
| 9810 | static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env) |
| 9811 | { |
| 9812 | return env->prog->jit_requested && |
| 9813 | bpf_jit_supports_subprog_tailcalls(); |
| 9814 | } |
| 9815 | |
| 9816 | static int check_map_func_compatibility(struct bpf_verifier_env *env, |
| 9817 | struct bpf_map *map, int func_id) |
| 9818 | { |
| 9819 | if (!map) |
| 9820 | return 0; |
| 9821 | |
	/* We need a two-way check: the first is from the map's perspective ... */
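	/* e.g. a BPF_MAP_TYPE_PROG_ARRAY may only be passed to bpf_tail_call()
	 * and, in the second switch below, bpf_tail_call() may only be given a
	 * PROG_ARRAY.
	 */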
| 9823 | switch (map->map_type) { |
| 9824 | case BPF_MAP_TYPE_PROG_ARRAY: |
| 9825 | if (func_id != BPF_FUNC_tail_call) |
| 9826 | goto error; |
| 9827 | break; |
| 9828 | case BPF_MAP_TYPE_PERF_EVENT_ARRAY: |
| 9829 | if (func_id != BPF_FUNC_perf_event_read && |
| 9830 | func_id != BPF_FUNC_perf_event_output && |
| 9831 | func_id != BPF_FUNC_skb_output && |
| 9832 | func_id != BPF_FUNC_perf_event_read_value && |
| 9833 | func_id != BPF_FUNC_xdp_output) |
| 9834 | goto error; |
| 9835 | break; |
| 9836 | case BPF_MAP_TYPE_RINGBUF: |
| 9837 | if (func_id != BPF_FUNC_ringbuf_output && |
| 9838 | func_id != BPF_FUNC_ringbuf_reserve && |
| 9839 | func_id != BPF_FUNC_ringbuf_query && |
| 9840 | func_id != BPF_FUNC_ringbuf_reserve_dynptr && |
| 9841 | func_id != BPF_FUNC_ringbuf_submit_dynptr && |
| 9842 | func_id != BPF_FUNC_ringbuf_discard_dynptr) |
| 9843 | goto error; |
| 9844 | break; |
| 9845 | case BPF_MAP_TYPE_USER_RINGBUF: |
| 9846 | if (func_id != BPF_FUNC_user_ringbuf_drain) |
| 9847 | goto error; |
| 9848 | break; |
| 9849 | case BPF_MAP_TYPE_STACK_TRACE: |
| 9850 | if (func_id != BPF_FUNC_get_stackid) |
| 9851 | goto error; |
| 9852 | break; |
| 9853 | case BPF_MAP_TYPE_CGROUP_ARRAY: |
| 9854 | if (func_id != BPF_FUNC_skb_under_cgroup && |
| 9855 | func_id != BPF_FUNC_current_task_under_cgroup) |
| 9856 | goto error; |
| 9857 | break; |
| 9858 | case BPF_MAP_TYPE_CGROUP_STORAGE: |
| 9859 | case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: |
| 9860 | if (func_id != BPF_FUNC_get_local_storage) |
| 9861 | goto error; |
| 9862 | break; |
| 9863 | case BPF_MAP_TYPE_DEVMAP: |
| 9864 | case BPF_MAP_TYPE_DEVMAP_HASH: |
| 9865 | if (func_id != BPF_FUNC_redirect_map && |
| 9866 | func_id != BPF_FUNC_map_lookup_elem) |
| 9867 | goto error; |
| 9868 | break; |
| 9869 | /* Restrict bpf side of cpumap and xskmap, open when use-cases |
| 9870 | * appear. |
| 9871 | */ |
| 9872 | case BPF_MAP_TYPE_CPUMAP: |
| 9873 | if (func_id != BPF_FUNC_redirect_map) |
| 9874 | goto error; |
| 9875 | break; |
| 9876 | case BPF_MAP_TYPE_XSKMAP: |
| 9877 | if (func_id != BPF_FUNC_redirect_map && |
| 9878 | func_id != BPF_FUNC_map_lookup_elem) |
| 9879 | goto error; |
| 9880 | break; |
| 9881 | case BPF_MAP_TYPE_ARRAY_OF_MAPS: |
| 9882 | case BPF_MAP_TYPE_HASH_OF_MAPS: |
| 9883 | if (func_id != BPF_FUNC_map_lookup_elem) |
| 9884 | goto error; |
| 9885 | break; |
| 9886 | case BPF_MAP_TYPE_SOCKMAP: |
| 9887 | if (func_id != BPF_FUNC_sk_redirect_map && |
| 9888 | func_id != BPF_FUNC_sock_map_update && |
| 9889 | func_id != BPF_FUNC_msg_redirect_map && |
| 9890 | func_id != BPF_FUNC_sk_select_reuseport && |
| 9891 | func_id != BPF_FUNC_map_lookup_elem && |
| 9892 | !may_update_sockmap(env, func_id)) |
| 9893 | goto error; |
| 9894 | break; |
| 9895 | case BPF_MAP_TYPE_SOCKHASH: |
| 9896 | if (func_id != BPF_FUNC_sk_redirect_hash && |
| 9897 | func_id != BPF_FUNC_sock_hash_update && |
| 9898 | func_id != BPF_FUNC_msg_redirect_hash && |
| 9899 | func_id != BPF_FUNC_sk_select_reuseport && |
| 9900 | func_id != BPF_FUNC_map_lookup_elem && |
| 9901 | !may_update_sockmap(env, func_id)) |
| 9902 | goto error; |
| 9903 | break; |
| 9904 | case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: |
| 9905 | if (func_id != BPF_FUNC_sk_select_reuseport) |
| 9906 | goto error; |
| 9907 | break; |
| 9908 | case BPF_MAP_TYPE_QUEUE: |
| 9909 | case BPF_MAP_TYPE_STACK: |
| 9910 | if (func_id != BPF_FUNC_map_peek_elem && |
| 9911 | func_id != BPF_FUNC_map_pop_elem && |
| 9912 | func_id != BPF_FUNC_map_push_elem) |
| 9913 | goto error; |
| 9914 | break; |
| 9915 | case BPF_MAP_TYPE_SK_STORAGE: |
| 9916 | if (func_id != BPF_FUNC_sk_storage_get && |
| 9917 | func_id != BPF_FUNC_sk_storage_delete && |
| 9918 | func_id != BPF_FUNC_kptr_xchg) |
| 9919 | goto error; |
| 9920 | break; |
| 9921 | case BPF_MAP_TYPE_INODE_STORAGE: |
| 9922 | if (func_id != BPF_FUNC_inode_storage_get && |
| 9923 | func_id != BPF_FUNC_inode_storage_delete && |
| 9924 | func_id != BPF_FUNC_kptr_xchg) |
| 9925 | goto error; |
| 9926 | break; |
| 9927 | case BPF_MAP_TYPE_TASK_STORAGE: |
| 9928 | if (func_id != BPF_FUNC_task_storage_get && |
| 9929 | func_id != BPF_FUNC_task_storage_delete && |
| 9930 | func_id != BPF_FUNC_kptr_xchg) |
| 9931 | goto error; |
| 9932 | break; |
| 9933 | case BPF_MAP_TYPE_CGRP_STORAGE: |
| 9934 | if (func_id != BPF_FUNC_cgrp_storage_get && |
| 9935 | func_id != BPF_FUNC_cgrp_storage_delete && |
| 9936 | func_id != BPF_FUNC_kptr_xchg) |
| 9937 | goto error; |
| 9938 | break; |
| 9939 | case BPF_MAP_TYPE_BLOOM_FILTER: |
| 9940 | if (func_id != BPF_FUNC_map_peek_elem && |
| 9941 | func_id != BPF_FUNC_map_push_elem) |
| 9942 | goto error; |
| 9943 | break; |
| 9944 | default: |
| 9945 | break; |
| 9946 | } |
| 9947 | |
| 9948 | /* ... and second from the function itself. */ |
| 9949 | switch (func_id) { |
| 9950 | case BPF_FUNC_tail_call: |
| 9951 | if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) |
| 9952 | goto error; |
| 9953 | if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { |
| 9954 | verbose(env, "mixing of tail_calls and bpf-to-bpf calls is not supported\n"); |
| 9955 | return -EINVAL; |
| 9956 | } |
| 9957 | break; |
| 9958 | case BPF_FUNC_perf_event_read: |
| 9959 | case BPF_FUNC_perf_event_output: |
| 9960 | case BPF_FUNC_perf_event_read_value: |
| 9961 | case BPF_FUNC_skb_output: |
| 9962 | case BPF_FUNC_xdp_output: |
| 9963 | if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) |
| 9964 | goto error; |
| 9965 | break; |
| 9966 | case BPF_FUNC_ringbuf_output: |
| 9967 | case BPF_FUNC_ringbuf_reserve: |
| 9968 | case BPF_FUNC_ringbuf_query: |
| 9969 | case BPF_FUNC_ringbuf_reserve_dynptr: |
| 9970 | case BPF_FUNC_ringbuf_submit_dynptr: |
| 9971 | case BPF_FUNC_ringbuf_discard_dynptr: |
| 9972 | if (map->map_type != BPF_MAP_TYPE_RINGBUF) |
| 9973 | goto error; |
| 9974 | break; |
| 9975 | case BPF_FUNC_user_ringbuf_drain: |
| 9976 | if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF) |
| 9977 | goto error; |
| 9978 | break; |
| 9979 | case BPF_FUNC_get_stackid: |
| 9980 | if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) |
| 9981 | goto error; |
| 9982 | break; |
| 9983 | case BPF_FUNC_current_task_under_cgroup: |
| 9984 | case BPF_FUNC_skb_under_cgroup: |
| 9985 | if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) |
| 9986 | goto error; |
| 9987 | break; |
| 9988 | case BPF_FUNC_redirect_map: |
| 9989 | if (map->map_type != BPF_MAP_TYPE_DEVMAP && |
| 9990 | map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && |
| 9991 | map->map_type != BPF_MAP_TYPE_CPUMAP && |
| 9992 | map->map_type != BPF_MAP_TYPE_XSKMAP) |
| 9993 | goto error; |
| 9994 | break; |
| 9995 | case BPF_FUNC_sk_redirect_map: |
| 9996 | case BPF_FUNC_msg_redirect_map: |
| 9997 | case BPF_FUNC_sock_map_update: |
| 9998 | if (map->map_type != BPF_MAP_TYPE_SOCKMAP) |
| 9999 | goto error; |
| 10000 | break; |
| 10001 | case BPF_FUNC_sk_redirect_hash: |
| 10002 | case BPF_FUNC_msg_redirect_hash: |
| 10003 | case BPF_FUNC_sock_hash_update: |
| 10004 | if (map->map_type != BPF_MAP_TYPE_SOCKHASH) |
| 10005 | goto error; |
| 10006 | break; |
| 10007 | case BPF_FUNC_get_local_storage: |
| 10008 | if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && |
| 10009 | map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) |
| 10010 | goto error; |
| 10011 | break; |
| 10012 | case BPF_FUNC_sk_select_reuseport: |
| 10013 | if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && |
| 10014 | map->map_type != BPF_MAP_TYPE_SOCKMAP && |
| 10015 | map->map_type != BPF_MAP_TYPE_SOCKHASH) |
| 10016 | goto error; |
| 10017 | break; |
| 10018 | case BPF_FUNC_map_pop_elem: |
| 10019 | if (map->map_type != BPF_MAP_TYPE_QUEUE && |
| 10020 | map->map_type != BPF_MAP_TYPE_STACK) |
| 10021 | goto error; |
| 10022 | break; |
| 10023 | case BPF_FUNC_map_peek_elem: |
| 10024 | case BPF_FUNC_map_push_elem: |
| 10025 | if (map->map_type != BPF_MAP_TYPE_QUEUE && |
| 10026 | map->map_type != BPF_MAP_TYPE_STACK && |
| 10027 | map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) |
| 10028 | goto error; |
| 10029 | break; |
| 10030 | case BPF_FUNC_map_lookup_percpu_elem: |
| 10031 | if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && |
| 10032 | map->map_type != BPF_MAP_TYPE_PERCPU_HASH && |
| 10033 | map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH) |
| 10034 | goto error; |
| 10035 | break; |
| 10036 | case BPF_FUNC_sk_storage_get: |
| 10037 | case BPF_FUNC_sk_storage_delete: |
| 10038 | if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) |
| 10039 | goto error; |
| 10040 | break; |
| 10041 | case BPF_FUNC_inode_storage_get: |
| 10042 | case BPF_FUNC_inode_storage_delete: |
| 10043 | if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) |
| 10044 | goto error; |
| 10045 | break; |
| 10046 | case BPF_FUNC_task_storage_get: |
| 10047 | case BPF_FUNC_task_storage_delete: |
| 10048 | if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) |
| 10049 | goto error; |
| 10050 | break; |
| 10051 | case BPF_FUNC_cgrp_storage_get: |
| 10052 | case BPF_FUNC_cgrp_storage_delete: |
| 10053 | if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) |
| 10054 | goto error; |
| 10055 | break; |
| 10056 | default: |
| 10057 | break; |
| 10058 | } |
| 10059 | |
| 10060 | return 0; |
| 10061 | error: |
| 10062 | verbose(env, "cannot pass map_type %d into func %s#%d\n", |
| 10063 | map->map_type, func_id_name(func_id), func_id); |
| 10064 | return -EINVAL; |
| 10065 | } |
| 10066 | |
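/* A helper like bpf_probe_read_kernel(dst, size, unsafe_ptr) declares 'dst'
 * as uninitialized (raw) memory that it will fill in, paired with a following
 * size argument. check_raw_mode_ok() only permits one such raw-mode argument
 * per helper, and check_arg_pair_ok() further down makes sure every mem
 * argument is correctly paired with its size argument (or carries a fixed
 * size).
 */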
| 10067 | static bool check_raw_mode_ok(const struct bpf_func_proto *fn) |
| 10068 | { |
| 10069 | int count = 0; |
| 10070 | |
| 10071 | if (arg_type_is_raw_mem(fn->arg1_type)) |
| 10072 | count++; |
| 10073 | if (arg_type_is_raw_mem(fn->arg2_type)) |
| 10074 | count++; |
| 10075 | if (arg_type_is_raw_mem(fn->arg3_type)) |
| 10076 | count++; |
| 10077 | if (arg_type_is_raw_mem(fn->arg4_type)) |
| 10078 | count++; |
| 10079 | if (arg_type_is_raw_mem(fn->arg5_type)) |
| 10080 | count++; |
| 10081 | |
| 10082 | /* We only support one arg being in raw mode at the moment, |
| 10083 | * which is sufficient for the helper functions we have |
| 10084 | * right now. |
| 10085 | */ |
| 10086 | return count <= 1; |
| 10087 | } |
| 10088 | |
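/* A pointer-to-memory argument must either carry a fixed size
* (MEM_FIXED_SIZE with a non-zero arg_size) or be followed by a size
* argument, but not both; a size argument must not follow a non-memory
* argument. Returns true if the pairing at position 'arg' is invalid.
*/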
| 10089 | static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg) |
| 10090 | { |
| 10091 | bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE; |
| 10092 | bool has_size = fn->arg_size[arg] != 0; |
| 10093 | bool is_next_size = false; |
| 10094 | |
| 10095 | if (arg + 1 < ARRAY_SIZE(fn->arg_type)) |
| 10096 | is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]); |
| 10097 | |
| 10098 | if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM) |
| 10099 | return is_next_size; |
| 10100 | |
| 10101 | return has_size == is_next_size || is_next_size == is_fixed; |
| 10102 | } |
| 10103 | |
| 10104 | static bool check_arg_pair_ok(const struct bpf_func_proto *fn) |
| 10105 | { |
| 10106 | /* bpf_xxx(..., buf, len) call will access 'len' |
| 10107 | * bytes from memory 'buf'. Both arg types need |
| 10108 | * to be paired, so make sure there's no buggy |
| 10109 | * helper function specification. |
| 10110 | */ |
| 10111 | if (arg_type_is_mem_size(fn->arg1_type) || |
| 10112 | check_args_pair_invalid(fn, 0) || |
| 10113 | check_args_pair_invalid(fn, 1) || |
| 10114 | check_args_pair_invalid(fn, 2) || |
| 10115 | check_args_pair_invalid(fn, 3) || |
| 10116 | check_args_pair_invalid(fn, 4)) |
| 10117 | return false; |
| 10118 | |
| 10119 | return true; |
| 10120 | } |
| 10121 | |
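/* Sanity check the BTF ID annotations of a helper prototype:
* ARG_PTR_TO_BTF_ID arguments need a BTF ID, ARG_PTR_TO_SPIN_LOCK must
* use BPF_PTR_POISON, and other argument types must not carry a stray
* BTF ID (arg_btf_id and arg_size share a union, so fixed-size memory
* arguments are exempt).
*/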
| 10122 | static bool check_btf_id_ok(const struct bpf_func_proto *fn) |
| 10123 | { |
| 10124 | int i; |
| 10125 | |
| 10126 | for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { |
| 10127 | if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID) |
| 10128 | return !!fn->arg_btf_id[i]; |
| 10129 | if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK) |
| 10130 | return fn->arg_btf_id[i] == BPF_PTR_POISON; |
| 10131 | if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] && |
| 10132 | /* arg_btf_id and arg_size are in a union. */ |
| 10133 | (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM || |
| 10134 | !(fn->arg_type[i] & MEM_FIXED_SIZE))) |
| 10135 | return false; |
| 10136 | } |
| 10137 | |
| 10138 | return true; |
| 10139 | } |
| 10140 | |
| 10141 | static int check_func_proto(const struct bpf_func_proto *fn, int func_id) |
| 10142 | { |
| 10143 | return check_raw_mode_ok(fn) && |
| 10144 | check_arg_pair_ok(fn) && |
| 10145 | check_btf_id_ok(fn) ? 0 : -EINVAL; |
| 10146 | } |
| 10147 | |
/* Packet data might have moved, so any old PTR_TO_PACKET[_META,_END]
* registers are now invalid; turn them into unknown SCALAR_VALUE.
| 10150 | * |
| 10151 | * This also applies to dynptr slices belonging to skb and xdp dynptrs, |
| 10152 | * since these slices point to packet data. |
| 10153 | */ |
| 10154 | static void clear_all_pkt_pointers(struct bpf_verifier_env *env) |
| 10155 | { |
| 10156 | struct bpf_func_state *state; |
| 10157 | struct bpf_reg_state *reg; |
| 10158 | |
| 10159 | bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ |
| 10160 | if (reg_is_pkt_pointer_any(reg) || reg_is_dynptr_slice_pkt(reg)) |
| 10161 | mark_reg_invalid(env, reg); |
| 10162 | })); |
| 10163 | } |
| 10164 | |
| 10165 | enum { |
| 10166 | AT_PKT_END = -1, |
| 10167 | BEYOND_PKT_END = -2, |
| 10168 | }; |
| 10169 | |
| 10170 | static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open) |
| 10171 | { |
| 10172 | struct bpf_func_state *state = vstate->frame[vstate->curframe]; |
| 10173 | struct bpf_reg_state *reg = &state->regs[regn]; |
| 10174 | |
| 10175 | if (reg->type != PTR_TO_PACKET) |
| 10176 | /* PTR_TO_PACKET_META is not supported yet */ |
| 10177 | return; |
| 10178 | |
| 10179 | /* The 'reg' is pkt > pkt_end or pkt >= pkt_end. |
| 10180 | * How far beyond pkt_end it goes is unknown. |
| 10181 | * if (!range_open) it's the case of pkt >= pkt_end |
| 10182 | * if (range_open) it's the case of pkt > pkt_end |
* hence this pointer is at least 1 byte beyond pkt_end.
| 10184 | */ |
| 10185 | if (range_open) |
| 10186 | reg->range = BEYOND_PKT_END; |
| 10187 | else |
| 10188 | reg->range = AT_PKT_END; |
| 10189 | } |
| 10190 | |
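/* Release the reference state with id == ref_obj_id without invalidating
* the registers that still carry it. Returns -EINVAL if no such reference
* was acquired.
*/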
| 10191 | static int release_reference_nomark(struct bpf_verifier_state *state, int ref_obj_id) |
| 10192 | { |
| 10193 | int i; |
| 10194 | |
| 10195 | for (i = 0; i < state->acquired_refs; i++) { |
| 10196 | if (state->refs[i].type != REF_TYPE_PTR) |
| 10197 | continue; |
| 10198 | if (state->refs[i].id == ref_obj_id) { |
| 10199 | release_reference_state(state, i); |
| 10200 | return 0; |
| 10201 | } |
| 10202 | } |
| 10203 | return -EINVAL; |
| 10204 | } |
| 10205 | |
| 10206 | /* The pointer with the specified id has released its reference to kernel |
| 10207 | * resources. Identify all copies of the same pointer and clear the reference. |
| 10208 | * |
| 10209 | * This is the release function corresponding to acquire_reference(). Idempotent. |
| 10210 | */ |
| 10211 | static int release_reference(struct bpf_verifier_env *env, int ref_obj_id) |
| 10212 | { |
| 10213 | struct bpf_verifier_state *vstate = env->cur_state; |
| 10214 | struct bpf_func_state *state; |
| 10215 | struct bpf_reg_state *reg; |
| 10216 | int err; |
| 10217 | |
| 10218 | err = release_reference_nomark(vstate, ref_obj_id); |
| 10219 | if (err) |
| 10220 | return err; |
| 10221 | |
| 10222 | bpf_for_each_reg_in_vstate(vstate, state, reg, ({ |
| 10223 | if (reg->ref_obj_id == ref_obj_id) |
| 10224 | mark_reg_invalid(env, reg); |
| 10225 | })); |
| 10226 | |
| 10227 | return 0; |
| 10228 | } |
| 10229 | |
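/* Invalidate every register in the current verifier state that holds a
* non-owning reference.
*/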
| 10230 | static void invalidate_non_owning_refs(struct bpf_verifier_env *env) |
| 10231 | { |
| 10232 | struct bpf_func_state *unused; |
| 10233 | struct bpf_reg_state *reg; |
| 10234 | |
| 10235 | bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ |
| 10236 | if (type_is_non_owning_ref(reg->type)) |
| 10237 | mark_reg_invalid(env, reg); |
| 10238 | })); |
| 10239 | } |
| 10240 | |
| 10241 | static void clear_caller_saved_regs(struct bpf_verifier_env *env, |
| 10242 | struct bpf_reg_state *regs) |
| 10243 | { |
| 10244 | int i; |
| 10245 | |
/* after the call, registers r0 - r5 are scratched */
| 10247 | for (i = 0; i < CALLER_SAVED_REGS; i++) { |
| 10248 | mark_reg_not_init(env, regs, caller_saved[i]); |
| 10249 | __check_reg_arg(env, regs, caller_saved[i], DST_OP_NO_MARK); |
| 10250 | } |
| 10251 | } |
| 10252 | |
| 10253 | typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env, |
| 10254 | struct bpf_func_state *caller, |
| 10255 | struct bpf_func_state *callee, |
| 10256 | int insn_idx); |
| 10257 | |
| 10258 | static int set_callee_state(struct bpf_verifier_env *env, |
| 10259 | struct bpf_func_state *caller, |
| 10260 | struct bpf_func_state *callee, int insn_idx); |
| 10261 | |
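/* Allocate and initialize a new callee frame for a call into 'subprog'
* made at instruction 'callsite', let 'set_callee_state_cb' populate the
* callee's registers and make the new frame the current one.
*/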
| 10262 | static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int callsite, |
| 10263 | set_callee_state_fn set_callee_state_cb, |
| 10264 | struct bpf_verifier_state *state) |
| 10265 | { |
| 10266 | struct bpf_func_state *caller, *callee; |
| 10267 | int err; |
| 10268 | |
| 10269 | if (state->curframe + 1 >= MAX_CALL_FRAMES) { |
| 10270 | verbose(env, "the call stack of %d frames is too deep\n", |
| 10271 | state->curframe + 2); |
| 10272 | return -E2BIG; |
| 10273 | } |
| 10274 | |
| 10275 | if (state->frame[state->curframe + 1]) { |
| 10276 | verifier_bug(env, "Frame %d already allocated", state->curframe + 1); |
| 10277 | return -EFAULT; |
| 10278 | } |
| 10279 | |
| 10280 | caller = state->frame[state->curframe]; |
| 10281 | callee = kzalloc(sizeof(*callee), GFP_KERNEL); |
| 10282 | if (!callee) |
| 10283 | return -ENOMEM; |
| 10284 | state->frame[state->curframe + 1] = callee; |
| 10285 | |
/* The callee cannot read r0 and r6 - r9 and has to write
* into its own stack before reading from it.
* The callee can read/write into the caller's stack.
| 10289 | */ |
| 10290 | init_func_state(env, callee, |
| 10291 | /* remember the callsite, it will be used by bpf_exit */ |
| 10292 | callsite, |
| 10293 | state->curframe + 1 /* frameno within this callchain */, |
| 10294 | subprog /* subprog number within this prog */); |
| 10295 | err = set_callee_state_cb(env, caller, callee, callsite); |
| 10296 | if (err) |
| 10297 | goto err_out; |
| 10298 | |
| 10299 | /* only increment it after check_reg_arg() finished */ |
| 10300 | state->curframe++; |
| 10301 | |
| 10302 | return 0; |
| 10303 | |
| 10304 | err_out: |
| 10305 | free_func_state(callee); |
| 10306 | state->frame[state->curframe + 1] = NULL; |
| 10307 | return err; |
| 10308 | } |
| 10309 | |
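/* Check that the registers passed into 'subprog' match the argument types
* derived from its BTF via btf_prepare_func_args().
*/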
| 10310 | static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, |
| 10311 | const struct btf *btf, |
| 10312 | struct bpf_reg_state *regs) |
| 10313 | { |
| 10314 | struct bpf_subprog_info *sub = subprog_info(env, subprog); |
| 10315 | struct bpf_verifier_log *log = &env->log; |
| 10316 | u32 i; |
| 10317 | int ret; |
| 10318 | |
| 10319 | ret = btf_prepare_func_args(env, subprog); |
| 10320 | if (ret) |
| 10321 | return ret; |
| 10322 | |
| 10323 | /* check that BTF function arguments match actual types that the |
| 10324 | * verifier sees. |
| 10325 | */ |
| 10326 | for (i = 0; i < sub->arg_cnt; i++) { |
| 10327 | u32 regno = i + 1; |
| 10328 | struct bpf_reg_state *reg = ®s[regno]; |
| 10329 | struct bpf_subprog_arg_info *arg = &sub->args[i]; |
| 10330 | |
| 10331 | if (arg->arg_type == ARG_ANYTHING) { |
| 10332 | if (reg->type != SCALAR_VALUE) { |
| 10333 | bpf_log(log, "R%d is not a scalar\n", regno); |
| 10334 | return -EINVAL; |
| 10335 | } |
| 10336 | } else if (arg->arg_type == ARG_PTR_TO_CTX) { |
| 10337 | ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE); |
| 10338 | if (ret < 0) |
| 10339 | return ret; |
/* If the function expects a ctx type in BTF, check that the
* caller is passing PTR_TO_CTX.
| 10342 | */ |
| 10343 | if (reg->type != PTR_TO_CTX) { |
| 10344 | bpf_log(log, "arg#%d expects pointer to ctx\n", i); |
| 10345 | return -EINVAL; |
| 10346 | } |
| 10347 | } else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) { |
| 10348 | ret = check_func_arg_reg_off(env, reg, regno, ARG_DONTCARE); |
| 10349 | if (ret < 0) |
| 10350 | return ret; |
| 10351 | if (check_mem_reg(env, reg, regno, arg->mem_size)) |
| 10352 | return -EINVAL; |
| 10353 | if (!(arg->arg_type & PTR_MAYBE_NULL) && (reg->type & PTR_MAYBE_NULL)) { |
| 10354 | bpf_log(log, "arg#%d is expected to be non-NULL\n", i); |
| 10355 | return -EINVAL; |
| 10356 | } |
| 10357 | } else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) { |
| 10358 | /* |
* Any value can be passed and the kernel won't crash, but
* only PTR_TO_ARENA or SCALAR make sense. Everything else
* is a bug in the bpf program. Point it out to the user at
* verification time instead of leaving a run-time debugging
* nightmare.
| 10364 | */ |
| 10365 | if (reg->type != PTR_TO_ARENA && reg->type != SCALAR_VALUE) { |
| 10366 | bpf_log(log, "R%d is not a pointer to arena or scalar.\n", regno); |
| 10367 | return -EINVAL; |
| 10368 | } |
| 10369 | } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { |
| 10370 | ret = check_func_arg_reg_off(env, reg, regno, ARG_PTR_TO_DYNPTR); |
| 10371 | if (ret) |
| 10372 | return ret; |
| 10373 | |
| 10374 | ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0); |
| 10375 | if (ret) |
| 10376 | return ret; |
| 10377 | } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) { |
| 10378 | struct bpf_call_arg_meta meta; |
| 10379 | int err; |
| 10380 | |
| 10381 | if (register_is_null(reg) && type_may_be_null(arg->arg_type)) |
| 10382 | continue; |
| 10383 | |
| 10384 | memset(&meta, 0, sizeof(meta)); /* leave func_id as zero */ |
| 10385 | err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta); |
| 10386 | err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type); |
| 10387 | if (err) |
| 10388 | return err; |
| 10389 | } else { |
| 10390 | verifier_bug(env, "unrecognized arg#%d type %d", i, arg->arg_type); |
| 10391 | return -EFAULT; |
| 10392 | } |
| 10393 | } |
| 10394 | |
| 10395 | return 0; |
| 10396 | } |
| 10397 | |
| 10398 | /* Compare BTF of a function call with given bpf_reg_state. |
| 10399 | * Returns: |
| 10400 | * EFAULT - there is a verifier bug. Abort verification. |
| 10401 | * EINVAL - there is a type mismatch or BTF is not available. |
| 10402 | * 0 - BTF matches with what bpf_reg_state expects. |
| 10403 | * Only PTR_TO_CTX and SCALAR_VALUE states are recognized. |
| 10404 | */ |
| 10405 | static int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog, |
| 10406 | struct bpf_reg_state *regs) |
| 10407 | { |
| 10408 | struct bpf_prog *prog = env->prog; |
| 10409 | struct btf *btf = prog->aux->btf; |
| 10410 | u32 btf_id; |
| 10411 | int err; |
| 10412 | |
| 10413 | if (!prog->aux->func_info) |
| 10414 | return -EINVAL; |
| 10415 | |
| 10416 | btf_id = prog->aux->func_info[subprog].type_id; |
| 10417 | if (!btf_id) |
| 10418 | return -EFAULT; |
| 10419 | |
| 10420 | if (prog->aux->func_info_aux[subprog].unreliable) |
| 10421 | return -EINVAL; |
| 10422 | |
| 10423 | err = btf_check_func_arg_match(env, subprog, btf, regs); |
| 10424 | /* Compiler optimizations can remove arguments from static functions |
* or a mismatched type can be passed into a global function.
* In such cases mark the function as unreliable from the BTF point of view.
| 10427 | */ |
| 10428 | if (err) |
| 10429 | prog->aux->func_info_aux[subprog].unreliable = true; |
| 10430 | return err; |
| 10431 | } |
| 10432 | |
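/* Schedule verification of a callback subprog invoked through a helper or
* kfunc. Async callbacks (e.g. timers and workqueues) get their own
* verifier state via push_async_cb(); synchronous callbacks are pushed as
* a separate branch to explore while the current frame proceeds past the call.
*/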
| 10433 | static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *insn, |
| 10434 | int insn_idx, int subprog, |
| 10435 | set_callee_state_fn set_callee_state_cb) |
| 10436 | { |
| 10437 | struct bpf_verifier_state *state = env->cur_state, *callback_state; |
| 10438 | struct bpf_func_state *caller, *callee; |
| 10439 | int err; |
| 10440 | |
| 10441 | caller = state->frame[state->curframe]; |
| 10442 | err = btf_check_subprog_call(env, subprog, caller->regs); |
| 10443 | if (err == -EFAULT) |
| 10444 | return err; |
| 10445 | |
| 10446 | /* set_callee_state is used for direct subprog calls, but we are |
| 10447 | * interested in validating only BPF helpers that can call subprogs as |
| 10448 | * callbacks |
| 10449 | */ |
| 10450 | env->subprog_info[subprog].is_cb = true; |
| 10451 | if (bpf_pseudo_kfunc_call(insn) && |
| 10452 | !is_callback_calling_kfunc(insn->imm)) { |
| 10453 | verifier_bug(env, "kfunc %s#%d not marked as callback-calling", |
| 10454 | func_id_name(insn->imm), insn->imm); |
| 10455 | return -EFAULT; |
| 10456 | } else if (!bpf_pseudo_kfunc_call(insn) && |
| 10457 | !is_callback_calling_function(insn->imm)) { /* helper */ |
| 10458 | verifier_bug(env, "helper %s#%d not marked as callback-calling", |
| 10459 | func_id_name(insn->imm), insn->imm); |
| 10460 | return -EFAULT; |
| 10461 | } |
| 10462 | |
| 10463 | if (is_async_callback_calling_insn(insn)) { |
| 10464 | struct bpf_verifier_state *async_cb; |
| 10465 | |
| 10466 | /* there is no real recursion here. timer and workqueue callbacks are async */ |
| 10467 | env->subprog_info[subprog].is_async_cb = true; |
| 10468 | async_cb = push_async_cb(env, env->subprog_info[subprog].start, |
| 10469 | insn_idx, subprog, |
| 10470 | is_bpf_wq_set_callback_impl_kfunc(insn->imm)); |
| 10471 | if (!async_cb) |
| 10472 | return -EFAULT; |
| 10473 | callee = async_cb->frame[0]; |
| 10474 | callee->async_entry_cnt = caller->async_entry_cnt + 1; |
| 10475 | |
| 10476 | /* Convert bpf_timer_set_callback() args into timer callback args */ |
| 10477 | err = set_callee_state_cb(env, caller, callee, insn_idx); |
| 10478 | if (err) |
| 10479 | return err; |
| 10480 | |
| 10481 | return 0; |
| 10482 | } |
| 10483 | |
/* For callback functions, enqueue an entry for the callback and
* proceed with the next instruction within the current frame.
| 10486 | */ |
| 10487 | callback_state = push_stack(env, env->subprog_info[subprog].start, insn_idx, false); |
| 10488 | if (!callback_state) |
| 10489 | return -ENOMEM; |
| 10490 | |
| 10491 | err = setup_func_entry(env, subprog, insn_idx, set_callee_state_cb, |
| 10492 | callback_state); |
| 10493 | if (err) |
| 10494 | return err; |
| 10495 | |
| 10496 | callback_state->callback_unroll_depth++; |
| 10497 | callback_state->frame[callback_state->curframe - 1]->callback_depth++; |
| 10498 | caller->callback_depth = 0; |
| 10499 | return 0; |
| 10500 | } |
| 10501 | |
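/* Handle a BPF-to-BPF call. Global subprogs are verified separately, so
* only their effects are modelled here (caller-saved registers scratched,
* R0 becomes an unknown scalar) and verification continues after the call.
* For static subprogs a new callee frame is set up and verification
* continues at the callee's first instruction.
*/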
| 10502 | static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, |
| 10503 | int *insn_idx) |
| 10504 | { |
| 10505 | struct bpf_verifier_state *state = env->cur_state; |
| 10506 | struct bpf_func_state *caller; |
| 10507 | int err, subprog, target_insn; |
| 10508 | |
| 10509 | target_insn = *insn_idx + insn->imm + 1; |
| 10510 | subprog = find_subprog(env, target_insn); |
| 10511 | if (verifier_bug_if(subprog < 0, env, "target of func call at insn %d is not a program", |
| 10512 | target_insn)) |
| 10513 | return -EFAULT; |
| 10514 | |
| 10515 | caller = state->frame[state->curframe]; |
| 10516 | err = btf_check_subprog_call(env, subprog, caller->regs); |
| 10517 | if (err == -EFAULT) |
| 10518 | return err; |
| 10519 | if (subprog_is_global(env, subprog)) { |
| 10520 | const char *sub_name = subprog_name(env, subprog); |
| 10521 | |
| 10522 | if (env->cur_state->active_locks) { |
| 10523 | verbose(env, "global function calls are not allowed while holding a lock,\n" |
| 10524 | "use static function instead\n"); |
| 10525 | return -EINVAL; |
| 10526 | } |
| 10527 | |
| 10528 | if (env->subprog_info[subprog].might_sleep && |
| 10529 | (env->cur_state->active_rcu_lock || env->cur_state->active_preempt_locks || |
| 10530 | env->cur_state->active_irq_id || !in_sleepable(env))) { |
| 10531 | verbose(env, "global functions that may sleep are not allowed in non-sleepable context,\n" |
| 10532 | "i.e., in a RCU/IRQ/preempt-disabled section, or in\n" |
| 10533 | "a non-sleepable BPF program context\n"); |
| 10534 | return -EINVAL; |
| 10535 | } |
| 10536 | |
| 10537 | if (err) { |
| 10538 | verbose(env, "Caller passes invalid args into func#%d ('%s')\n", |
| 10539 | subprog, sub_name); |
| 10540 | return err; |
| 10541 | } |
| 10542 | |
| 10543 | verbose(env, "Func#%d ('%s') is global and assumed valid.\n", |
| 10544 | subprog, sub_name); |
| 10545 | if (env->subprog_info[subprog].changes_pkt_data) |
| 10546 | clear_all_pkt_pointers(env); |
| 10547 | /* mark global subprog for verifying after main prog */ |
| 10548 | subprog_aux(env, subprog)->called = true; |
| 10549 | clear_caller_saved_regs(env, caller->regs); |
| 10550 | |
| 10551 | /* All global functions return a 64-bit SCALAR_VALUE */ |
| 10552 | mark_reg_unknown(env, caller->regs, BPF_REG_0); |
| 10553 | caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; |
| 10554 | |
| 10555 | /* continue with next insn after call */ |
| 10556 | return 0; |
| 10557 | } |
| 10558 | |
/* For a regular function call, set up a new frame and continue
* from that frame.
| 10561 | */ |
| 10562 | err = setup_func_entry(env, subprog, *insn_idx, set_callee_state, state); |
| 10563 | if (err) |
| 10564 | return err; |
| 10565 | |
| 10566 | clear_caller_saved_regs(env, caller->regs); |
| 10567 | |
| 10568 | /* and go analyze first insn of the callee */ |
| 10569 | *insn_idx = env->subprog_info[subprog].start - 1; |
| 10570 | |
| 10571 | if (env->log.level & BPF_LOG_LEVEL) { |
| 10572 | verbose(env, "caller:\n"); |
| 10573 | print_verifier_state(env, state, caller->frameno, true); |
| 10574 | verbose(env, "callee:\n"); |
| 10575 | print_verifier_state(env, state, state->curframe, true); |
| 10576 | } |
| 10577 | |
| 10578 | return 0; |
| 10579 | } |
| 10580 | |
| 10581 | int map_set_for_each_callback_args(struct bpf_verifier_env *env, |
| 10582 | struct bpf_func_state *caller, |
| 10583 | struct bpf_func_state *callee) |
| 10584 | { |
| 10585 | /* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, |
| 10586 | * void *callback_ctx, u64 flags); |
| 10587 | * callback_fn(struct bpf_map *map, void *key, void *value, |
| 10588 | * void *callback_ctx); |
| 10589 | */ |
| 10590 | callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; |
| 10591 | |
| 10592 | callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; |
| 10593 | __mark_reg_known_zero(&callee->regs[BPF_REG_2]); |
| 10594 | callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; |
| 10595 | |
| 10596 | callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; |
| 10597 | __mark_reg_known_zero(&callee->regs[BPF_REG_3]); |
| 10598 | callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; |
| 10599 | |
| 10600 | /* pointer to stack or null */ |
| 10601 | callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; |
| 10602 | |
| 10603 | /* unused */ |
| 10604 | __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); |
| 10605 | return 0; |
| 10606 | } |
| 10607 | |
| 10608 | static int set_callee_state(struct bpf_verifier_env *env, |
| 10609 | struct bpf_func_state *caller, |
| 10610 | struct bpf_func_state *callee, int insn_idx) |
| 10611 | { |
| 10612 | int i; |
| 10613 | |
| 10614 | /* copy r1 - r5 args that callee can access. The copy includes parent |
* pointers, which connect us to the liveness chain.
| 10616 | */ |
| 10617 | for (i = BPF_REG_1; i <= BPF_REG_5; i++) |
| 10618 | callee->regs[i] = caller->regs[i]; |
| 10619 | return 0; |
| 10620 | } |
| 10621 | |
| 10622 | static int set_map_elem_callback_state(struct bpf_verifier_env *env, |
| 10623 | struct bpf_func_state *caller, |
| 10624 | struct bpf_func_state *callee, |
| 10625 | int insn_idx) |
| 10626 | { |
| 10627 | struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; |
| 10628 | struct bpf_map *map; |
| 10629 | int err; |
| 10630 | |
/* map_ptr is valid here; whether the aux state was poisoned does not matter */
| 10632 | map = insn_aux->map_ptr_state.map_ptr; |
| 10633 | if (!map->ops->map_set_for_each_callback_args || |
| 10634 | !map->ops->map_for_each_callback) { |
| 10635 | verbose(env, "callback function not allowed for map\n"); |
| 10636 | return -ENOTSUPP; |
| 10637 | } |
| 10638 | |
| 10639 | err = map->ops->map_set_for_each_callback_args(env, caller, callee); |
| 10640 | if (err) |
| 10641 | return err; |
| 10642 | |
| 10643 | callee->in_callback_fn = true; |
| 10644 | callee->callback_ret_range = retval_range(0, 1); |
| 10645 | return 0; |
| 10646 | } |
| 10647 | |
| 10648 | static int set_loop_callback_state(struct bpf_verifier_env *env, |
| 10649 | struct bpf_func_state *caller, |
| 10650 | struct bpf_func_state *callee, |
| 10651 | int insn_idx) |
| 10652 | { |
| 10653 | /* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, |
| 10654 | * u64 flags); |
| 10655 | * callback_fn(u64 index, void *callback_ctx); |
| 10656 | */ |
| 10657 | callee->regs[BPF_REG_1].type = SCALAR_VALUE; |
| 10658 | callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; |
| 10659 | |
| 10660 | /* unused */ |
| 10661 | __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); |
| 10662 | __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); |
| 10663 | __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); |
| 10664 | |
| 10665 | callee->in_callback_fn = true; |
| 10666 | callee->callback_ret_range = retval_range(0, 1); |
| 10667 | return 0; |
| 10668 | } |
| 10669 | |
| 10670 | static int set_timer_callback_state(struct bpf_verifier_env *env, |
| 10671 | struct bpf_func_state *caller, |
| 10672 | struct bpf_func_state *callee, |
| 10673 | int insn_idx) |
| 10674 | { |
| 10675 | struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; |
| 10676 | |
| 10677 | /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn); |
| 10678 | * callback_fn(struct bpf_map *map, void *key, void *value); |
| 10679 | */ |
| 10680 | callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; |
| 10681 | __mark_reg_known_zero(&callee->regs[BPF_REG_1]); |
| 10682 | callee->regs[BPF_REG_1].map_ptr = map_ptr; |
| 10683 | |
| 10684 | callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; |
| 10685 | __mark_reg_known_zero(&callee->regs[BPF_REG_2]); |
| 10686 | callee->regs[BPF_REG_2].map_ptr = map_ptr; |
| 10687 | |
| 10688 | callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; |
| 10689 | __mark_reg_known_zero(&callee->regs[BPF_REG_3]); |
| 10690 | callee->regs[BPF_REG_3].map_ptr = map_ptr; |
| 10691 | |
| 10692 | /* unused */ |
| 10693 | __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); |
| 10694 | __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); |
| 10695 | callee->in_async_callback_fn = true; |
| 10696 | callee->callback_ret_range = retval_range(0, 1); |
| 10697 | return 0; |
| 10698 | } |
| 10699 | |
| 10700 | static int set_find_vma_callback_state(struct bpf_verifier_env *env, |
| 10701 | struct bpf_func_state *caller, |
| 10702 | struct bpf_func_state *callee, |
| 10703 | int insn_idx) |
| 10704 | { |
| 10705 | /* bpf_find_vma(struct task_struct *task, u64 addr, |
| 10706 | * void *callback_fn, void *callback_ctx, u64 flags) |
| 10707 | * (callback_fn)(struct task_struct *task, |
| 10708 | * struct vm_area_struct *vma, void *callback_ctx); |
| 10709 | */ |
| 10710 | callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; |
| 10711 | |
| 10712 | callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; |
| 10713 | __mark_reg_known_zero(&callee->regs[BPF_REG_2]); |
| 10714 | callee->regs[BPF_REG_2].btf = btf_vmlinux; |
| 10715 | callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA]; |
| 10716 | |
| 10717 | /* pointer to stack or null */ |
| 10718 | callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; |
| 10719 | |
| 10720 | /* unused */ |
| 10721 | __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); |
| 10722 | __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); |
| 10723 | callee->in_callback_fn = true; |
| 10724 | callee->callback_ret_range = retval_range(0, 1); |
| 10725 | return 0; |
| 10726 | } |
| 10727 | |
| 10728 | static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env, |
| 10729 | struct bpf_func_state *caller, |
| 10730 | struct bpf_func_state *callee, |
| 10731 | int insn_idx) |
| 10732 | { |
/* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn,
* void *callback_ctx, u64 flags);
* callback_fn(const struct bpf_dynptr_t *dynptr, void *callback_ctx);
| 10736 | */ |
| 10737 | __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); |
| 10738 | mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); |
| 10739 | callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; |
| 10740 | |
| 10741 | /* unused */ |
| 10742 | __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); |
| 10743 | __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); |
| 10744 | __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); |
| 10745 | |
| 10746 | callee->in_callback_fn = true; |
| 10747 | callee->callback_ret_range = retval_range(0, 1); |
| 10748 | return 0; |
| 10749 | } |
| 10750 | |
| 10751 | static int set_rbtree_add_callback_state(struct bpf_verifier_env *env, |
| 10752 | struct bpf_func_state *caller, |
| 10753 | struct bpf_func_state *callee, |
| 10754 | int insn_idx) |
| 10755 | { |
| 10756 | /* void bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, |
* bool (*less)(struct bpf_rb_node *a, const struct bpf_rb_node *b));
*
* The 'struct bpf_rb_node *node' arg to bpf_rbtree_add_impl is the same PTR_TO_BTF_ID w/ offset
* that the 'less' callback args will be receiving. However, the 'node' arg was release_reference'd
* by this point, so look at 'root' instead.
| 10762 | */ |
| 10763 | struct btf_field *field; |
| 10764 | |
| 10765 | field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off, |
| 10766 | BPF_RB_ROOT); |
| 10767 | if (!field || !field->graph_root.value_btf_id) |
| 10768 | return -EFAULT; |
| 10769 | |
| 10770 | mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root); |
| 10771 | ref_set_non_owning(env, &callee->regs[BPF_REG_1]); |
| 10772 | mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root); |
| 10773 | ref_set_non_owning(env, &callee->regs[BPF_REG_2]); |
| 10774 | |
| 10775 | __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); |
| 10776 | __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); |
| 10777 | __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); |
| 10778 | callee->in_callback_fn = true; |
| 10779 | callee->callback_ret_range = retval_range(0, 1); |
| 10780 | return 0; |
| 10781 | } |
| 10782 | |
| 10783 | static bool is_rbtree_lock_required_kfunc(u32 btf_id); |
| 10784 | |
/* Are we currently verifying the callback for an rbtree helper that must
* be called with the lock held? If so, there is no need to complain about
* an unreleased lock.
| 10788 | */ |
| 10789 | static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env) |
| 10790 | { |
| 10791 | struct bpf_verifier_state *state = env->cur_state; |
| 10792 | struct bpf_insn *insn = env->prog->insnsi; |
| 10793 | struct bpf_func_state *callee; |
| 10794 | int kfunc_btf_id; |
| 10795 | |
| 10796 | if (!state->curframe) |
| 10797 | return false; |
| 10798 | |
| 10799 | callee = state->frame[state->curframe]; |
| 10800 | |
| 10801 | if (!callee->in_callback_fn) |
| 10802 | return false; |
| 10803 | |
| 10804 | kfunc_btf_id = insn[callee->callsite].imm; |
| 10805 | return is_rbtree_lock_required_kfunc(kfunc_btf_id); |
| 10806 | } |
| 10807 | |
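/* Return true if the signed bounds of 'reg' (the 32-bit bounds when
* return_32bit is set) fall within 'range'.
*/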
| 10808 | static bool retval_range_within(struct bpf_retval_range range, const struct bpf_reg_state *reg, |
| 10809 | bool return_32bit) |
| 10810 | { |
| 10811 | if (return_32bit) |
| 10812 | return range.minval <= reg->s32_min_value && reg->s32_max_value <= range.maxval; |
| 10813 | else |
| 10814 | return range.minval <= reg->smin_value && reg->smax_value <= range.maxval; |
| 10815 | } |
| 10816 | |
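/* Process BPF_EXIT from a callee frame: validate R0 (callbacks must return
* a scalar within callback_ret_range), propagate R0 to the caller for
* regular subprog calls, pop the frame and set *insn_idx so that
* verification resumes in the caller (at the callsite itself for callbacks,
* so that another callback iteration can be scheduled).
*/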
| 10817 | static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) |
| 10818 | { |
| 10819 | struct bpf_verifier_state *state = env->cur_state, *prev_st; |
| 10820 | struct bpf_func_state *caller, *callee; |
| 10821 | struct bpf_reg_state *r0; |
| 10822 | bool in_callback_fn; |
| 10823 | int err; |
| 10824 | |
| 10825 | callee = state->frame[state->curframe]; |
| 10826 | r0 = &callee->regs[BPF_REG_0]; |
| 10827 | if (r0->type == PTR_TO_STACK) { |
| 10828 | /* technically it's ok to return caller's stack pointer |
| 10829 | * (or caller's caller's pointer) back to the caller, |
* since these pointers are valid. Only the current stack
* pointer becomes invalid as soon as the function exits,
* but let's be conservative.
| 10833 | */ |
| 10834 | verbose(env, "cannot return stack pointer to the caller\n"); |
| 10835 | return -EINVAL; |
| 10836 | } |
| 10837 | |
| 10838 | caller = state->frame[state->curframe - 1]; |
| 10839 | if (callee->in_callback_fn) { |
| 10840 | if (r0->type != SCALAR_VALUE) { |
| 10841 | verbose(env, "R0 not a scalar value\n"); |
| 10842 | return -EACCES; |
| 10843 | } |
| 10844 | |
| 10845 | /* we are going to rely on register's precise value */ |
| 10846 | err = mark_reg_read(env, r0, r0->parent, REG_LIVE_READ64); |
| 10847 | err = err ?: mark_chain_precision(env, BPF_REG_0); |
| 10848 | if (err) |
| 10849 | return err; |
| 10850 | |
| 10851 | /* enforce R0 return value range, and bpf_callback_t returns 64bit */ |
| 10852 | if (!retval_range_within(callee->callback_ret_range, r0, false)) { |
| 10853 | verbose_invalid_scalar(env, r0, callee->callback_ret_range, |
| 10854 | "At callback return", "R0"); |
| 10855 | return -EINVAL; |
| 10856 | } |
| 10857 | if (!calls_callback(env, callee->callsite)) { |
| 10858 | verbose(env, "BUG: in callback at %d, callsite %d !calls_callback\n", |
| 10859 | *insn_idx, callee->callsite); |
| 10860 | return -EFAULT; |
| 10861 | } |
| 10862 | } else { |
| 10863 | /* return to the caller whatever r0 had in the callee */ |
| 10864 | caller->regs[BPF_REG_0] = *r0; |
| 10865 | } |
| 10866 | |
/* For callbacks like bpf_loop or bpf_for_each_map_elem go back to the callsite,
* where the function call logic will reschedule the callback visit. If the
* iteration converges, is_state_visited() will eventually prune that visit.
| 10870 | */ |
| 10871 | in_callback_fn = callee->in_callback_fn; |
| 10872 | if (in_callback_fn) |
| 10873 | *insn_idx = callee->callsite; |
| 10874 | else |
| 10875 | *insn_idx = callee->callsite + 1; |
| 10876 | |
| 10877 | if (env->log.level & BPF_LOG_LEVEL) { |
| 10878 | verbose(env, "returning from callee:\n"); |
| 10879 | print_verifier_state(env, state, callee->frameno, true); |
| 10880 | verbose(env, "to caller at %d:\n", *insn_idx); |
| 10881 | print_verifier_state(env, state, caller->frameno, true); |
| 10882 | } |
/* Clear everything in the callee. In case of exceptional exits using
* bpf_throw, this will be done by copy_verifier_state for extra frames.
*/
| 10885 | free_func_state(callee); |
| 10886 | state->frame[state->curframe--] = NULL; |
| 10887 | |
| 10888 | /* for callbacks widen imprecise scalars to make programs like below verify: |
| 10889 | * |
* struct ctx { int i; };
* void cb(int idx, struct ctx *ctx) { ctx->i++; ... }
* ...
* struct ctx ctx = { .i = 0 };
* bpf_loop(100, cb, &ctx, 0);
| 10895 | * |
| 10896 | * This is similar to what is done in process_iter_next_call() for open |
| 10897 | * coded iterators. |
| 10898 | */ |
| 10899 | prev_st = in_callback_fn ? find_prev_entry(env, state, *insn_idx) : NULL; |
| 10900 | if (prev_st) { |
| 10901 | err = widen_imprecise_scalars(env, prev_st, state); |
| 10902 | if (err) |
| 10903 | return err; |
| 10904 | } |
| 10905 | return 0; |
| 10906 | } |
| 10907 | |
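/* Tighten the bounds of R0 for helpers whose integer return value has a
* known range, e.g. the *_str/get_stack helpers are bounded by the buffer
* size passed in and get_smp_processor_id() by nr_cpu_ids.
*/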
| 10908 | static int do_refine_retval_range(struct bpf_verifier_env *env, |
| 10909 | struct bpf_reg_state *regs, int ret_type, |
| 10910 | int func_id, |
| 10911 | struct bpf_call_arg_meta *meta) |
| 10912 | { |
| 10913 | struct bpf_reg_state *ret_reg = ®s[BPF_REG_0]; |
| 10914 | |
| 10915 | if (ret_type != RET_INTEGER) |
| 10916 | return 0; |
| 10917 | |
| 10918 | switch (func_id) { |
| 10919 | case BPF_FUNC_get_stack: |
| 10920 | case BPF_FUNC_get_task_stack: |
| 10921 | case BPF_FUNC_probe_read_str: |
| 10922 | case BPF_FUNC_probe_read_kernel_str: |
| 10923 | case BPF_FUNC_probe_read_user_str: |
| 10924 | ret_reg->smax_value = meta->msize_max_value; |
| 10925 | ret_reg->s32_max_value = meta->msize_max_value; |
| 10926 | ret_reg->smin_value = -MAX_ERRNO; |
| 10927 | ret_reg->s32_min_value = -MAX_ERRNO; |
| 10928 | reg_bounds_sync(ret_reg); |
| 10929 | break; |
| 10930 | case BPF_FUNC_get_smp_processor_id: |
| 10931 | ret_reg->umax_value = nr_cpu_ids - 1; |
| 10932 | ret_reg->u32_max_value = nr_cpu_ids - 1; |
| 10933 | ret_reg->smax_value = nr_cpu_ids - 1; |
| 10934 | ret_reg->s32_max_value = nr_cpu_ids - 1; |
| 10935 | ret_reg->umin_value = 0; |
| 10936 | ret_reg->u32_min_value = 0; |
| 10937 | ret_reg->smin_value = 0; |
| 10938 | ret_reg->s32_min_value = 0; |
| 10939 | reg_bounds_sync(ret_reg); |
| 10940 | break; |
| 10941 | } |
| 10942 | |
| 10943 | return reg_bounds_sanity_check(env, ret_reg, "retval"); |
| 10944 | } |
| 10945 | |
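/* For helpers that operate on a map, remember the map pointer in
* insn_aux_data (or poison it if different maps reach the same call site)
* and reject writing helpers on BPF_F_RDONLY_PROG maps.
*/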
| 10946 | static int |
| 10947 | record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, |
| 10948 | int func_id, int insn_idx) |
| 10949 | { |
| 10950 | struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; |
| 10951 | struct bpf_map *map = meta->map_ptr; |
| 10952 | |
| 10953 | if (func_id != BPF_FUNC_tail_call && |
| 10954 | func_id != BPF_FUNC_map_lookup_elem && |
| 10955 | func_id != BPF_FUNC_map_update_elem && |
| 10956 | func_id != BPF_FUNC_map_delete_elem && |
| 10957 | func_id != BPF_FUNC_map_push_elem && |
| 10958 | func_id != BPF_FUNC_map_pop_elem && |
| 10959 | func_id != BPF_FUNC_map_peek_elem && |
| 10960 | func_id != BPF_FUNC_for_each_map_elem && |
| 10961 | func_id != BPF_FUNC_redirect_map && |
| 10962 | func_id != BPF_FUNC_map_lookup_percpu_elem) |
| 10963 | return 0; |
| 10964 | |
| 10965 | if (map == NULL) { |
| 10966 | verbose(env, "kernel subsystem misconfigured verifier\n"); |
| 10967 | return -EINVAL; |
| 10968 | } |
| 10969 | |
/* In case of a read-only map, some additional restrictions
* need to be applied in order to prevent the program from
* altering the state of the map.
| 10973 | */ |
| 10974 | if ((map->map_flags & BPF_F_RDONLY_PROG) && |
| 10975 | (func_id == BPF_FUNC_map_delete_elem || |
| 10976 | func_id == BPF_FUNC_map_update_elem || |
| 10977 | func_id == BPF_FUNC_map_push_elem || |
| 10978 | func_id == BPF_FUNC_map_pop_elem)) { |
| 10979 | verbose(env, "write into map forbidden\n"); |
| 10980 | return -EACCES; |
| 10981 | } |
| 10982 | |
| 10983 | if (!aux->map_ptr_state.map_ptr) |
| 10984 | bpf_map_ptr_store(aux, meta->map_ptr, |
| 10985 | !meta->map_ptr->bypass_spec_v1, false); |
| 10986 | else if (aux->map_ptr_state.map_ptr != meta->map_ptr) |
| 10987 | bpf_map_ptr_store(aux, meta->map_ptr, |
| 10988 | !meta->map_ptr->bypass_spec_v1, true); |
| 10989 | return 0; |
| 10990 | } |
| 10991 | |
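/* For bpf_tail_call, record a constant prog-array index in insn_aux_data;
* a non-constant or out-of-range index, or conflicting indices at the same
* call site, poison the recorded key.
*/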
| 10992 | static int |
| 10993 | record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, |
| 10994 | int func_id, int insn_idx) |
| 10995 | { |
| 10996 | struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; |
| 10997 | struct bpf_reg_state *regs = cur_regs(env), *reg; |
| 10998 | struct bpf_map *map = meta->map_ptr; |
| 10999 | u64 val, max; |
| 11000 | int err; |
| 11001 | |
| 11002 | if (func_id != BPF_FUNC_tail_call) |
| 11003 | return 0; |
| 11004 | if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) { |
| 11005 | verbose(env, "kernel subsystem misconfigured verifier\n"); |
| 11006 | return -EINVAL; |
| 11007 | } |
| 11008 | |
| 11009 | reg = ®s[BPF_REG_3]; |
| 11010 | val = reg->var_off.value; |
| 11011 | max = map->max_entries; |
| 11012 | |
| 11013 | if (!(is_reg_const(reg, false) && val < max)) { |
| 11014 | bpf_map_key_store(aux, BPF_MAP_KEY_POISON); |
| 11015 | return 0; |
| 11016 | } |
| 11017 | |
| 11018 | err = mark_chain_precision(env, BPF_REG_3); |
| 11019 | if (err) |
| 11020 | return err; |
| 11021 | if (bpf_map_key_unseen(aux)) |
| 11022 | bpf_map_key_store(aux, val); |
| 11023 | else if (!bpf_map_key_poisoned(aux) && |
| 11024 | bpf_map_key_immediate(aux) != val) |
| 11025 | bpf_map_key_store(aux, BPF_MAP_KEY_POISON); |
| 11026 | return 0; |
| 11027 | } |
| 11028 | |
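/* At program exit (or at an exception exit) make sure no acquired
* references are left unreleased. struct_ops programs may return a
* referenced kptr in R0; that case is type-checked later in
* check_return_code().
*/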
| 11029 | static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exit) |
| 11030 | { |
| 11031 | struct bpf_verifier_state *state = env->cur_state; |
| 11032 | enum bpf_prog_type type = resolve_prog_type(env->prog); |
| 11033 | struct bpf_reg_state *reg = reg_state(env, BPF_REG_0); |
| 11034 | bool refs_lingering = false; |
| 11035 | int i; |
| 11036 | |
| 11037 | if (!exception_exit && cur_func(env)->frameno) |
| 11038 | return 0; |
| 11039 | |
| 11040 | for (i = 0; i < state->acquired_refs; i++) { |
| 11041 | if (state->refs[i].type != REF_TYPE_PTR) |
| 11042 | continue; |
| 11043 | /* Allow struct_ops programs to return a referenced kptr back to |
* the kernel. Type checks are performed later in check_return_code.
| 11045 | */ |
| 11046 | if (type == BPF_PROG_TYPE_STRUCT_OPS && !exception_exit && |
| 11047 | reg->ref_obj_id == state->refs[i].id) |
| 11048 | continue; |
| 11049 | verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", |
| 11050 | state->refs[i].id, state->refs[i].insn_idx); |
| 11051 | refs_lingering = true; |
| 11052 | } |
| 11053 | return refs_lingering ? -EINVAL : 0; |
| 11054 | } |
| 11055 | |
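/* Common exit-path check: when check_lock is set, reject leaving a
* spin_lock/IRQ/RCU/preempt-disabled critical section, and always report
* reference leaks. 'prefix' names the construct being checked, e.g.
* "tail_call".
*/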
| 11056 | static int check_resource_leak(struct bpf_verifier_env *env, bool exception_exit, bool check_lock, const char *prefix) |
| 11057 | { |
| 11058 | int err; |
| 11059 | |
| 11060 | if (check_lock && env->cur_state->active_locks) { |
| 11061 | verbose(env, "%s cannot be used inside bpf_spin_lock-ed region\n", prefix); |
| 11062 | return -EINVAL; |
| 11063 | } |
| 11064 | |
| 11065 | err = check_reference_leak(env, exception_exit); |
| 11066 | if (err) { |
| 11067 | verbose(env, "%s would lead to reference leak\n", prefix); |
| 11068 | return err; |
| 11069 | } |
| 11070 | |
| 11071 | if (check_lock && env->cur_state->active_irq_id) { |
| 11072 | verbose(env, "%s cannot be used inside bpf_local_irq_save-ed region\n", prefix); |
| 11073 | return -EINVAL; |
| 11074 | } |
| 11075 | |
| 11076 | if (check_lock && env->cur_state->active_rcu_lock) { |
| 11077 | verbose(env, "%s cannot be used inside bpf_rcu_read_lock-ed region\n", prefix); |
| 11078 | return -EINVAL; |
| 11079 | } |
| 11080 | |
| 11081 | if (check_lock && env->cur_state->active_preempt_locks) { |
| 11082 | verbose(env, "%s cannot be used inside bpf_preempt_disable-ed region\n", prefix); |
| 11083 | return -EINVAL; |
| 11084 | } |
| 11085 | |
| 11086 | return 0; |
| 11087 | } |
| 11088 | |
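/* Validate the constant format string passed to bpf_snprintf(): the data
* length must be a multiple of 8 (an array of u64) and the format
* specifiers must be accepted by bpf_bprintf_prepare().
*/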
| 11089 | static int check_bpf_snprintf_call(struct bpf_verifier_env *env, |
| 11090 | struct bpf_reg_state *regs) |
| 11091 | { |
| 11092 | struct bpf_reg_state *fmt_reg = ®s[BPF_REG_3]; |
| 11093 | struct bpf_reg_state *data_len_reg = ®s[BPF_REG_5]; |
| 11094 | struct bpf_map *fmt_map = fmt_reg->map_ptr; |
| 11095 | struct bpf_bprintf_data data = {}; |
| 11096 | int err, fmt_map_off, num_args; |
| 11097 | u64 fmt_addr; |
| 11098 | char *fmt; |
| 11099 | |
| 11100 | /* data must be an array of u64 */ |
| 11101 | if (data_len_reg->var_off.value % 8) |
| 11102 | return -EINVAL; |
| 11103 | num_args = data_len_reg->var_off.value / 8; |
| 11104 | |
| 11105 | /* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const |
| 11106 | * and map_direct_value_addr is set. |
| 11107 | */ |
| 11108 | fmt_map_off = fmt_reg->off + fmt_reg->var_off.value; |
| 11109 | err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr, |
| 11110 | fmt_map_off); |
| 11111 | if (err) { |
| 11112 | verbose(env, "failed to retrieve map value address\n"); |
| 11113 | return -EFAULT; |
| 11114 | } |
| 11115 | fmt = (char *)(long)fmt_addr + fmt_map_off; |
| 11116 | |
/* We are also guaranteed that fmt+fmt_map_off is NULL terminated, so we
* can focus on validating the format specifiers.
| 11119 | */ |
| 11120 | err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data); |
| 11121 | if (err < 0) |
| 11122 | verbose(env, "Invalid format string\n"); |
| 11123 | |
| 11124 | return err; |
| 11125 | } |
| 11126 | |
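/* bpf_get_func_ip() is only supported for tracing programs that use a
* trampoline (fentry/fexit/fmod_ret) and for kprobe programs.
*/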
| 11127 | static int check_get_func_ip(struct bpf_verifier_env *env) |
| 11128 | { |
| 11129 | enum bpf_prog_type type = resolve_prog_type(env->prog); |
| 11130 | int func_id = BPF_FUNC_get_func_ip; |
| 11131 | |
| 11132 | if (type == BPF_PROG_TYPE_TRACING) { |
| 11133 | if (!bpf_prog_has_trampoline(env->prog)) { |
| 11134 | verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n", |
| 11135 | func_id_name(func_id), func_id); |
| 11136 | return -ENOTSUPP; |
| 11137 | } |
| 11138 | return 0; |
| 11139 | } else if (type == BPF_PROG_TYPE_KPROBE) { |
| 11140 | return 0; |
| 11141 | } |
| 11142 | |
| 11143 | verbose(env, "func %s#%d not supported for program type %d\n", |
| 11144 | func_id_name(func_id), func_id, type); |
| 11145 | return -ENOTSUPP; |
| 11146 | } |
| 11147 | |
| 11148 | static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) |
| 11149 | { |
| 11150 | return &env->insn_aux_data[env->insn_idx]; |
| 11151 | } |
| 11152 | |
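/* Return true if the 'flags' argument of bpf_loop() (R4) is a known zero,
* marking it precise in that case since the inlining decision depends on
* its value.
*/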
| 11153 | static bool loop_flag_is_zero(struct bpf_verifier_env *env) |
| 11154 | { |
| 11155 | struct bpf_reg_state *regs = cur_regs(env); |
| 11156 | struct bpf_reg_state *reg = ®s[BPF_REG_4]; |
| 11157 | bool reg_is_null = register_is_null(reg); |
| 11158 | |
| 11159 | if (reg_is_null) |
| 11160 | mark_chain_precision(env, BPF_REG_4); |
| 11161 | |
| 11162 | return reg_is_null; |
| 11163 | } |
| 11164 | |
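/* Track per-callsite state used to decide whether this bpf_loop() call can
* be inlined: it remains fit for inlining only while the flags argument is
* known to be zero and the same callback subprog is seen on every path
* reaching the call.
*/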
| 11165 | static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno) |
| 11166 | { |
| 11167 | struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state; |
| 11168 | |
| 11169 | if (!state->initialized) { |
| 11170 | state->initialized = 1; |
| 11171 | state->fit_for_inline = loop_flag_is_zero(env); |
| 11172 | state->callback_subprogno = subprogno; |
| 11173 | return; |
| 11174 | } |
| 11175 | |
| 11176 | if (!state->fit_for_inline) |
| 11177 | return; |
| 11178 | |
| 11179 | state->fit_for_inline = (loop_flag_is_zero(env) && |
| 11180 | state->callback_subprogno == subprogno); |
| 11181 | } |
| 11182 | |
/* Returns whether or not the given map type can potentially elide the
* lookup return value nullness check. This is possible if the key
| 11185 | * is statically known. |
| 11186 | */ |
| 11187 | static bool can_elide_value_nullness(enum bpf_map_type type) |
| 11188 | { |
| 11189 | switch (type) { |
| 11190 | case BPF_MAP_TYPE_ARRAY: |
| 11191 | case BPF_MAP_TYPE_PERCPU_ARRAY: |
| 11192 | return true; |
| 11193 | default: |
| 11194 | return false; |
| 11195 | } |
| 11196 | } |
| 11197 | |
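/* Look up the bpf_func_proto for 'func_id' via the program-type specific
* get_func_proto() callback. Returns -ERANGE for an out-of-range func_id
* and -EINVAL if the helper is not available to this program type.
*/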
| 11198 | static int get_helper_proto(struct bpf_verifier_env *env, int func_id, |
| 11199 | const struct bpf_func_proto **ptr) |
| 11200 | { |
| 11201 | if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) |
| 11202 | return -ERANGE; |
| 11203 | |
| 11204 | if (!env->ops->get_func_proto) |
| 11205 | return -EINVAL; |
| 11206 | |
| 11207 | *ptr = env->ops->get_func_proto(func_id, env->prog); |
| 11208 | return *ptr ? 0 : -EINVAL; |
| 11209 | } |
| 11210 | |
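/* Verify a call to a BPF helper: resolve its prototype, enforce GPL,
* sleepability and critical-section restrictions, check all arguments,
* handle helper-specific side effects (callbacks, dynptrs, map and tail
* call key recording), then scratch the caller-saved registers and set up
* R0 according to the helper's return type.
*/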
| 11211 | static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, |
| 11212 | int *insn_idx_p) |
| 11213 | { |
| 11214 | enum bpf_prog_type prog_type = resolve_prog_type(env->prog); |
| 11215 | bool returns_cpu_specific_alloc_ptr = false; |
| 11216 | const struct bpf_func_proto *fn = NULL; |
| 11217 | enum bpf_return_type ret_type; |
| 11218 | enum bpf_type_flag ret_flag; |
| 11219 | struct bpf_reg_state *regs; |
| 11220 | struct bpf_call_arg_meta meta; |
| 11221 | int insn_idx = *insn_idx_p; |
| 11222 | bool changes_data; |
| 11223 | int i, err, func_id; |
| 11224 | |
| 11225 | /* find function prototype */ |
| 11226 | func_id = insn->imm; |
| 11227 | err = get_helper_proto(env, insn->imm, &fn); |
| 11228 | if (err == -ERANGE) { |
| 11229 | verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); |
| 11230 | return -EINVAL; |
| 11231 | } |
| 11232 | |
| 11233 | if (err) { |
| 11234 | verbose(env, "program of this type cannot use helper %s#%d\n", |
| 11235 | func_id_name(func_id), func_id); |
| 11236 | return err; |
| 11237 | } |
| 11238 | |
| 11239 | /* eBPF programs must be GPL compatible to use GPL-ed functions */ |
| 11240 | if (!env->prog->gpl_compatible && fn->gpl_only) { |
| 11241 | verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); |
| 11242 | return -EINVAL; |
| 11243 | } |
| 11244 | |
| 11245 | if (fn->allowed && !fn->allowed(env->prog)) { |
| 11246 | verbose(env, "helper call is not allowed in probe\n"); |
| 11247 | return -EINVAL; |
| 11248 | } |
| 11249 | |
| 11250 | if (!in_sleepable(env) && fn->might_sleep) { |
| 11251 | verbose(env, "helper call might sleep in a non-sleepable prog\n"); |
| 11252 | return -EINVAL; |
| 11253 | } |
| 11254 | |
| 11255 | /* With LD_ABS/IND some JITs save/restore skb from r1. */ |
| 11256 | changes_data = bpf_helper_changes_pkt_data(func_id); |
| 11257 | if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { |
| 11258 | verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", |
| 11259 | func_id_name(func_id), func_id); |
| 11260 | return -EINVAL; |
| 11261 | } |
| 11262 | |
| 11263 | memset(&meta, 0, sizeof(meta)); |
| 11264 | meta.pkt_access = fn->pkt_access; |
| 11265 | |
| 11266 | err = check_func_proto(fn, func_id); |
| 11267 | if (err) { |
| 11268 | verbose(env, "kernel subsystem misconfigured func %s#%d\n", |
| 11269 | func_id_name(func_id), func_id); |
| 11270 | return err; |
| 11271 | } |
| 11272 | |
| 11273 | if (env->cur_state->active_rcu_lock) { |
| 11274 | if (fn->might_sleep) { |
| 11275 | verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n", |
| 11276 | func_id_name(func_id), func_id); |
| 11277 | return -EINVAL; |
| 11278 | } |
| 11279 | |
| 11280 | if (in_sleepable(env) && is_storage_get_function(func_id)) |
| 11281 | env->insn_aux_data[insn_idx].storage_get_func_atomic = true; |
| 11282 | } |
| 11283 | |
| 11284 | if (env->cur_state->active_preempt_locks) { |
| 11285 | if (fn->might_sleep) { |
| 11286 | verbose(env, "sleepable helper %s#%d in non-preemptible region\n", |
| 11287 | func_id_name(func_id), func_id); |
| 11288 | return -EINVAL; |
| 11289 | } |
| 11290 | |
| 11291 | if (in_sleepable(env) && is_storage_get_function(func_id)) |
| 11292 | env->insn_aux_data[insn_idx].storage_get_func_atomic = true; |
| 11293 | } |
| 11294 | |
| 11295 | if (env->cur_state->active_irq_id) { |
| 11296 | if (fn->might_sleep) { |
| 11297 | verbose(env, "sleepable helper %s#%d in IRQ-disabled region\n", |
| 11298 | func_id_name(func_id), func_id); |
| 11299 | return -EINVAL; |
| 11300 | } |
| 11301 | |
| 11302 | if (in_sleepable(env) && is_storage_get_function(func_id)) |
| 11303 | env->insn_aux_data[insn_idx].storage_get_func_atomic = true; |
| 11304 | } |
| 11305 | |
| 11306 | meta.func_id = func_id; |
| 11307 | /* check args */ |
| 11308 | for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { |
| 11309 | err = check_func_arg(env, i, &meta, fn, insn_idx); |
| 11310 | if (err) |
| 11311 | return err; |
| 11312 | } |
| 11313 | |
| 11314 | err = record_func_map(env, &meta, func_id, insn_idx); |
| 11315 | if (err) |
| 11316 | return err; |
| 11317 | |
| 11318 | err = record_func_key(env, &meta, func_id, insn_idx); |
| 11319 | if (err) |
| 11320 | return err; |
| 11321 | |
/* Mark slots with STACK_MISC in case of raw mode; the stack offset
* is inferred from register state.
| 11324 | */ |
| 11325 | for (i = 0; i < meta.access_size; i++) { |
| 11326 | err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, |
| 11327 | BPF_WRITE, -1, false, false); |
| 11328 | if (err) |
| 11329 | return err; |
| 11330 | } |
| 11331 | |
| 11332 | regs = cur_regs(env); |
| 11333 | |
| 11334 | if (meta.release_regno) { |
| 11335 | err = -EINVAL; |
| 11336 | /* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot |
| 11337 | * be released by any dynptr helper. Hence, unmark_stack_slots_dynptr |
| 11338 | * is safe to do directly. |
| 11339 | */ |
| 11340 | if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) { |
| 11341 | if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) { |
| 11342 | verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be released\n"); |
| 11343 | return -EFAULT; |
| 11344 | } |
| 11345 | err = unmark_stack_slots_dynptr(env, ®s[meta.release_regno]); |
| 11346 | } else if (func_id == BPF_FUNC_kptr_xchg && meta.ref_obj_id) { |
| 11347 | u32 ref_obj_id = meta.ref_obj_id; |
| 11348 | bool in_rcu = in_rcu_cs(env); |
| 11349 | struct bpf_func_state *state; |
| 11350 | struct bpf_reg_state *reg; |
| 11351 | |
| 11352 | err = release_reference_nomark(env->cur_state, ref_obj_id); |
| 11353 | if (!err) { |
| 11354 | bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ |
| 11355 | if (reg->ref_obj_id == ref_obj_id) { |
| 11356 | if (in_rcu && (reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU)) { |
| 11357 | reg->ref_obj_id = 0; |
| 11358 | reg->type &= ~MEM_ALLOC; |
| 11359 | reg->type |= MEM_RCU; |
| 11360 | } else { |
| 11361 | mark_reg_invalid(env, reg); |
| 11362 | } |
| 11363 | } |
| 11364 | })); |
| 11365 | } |
| 11366 | } else if (meta.ref_obj_id) { |
| 11367 | err = release_reference(env, meta.ref_obj_id); |
| 11368 | } else if (register_is_null(®s[meta.release_regno])) { |
/* meta.ref_obj_id can only be 0 if the register that is meant to be
| 11370 | * released is NULL, which must be > R0. |
| 11371 | */ |
| 11372 | err = 0; |
| 11373 | } |
| 11374 | if (err) { |
| 11375 | verbose(env, "func %s#%d reference has not been acquired before\n", |
| 11376 | func_id_name(func_id), func_id); |
| 11377 | return err; |
| 11378 | } |
| 11379 | } |
| 11380 | |
| 11381 | switch (func_id) { |
| 11382 | case BPF_FUNC_tail_call: |
| 11383 | err = check_resource_leak(env, false, true, "tail_call"); |
| 11384 | if (err) |
| 11385 | return err; |
| 11386 | break; |
| 11387 | case BPF_FUNC_get_local_storage: |
/* Check that the flags argument in get_local_storage(map, flags) is 0;
* this is required because get_local_storage() can't return an error.
| 11390 | */ |
| 11391 | if (!register_is_null(®s[BPF_REG_2])) { |
| 11392 | verbose(env, "get_local_storage() doesn't support non-zero flags\n"); |
| 11393 | return -EINVAL; |
| 11394 | } |
| 11395 | break; |
| 11396 | case BPF_FUNC_for_each_map_elem: |
| 11397 | err = push_callback_call(env, insn, insn_idx, meta.subprogno, |
| 11398 | set_map_elem_callback_state); |
| 11399 | break; |
| 11400 | case BPF_FUNC_timer_set_callback: |
| 11401 | err = push_callback_call(env, insn, insn_idx, meta.subprogno, |
| 11402 | set_timer_callback_state); |
| 11403 | break; |
| 11404 | case BPF_FUNC_find_vma: |
| 11405 | err = push_callback_call(env, insn, insn_idx, meta.subprogno, |
| 11406 | set_find_vma_callback_state); |
| 11407 | break; |
| 11408 | case BPF_FUNC_snprintf: |
| 11409 | err = check_bpf_snprintf_call(env, regs); |
| 11410 | break; |
| 11411 | case BPF_FUNC_loop: |
| 11412 | update_loop_inline_state(env, meta.subprogno); |
| 11413 | /* Verifier relies on R1 value to determine if bpf_loop() iteration |
| 11414 | * is finished, thus mark it precise. |
| 11415 | */ |
| 11416 | err = mark_chain_precision(env, BPF_REG_1); |
| 11417 | if (err) |
| 11418 | return err; |
| 11419 | if (cur_func(env)->callback_depth < regs[BPF_REG_1].umax_value) { |
| 11420 | err = push_callback_call(env, insn, insn_idx, meta.subprogno, |
| 11421 | set_loop_callback_state); |
| 11422 | } else { |
| 11423 | cur_func(env)->callback_depth = 0; |
| 11424 | if (env->log.level & BPF_LOG_LEVEL2) |
| 11425 | verbose(env, "frame%d bpf_loop iteration limit reached\n", |
| 11426 | env->cur_state->curframe); |
| 11427 | } |
| 11428 | break; |
| 11429 | case BPF_FUNC_dynptr_from_mem: |
| 11430 | if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) { |
| 11431 | verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n", |
| 11432 | reg_type_str(env, regs[BPF_REG_1].type)); |
| 11433 | return -EACCES; |
| 11434 | } |
| 11435 | break; |
| 11436 | case BPF_FUNC_set_retval: |
| 11437 | if (prog_type == BPF_PROG_TYPE_LSM && |
| 11438 | env->prog->expected_attach_type == BPF_LSM_CGROUP) { |
| 11439 | if (!env->prog->aux->attach_func_proto->type) { |
| 11440 | /* Make sure programs that attach to void |
| 11441 | * hooks don't try to modify return value. |
| 11442 | */ |
| 11443 | verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n"); |
| 11444 | return -EINVAL; |
| 11445 | } |
| 11446 | } |
| 11447 | break; |
| 11448 | case BPF_FUNC_dynptr_data: |
| 11449 | { |
| 11450 | struct bpf_reg_state *reg; |
| 11451 | int id, ref_obj_id; |
| 11452 | |
| 11453 | reg = get_dynptr_arg_reg(env, fn, regs); |
| 11454 | if (!reg) |
| 11455 | return -EFAULT; |
| 11456 | |
| 11458 | if (meta.dynptr_id) { |
| 11459 | verbose(env, "verifier internal error: meta.dynptr_id already set\n"); |
| 11460 | return -EFAULT; |
| 11461 | } |
| 11462 | if (meta.ref_obj_id) { |
| 11463 | verbose(env, "verifier internal error: meta.ref_obj_id already set\n"); |
| 11464 | return -EFAULT; |
| 11465 | } |
| 11466 | |
| 11467 | id = dynptr_id(env, reg); |
| 11468 | if (id < 0) { |
| 11469 | verbose(env, "verifier internal error: failed to obtain dynptr id\n"); |
| 11470 | return id; |
| 11471 | } |
| 11472 | |
| 11473 | ref_obj_id = dynptr_ref_obj_id(env, reg); |
| 11474 | if (ref_obj_id < 0) { |
| 11475 | verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n"); |
| 11476 | return ref_obj_id; |
| 11477 | } |
| 11478 | |
| 11479 | meta.dynptr_id = id; |
| 11480 | meta.ref_obj_id = ref_obj_id; |
| 11481 | |
| 11482 | break; |
| 11483 | } |
| 11484 | case BPF_FUNC_dynptr_write: |
| 11485 | { |
| 11486 | enum bpf_dynptr_type dynptr_type; |
| 11487 | struct bpf_reg_state *reg; |
| 11488 | |
| 11489 | reg = get_dynptr_arg_reg(env, fn, regs); |
| 11490 | if (!reg) |
| 11491 | return -EFAULT; |
| 11492 | |
| 11493 | dynptr_type = dynptr_get_type(env, reg); |
| 11494 | if (dynptr_type == BPF_DYNPTR_TYPE_INVALID) |
| 11495 | return -EFAULT; |
| 11496 | |
| 11497 | if (dynptr_type == BPF_DYNPTR_TYPE_SKB) |
| 11498 | /* this will trigger clear_all_pkt_pointers(), which will |
| 11499 | * invalidate all dynptr slices associated with the skb |
| 11500 | */ |
| 11501 | changes_data = true; |
| 11502 | |
| 11503 | break; |
| 11504 | } |
| 11505 | case BPF_FUNC_per_cpu_ptr: |
| 11506 | case BPF_FUNC_this_cpu_ptr: |
| 11507 | { |
| 11508 | struct bpf_reg_state *reg = ®s[BPF_REG_1]; |
| 11509 | const struct btf_type *type; |
| 11510 | |
| 11511 | if (reg->type & MEM_RCU) { |
| 11512 | type = btf_type_by_id(reg->btf, reg->btf_id); |
| 11513 | if (!type || !btf_type_is_struct(type)) { |
| 11514 | verbose(env, "Helper has invalid btf/btf_id in R1\n"); |
| 11515 | return -EFAULT; |
| 11516 | } |
| 11517 | returns_cpu_specific_alloc_ptr = true; |
| 11518 | env->insn_aux_data[insn_idx].call_with_percpu_alloc_ptr = true; |
| 11519 | } |
| 11520 | break; |
| 11521 | } |
| 11522 | case BPF_FUNC_user_ringbuf_drain: |
| 11523 | err = push_callback_call(env, insn, insn_idx, meta.subprogno, |
| 11524 | set_user_ringbuf_callback_state); |
| 11525 | break; |
| 11526 | } |
| 11527 | |
| 11528 | if (err) |
| 11529 | return err; |
| 11530 | |
| 11531 | /* reset caller saved regs */ |
| 11532 | for (i = 0; i < CALLER_SAVED_REGS; i++) { |
| 11533 | mark_reg_not_init(env, regs, caller_saved[i]); |
| 11534 | check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); |
| 11535 | } |
| 11536 | |
| 11537 | /* helper call returns 64-bit value. */ |
| 11538 | regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; |
| 11539 | |
| 11540 | /* update return register (already marked as written above) */ |
| 11541 | ret_type = fn->ret_type; |
| 11542 | ret_flag = type_flag(ret_type); |
| 11543 | |
| 11544 | switch (base_type(ret_type)) { |
| 11545 | case RET_INTEGER: |
| 11546 | /* sets type to SCALAR_VALUE */ |
| 11547 | mark_reg_unknown(env, regs, BPF_REG_0); |
| 11548 | break; |
| 11549 | case RET_VOID: |
| 11550 | regs[BPF_REG_0].type = NOT_INIT; |
| 11551 | break; |
| 11552 | case RET_PTR_TO_MAP_VALUE: |
| 11553 | /* There is no offset yet applied, variable or fixed */ |
| 11554 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 11555 | /* remember map_ptr, so that check_map_access() |
| 11556 | * can check 'value_size' boundary of memory access |
| 11557 | * to map element returned from bpf_map_lookup_elem() |
| 11558 | */ |
| 11559 | if (meta.map_ptr == NULL) { |
| 11560 | verbose(env, |
| 11561 | "kernel subsystem misconfigured verifier\n"); |
| 11562 | return -EINVAL; |
| 11563 | } |
| 11564 | |
| 11565 | if (func_id == BPF_FUNC_map_lookup_elem && |
| 11566 | can_elide_value_nullness(meta.map_ptr->map_type) && |
| 11567 | meta.const_map_key >= 0 && |
| 11568 | meta.const_map_key < meta.map_ptr->max_entries) |
| 11569 | ret_flag &= ~PTR_MAYBE_NULL; |
| 11570 | |
| 11571 | regs[BPF_REG_0].map_ptr = meta.map_ptr; |
| 11572 | regs[BPF_REG_0].map_uid = meta.map_uid; |
| 11573 | regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag; |
| 11574 | if (!type_may_be_null(ret_flag) && |
| 11575 | btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK)) { |
| 11576 | regs[BPF_REG_0].id = ++env->id_gen; |
| 11577 | } |
| 11578 | break; |
| 11579 | case RET_PTR_TO_SOCKET: |
| 11580 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 11581 | regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag; |
| 11582 | break; |
| 11583 | case RET_PTR_TO_SOCK_COMMON: |
| 11584 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 11585 | regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag; |
| 11586 | break; |
| 11587 | case RET_PTR_TO_TCP_SOCK: |
| 11588 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 11589 | regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag; |
| 11590 | break; |
| 11591 | case RET_PTR_TO_MEM: |
| 11592 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 11593 | regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; |
| 11594 | regs[BPF_REG_0].mem_size = meta.mem_size; |
| 11595 | break; |
| 11596 | case RET_PTR_TO_MEM_OR_BTF_ID: |
| 11597 | { |
| 11598 | const struct btf_type *t; |
| 11599 | |
| 11600 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 11601 | t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL); |
| 11602 | if (!btf_type_is_struct(t)) { |
| 11603 | u32 tsize; |
| 11604 | const struct btf_type *ret; |
| 11605 | const char *tname; |
| 11606 | |
| 11607 | /* resolve the type size of ksym. */ |
| 11608 | ret = btf_resolve_size(meta.ret_btf, t, &tsize); |
| 11609 | if (IS_ERR(ret)) { |
| 11610 | tname = btf_name_by_offset(meta.ret_btf, t->name_off); |
| 11611 | verbose(env, "unable to resolve the size of type '%s': %ld\n", |
| 11612 | tname, PTR_ERR(ret)); |
| 11613 | return -EINVAL; |
| 11614 | } |
| 11615 | regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; |
| 11616 | regs[BPF_REG_0].mem_size = tsize; |
| 11617 | } else { |
| 11618 | if (returns_cpu_specific_alloc_ptr) { |
| 11619 | regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC | MEM_RCU; |
| 11620 | } else { |
| 11621 | /* MEM_RDONLY may be carried from ret_flag, but it |
| 11622 | * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise |
| 11623 | * it will confuse the check of PTR_TO_BTF_ID in |
| 11624 | * check_mem_access(). |
| 11625 | */ |
| 11626 | ret_flag &= ~MEM_RDONLY; |
| 11627 | regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; |
| 11628 | } |
| 11629 | |
| 11630 | regs[BPF_REG_0].btf = meta.ret_btf; |
| 11631 | regs[BPF_REG_0].btf_id = meta.ret_btf_id; |
| 11632 | } |
| 11633 | break; |
| 11634 | } |
| 11635 | case RET_PTR_TO_BTF_ID: |
| 11636 | { |
| 11637 | struct btf *ret_btf; |
| 11638 | int ret_btf_id; |
| 11639 | |
| 11640 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 11641 | regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; |
| 11642 | if (func_id == BPF_FUNC_kptr_xchg) { |
| 11643 | ret_btf = meta.kptr_field->kptr.btf; |
| 11644 | ret_btf_id = meta.kptr_field->kptr.btf_id; |
| 11645 | if (!btf_is_kernel(ret_btf)) { |
| 11646 | regs[BPF_REG_0].type |= MEM_ALLOC; |
| 11647 | if (meta.kptr_field->type == BPF_KPTR_PERCPU) |
| 11648 | regs[BPF_REG_0].type |= MEM_PERCPU; |
| 11649 | } |
| 11650 | } else { |
| 11651 | if (fn->ret_btf_id == BPF_PTR_POISON) { |
| 11652 | verbose(env, "verifier internal error:"); |
| 11653 | verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n", |
| 11654 | func_id_name(func_id)); |
| 11655 | return -EINVAL; |
| 11656 | } |
| 11657 | ret_btf = btf_vmlinux; |
| 11658 | ret_btf_id = *fn->ret_btf_id; |
| 11659 | } |
| 11660 | if (ret_btf_id == 0) { |
| 11661 | verbose(env, "invalid return type %u of func %s#%d\n", |
| 11662 | base_type(ret_type), func_id_name(func_id), |
| 11663 | func_id); |
| 11664 | return -EINVAL; |
| 11665 | } |
| 11666 | regs[BPF_REG_0].btf = ret_btf; |
| 11667 | regs[BPF_REG_0].btf_id = ret_btf_id; |
| 11668 | break; |
| 11669 | } |
| 11670 | default: |
| 11671 | verbose(env, "unknown return type %u of func %s#%d\n", |
| 11672 | base_type(ret_type), func_id_name(func_id), func_id); |
| 11673 | return -EINVAL; |
| 11674 | } |
| 11675 | |
| 11676 | if (type_may_be_null(regs[BPF_REG_0].type)) |
| 11677 | regs[BPF_REG_0].id = ++env->id_gen; |
| 11678 | |
| 11679 | if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) { |
| 11680 | verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n", |
| 11681 | func_id_name(func_id), func_id); |
| 11682 | return -EFAULT; |
| 11683 | } |
| 11684 | |
| 11685 | if (is_dynptr_ref_function(func_id)) |
| 11686 | regs[BPF_REG_0].dynptr_id = meta.dynptr_id; |
| 11687 | |
| 11688 | if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) { |
| 11689 | /* For release_reference() */ |
| 11690 | regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; |
| 11691 | } else if (is_acquire_function(func_id, meta.map_ptr)) { |
| 11692 | int id = acquire_reference(env, insn_idx); |
| 11693 | |
| 11694 | if (id < 0) |
| 11695 | return id; |
| 11696 | /* For mark_ptr_or_null_reg() */ |
| 11697 | regs[BPF_REG_0].id = id; |
| 11698 | /* For release_reference() */ |
| 11699 | regs[BPF_REG_0].ref_obj_id = id; |
| 11700 | } |
| 11701 | |
| 11702 | err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta); |
| 11703 | if (err) |
| 11704 | return err; |
| 11705 | |
| 11706 | err = check_map_func_compatibility(env, meta.map_ptr, func_id); |
| 11707 | if (err) |
| 11708 | return err; |
| 11709 | |
| 11710 | if ((func_id == BPF_FUNC_get_stack || |
| 11711 | func_id == BPF_FUNC_get_task_stack) && |
| 11712 | !env->prog->has_callchain_buf) { |
| 11713 | const char *err_str; |
| 11714 | |
| 11715 | #ifdef CONFIG_PERF_EVENTS |
| 11716 | err = get_callchain_buffers(sysctl_perf_event_max_stack); |
| 11717 | err_str = "cannot get callchain buffer for func %s#%d\n"; |
| 11718 | #else |
| 11719 | err = -ENOTSUPP; |
| 11720 | err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; |
| 11721 | #endif |
| 11722 | if (err) { |
| 11723 | verbose(env, err_str, func_id_name(func_id), func_id); |
| 11724 | return err; |
| 11725 | } |
| 11726 | |
| 11727 | env->prog->has_callchain_buf = true; |
| 11728 | } |
| 11729 | |
| 11730 | if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) |
| 11731 | env->prog->call_get_stack = true; |
| 11732 | |
| 11733 | if (func_id == BPF_FUNC_get_func_ip) { |
| 11734 | if (check_get_func_ip(env)) |
| 11735 | return -ENOTSUPP; |
| 11736 | env->prog->call_get_func_ip = true; |
| 11737 | } |
| 11738 | |
| 11739 | if (changes_data) |
| 11740 | clear_all_pkt_pointers(env); |
| 11741 | return 0; |
| 11742 | } |
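| | |
| | /* Sketch of what the return-type handling above means on the program side |
| | * (illustrative; 'my_map', 'val' and 'counter' are made-up names): |
| | * bpf_map_lookup_elem() uses RET_PTR_TO_MAP_VALUE with PTR_MAYBE_NULL, so R0 |
| | * gets a fresh id and must be NULL-checked before it can be dereferenced: |
| | * |
| | *   struct val *v = bpf_map_lookup_elem(&my_map, &key); |
| | * |
| | *   if (!v) |
| | *           return 0; |
| | *   v->counter++;   <- allowed only after the NULL check |
| | */ |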
| 11743 | |
| 11744 | /* mark_btf_func_reg_size() is used when the reg size is determined by |
| 11745 | * the size of the BTF func_proto's return value or argument. |
| 11746 | */ |
| 11747 | static void __mark_btf_func_reg_size(struct bpf_verifier_env *env, struct bpf_reg_state *regs, |
| 11748 | u32 regno, size_t reg_size) |
| 11749 | { |
| 11750 | struct bpf_reg_state *reg = ®s[regno]; |
| 11751 | |
| 11752 | if (regno == BPF_REG_0) { |
| 11753 | /* Function return value */ |
| 11754 | reg->live |= REG_LIVE_WRITTEN; |
| 11755 | reg->subreg_def = reg_size == sizeof(u64) ? |
| 11756 | DEF_NOT_SUBREG : env->insn_idx + 1; |
| 11757 | } else { |
| 11758 | /* Function argument */ |
| 11759 | if (reg_size == sizeof(u64)) { |
| 11760 | mark_insn_zext(env, reg); |
| 11761 | mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); |
| 11762 | } else { |
| 11763 | mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32); |
| 11764 | } |
| 11765 | } |
| 11766 | } |
| 11767 | |
| 11768 | static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno, |
| 11769 | size_t reg_size) |
| 11770 | { |
| 11771 | return __mark_btf_func_reg_size(env, cur_regs(env), regno, reg_size); |
| 11772 | } |
| 11773 | |
| 11774 | static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta) |
| 11775 | { |
| 11776 | return meta->kfunc_flags & KF_ACQUIRE; |
| 11777 | } |
| 11778 | |
| 11779 | static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta) |
| 11780 | { |
| 11781 | return meta->kfunc_flags & KF_RELEASE; |
| 11782 | } |
| 11783 | |
| 11784 | static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta) |
| 11785 | { |
| 11786 | return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta); |
| 11787 | } |
| 11788 | |
| 11789 | static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta) |
| 11790 | { |
| 11791 | return meta->kfunc_flags & KF_SLEEPABLE; |
| 11792 | } |
| 11793 | |
| 11794 | static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta) |
| 11795 | { |
| 11796 | return meta->kfunc_flags & KF_DESTRUCTIVE; |
| 11797 | } |
| 11798 | |
| 11799 | static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta) |
| 11800 | { |
| 11801 | return meta->kfunc_flags & KF_RCU; |
| 11802 | } |
| 11803 | |
| 11804 | static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta) |
| 11805 | { |
| 11806 | return meta->kfunc_flags & KF_RCU_PROTECTED; |
| 11807 | } |
| 11808 | |
| 11809 | static bool is_kfunc_arg_mem_size(const struct btf *btf, |
| 11810 | const struct btf_param *arg, |
| 11811 | const struct bpf_reg_state *reg) |
| 11812 | { |
| 11813 | const struct btf_type *t; |
| 11814 | |
| 11815 | t = btf_type_skip_modifiers(btf, arg->type, NULL); |
| 11816 | if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) |
| 11817 | return false; |
| 11818 | |
| 11819 | return btf_param_match_suffix(btf, arg, "__sz"); |
| 11820 | } |
| 11821 | |
| 11822 | static bool is_kfunc_arg_const_mem_size(const struct btf *btf, |
| 11823 | const struct btf_param *arg, |
| 11824 | const struct bpf_reg_state *reg) |
| 11825 | { |
| 11826 | const struct btf_type *t; |
| 11827 | |
| 11828 | t = btf_type_skip_modifiers(btf, arg->type, NULL); |
| 11829 | if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) |
| 11830 | return false; |
| 11831 | |
| 11832 | return btf_param_match_suffix(btf, arg, "__szk"); |
| 11833 | } |
| 11834 | |
| 11835 | static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg) |
| 11836 | { |
| 11837 | return btf_param_match_suffix(btf, arg, "__opt"); |
| 11838 | } |
| 11839 | |
| 11840 | static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg) |
| 11841 | { |
| 11842 | return btf_param_match_suffix(btf, arg, "__k"); |
| 11843 | } |
| 11844 | |
| 11845 | static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg) |
| 11846 | { |
| 11847 | return btf_param_match_suffix(btf, arg, "__ign"); |
| 11848 | } |
| 11849 | |
| 11850 | static bool is_kfunc_arg_map(const struct btf *btf, const struct btf_param *arg) |
| 11851 | { |
| 11852 | return btf_param_match_suffix(btf, arg, "__map"); |
| 11853 | } |
| 11854 | |
| 11855 | static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg) |
| 11856 | { |
| 11857 | return btf_param_match_suffix(btf, arg, "__alloc"); |
| 11858 | } |
| 11859 | |
| 11860 | static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg) |
| 11861 | { |
| 11862 | return btf_param_match_suffix(btf, arg, "__uninit"); |
| 11863 | } |
| 11864 | |
| 11865 | static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg) |
| 11866 | { |
| 11867 | return btf_param_match_suffix(btf, arg, "__refcounted_kptr"); |
| 11868 | } |
| 11869 | |
| 11870 | static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param *arg) |
| 11871 | { |
| 11872 | return btf_param_match_suffix(btf, arg, "__nullable"); |
| 11873 | } |
| 11874 | |
| 11875 | static bool is_kfunc_arg_const_str(const struct btf *btf, const struct btf_param *arg) |
| 11876 | { |
| 11877 | return btf_param_match_suffix(btf, arg, "__str"); |
| 11878 | } |
| 11879 | |
| 11880 | static bool is_kfunc_arg_irq_flag(const struct btf *btf, const struct btf_param *arg) |
| 11881 | { |
| 11882 | return btf_param_match_suffix(btf, arg, "__irq_flag"); |
| 11883 | } |
| 11884 | |
| 11885 | static bool is_kfunc_arg_prog(const struct btf *btf, const struct btf_param *arg) |
| 11886 | { |
| 11887 | return btf_param_match_suffix(btf, arg, "__prog"); |
| 11888 | } |
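| | |
| | /* Illustrative sketch, not a real kfunc: the helpers above key off |
| | * double-underscore suffixes in the BTF parameter names of a kfunc |
| | * prototype. A hypothetical kfunc using several of the annotations could |
| | * be declared as: |
| | * |
| | *   __bpf_kfunc int bpf_example_fill(void *buf__opt, u32 buf__szk, |
| | *                                    const char *fmt__str, u64 flags__k); |
| | * |
| | * meaning buf may be NULL, buf__szk must be a known-constant size for buf, |
| | * fmt must be a constant string and flags must be a known constant scalar. |
| | * Only the suffixes carry meaning to the verifier; the names before them |
| | * are arbitrary. |
| | */ |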
| 11889 | |
| 11890 | static bool is_kfunc_arg_scalar_with_name(const struct btf *btf, |
| 11891 | const struct btf_param *arg, |
| 11892 | const char *name) |
| 11893 | { |
| 11894 | int len, target_len = strlen(name); |
| 11895 | const char *param_name; |
| 11896 | |
| 11897 | param_name = btf_name_by_offset(btf, arg->name_off); |
| 11898 | if (str_is_empty(param_name)) |
| 11899 | return false; |
| 11900 | len = strlen(param_name); |
| 11901 | if (len != target_len) |
| 11902 | return false; |
| 11903 | if (strcmp(param_name, name)) |
| 11904 | return false; |
| 11905 | |
| 11906 | return true; |
| 11907 | } |
| 11908 | |
| 11909 | enum { |
| 11910 | KF_ARG_DYNPTR_ID, |
| 11911 | KF_ARG_LIST_HEAD_ID, |
| 11912 | KF_ARG_LIST_NODE_ID, |
| 11913 | KF_ARG_RB_ROOT_ID, |
| 11914 | KF_ARG_RB_NODE_ID, |
| 11915 | KF_ARG_WORKQUEUE_ID, |
| 11916 | KF_ARG_RES_SPIN_LOCK_ID, |
| 11917 | }; |
| 11918 | |
| 11919 | BTF_ID_LIST(kf_arg_btf_ids) |
| 11920 | BTF_ID(struct, bpf_dynptr) |
| 11921 | BTF_ID(struct, bpf_list_head) |
| 11922 | BTF_ID(struct, bpf_list_node) |
| 11923 | BTF_ID(struct, bpf_rb_root) |
| 11924 | BTF_ID(struct, bpf_rb_node) |
| 11925 | BTF_ID(struct, bpf_wq) |
| 11926 | BTF_ID(struct, bpf_res_spin_lock) |
| 11927 | |
| 11928 | static bool __is_kfunc_ptr_arg_type(const struct btf *btf, |
| 11929 | const struct btf_param *arg, int type) |
| 11930 | { |
| 11931 | const struct btf_type *t; |
| 11932 | u32 res_id; |
| 11933 | |
| 11934 | t = btf_type_skip_modifiers(btf, arg->type, NULL); |
| 11935 | if (!t) |
| 11936 | return false; |
| 11937 | if (!btf_type_is_ptr(t)) |
| 11938 | return false; |
| 11939 | t = btf_type_skip_modifiers(btf, t->type, &res_id); |
| 11940 | if (!t) |
| 11941 | return false; |
| 11942 | return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]); |
| 11943 | } |
| 11944 | |
| 11945 | static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg) |
| 11946 | { |
| 11947 | return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID); |
| 11948 | } |
| 11949 | |
| 11950 | static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg) |
| 11951 | { |
| 11952 | return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID); |
| 11953 | } |
| 11954 | |
| 11955 | static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg) |
| 11956 | { |
| 11957 | return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID); |
| 11958 | } |
| 11959 | |
| 11960 | static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg) |
| 11961 | { |
| 11962 | return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID); |
| 11963 | } |
| 11964 | |
| 11965 | static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg) |
| 11966 | { |
| 11967 | return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID); |
| 11968 | } |
| 11969 | |
| 11970 | static bool is_kfunc_arg_wq(const struct btf *btf, const struct btf_param *arg) |
| 11971 | { |
| 11972 | return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_WORKQUEUE_ID); |
| 11973 | } |
| 11974 | |
| 11975 | static bool is_kfunc_arg_res_spin_lock(const struct btf *btf, const struct btf_param *arg) |
| 11976 | { |
| 11977 | return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RES_SPIN_LOCK_ID); |
| 11978 | } |
| 11979 | |
| 11980 | static bool is_rbtree_node_type(const struct btf_type *t) |
| 11981 | { |
| 11982 | return t == btf_type_by_id(btf_vmlinux, kf_arg_btf_ids[KF_ARG_RB_NODE_ID]); |
| 11983 | } |
| 11984 | |
| 11985 | static bool is_list_node_type(const struct btf_type *t) |
| 11986 | { |
| 11987 | return t == btf_type_by_id(btf_vmlinux, kf_arg_btf_ids[KF_ARG_LIST_NODE_ID]); |
| 11988 | } |
| 11989 | |
| 11990 | static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf, |
| 11991 | const struct btf_param *arg) |
| 11992 | { |
| 11993 | const struct btf_type *t; |
| 11994 | |
| 11995 | t = btf_type_resolve_func_ptr(btf, arg->type, NULL); |
| 11996 | if (!t) |
| 11997 | return false; |
| 11998 | |
| 11999 | return true; |
| 12000 | } |
| 12001 | |
| 12002 | /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */ |
| 12003 | static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env, |
| 12004 | const struct btf *btf, |
| 12005 | const struct btf_type *t, int rec) |
| 12006 | { |
| 12007 | const struct btf_type *member_type; |
| 12008 | const struct btf_member *member; |
| 12009 | u32 i; |
| 12010 | |
| 12011 | if (!btf_type_is_struct(t)) |
| 12012 | return false; |
| 12013 | |
| 12014 | for_each_member(i, t, member) { |
| 12015 | const struct btf_array *array; |
| 12016 | |
| 12017 | member_type = btf_type_skip_modifiers(btf, member->type, NULL); |
| 12018 | if (btf_type_is_struct(member_type)) { |
| 12019 | if (rec >= 3) { |
| 12020 | verbose(env, "max struct nesting depth exceeded\n"); |
| 12021 | return false; |
| 12022 | } |
| 12023 | if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1)) |
| 12024 | return false; |
| 12025 | continue; |
| 12026 | } |
| 12027 | if (btf_type_is_array(member_type)) { |
| 12028 | array = btf_array(member_type); |
| 12029 | if (!array->nelems) |
| 12030 | return false; |
| 12031 | member_type = btf_type_skip_modifiers(btf, array->type, NULL); |
| 12032 | if (!btf_type_is_scalar(member_type)) |
| 12033 | return false; |
| 12034 | continue; |
| 12035 | } |
| 12036 | if (!btf_type_is_scalar(member_type)) |
| 12037 | return false; |
| 12038 | } |
| 12039 | return true; |
| 12040 | } |
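| | |
| | /* For illustration only (struct names are made up): with the check above, |
| | * |
| | *   struct scalars_ok { u32 a; struct { u16 b[4]; u8 c; } inner; }; |
| | * |
| | * is accepted (scalars and arrays of scalars, nesting below the limit), |
| | * while |
| | * |
| | *   struct has_ptr { u32 a; void *p; }; |
| | * |
| | * is rejected because of the pointer member. |
| | */ |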
| 12041 | |
| 12042 | enum kfunc_ptr_arg_type { |
| 12043 | KF_ARG_PTR_TO_CTX, |
| 12044 | KF_ARG_PTR_TO_ALLOC_BTF_ID, /* Allocated object */ |
| 12045 | KF_ARG_PTR_TO_REFCOUNTED_KPTR, /* Refcounted local kptr */ |
| 12046 | KF_ARG_PTR_TO_DYNPTR, |
| 12047 | KF_ARG_PTR_TO_ITER, |
| 12048 | KF_ARG_PTR_TO_LIST_HEAD, |
| 12049 | KF_ARG_PTR_TO_LIST_NODE, |
| 12050 | KF_ARG_PTR_TO_BTF_ID, /* Also covers reg2btf_ids conversions */ |
| 12051 | KF_ARG_PTR_TO_MEM, |
| 12052 | KF_ARG_PTR_TO_MEM_SIZE, /* Size derived from next argument, skip it */ |
| 12053 | KF_ARG_PTR_TO_CALLBACK, |
| 12054 | KF_ARG_PTR_TO_RB_ROOT, |
| 12055 | KF_ARG_PTR_TO_RB_NODE, |
| 12056 | KF_ARG_PTR_TO_NULL, |
| 12057 | KF_ARG_PTR_TO_CONST_STR, |
| 12058 | KF_ARG_PTR_TO_MAP, |
| 12059 | KF_ARG_PTR_TO_WORKQUEUE, |
| 12060 | KF_ARG_PTR_TO_IRQ_FLAG, |
| 12061 | KF_ARG_PTR_TO_RES_SPIN_LOCK, |
| 12062 | }; |
| 12063 | |
| 12064 | enum special_kfunc_type { |
| 12065 | KF_bpf_obj_new_impl, |
| 12066 | KF_bpf_obj_drop_impl, |
| 12067 | KF_bpf_refcount_acquire_impl, |
| 12068 | KF_bpf_list_push_front_impl, |
| 12069 | KF_bpf_list_push_back_impl, |
| 12070 | KF_bpf_list_pop_front, |
| 12071 | KF_bpf_list_pop_back, |
| 12072 | KF_bpf_list_front, |
| 12073 | KF_bpf_list_back, |
| 12074 | KF_bpf_cast_to_kern_ctx, |
| 12075 | KF_bpf_rdonly_cast, |
| 12076 | KF_bpf_rcu_read_lock, |
| 12077 | KF_bpf_rcu_read_unlock, |
| 12078 | KF_bpf_rbtree_remove, |
| 12079 | KF_bpf_rbtree_add_impl, |
| 12080 | KF_bpf_rbtree_first, |
| 12081 | KF_bpf_rbtree_root, |
| 12082 | KF_bpf_rbtree_left, |
| 12083 | KF_bpf_rbtree_right, |
| 12084 | KF_bpf_dynptr_from_skb, |
| 12085 | KF_bpf_dynptr_from_xdp, |
| 12086 | KF_bpf_dynptr_slice, |
| 12087 | KF_bpf_dynptr_slice_rdwr, |
| 12088 | KF_bpf_dynptr_clone, |
| 12089 | KF_bpf_percpu_obj_new_impl, |
| 12090 | KF_bpf_percpu_obj_drop_impl, |
| 12091 | KF_bpf_throw, |
| 12092 | KF_bpf_wq_set_callback_impl, |
| 12093 | KF_bpf_preempt_disable, |
| 12094 | KF_bpf_preempt_enable, |
| 12095 | KF_bpf_iter_css_task_new, |
| 12096 | KF_bpf_session_cookie, |
| 12097 | KF_bpf_get_kmem_cache, |
| 12098 | KF_bpf_local_irq_save, |
| 12099 | KF_bpf_local_irq_restore, |
| 12100 | KF_bpf_iter_num_new, |
| 12101 | KF_bpf_iter_num_next, |
| 12102 | KF_bpf_iter_num_destroy, |
| 12103 | KF_bpf_set_dentry_xattr, |
| 12104 | KF_bpf_remove_dentry_xattr, |
| 12105 | KF_bpf_res_spin_lock, |
| 12106 | KF_bpf_res_spin_unlock, |
| 12107 | KF_bpf_res_spin_lock_irqsave, |
| 12108 | KF_bpf_res_spin_unlock_irqrestore, |
| 12109 | KF___bpf_trap, |
| 12110 | }; |
| 12111 | |
| 12112 | BTF_ID_LIST(special_kfunc_list) |
| 12113 | BTF_ID(func, bpf_obj_new_impl) |
| 12114 | BTF_ID(func, bpf_obj_drop_impl) |
| 12115 | BTF_ID(func, bpf_refcount_acquire_impl) |
| 12116 | BTF_ID(func, bpf_list_push_front_impl) |
| 12117 | BTF_ID(func, bpf_list_push_back_impl) |
| 12118 | BTF_ID(func, bpf_list_pop_front) |
| 12119 | BTF_ID(func, bpf_list_pop_back) |
| 12120 | BTF_ID(func, bpf_list_front) |
| 12121 | BTF_ID(func, bpf_list_back) |
| 12122 | BTF_ID(func, bpf_cast_to_kern_ctx) |
| 12123 | BTF_ID(func, bpf_rdonly_cast) |
| 12124 | BTF_ID(func, bpf_rcu_read_lock) |
| 12125 | BTF_ID(func, bpf_rcu_read_unlock) |
| 12126 | BTF_ID(func, bpf_rbtree_remove) |
| 12127 | BTF_ID(func, bpf_rbtree_add_impl) |
| 12128 | BTF_ID(func, bpf_rbtree_first) |
| 12129 | BTF_ID(func, bpf_rbtree_root) |
| 12130 | BTF_ID(func, bpf_rbtree_left) |
| 12131 | BTF_ID(func, bpf_rbtree_right) |
| 12132 | #ifdef CONFIG_NET |
| 12133 | BTF_ID(func, bpf_dynptr_from_skb) |
| 12134 | BTF_ID(func, bpf_dynptr_from_xdp) |
| 12135 | #else |
| 12136 | BTF_ID_UNUSED |
| 12137 | BTF_ID_UNUSED |
| 12138 | #endif |
| 12139 | BTF_ID(func, bpf_dynptr_slice) |
| 12140 | BTF_ID(func, bpf_dynptr_slice_rdwr) |
| 12141 | BTF_ID(func, bpf_dynptr_clone) |
| 12142 | BTF_ID(func, bpf_percpu_obj_new_impl) |
| 12143 | BTF_ID(func, bpf_percpu_obj_drop_impl) |
| 12144 | BTF_ID(func, bpf_throw) |
| 12145 | BTF_ID(func, bpf_wq_set_callback_impl) |
| 12146 | BTF_ID(func, bpf_preempt_disable) |
| 12147 | BTF_ID(func, bpf_preempt_enable) |
| 12148 | #ifdef CONFIG_CGROUPS |
| 12149 | BTF_ID(func, bpf_iter_css_task_new) |
| 12150 | #else |
| 12151 | BTF_ID_UNUSED |
| 12152 | #endif |
| 12153 | #ifdef CONFIG_BPF_EVENTS |
| 12154 | BTF_ID(func, bpf_session_cookie) |
| 12155 | #else |
| 12156 | BTF_ID_UNUSED |
| 12157 | #endif |
| 12158 | BTF_ID(func, bpf_get_kmem_cache) |
| 12159 | BTF_ID(func, bpf_local_irq_save) |
| 12160 | BTF_ID(func, bpf_local_irq_restore) |
| 12161 | BTF_ID(func, bpf_iter_num_new) |
| 12162 | BTF_ID(func, bpf_iter_num_next) |
| 12163 | BTF_ID(func, bpf_iter_num_destroy) |
| 12164 | #ifdef CONFIG_BPF_LSM |
| 12165 | BTF_ID(func, bpf_set_dentry_xattr) |
| 12166 | BTF_ID(func, bpf_remove_dentry_xattr) |
| 12167 | #else |
| 12168 | BTF_ID_UNUSED |
| 12169 | BTF_ID_UNUSED |
| 12170 | #endif |
| 12171 | BTF_ID(func, bpf_res_spin_lock) |
| 12172 | BTF_ID(func, bpf_res_spin_unlock) |
| 12173 | BTF_ID(func, bpf_res_spin_lock_irqsave) |
| 12174 | BTF_ID(func, bpf_res_spin_unlock_irqrestore) |
| 12175 | BTF_ID(func, __bpf_trap) |
| 12176 | |
| 12177 | static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta) |
| 12178 | { |
| 12179 | if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && |
| 12180 | meta->arg_owning_ref) { |
| 12181 | return false; |
| 12182 | } |
| 12183 | |
| 12184 | return meta->kfunc_flags & KF_RET_NULL; |
| 12185 | } |
| 12186 | |
| 12187 | static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta) |
| 12188 | { |
| 12189 | return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock]; |
| 12190 | } |
| 12191 | |
| 12192 | static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta) |
| 12193 | { |
| 12194 | return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock]; |
| 12195 | } |
| 12196 | |
| 12197 | static bool is_kfunc_bpf_preempt_disable(struct bpf_kfunc_call_arg_meta *meta) |
| 12198 | { |
| 12199 | return meta->func_id == special_kfunc_list[KF_bpf_preempt_disable]; |
| 12200 | } |
| 12201 | |
| 12202 | static bool is_kfunc_bpf_preempt_enable(struct bpf_kfunc_call_arg_meta *meta) |
| 12203 | { |
| 12204 | return meta->func_id == special_kfunc_list[KF_bpf_preempt_enable]; |
| 12205 | } |
| 12206 | |
| 12207 | static enum kfunc_ptr_arg_type |
| 12208 | get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, |
| 12209 | struct bpf_kfunc_call_arg_meta *meta, |
| 12210 | const struct btf_type *t, const struct btf_type *ref_t, |
| 12211 | const char *ref_tname, const struct btf_param *args, |
| 12212 | int argno, int nargs) |
| 12213 | { |
| 12214 | u32 regno = argno + 1; |
| 12215 | struct bpf_reg_state *regs = cur_regs(env); |
| 12216 | struct bpf_reg_state *reg = ®s[regno]; |
| 12217 | bool arg_mem_size = false; |
| 12218 | |
| 12219 | if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) |
| 12220 | return KF_ARG_PTR_TO_CTX; |
| 12221 | |
| 12222 | /* In this function, we verify the kfunc's BTF as per the argument type, |
| 12223 | * leaving the rest of the verification with respect to the register |
| 12224 | * type to our caller. When a set of conditions hold in the BTF type of |
| 12225 | * arguments, we resolve it to a known kfunc_ptr_arg_type. |
| 12226 | */ |
| 12227 | if (btf_is_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno)) |
| 12228 | return KF_ARG_PTR_TO_CTX; |
| 12229 | |
| 12230 | if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg)) |
| 12231 | return KF_ARG_PTR_TO_NULL; |
| 12232 | |
| 12233 | if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno])) |
| 12234 | return KF_ARG_PTR_TO_ALLOC_BTF_ID; |
| 12235 | |
| 12236 | if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno])) |
| 12237 | return KF_ARG_PTR_TO_REFCOUNTED_KPTR; |
| 12238 | |
| 12239 | if (is_kfunc_arg_dynptr(meta->btf, &args[argno])) |
| 12240 | return KF_ARG_PTR_TO_DYNPTR; |
| 12241 | |
| 12242 | if (is_kfunc_arg_iter(meta, argno, &args[argno])) |
| 12243 | return KF_ARG_PTR_TO_ITER; |
| 12244 | |
| 12245 | if (is_kfunc_arg_list_head(meta->btf, &args[argno])) |
| 12246 | return KF_ARG_PTR_TO_LIST_HEAD; |
| 12247 | |
| 12248 | if (is_kfunc_arg_list_node(meta->btf, &args[argno])) |
| 12249 | return KF_ARG_PTR_TO_LIST_NODE; |
| 12250 | |
| 12251 | if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno])) |
| 12252 | return KF_ARG_PTR_TO_RB_ROOT; |
| 12253 | |
| 12254 | if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno])) |
| 12255 | return KF_ARG_PTR_TO_RB_NODE; |
| 12256 | |
| 12257 | if (is_kfunc_arg_const_str(meta->btf, &args[argno])) |
| 12258 | return KF_ARG_PTR_TO_CONST_STR; |
| 12259 | |
| 12260 | if (is_kfunc_arg_map(meta->btf, &args[argno])) |
| 12261 | return KF_ARG_PTR_TO_MAP; |
| 12262 | |
| 12263 | if (is_kfunc_arg_wq(meta->btf, &args[argno])) |
| 12264 | return KF_ARG_PTR_TO_WORKQUEUE; |
| 12265 | |
| 12266 | if (is_kfunc_arg_irq_flag(meta->btf, &args[argno])) |
| 12267 | return KF_ARG_PTR_TO_IRQ_FLAG; |
| 12268 | |
| 12269 | if (is_kfunc_arg_res_spin_lock(meta->btf, &args[argno])) |
| 12270 | return KF_ARG_PTR_TO_RES_SPIN_LOCK; |
| 12271 | |
| 12272 | if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) { |
| 12273 | if (!btf_type_is_struct(ref_t)) { |
| 12274 | verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n", |
| 12275 | meta->func_name, argno, btf_type_str(ref_t), ref_tname); |
| 12276 | return -EINVAL; |
| 12277 | } |
| 12278 | return KF_ARG_PTR_TO_BTF_ID; |
| 12279 | } |
| 12280 | |
| 12281 | if (is_kfunc_arg_callback(env, meta->btf, &args[argno])) |
| 12282 | return KF_ARG_PTR_TO_CALLBACK; |
| 12283 | |
| 12284 | if (argno + 1 < nargs && |
| 12285 | (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], ®s[regno + 1]) || |
| 12286 | is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], ®s[regno + 1]))) |
| 12287 | arg_mem_size = true; |
| 12288 | |
| 12289 | /* This is the catch-all argument type for register types supported by |
| 12290 | * check_helper_mem_access. However, we only allow it when the argument type |
| 12291 | * is a pointer to a scalar, or to a struct composed (recursively) of |
| 12292 | * scalars. When arg_mem_size is true, the pointer can also be void *. |
| 12293 | */ |
| 12294 | if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) && |
| 12295 | (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) { |
| 12296 | verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n", |
| 12297 | argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : ""); |
| 12298 | return -EINVAL; |
| 12299 | } |
| 12300 | return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM; |
| 12301 | } |
| 12302 | |
| 12303 | static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, |
| 12304 | struct bpf_reg_state *reg, |
| 12305 | const struct btf_type *ref_t, |
| 12306 | const char *ref_tname, u32 ref_id, |
| 12307 | struct bpf_kfunc_call_arg_meta *meta, |
| 12308 | int argno) |
| 12309 | { |
| 12310 | const struct btf_type *reg_ref_t; |
| 12311 | bool strict_type_match = false; |
| 12312 | const struct btf *reg_btf; |
| 12313 | const char *reg_ref_tname; |
| 12314 | bool taking_projection; |
| 12315 | bool struct_same; |
| 12316 | u32 reg_ref_id; |
| 12317 | |
| 12318 | if (base_type(reg->type) == PTR_TO_BTF_ID) { |
| 12319 | reg_btf = reg->btf; |
| 12320 | reg_ref_id = reg->btf_id; |
| 12321 | } else { |
| 12322 | reg_btf = btf_vmlinux; |
| 12323 | reg_ref_id = *reg2btf_ids[base_type(reg->type)]; |
| 12324 | } |
| 12325 | |
| 12326 | /* Enforce strict type matching for calls to kfuncs that are acquiring |
| 12327 | * or releasing a reference, or are no-cast aliases. We do _not_ |
| 12328 | * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default, |
| 12329 | * as we want to enable BPF programs to pass types that are bitwise |
| 12330 | * equivalent without forcing them to explicitly cast with something |
| 12331 | * like bpf_cast_to_kern_ctx(). |
| 12332 | * |
| 12333 | * For example, say we had a type like the following: |
| 12334 | * |
| 12335 | * struct bpf_cpumask { |
| 12336 | * cpumask_t cpumask; |
| 12337 | * refcount_t usage; |
| 12338 | * }; |
| 12339 | * |
| 12340 | * Note that as specified in <linux/cpumask.h>, cpumask_t is typedef'ed |
| 12341 | * to a struct cpumask, so it would be safe to pass a struct |
| 12342 | * bpf_cpumask * to a kfunc expecting a struct cpumask *. |
| 12343 | * |
| 12344 | * The philosophy here is similar to how we allow scalars of different |
| 12345 | * types to be passed to kfuncs as long as the size is the same. The |
| 12346 | * only difference here is that we're simply allowing |
| 12347 | * btf_struct_ids_match() to walk the struct at the 0th offset, and |
| 12348 | * resolve types. |
| 12349 | */ |
| 12350 | if ((is_kfunc_release(meta) && reg->ref_obj_id) || |
| 12351 | btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id)) |
| 12352 | strict_type_match = true; |
| 12353 | |
| 12354 | WARN_ON_ONCE(is_kfunc_release(meta) && |
| 12355 | (reg->off || !tnum_is_const(reg->var_off) || |
| 12356 | reg->var_off.value)); |
| 12357 | |
| 12358 | reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, ®_ref_id); |
| 12359 | reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off); |
| 12360 | struct_same = btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match); |
| 12361 | /* If the kfunc accepts a projection type (i.e. __sk_buff), it cannot |
| 12362 | * actually use it -- it must cast to the underlying type. So we allow the |
| 12363 | * caller to pass in the underlying type. |
| 12364 | */ |
| 12365 | taking_projection = btf_is_projection_of(ref_tname, reg_ref_tname); |
| 12366 | if (!taking_projection && !struct_same) { |
| 12367 | verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n", |
| 12368 | meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1, |
| 12369 | btf_type_str(reg_ref_t), reg_ref_tname); |
| 12370 | return -EINVAL; |
| 12371 | } |
| 12372 | return 0; |
| 12373 | } |
| 12374 | |
| 12375 | static int process_irq_flag(struct bpf_verifier_env *env, int regno, |
| 12376 | struct bpf_kfunc_call_arg_meta *meta) |
| 12377 | { |
| 12378 | struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; |
| 12379 | int err, kfunc_class = IRQ_NATIVE_KFUNC; |
| 12380 | bool irq_save; |
| 12381 | |
| 12382 | if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_save] || |
| 12383 | meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave]) { |
| 12384 | irq_save = true; |
| 12385 | if (meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave]) |
| 12386 | kfunc_class = IRQ_LOCK_KFUNC; |
| 12387 | } else if (meta->func_id == special_kfunc_list[KF_bpf_local_irq_restore] || |
| 12388 | meta->func_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore]) { |
| 12389 | irq_save = false; |
| 12390 | if (meta->func_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore]) |
| 12391 | kfunc_class = IRQ_LOCK_KFUNC; |
| 12392 | } else { |
| 12393 | verbose(env, "verifier internal error: unknown irq flags kfunc\n"); |
| 12394 | return -EFAULT; |
| 12395 | } |
| 12396 | |
| 12397 | if (irq_save) { |
| 12398 | if (!is_irq_flag_reg_valid_uninit(env, reg)) { |
| 12399 | verbose(env, "expected uninitialized irq flag as arg#%d\n", regno - 1); |
| 12400 | return -EINVAL; |
| 12401 | } |
| 12402 | |
| 12403 | err = check_mem_access(env, env->insn_idx, regno, 0, BPF_DW, BPF_WRITE, -1, false, false); |
| 12404 | if (err) |
| 12405 | return err; |
| 12406 | |
| 12407 | err = mark_stack_slot_irq_flag(env, meta, reg, env->insn_idx, kfunc_class); |
| 12408 | if (err) |
| 12409 | return err; |
| 12410 | } else { |
| 12411 | err = is_irq_flag_reg_valid_init(env, reg); |
| 12412 | if (err) { |
| 12413 | verbose(env, "expected an initialized irq flag as arg#%d\n", regno - 1); |
| 12414 | return err; |
| 12415 | } |
| 12416 | |
| 12417 | err = mark_irq_flag_read(env, reg); |
| 12418 | if (err) |
| 12419 | return err; |
| 12420 | |
| 12421 | err = unmark_stack_slot_irq_flag(env, reg, kfunc_class); |
| 12422 | if (err) |
| 12423 | return err; |
| 12424 | } |
| 12425 | return 0; |
| 12426 | } |
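| | |
| | /* Rough usage sketch from the BPF program side (assuming irq-flag kfuncs |
| | * such as bpf_local_irq_save()/bpf_local_irq_restore()): the flags value |
| | * must live in a stack slot, which process_irq_flag() marks as holding an |
| | * irq flag on save and releases again on restore: |
| | * |
| | *   unsigned long flags; |
| | * |
| | *   bpf_local_irq_save(&flags); |
| | *   ... IRQs-disabled section ... |
| | *   bpf_local_irq_restore(&flags); |
| | * |
| | * Passing an already-initialized slot to save, or an uninitialized slot to |
| | * restore, is rejected by the checks above. |
| | */ |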
| 12427 | |
| 12429 | static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 12430 | { |
| 12431 | struct btf_record *rec = reg_btf_record(reg); |
| 12432 | |
| 12433 | if (!env->cur_state->active_locks) { |
| 12434 | verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n"); |
| 12435 | return -EFAULT; |
| 12436 | } |
| 12437 | |
| 12438 | if (type_flag(reg->type) & NON_OWN_REF) { |
| 12439 | verbose(env, "verifier internal error: NON_OWN_REF already set\n"); |
| 12440 | return -EFAULT; |
| 12441 | } |
| 12442 | |
| 12443 | reg->type |= NON_OWN_REF; |
| 12444 | if (rec->refcount_off >= 0) |
| 12445 | reg->type |= MEM_RCU; |
| 12446 | |
| 12447 | return 0; |
| 12448 | } |
| 12449 | |
| 12450 | static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id) |
| 12451 | { |
| 12452 | struct bpf_verifier_state *state = env->cur_state; |
| 12453 | struct bpf_func_state *unused; |
| 12454 | struct bpf_reg_state *reg; |
| 12455 | int i; |
| 12456 | |
| 12457 | if (!ref_obj_id) { |
| 12458 | verbose(env, "verifier internal error: ref_obj_id is zero for " |
| 12459 | "owning -> non-owning conversion\n"); |
| 12460 | return -EFAULT; |
| 12461 | } |
| 12462 | |
| 12463 | for (i = 0; i < state->acquired_refs; i++) { |
| 12464 | if (state->refs[i].id != ref_obj_id) |
| 12465 | continue; |
| 12466 | |
| 12467 | /* Clear ref_obj_id here so release_reference doesn't clobber |
| 12468 | * the whole reg |
| 12469 | */ |
| 12470 | bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ |
| 12471 | if (reg->ref_obj_id == ref_obj_id) { |
| 12472 | reg->ref_obj_id = 0; |
| 12473 | ref_set_non_owning(env, reg); |
| 12474 | } |
| 12475 | })); |
| 12476 | return 0; |
| 12477 | } |
| 12478 | |
| 12479 | verbose(env, "verifier internal error: ref state missing for ref_obj_id\n"); |
| 12480 | return -EFAULT; |
| 12481 | } |
| 12482 | |
| 12483 | /* Implementation details: |
| 12484 | * |
| 12485 | * Each register points to some region of memory, which we define as an |
| 12486 | * allocation. Each allocation may embed a bpf_spin_lock which protects any |
| 12487 | * special BPF objects (bpf_list_head, bpf_rb_root, etc.) part of the same |
| 12488 | * allocation. The lock and the data it protects are colocated in the same |
| 12489 | * memory region. |
| 12490 | * |
| 12491 | * Hence, every time a register holds a pointer value pointing to such an |
| 12492 | * allocation, the verifier preserves a unique reg->id for it. |
| 12493 | * |
| 12494 | * The verifier remembers the lock 'ptr' and the lock 'id' whenever |
| 12495 | * bpf_spin_lock is called. |
| 12496 | * |
| 12497 | * To enable this, lock state in the verifier captures two values: |
| 12498 | * active_lock.ptr = Register's type specific pointer |
| 12499 | * active_lock.id = A unique ID for each register pointer value |
| 12500 | * |
| 12501 | * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two |
| 12502 | * supported register types. |
| 12503 | * |
| 12504 | * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of |
| 12505 | * allocated objects is the reg->btf pointer. |
| 12506 | * |
| 12507 | * The active_lock.id is non-unique for maps supporting direct_value_addr, as we |
| 12508 | * can establish the provenance of the map value statically for each distinct |
| 12509 | * lookup into such maps. They always contain a single map value, so unique |
| 12510 | * IDs for each pseudo load would pessimize the algorithm and reject valid programs. |
| 12511 | * |
| 12512 | * Global variables, for instance, use array maps with max_entries = 1, hence |
| 12513 | * their active_lock.ptr becomes the map_ptr and id = 0 (since they all point |
| 12514 | * into the same map value, as max_entries is 1, as described above). |
| 12515 | * |
| 12516 | * In case of inner map lookups, the inner map pointer has same map_ptr as the |
| 12517 | * outer map pointer (in verifier context), but each lookup into an inner map |
| 12518 | * assigns a fresh reg->id to the lookup, so while lookups into distinct inner |
| 12519 | * maps from the same outer map share the same map_ptr as active_lock.ptr, they |
| 12520 | * will get different reg->id assigned to each lookup, hence different |
| 12521 | * active_lock.id. |
| 12522 | * |
| 12523 | * In case of allocated objects, active_lock.ptr is the reg->btf, and the |
| 12524 | * reg->id is a unique ID preserved after the NULL pointer check on the pointer |
| 12525 | * returned from bpf_obj_new. Each allocation receives a new reg->id. |
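| | * |
| | * As a purely illustrative example (type and field names are made up), a |
| | * map value such as: |
| | * |
| | *   struct elem { |
| | *           struct bpf_spin_lock lock; |
| | *           struct bpf_list_head head __contains(foo, node); |
| | *   }; |
| | * |
| | * means that after bpf_spin_lock(&val->lock) the verifier records |
| | * active_lock.ptr/id from the register holding 'val', and list operations |
| | * on &val->head are only accepted while that same (ptr, id) pair is held. |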
| 12526 | */ |
| 12527 | static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg) |
| 12528 | { |
| 12529 | struct bpf_reference_state *s; |
| 12530 | void *ptr; |
| 12531 | u32 id; |
| 12532 | |
| 12533 | switch ((int)reg->type) { |
| 12534 | case PTR_TO_MAP_VALUE: |
| 12535 | ptr = reg->map_ptr; |
| 12536 | break; |
| 12537 | case PTR_TO_BTF_ID | MEM_ALLOC: |
| 12538 | ptr = reg->btf; |
| 12539 | break; |
| 12540 | default: |
| 12541 | verbose(env, "verifier internal error: unknown reg type for lock check\n"); |
| 12542 | return -EFAULT; |
| 12543 | } |
| 12544 | id = reg->id; |
| 12545 | |
| 12546 | if (!env->cur_state->active_locks) |
| 12547 | return -EINVAL; |
| 12548 | s = find_lock_state(env->cur_state, REF_TYPE_LOCK_MASK, id, ptr); |
| 12549 | if (!s) { |
| 12550 | verbose(env, "held lock and object are not in the same allocation\n"); |
| 12551 | return -EINVAL; |
| 12552 | } |
| 12553 | return 0; |
| 12554 | } |
| 12555 | |
| 12556 | static bool is_bpf_list_api_kfunc(u32 btf_id) |
| 12557 | { |
| 12558 | return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || |
| 12559 | btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] || |
| 12560 | btf_id == special_kfunc_list[KF_bpf_list_pop_front] || |
| 12561 | btf_id == special_kfunc_list[KF_bpf_list_pop_back] || |
| 12562 | btf_id == special_kfunc_list[KF_bpf_list_front] || |
| 12563 | btf_id == special_kfunc_list[KF_bpf_list_back]; |
| 12564 | } |
| 12565 | |
| 12566 | static bool is_bpf_rbtree_api_kfunc(u32 btf_id) |
| 12567 | { |
| 12568 | return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] || |
| 12569 | btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || |
| 12570 | btf_id == special_kfunc_list[KF_bpf_rbtree_first] || |
| 12571 | btf_id == special_kfunc_list[KF_bpf_rbtree_root] || |
| 12572 | btf_id == special_kfunc_list[KF_bpf_rbtree_left] || |
| 12573 | btf_id == special_kfunc_list[KF_bpf_rbtree_right]; |
| 12574 | } |
| 12575 | |
| 12576 | static bool is_bpf_iter_num_api_kfunc(u32 btf_id) |
| 12577 | { |
| 12578 | return btf_id == special_kfunc_list[KF_bpf_iter_num_new] || |
| 12579 | btf_id == special_kfunc_list[KF_bpf_iter_num_next] || |
| 12580 | btf_id == special_kfunc_list[KF_bpf_iter_num_destroy]; |
| 12581 | } |
| 12582 | |
| 12583 | static bool is_bpf_graph_api_kfunc(u32 btf_id) |
| 12584 | { |
| 12585 | return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) || |
| 12586 | btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]; |
| 12587 | } |
| 12588 | |
| 12589 | static bool is_bpf_res_spin_lock_kfunc(u32 btf_id) |
| 12590 | { |
| 12591 | return btf_id == special_kfunc_list[KF_bpf_res_spin_lock] || |
| 12592 | btf_id == special_kfunc_list[KF_bpf_res_spin_unlock] || |
| 12593 | btf_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave] || |
| 12594 | btf_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore]; |
| 12595 | } |
| 12596 | |
| 12597 | static bool kfunc_spin_allowed(u32 btf_id) |
| 12598 | { |
| 12599 | return is_bpf_graph_api_kfunc(btf_id) || is_bpf_iter_num_api_kfunc(btf_id) || |
| 12600 | is_bpf_res_spin_lock_kfunc(btf_id); |
| 12601 | } |
| 12602 | |
| 12603 | static bool is_sync_callback_calling_kfunc(u32 btf_id) |
| 12604 | { |
| 12605 | return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]; |
| 12606 | } |
| 12607 | |
| 12608 | static bool is_async_callback_calling_kfunc(u32 btf_id) |
| 12609 | { |
| 12610 | return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl]; |
| 12611 | } |
| 12612 | |
| 12613 | static bool is_bpf_throw_kfunc(struct bpf_insn *insn) |
| 12614 | { |
| 12615 | return bpf_pseudo_kfunc_call(insn) && insn->off == 0 && |
| 12616 | insn->imm == special_kfunc_list[KF_bpf_throw]; |
| 12617 | } |
| 12618 | |
| 12619 | static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id) |
| 12620 | { |
| 12621 | return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl]; |
| 12622 | } |
| 12623 | |
| 12624 | static bool is_callback_calling_kfunc(u32 btf_id) |
| 12625 | { |
| 12626 | return is_sync_callback_calling_kfunc(btf_id) || |
| 12627 | is_async_callback_calling_kfunc(btf_id); |
| 12628 | } |
| 12629 | |
| 12630 | static bool is_rbtree_lock_required_kfunc(u32 btf_id) |
| 12631 | { |
| 12632 | return is_bpf_rbtree_api_kfunc(btf_id); |
| 12633 | } |
| 12634 | |
| 12635 | static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env, |
| 12636 | enum btf_field_type head_field_type, |
| 12637 | u32 kfunc_btf_id) |
| 12638 | { |
| 12639 | bool ret; |
| 12640 | |
| 12641 | switch (head_field_type) { |
| 12642 | case BPF_LIST_HEAD: |
| 12643 | ret = is_bpf_list_api_kfunc(kfunc_btf_id); |
| 12644 | break; |
| 12645 | case BPF_RB_ROOT: |
| 12646 | ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id); |
| 12647 | break; |
| 12648 | default: |
| 12649 | verbose(env, "verifier internal error: unexpected graph root argument type %s\n", |
| 12650 | btf_field_type_name(head_field_type)); |
| 12651 | return false; |
| 12652 | } |
| 12653 | |
| 12654 | if (!ret) |
| 12655 | verbose(env, "verifier internal error: %s head arg for unknown kfunc\n", |
| 12656 | btf_field_type_name(head_field_type)); |
| 12657 | return ret; |
| 12658 | } |
| 12659 | |
| 12660 | static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env, |
| 12661 | enum btf_field_type node_field_type, |
| 12662 | u32 kfunc_btf_id) |
| 12663 | { |
| 12664 | bool ret; |
| 12665 | |
| 12666 | switch (node_field_type) { |
| 12667 | case BPF_LIST_NODE: |
| 12668 | ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || |
| 12669 | kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl]); |
| 12670 | break; |
| 12671 | case BPF_RB_NODE: |
| 12672 | ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || |
| 12673 | kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] || |
| 12674 | kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_left] || |
| 12675 | kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_right]); |
| 12676 | break; |
| 12677 | default: |
| 12678 | verbose(env, "verifier internal error: unexpected graph node argument type %s\n", |
| 12679 | btf_field_type_name(node_field_type)); |
| 12680 | return false; |
| 12681 | } |
| 12682 | |
| 12683 | if (!ret) |
| 12684 | verbose(env, "verifier internal error: %s node arg for unknown kfunc\n", |
| 12685 | btf_field_type_name(node_field_type)); |
| 12686 | return ret; |
| 12687 | } |
| 12688 | |
| 12689 | static int |
| 12690 | __process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env, |
| 12691 | struct bpf_reg_state *reg, u32 regno, |
| 12692 | struct bpf_kfunc_call_arg_meta *meta, |
| 12693 | enum btf_field_type head_field_type, |
| 12694 | struct btf_field **head_field) |
| 12695 | { |
| 12696 | const char *head_type_name; |
| 12697 | struct btf_field *field; |
| 12698 | struct btf_record *rec; |
| 12699 | u32 head_off; |
| 12700 | |
| 12701 | if (meta->btf != btf_vmlinux) { |
| 12702 | verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n"); |
| 12703 | return -EFAULT; |
| 12704 | } |
| 12705 | |
| 12706 | if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id)) |
| 12707 | return -EFAULT; |
| 12708 | |
| 12709 | head_type_name = btf_field_type_name(head_field_type); |
| 12710 | if (!tnum_is_const(reg->var_off)) { |
| 12711 | verbose(env, |
| 12712 | "R%d doesn't have constant offset. %s has to be at the constant offset\n", |
| 12713 | regno, head_type_name); |
| 12714 | return -EINVAL; |
| 12715 | } |
| 12716 | |
| 12717 | rec = reg_btf_record(reg); |
| 12718 | head_off = reg->off + reg->var_off.value; |
| 12719 | field = btf_record_find(rec, head_off, head_field_type); |
| 12720 | if (!field) { |
| 12721 | verbose(env, "%s not found at offset=%u\n", head_type_name, head_off); |
| 12722 | return -EINVAL; |
| 12723 | } |
| 12724 | |
| 12725 | /* All functions require the graph root (bpf_list_head/bpf_rb_root) to be protected by a bpf_spin_lock */ |
| 12726 | if (check_reg_allocation_locked(env, reg)) { |
| 12727 | verbose(env, "bpf_spin_lock at off=%d must be held for %s\n", |
| 12728 | rec->spin_lock_off, head_type_name); |
| 12729 | return -EINVAL; |
| 12730 | } |
| 12731 | |
| 12732 | if (*head_field) { |
| 12733 | verbose(env, "verifier internal error: repeating %s arg\n", head_type_name); |
| 12734 | return -EFAULT; |
| 12735 | } |
| 12736 | *head_field = field; |
| 12737 | return 0; |
| 12738 | } |
| 12739 | |
| 12740 | static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env, |
| 12741 | struct bpf_reg_state *reg, u32 regno, |
| 12742 | struct bpf_kfunc_call_arg_meta *meta) |
| 12743 | { |
| 12744 | return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD, |
| 12745 | &meta->arg_list_head.field); |
| 12746 | } |
| 12747 | |
| 12748 | static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env, |
| 12749 | struct bpf_reg_state *reg, u32 regno, |
| 12750 | struct bpf_kfunc_call_arg_meta *meta) |
| 12751 | { |
| 12752 | return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT, |
| 12753 | &meta->arg_rbtree_root.field); |
| 12754 | } |
| 12755 | |
| 12756 | static int |
| 12757 | __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env, |
| 12758 | struct bpf_reg_state *reg, u32 regno, |
| 12759 | struct bpf_kfunc_call_arg_meta *meta, |
| 12760 | enum btf_field_type head_field_type, |
| 12761 | enum btf_field_type node_field_type, |
| 12762 | struct btf_field **node_field) |
| 12763 | { |
| 12764 | const char *node_type_name; |
| 12765 | const struct btf_type *et, *t; |
| 12766 | struct btf_field *field; |
| 12767 | u32 node_off; |
| 12768 | |
| 12769 | if (meta->btf != btf_vmlinux) { |
| 12770 | verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n"); |
| 12771 | return -EFAULT; |
| 12772 | } |
| 12773 | |
| 12774 | if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id)) |
| 12775 | return -EFAULT; |
| 12776 | |
| 12777 | node_type_name = btf_field_type_name(node_field_type); |
| 12778 | if (!tnum_is_const(reg->var_off)) { |
| 12779 | verbose(env, |
| 12780 | "R%d doesn't have constant offset. %s has to be at the constant offset\n", |
| 12781 | regno, node_type_name); |
| 12782 | return -EINVAL; |
| 12783 | } |
| 12784 | |
| 12785 | node_off = reg->off + reg->var_off.value; |
| 12786 | field = reg_find_field_offset(reg, node_off, node_field_type); |
| 12787 | if (!field) { |
| 12788 | verbose(env, "%s not found at offset=%u\n", node_type_name, node_off); |
| 12789 | return -EINVAL; |
| 12790 | } |
| 12791 | |
| 12792 | field = *node_field; |
| 12793 | |
| 12794 | et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id); |
| 12795 | t = btf_type_by_id(reg->btf, reg->btf_id); |
| 12796 | if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf, |
| 12797 | field->graph_root.value_btf_id, true)) { |
| 12798 | verbose(env, "operation on %s expects arg#1 %s at offset=%d " |
| 12799 | "in struct %s, but arg is at offset=%d in struct %s\n", |
| 12800 | btf_field_type_name(head_field_type), |
| 12801 | btf_field_type_name(node_field_type), |
| 12802 | field->graph_root.node_offset, |
| 12803 | btf_name_by_offset(field->graph_root.btf, et->name_off), |
| 12804 | node_off, btf_name_by_offset(reg->btf, t->name_off)); |
| 12805 | return -EINVAL; |
| 12806 | } |
| 12807 | meta->arg_btf = reg->btf; |
| 12808 | meta->arg_btf_id = reg->btf_id; |
| 12809 | |
| 12810 | if (node_off != field->graph_root.node_offset) { |
| 12811 | verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n", |
| 12812 | node_off, btf_field_type_name(node_field_type), |
| 12813 | field->graph_root.node_offset, |
| 12814 | btf_name_by_offset(field->graph_root.btf, et->name_off)); |
| 12815 | return -EINVAL; |
| 12816 | } |
| 12817 | |
| 12818 | return 0; |
| 12819 | } |
| 12820 | |
| 12821 | static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env, |
| 12822 | struct bpf_reg_state *reg, u32 regno, |
| 12823 | struct bpf_kfunc_call_arg_meta *meta) |
| 12824 | { |
| 12825 | return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta, |
| 12826 | BPF_LIST_HEAD, BPF_LIST_NODE, |
| 12827 | &meta->arg_list_head.field); |
| 12828 | } |
| 12829 | |
| 12830 | static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env, |
| 12831 | struct bpf_reg_state *reg, u32 regno, |
| 12832 | struct bpf_kfunc_call_arg_meta *meta) |
| 12833 | { |
| 12834 | return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta, |
| 12835 | BPF_RB_ROOT, BPF_RB_NODE, |
| 12836 | &meta->arg_rbtree_root.field); |
| 12837 | } |
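| | |
| | /* Illustrative only (struct names are made up): for a program-defined value |
| | * type such as |
| | * |
| | *   struct node_data { |
| | *           long key; |
| | *           struct bpf_rb_node node; |
| | *   }; |
| | * |
| | * a bpf_rb_root declared as containing 'node_data' via its 'node' field only |
| | * accepts &n->node as the node argument; a pointer at any other offset, or |
| | * into a different struct, fails the offset and btf_struct_ids_match() |
| | * checks above. |
| | */ |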
| 12838 | |
| 12839 | /* |
| 12840 | * The css_task iter allowlist is needed to avoid deadlocking on css_set_lock. |
| 12841 | * LSM hooks and iters (both sleepable and non-sleepable) are safe. |
| 12842 | * Any sleepable progs are also safe since bpf_check_attach_target() enforces |
| 12843 | * that they can only be attached to specific hook points. |
| 12844 | */ |
| 12845 | static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env) |
| 12846 | { |
| 12847 | enum bpf_prog_type prog_type = resolve_prog_type(env->prog); |
| 12848 | |
| 12849 | switch (prog_type) { |
| 12850 | case BPF_PROG_TYPE_LSM: |
| 12851 | return true; |
| 12852 | case BPF_PROG_TYPE_TRACING: |
| 12853 | if (env->prog->expected_attach_type == BPF_TRACE_ITER) |
| 12854 | return true; |
| 12855 | fallthrough; |
| 12856 | default: |
| 12857 | return in_sleepable(env); |
| 12858 | } |
| 12859 | } |
| 12860 | |
| 12861 | static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta, |
| 12862 | int insn_idx) |
| 12863 | { |
| 12864 | const char *func_name = meta->func_name, *ref_tname; |
| 12865 | const struct btf *btf = meta->btf; |
| 12866 | const struct btf_param *args; |
| 12867 | struct btf_record *rec; |
| 12868 | u32 i, nargs; |
| 12869 | int ret; |
| 12870 | |
| 12871 | args = (const struct btf_param *)(meta->func_proto + 1); |
| 12872 | nargs = btf_type_vlen(meta->func_proto); |
| 12873 | if (nargs > MAX_BPF_FUNC_REG_ARGS) { |
| 12874 | verbose(env, "Function %s has %d > %d args\n", func_name, nargs, |
| 12875 | MAX_BPF_FUNC_REG_ARGS); |
| 12876 | return -EINVAL; |
| 12877 | } |
| 12878 | |
| 12879 | /* Check that BTF function arguments match actual types that the |
| 12880 | * verifier sees. |
| 12881 | */ |
| 12882 | for (i = 0; i < nargs; i++) { |
| 12883 | struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[i + 1]; |
| 12884 | const struct btf_type *t, *ref_t, *resolve_ret; |
| 12885 | enum bpf_arg_type arg_type = ARG_DONTCARE; |
| 12886 | u32 regno = i + 1, ref_id, type_size; |
| 12887 | bool is_ret_buf_sz = false; |
| 12888 | int kf_arg_type; |
| 12889 | |
| 12890 | t = btf_type_skip_modifiers(btf, args[i].type, NULL); |
| 12891 | |
| 12892 | if (is_kfunc_arg_ignore(btf, &args[i])) |
| 12893 | continue; |
| 12894 | |
| 12895 | if (is_kfunc_arg_prog(btf, &args[i])) { |
| 12896 | /* Used to reject repeated use of __prog. */ |
| 12897 | if (meta->arg_prog) { |
| 12898 | verbose(env, "Only 1 prog->aux argument supported per-kfunc\n"); |
| 12899 | return -EFAULT; |
| 12900 | } |
| 12901 | meta->arg_prog = true; |
| 12902 | cur_aux(env)->arg_prog = regno; |
| 12903 | continue; |
| 12904 | } |
| 12905 | |
| 12906 | if (btf_type_is_scalar(t)) { |
| 12907 | if (reg->type != SCALAR_VALUE) { |
| 12908 | verbose(env, "R%d is not a scalar\n", regno); |
| 12909 | return -EINVAL; |
| 12910 | } |
| 12911 | |
| 12912 | if (is_kfunc_arg_constant(meta->btf, &args[i])) { |
| 12913 | if (meta->arg_constant.found) { |
| 12914 | verbose(env, "verifier internal error: only one constant argument permitted\n"); |
| 12915 | return -EFAULT; |
| 12916 | } |
| 12917 | if (!tnum_is_const(reg->var_off)) { |
| 12918 | verbose(env, "R%d must be a known constant\n", regno); |
| 12919 | return -EINVAL; |
| 12920 | } |
| 12921 | ret = mark_chain_precision(env, regno); |
| 12922 | if (ret < 0) |
| 12923 | return ret; |
| 12924 | meta->arg_constant.found = true; |
| 12925 | meta->arg_constant.value = reg->var_off.value; |
| 12926 | } else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdonly_buf_size")) { |
| 12927 | meta->r0_rdonly = true; |
| 12928 | is_ret_buf_sz = true; |
| 12929 | } else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdwr_buf_size")) { |
| 12930 | is_ret_buf_sz = true; |
| 12931 | } |
| 12932 | |
| 12933 | if (is_ret_buf_sz) { |
| 12934 | if (meta->r0_size) { |
| 12935 | verbose(env, "2 or more rdonly/rdwr_buf_size parameters for kfunc\n"); |
| 12936 | return -EINVAL; |
| 12937 | } |
| 12938 | |
| 12939 | if (!tnum_is_const(reg->var_off)) { |
| 12940 | verbose(env, "R%d is not a const\n", regno); |
| 12941 | return -EINVAL; |
| 12942 | } |
| 12943 | |
| 12944 | meta->r0_size = reg->var_off.value; |
| 12945 | ret = mark_chain_precision(env, regno); |
| 12946 | if (ret) |
| 12947 | return ret; |
| 12948 | } |
| 12949 | continue; |
| 12950 | } |
| 12951 | |
| 12952 | if (!btf_type_is_ptr(t)) { |
| 12953 | verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t)); |
| 12954 | return -EINVAL; |
| 12955 | } |
| 12956 | |
| 12957 | if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) && |
| 12958 | (register_is_null(reg) || type_may_be_null(reg->type)) && |
| 12959 | !is_kfunc_arg_nullable(meta->btf, &args[i])) { |
| 12960 | verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i); |
| 12961 | return -EACCES; |
| 12962 | } |
| 12963 | |
| 12964 | if (reg->ref_obj_id) { |
| 12965 | if (is_kfunc_release(meta) && meta->ref_obj_id) { |
| 12966 | verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", |
| 12967 | regno, reg->ref_obj_id, |
| 12968 | meta->ref_obj_id); |
| 12969 | return -EFAULT; |
| 12970 | } |
| 12971 | meta->ref_obj_id = reg->ref_obj_id; |
| 12972 | if (is_kfunc_release(meta)) |
| 12973 | meta->release_regno = regno; |
| 12974 | } |
| 12975 | |
| 12976 | ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); |
| 12977 | ref_tname = btf_name_by_offset(btf, ref_t->name_off); |
| 12978 | |
| 12979 | kf_arg_type = get_kfunc_ptr_arg_type(env, meta, t, ref_t, ref_tname, args, i, nargs); |
| 12980 | if (kf_arg_type < 0) |
| 12981 | return kf_arg_type; |
| 12982 | |
| 12983 | switch (kf_arg_type) { |
| 12984 | case KF_ARG_PTR_TO_NULL: |
| 12985 | continue; |
| 12986 | case KF_ARG_PTR_TO_MAP: |
| 12987 | if (!reg->map_ptr) { |
| 12988 | verbose(env, "pointer in R%d isn't map pointer\n", regno); |
| 12989 | return -EINVAL; |
| 12990 | } |
| 12991 | if (meta->map.ptr && reg->map_ptr->record->wq_off >= 0) { |
| 12992 | /* Use map_uid (which is unique id of inner map) to reject: |
| 12993 | * inner_map1 = bpf_map_lookup_elem(outer_map, key1) |
| 12994 | * inner_map2 = bpf_map_lookup_elem(outer_map, key2) |
| 12995 | * if (inner_map1 && inner_map2) { |
| 12996 | * wq = bpf_map_lookup_elem(inner_map1); |
| 12997 | * if (wq) |
| 12998 | * // mismatch would have been allowed |
| 12999 | * bpf_wq_init(wq, inner_map2); |
| 13000 | * } |
| 13001 | * |
| 13002 | * Comparing map_ptr is enough to distinguish normal and outer maps. |
| 13003 | */ |
| 13004 | if (meta->map.ptr != reg->map_ptr || |
| 13005 | meta->map.uid != reg->map_uid) { |
| 13006 | verbose(env, |
| 13007 | "workqueue pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n", |
| 13008 | meta->map.uid, reg->map_uid); |
| 13009 | return -EINVAL; |
| 13010 | } |
| 13011 | } |
| 13012 | meta->map.ptr = reg->map_ptr; |
| 13013 | meta->map.uid = reg->map_uid; |
| 13014 | fallthrough; |
| 13015 | case KF_ARG_PTR_TO_ALLOC_BTF_ID: |
| 13016 | case KF_ARG_PTR_TO_BTF_ID: |
| 13017 | if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta)) |
| 13018 | break; |
| 13019 | |
| 13020 | if (!is_trusted_reg(reg)) { |
| 13021 | if (!is_kfunc_rcu(meta)) { |
| 13022 | verbose(env, "R%d must be referenced or trusted\n", regno); |
| 13023 | return -EINVAL; |
| 13024 | } |
| 13025 | if (!is_rcu_reg(reg)) { |
| 13026 | verbose(env, "R%d must be a rcu pointer\n", regno); |
| 13027 | return -EINVAL; |
| 13028 | } |
| 13029 | } |
| 13030 | fallthrough; |
| 13031 | case KF_ARG_PTR_TO_CTX: |
| 13032 | case KF_ARG_PTR_TO_DYNPTR: |
| 13033 | case KF_ARG_PTR_TO_ITER: |
| 13034 | case KF_ARG_PTR_TO_LIST_HEAD: |
| 13035 | case KF_ARG_PTR_TO_LIST_NODE: |
| 13036 | case KF_ARG_PTR_TO_RB_ROOT: |
| 13037 | case KF_ARG_PTR_TO_RB_NODE: |
| 13038 | case KF_ARG_PTR_TO_MEM: |
| 13039 | case KF_ARG_PTR_TO_MEM_SIZE: |
| 13040 | case KF_ARG_PTR_TO_CALLBACK: |
| 13041 | case KF_ARG_PTR_TO_REFCOUNTED_KPTR: |
| 13042 | case KF_ARG_PTR_TO_CONST_STR: |
| 13043 | case KF_ARG_PTR_TO_WORKQUEUE: |
| 13044 | case KF_ARG_PTR_TO_IRQ_FLAG: |
| 13045 | case KF_ARG_PTR_TO_RES_SPIN_LOCK: |
| 13046 | break; |
| 13047 | default: |
| 13048 | WARN_ON_ONCE(1); |
| 13049 | return -EFAULT; |
| 13050 | } |
| 13051 | |
| 13052 | if (is_kfunc_release(meta) && reg->ref_obj_id) |
| 13053 | arg_type |= OBJ_RELEASE; |
| 13054 | ret = check_func_arg_reg_off(env, reg, regno, arg_type); |
| 13055 | if (ret < 0) |
| 13056 | return ret; |
| 13057 | |
| 13058 | switch (kf_arg_type) { |
| 13059 | case KF_ARG_PTR_TO_CTX: |
| 13060 | if (reg->type != PTR_TO_CTX) { |
| 13061 | verbose(env, "arg#%d expected pointer to ctx, but got %s\n", |
| 13062 | i, reg_type_str(env, reg->type)); |
| 13063 | return -EINVAL; |
| 13064 | } |
| 13065 | |
| 13066 | if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { |
| 13067 | ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog)); |
| 13068 | if (ret < 0) |
| 13069 | return -EINVAL; |
| 13070 | meta->ret_btf_id = ret; |
| 13071 | } |
| 13072 | break; |
| 13073 | case KF_ARG_PTR_TO_ALLOC_BTF_ID: |
| 13074 | if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) { |
| 13075 | if (meta->func_id != special_kfunc_list[KF_bpf_obj_drop_impl]) { |
| 13076 | verbose(env, "arg#%d expected for bpf_obj_drop_impl()\n", i); |
| 13077 | return -EINVAL; |
| 13078 | } |
| 13079 | } else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC | MEM_PERCPU)) { |
| 13080 | if (meta->func_id != special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) { |
| 13081 | verbose(env, "arg#%d expected for bpf_percpu_obj_drop_impl()\n", i); |
| 13082 | return -EINVAL; |
| 13083 | } |
| 13084 | } else { |
| 13085 | verbose(env, "arg#%d expected pointer to allocated object\n", i); |
| 13086 | return -EINVAL; |
| 13087 | } |
| 13088 | if (!reg->ref_obj_id) { |
| 13089 | verbose(env, "allocated object must be referenced\n"); |
| 13090 | return -EINVAL; |
| 13091 | } |
| 13092 | if (meta->btf == btf_vmlinux) { |
| 13093 | meta->arg_btf = reg->btf; |
| 13094 | meta->arg_btf_id = reg->btf_id; |
| 13095 | } |
| 13096 | break; |
| 13097 | case KF_ARG_PTR_TO_DYNPTR: |
| 13098 | { |
| 13099 | enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR; |
| 13100 | int clone_ref_obj_id = 0; |
| 13101 | |
| 13102 | if (reg->type == CONST_PTR_TO_DYNPTR) |
| 13103 | dynptr_arg_type |= MEM_RDONLY; |
| 13104 | |
| 13105 | if (is_kfunc_arg_uninit(btf, &args[i])) |
| 13106 | dynptr_arg_type |= MEM_UNINIT; |
| 13107 | |
| 13108 | if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) { |
| 13109 | dynptr_arg_type |= DYNPTR_TYPE_SKB; |
| 13110 | } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) { |
| 13111 | dynptr_arg_type |= DYNPTR_TYPE_XDP; |
| 13112 | } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] && |
| 13113 | (dynptr_arg_type & MEM_UNINIT)) { |
| 13114 | enum bpf_dynptr_type parent_type = meta->initialized_dynptr.type; |
| 13115 | |
| 13116 | if (parent_type == BPF_DYNPTR_TYPE_INVALID) { |
| 13117 | verbose(env, "verifier internal error: no dynptr type for parent of clone\n"); |
| 13118 | return -EFAULT; |
| 13119 | } |
| 13120 | |
| 13121 | dynptr_arg_type |= (unsigned int)get_dynptr_type_flag(parent_type); |
| 13122 | clone_ref_obj_id = meta->initialized_dynptr.ref_obj_id; |
| 13123 | if (dynptr_type_refcounted(parent_type) && !clone_ref_obj_id) { |
| 13124 | verbose(env, "verifier internal error: missing ref obj id for parent of clone\n"); |
| 13125 | return -EFAULT; |
| 13126 | } |
| 13127 | } |
| 13128 | |
| 13129 | ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type, clone_ref_obj_id); |
| 13130 | if (ret < 0) |
| 13131 | return ret; |
| 13132 | |
| 13133 | if (!(dynptr_arg_type & MEM_UNINIT)) { |
| 13134 | int id = dynptr_id(env, reg); |
| 13135 | |
| 13136 | if (id < 0) { |
| 13137 | verbose(env, "verifier internal error: failed to obtain dynptr id\n"); |
| 13138 | return id; |
| 13139 | } |
| 13140 | meta->initialized_dynptr.id = id; |
| 13141 | meta->initialized_dynptr.type = dynptr_get_type(env, reg); |
| 13142 | meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg); |
| 13143 | } |
| 13144 | |
| 13145 | break; |
| 13146 | } |
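| | /* BPF-side sketch of the clone handling above (kfunc usage only, error |
| |  * handling omitted): |
| |  * |
| |  *   struct bpf_dynptr src, dst; |
| |  * |
| |  *   bpf_dynptr_from_skb(skb, 0, &src);   // src gets DYNPTR_TYPE_SKB |
| |  *   bpf_dynptr_clone(&src, &dst);        // dst inherits src's type and |
| |  *                                        // ref_obj_id via initialized_dynptr |
| |  */ |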
| 13147 | case KF_ARG_PTR_TO_ITER: |
| 13148 | if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) { |
| 13149 | if (!check_css_task_iter_allowlist(env)) { |
| 13150 | verbose(env, "css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs\n"); |
| 13151 | return -EINVAL; |
| 13152 | } |
| 13153 | } |
| 13154 | ret = process_iter_arg(env, regno, insn_idx, meta); |
| 13155 | if (ret < 0) |
| 13156 | return ret; |
| 13157 | break; |
| 13158 | case KF_ARG_PTR_TO_LIST_HEAD: |
| 13159 | if (reg->type != PTR_TO_MAP_VALUE && |
| 13160 | reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { |
| 13161 | verbose(env, "arg#%d expected pointer to map value or allocated object\n", i); |
| 13162 | return -EINVAL; |
| 13163 | } |
| 13164 | if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { |
| 13165 | verbose(env, "allocated object must be referenced\n"); |
| 13166 | return -EINVAL; |
| 13167 | } |
| 13168 | ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta); |
| 13169 | if (ret < 0) |
| 13170 | return ret; |
| 13171 | break; |
| 13172 | case KF_ARG_PTR_TO_RB_ROOT: |
| 13173 | if (reg->type != PTR_TO_MAP_VALUE && |
| 13174 | reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { |
| 13175 | verbose(env, "arg#%d expected pointer to map value or allocated object\n", i); |
| 13176 | return -EINVAL; |
| 13177 | } |
| 13178 | if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { |
| 13179 | verbose(env, "allocated object must be referenced\n"); |
| 13180 | return -EINVAL; |
| 13181 | } |
| 13182 | ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta); |
| 13183 | if (ret < 0) |
| 13184 | return ret; |
| 13185 | break; |
| 13186 | case KF_ARG_PTR_TO_LIST_NODE: |
| 13187 | if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { |
| 13188 | verbose(env, "arg#%d expected pointer to allocated object\n", i); |
| 13189 | return -EINVAL; |
| 13190 | } |
| 13191 | if (!reg->ref_obj_id) { |
| 13192 | verbose(env, "allocated object must be referenced\n"); |
| 13193 | return -EINVAL; |
| 13194 | } |
| 13195 | ret = process_kf_arg_ptr_to_list_node(env, reg, regno, meta); |
| 13196 | if (ret < 0) |
| 13197 | return ret; |
| 13198 | break; |
| 13199 | case KF_ARG_PTR_TO_RB_NODE: |
| 13200 | if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { |
| 13201 | if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { |
| 13202 | verbose(env, "arg#%d expected pointer to allocated object\n", i); |
| 13203 | return -EINVAL; |
| 13204 | } |
| 13205 | if (!reg->ref_obj_id) { |
| 13206 | verbose(env, "allocated object must be referenced\n"); |
| 13207 | return -EINVAL; |
| 13208 | } |
| 13209 | } else { |
| 13210 | if (!type_is_non_owning_ref(reg->type) && !reg->ref_obj_id) { |
| 13211 | verbose(env, "%s can only take non-owning or refcounted bpf_rb_node pointer\n", func_name); |
| 13212 | return -EINVAL; |
| 13213 | } |
| 13214 | if (in_rbtree_lock_required_cb(env)) { |
| 13215 | verbose(env, "%s not allowed in rbtree cb\n", func_name); |
| 13216 | return -EINVAL; |
| 13217 | } |
| 13218 | } |
| 13219 | |
| 13220 | ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta); |
| 13221 | if (ret < 0) |
| 13222 | return ret; |
| 13223 | break; |
| 13224 | case KF_ARG_PTR_TO_MAP: |
| 13225 | /* If argument has '__map' suffix expect 'struct bpf_map *' */ |
| 13226 | ref_id = *reg2btf_ids[CONST_PTR_TO_MAP]; |
| 13227 | ref_t = btf_type_by_id(btf_vmlinux, ref_id); |
| 13228 | ref_tname = btf_name_by_offset(btf, ref_t->name_off); |
| 13229 | fallthrough; |
| 13230 | case KF_ARG_PTR_TO_BTF_ID: |
| 13231 | /* Only base_type is checked; further checks are done here */ |
| 13232 | if ((base_type(reg->type) != PTR_TO_BTF_ID || |
| 13233 | (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) && |
| 13234 | !reg2btf_ids[base_type(reg->type)]) { |
| 13235 | verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type)); |
| 13236 | verbose(env, "expected %s or socket\n", |
| 13237 | reg_type_str(env, base_type(reg->type) | |
| 13238 | (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS))); |
| 13239 | return -EINVAL; |
| 13240 | } |
| 13241 | ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i); |
| 13242 | if (ret < 0) |
| 13243 | return ret; |
| 13244 | break; |
| 13245 | case KF_ARG_PTR_TO_MEM: |
| 13246 | resolve_ret = btf_resolve_size(btf, ref_t, &type_size); |
| 13247 | if (IS_ERR(resolve_ret)) { |
| 13248 | verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n", |
| 13249 | i, btf_type_str(ref_t), ref_tname, PTR_ERR(resolve_ret)); |
| 13250 | return -EINVAL; |
| 13251 | } |
| 13252 | ret = check_mem_reg(env, reg, regno, type_size); |
| 13253 | if (ret < 0) |
| 13254 | return ret; |
| 13255 | break; |
| 13256 | case KF_ARG_PTR_TO_MEM_SIZE: |
| 13257 | { |
| 13258 | struct bpf_reg_state *buff_reg = &regs[regno]; |
| 13259 | const struct btf_param *buff_arg = &args[i]; |
| 13260 | struct bpf_reg_state *size_reg = &regs[regno + 1]; |
| 13261 | const struct btf_param *size_arg = &args[i + 1]; |
| 13262 | |
| 13263 | if (!register_is_null(buff_reg) || !is_kfunc_arg_optional(meta->btf, buff_arg)) { |
| 13264 | ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1); |
| 13265 | if (ret < 0) { |
| 13266 | verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1); |
| 13267 | return ret; |
| 13268 | } |
| 13269 | } |
| 13270 | |
| 13271 | if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) { |
| 13272 | if (meta->arg_constant.found) { |
| 13273 | verbose(env, "verifier internal error: only one constant argument permitted\n"); |
| 13274 | return -EFAULT; |
| 13275 | } |
| 13276 | if (!tnum_is_const(size_reg->var_off)) { |
| 13277 | verbose(env, "R%d must be a known constant\n", regno + 1); |
| 13278 | return -EINVAL; |
| 13279 | } |
| 13280 | meta->arg_constant.found = true; |
| 13281 | meta->arg_constant.value = size_reg->var_off.value; |
| 13282 | } |
| 13283 | |
| 13284 | /* Skip next '__sz' or '__szk' argument */ |
| 13285 | i++; |
| 13286 | break; |
| 13287 | } |
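| | /* Example of the mem/size pair handled above; the kfunc name is |
| |  * hypothetical. The size parameter is recognized by its "__sz" suffix |
| |  * ("__szk" when it must be a known constant) and bounds the preceding |
| |  * memory argument: |
| |  * |
| |  *   __bpf_kfunc int bpf_example_copy(void *dst, u32 dst__sz); |
| |  * |
| |  * The size argument itself is then skipped by the i++ above. |
| |  */ |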
| 13288 | case KF_ARG_PTR_TO_CALLBACK: |
| 13289 | if (reg->type != PTR_TO_FUNC) { |
| 13290 | verbose(env, "arg%d expected pointer to func\n", i); |
| 13291 | return -EINVAL; |
| 13292 | } |
| 13293 | meta->subprogno = reg->subprogno; |
| 13294 | break; |
| 13295 | case KF_ARG_PTR_TO_REFCOUNTED_KPTR: |
| 13296 | if (!type_is_ptr_alloc_obj(reg->type)) { |
| 13297 | verbose(env, "arg#%d is neither owning nor non-owning ref\n", i); |
| 13298 | return -EINVAL; |
| 13299 | } |
| 13300 | if (!type_is_non_owning_ref(reg->type)) |
| 13301 | meta->arg_owning_ref = true; |
| 13302 | |
| 13303 | rec = reg_btf_record(reg); |
| 13304 | if (!rec) { |
| 13305 | verbose(env, "verifier internal error: Couldn't find btf_record\n"); |
| 13306 | return -EFAULT; |
| 13307 | } |
| 13308 | |
| 13309 | if (rec->refcount_off < 0) { |
| 13310 | verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i); |
| 13311 | return -EINVAL; |
| 13312 | } |
| 13313 | |
| 13314 | meta->arg_btf = reg->btf; |
| 13315 | meta->arg_btf_id = reg->btf_id; |
| 13316 | break; |
| 13317 | case KF_ARG_PTR_TO_CONST_STR: |
| 13318 | if (reg->type != PTR_TO_MAP_VALUE) { |
| 13319 | verbose(env, "arg#%d doesn't point to a const string\n", i); |
| 13320 | return -EINVAL; |
| 13321 | } |
| 13322 | ret = check_reg_const_str(env, reg, regno); |
| 13323 | if (ret) |
| 13324 | return ret; |
| 13325 | break; |
| 13326 | case KF_ARG_PTR_TO_WORKQUEUE: |
| 13327 | if (reg->type != PTR_TO_MAP_VALUE) { |
| 13328 | verbose(env, "arg#%d doesn't point to a map value\n", i); |
| 13329 | return -EINVAL; |
| 13330 | } |
| 13331 | ret = process_wq_func(env, regno, meta); |
| 13332 | if (ret < 0) |
| 13333 | return ret; |
| 13334 | break; |
| 13335 | case KF_ARG_PTR_TO_IRQ_FLAG: |
| 13336 | if (reg->type != PTR_TO_STACK) { |
| 13337 | verbose(env, "arg#%d doesn't point to an irq flag on stack\n", i); |
| 13338 | return -EINVAL; |
| 13339 | } |
| 13340 | ret = process_irq_flag(env, regno, meta); |
| 13341 | if (ret < 0) |
| 13342 | return ret; |
| 13343 | break; |
| 13344 | case KF_ARG_PTR_TO_RES_SPIN_LOCK: |
| 13345 | { |
| 13346 | int flags = PROCESS_RES_LOCK; |
| 13347 | |
| 13348 | if (reg->type != PTR_TO_MAP_VALUE && reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { |
| 13349 | verbose(env, "arg#%d doesn't point to map value or allocated object\n", i); |
| 13350 | return -EINVAL; |
| 13351 | } |
| 13352 | |
| 13353 | if (!is_bpf_res_spin_lock_kfunc(meta->func_id)) |
| 13354 | return -EFAULT; |
| 13355 | if (meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock] || |
| 13356 | meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave]) |
| 13357 | flags |= PROCESS_SPIN_LOCK; |
| 13358 | if (meta->func_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave] || |
| 13359 | meta->func_id == special_kfunc_list[KF_bpf_res_spin_unlock_irqrestore]) |
| 13360 | flags |= PROCESS_LOCK_IRQ; |
| 13361 | ret = process_spin_lock(env, regno, flags); |
| 13362 | if (ret < 0) |
| 13363 | return ret; |
| 13364 | break; |
| 13365 | } |
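| | /* Summary of the flag selection above, as implied by the checks: |
| |  * |
| |  *   bpf_res_spin_lock()              -> PROCESS_RES_LOCK | PROCESS_SPIN_LOCK |
| |  *   bpf_res_spin_unlock()            -> PROCESS_RES_LOCK |
| |  *   bpf_res_spin_lock_irqsave()      -> PROCESS_RES_LOCK | PROCESS_SPIN_LOCK | PROCESS_LOCK_IRQ |
| |  *   bpf_res_spin_unlock_irqrestore() -> PROCESS_RES_LOCK | PROCESS_LOCK_IRQ |
| |  */ |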
| 13366 | } |
| 13367 | } |
| 13368 | |
| 13369 | if (is_kfunc_release(meta) && !meta->release_regno) { |
| 13370 | verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", |
| 13371 | func_name); |
| 13372 | return -EINVAL; |
| 13373 | } |
| 13374 | |
| 13375 | return 0; |
| 13376 | } |
| 13377 | |
| 13378 | static int fetch_kfunc_meta(struct bpf_verifier_env *env, |
| 13379 | struct bpf_insn *insn, |
| 13380 | struct bpf_kfunc_call_arg_meta *meta, |
| 13381 | const char **kfunc_name) |
| 13382 | { |
| 13383 | const struct btf_type *func, *func_proto; |
| 13384 | u32 func_id, *kfunc_flags; |
| 13385 | const char *func_name; |
| 13386 | struct btf *desc_btf; |
| 13387 | |
| 13388 | if (kfunc_name) |
| 13389 | *kfunc_name = NULL; |
| 13390 | |
| 13391 | if (!insn->imm) |
| 13392 | return -EINVAL; |
| 13393 | |
| 13394 | desc_btf = find_kfunc_desc_btf(env, insn->off); |
| 13395 | if (IS_ERR(desc_btf)) |
| 13396 | return PTR_ERR(desc_btf); |
| 13397 | |
| 13398 | func_id = insn->imm; |
| 13399 | func = btf_type_by_id(desc_btf, func_id); |
| 13400 | func_name = btf_name_by_offset(desc_btf, func->name_off); |
| 13401 | if (kfunc_name) |
| 13402 | *kfunc_name = func_name; |
| 13403 | func_proto = btf_type_by_id(desc_btf, func->type); |
| 13404 | |
| 13405 | kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog); |
| 13406 | if (!kfunc_flags) |
| 13407 | return -EACCES; |
| 13409 | |
| 13410 | memset(meta, 0, sizeof(*meta)); |
| 13411 | meta->btf = desc_btf; |
| 13412 | meta->func_id = func_id; |
| 13413 | meta->kfunc_flags = *kfunc_flags; |
| 13414 | meta->func_proto = func_proto; |
| 13415 | meta->func_name = func_name; |
| 13416 | |
| 13417 | return 0; |
| 13418 | } |
| 13419 | |
| 13420 | /* Check special kfuncs and return: |
| 13421 |  * 1   - handled here; the caller must not fall through to its 'else' branch and continues verification |
| 13422 |  * 0   - not handled here; the caller falls through to its 'else' branch |
| 13423 |  * < 0 - handled here; the caller must not fall through and propagates the error |
| 13424 |  */ |
| 13425 | static int check_special_kfunc(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta, |
| 13426 | struct bpf_reg_state *regs, struct bpf_insn_aux_data *insn_aux, |
| 13427 | const struct btf_type *ptr_type, struct btf *desc_btf) |
| 13428 | { |
| 13429 | const struct btf_type *ret_t; |
| 13430 | int err = 0; |
| 13431 | |
| 13432 | if (meta->btf != btf_vmlinux) |
| 13433 | return 0; |
| 13434 | |
| 13435 | if (meta->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || |
| 13436 | meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { |
| 13437 | struct btf_struct_meta *struct_meta; |
| 13438 | struct btf *ret_btf; |
| 13439 | u32 ret_btf_id; |
| 13440 | |
| 13441 | if (meta->func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set) |
| 13442 | return -ENOMEM; |
| 13443 | |
| 13444 | if (((u64)(u32)meta->arg_constant.value) != meta->arg_constant.value) { |
| 13445 | verbose(env, "local type ID argument must be in range [0, U32_MAX]\n"); |
| 13446 | return -EINVAL; |
| 13447 | } |
| 13448 | |
| 13449 | ret_btf = env->prog->aux->btf; |
| 13450 | ret_btf_id = meta->arg_constant.value; |
| 13451 | |
| 13452 | /* This may be NULL due to user not supplying a BTF */ |
| 13453 | if (!ret_btf) { |
| 13454 | verbose(env, "bpf_obj_new/bpf_percpu_obj_new requires prog BTF\n"); |
| 13455 | return -EINVAL; |
| 13456 | } |
| 13457 | |
| 13458 | ret_t = btf_type_by_id(ret_btf, ret_btf_id); |
| 13459 | if (!ret_t || !__btf_type_is_struct(ret_t)) { |
| 13460 | verbose(env, "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct\n"); |
| 13461 | return -EINVAL; |
| 13462 | } |
| 13463 | |
| 13464 | if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { |
| 13465 | if (ret_t->size > BPF_GLOBAL_PERCPU_MA_MAX_SIZE) { |
| 13466 | verbose(env, "bpf_percpu_obj_new type size (%d) is greater than %d\n", |
| 13467 | ret_t->size, BPF_GLOBAL_PERCPU_MA_MAX_SIZE); |
| 13468 | return -EINVAL; |
| 13469 | } |
| 13470 | |
| 13471 | if (!bpf_global_percpu_ma_set) { |
| 13472 | mutex_lock(&bpf_percpu_ma_lock); |
| 13473 | if (!bpf_global_percpu_ma_set) { |
| 13474 | /* Charge memory allocated with bpf_global_percpu_ma to |
| 13475 | * root memcg. The obj_cgroup for root memcg is NULL. |
| 13476 | */ |
| 13477 | err = bpf_mem_alloc_percpu_init(&bpf_global_percpu_ma, NULL); |
| 13478 | if (!err) |
| 13479 | bpf_global_percpu_ma_set = true; |
| 13480 | } |
| 13481 | mutex_unlock(&bpf_percpu_ma_lock); |
| 13482 | if (err) |
| 13483 | return err; |
| 13484 | } |
| 13485 | |
| 13486 | mutex_lock(&bpf_percpu_ma_lock); |
| 13487 | err = bpf_mem_alloc_percpu_unit_init(&bpf_global_percpu_ma, ret_t->size); |
| 13488 | mutex_unlock(&bpf_percpu_ma_lock); |
| 13489 | if (err) |
| 13490 | return err; |
| 13491 | } |
| 13492 | |
| 13493 | struct_meta = btf_find_struct_meta(ret_btf, ret_btf_id); |
| 13494 | if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { |
| 13495 | if (!__btf_type_is_scalar_struct(env, ret_btf, ret_t, 0)) { |
| 13496 | verbose(env, "bpf_percpu_obj_new type ID argument must be of a struct of scalars\n"); |
| 13497 | return -EINVAL; |
| 13498 | } |
| 13499 | |
| 13500 | if (struct_meta) { |
| 13501 | verbose(env, "bpf_percpu_obj_new type ID argument must not contain special fields\n"); |
| 13502 | return -EINVAL; |
| 13503 | } |
| 13504 | } |
| 13505 | |
| 13506 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 13507 | regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; |
| 13508 | regs[BPF_REG_0].btf = ret_btf; |
| 13509 | regs[BPF_REG_0].btf_id = ret_btf_id; |
| 13510 | if (meta->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) |
| 13511 | regs[BPF_REG_0].type |= MEM_PERCPU; |
| 13512 | |
| 13513 | insn_aux->obj_new_size = ret_t->size; |
| 13514 | insn_aux->kptr_struct_meta = struct_meta; |
| 13515 | } else if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { |
| 13516 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 13517 | regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; |
| 13518 | regs[BPF_REG_0].btf = meta->arg_btf; |
| 13519 | regs[BPF_REG_0].btf_id = meta->arg_btf_id; |
| 13520 | |
| 13521 | insn_aux->kptr_struct_meta = |
| 13522 | btf_find_struct_meta(meta->arg_btf, |
| 13523 | meta->arg_btf_id); |
| 13524 | } else if (is_list_node_type(ptr_type)) { |
| 13525 | struct btf_field *field = meta->arg_list_head.field; |
| 13526 | |
| 13527 | mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); |
| 13528 | } else if (is_rbtree_node_type(ptr_type)) { |
| 13529 | struct btf_field *field = meta->arg_rbtree_root.field; |
| 13530 | |
| 13531 | mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); |
| 13532 | } else if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { |
| 13533 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 13534 | regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED; |
| 13535 | regs[BPF_REG_0].btf = desc_btf; |
| 13536 | regs[BPF_REG_0].btf_id = meta->ret_btf_id; |
| 13537 | } else if (meta->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { |
| 13538 | ret_t = btf_type_by_id(desc_btf, meta->arg_constant.value); |
| 13539 | if (!ret_t || !btf_type_is_struct(ret_t)) { |
| 13540 | verbose(env, |
| 13541 | "kfunc bpf_rdonly_cast type ID argument must be of a struct\n"); |
| 13542 | return -EINVAL; |
| 13543 | } |
| 13544 | |
| 13545 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 13546 | regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED; |
| 13547 | regs[BPF_REG_0].btf = desc_btf; |
| 13548 | regs[BPF_REG_0].btf_id = meta->arg_constant.value; |
| 13549 | } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_slice] || |
| 13550 | meta->func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) { |
| 13551 | enum bpf_type_flag type_flag = get_dynptr_type_flag(meta->initialized_dynptr.type); |
| 13552 | |
| 13553 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 13554 | |
| 13555 | if (!meta->arg_constant.found) { |
| 13556 | verbose(env, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n"); |
| 13557 | return -EFAULT; |
| 13558 | } |
| 13559 | |
| 13560 | regs[BPF_REG_0].mem_size = meta->arg_constant.value; |
| 13561 | |
| 13562 | /* PTR_MAYBE_NULL will be added when is_kfunc_ret_null is checked */ |
| 13563 | regs[BPF_REG_0].type = PTR_TO_MEM | type_flag; |
| 13564 | |
| 13565 | if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_slice]) { |
| 13566 | regs[BPF_REG_0].type |= MEM_RDONLY; |
| 13567 | } else { |
| 13568 | /* this will set env->seen_direct_write to true */ |
| 13569 | if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE)) { |
| 13570 | verbose(env, "the prog does not allow writes to packet data\n"); |
| 13571 | return -EINVAL; |
| 13572 | } |
| 13573 | } |
| 13574 | |
| 13575 | if (!meta->initialized_dynptr.id) { |
| 13576 | verbose(env, "verifier internal error: no dynptr id\n"); |
| 13577 | return -EFAULT; |
| 13578 | } |
| 13579 | regs[BPF_REG_0].dynptr_id = meta->initialized_dynptr.id; |
| 13580 | |
| 13581 | /* we don't need to set BPF_REG_0's ref obj id |
| 13582 | * because packet slices are not refcounted (see |
| 13583 | * dynptr_type_refcounted) |
| 13584 | */ |
| 13585 | } else { |
| 13586 | return 0; |
| 13587 | } |
| 13588 | |
| 13589 | return 1; |
| 13590 | } |
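| | |
| | /* BPF-side sketch of the bpf_obj_new() path handled above; 'struct foo' is |
| |  * hypothetical and the macro expansion follows the selftests' |
| |  * bpf_experimental.h: |
| |  * |
| |  *   struct foo { long data; }; |
| |  * |
| |  *   struct foo *f = bpf_obj_new(struct foo); |
| |  *   // expands to bpf_obj_new_impl(bpf_core_type_id_local(struct foo), NULL), |
| |  *   // so the constant type ID checked above must fit in u32 and name a |
| |  *   // struct in the program's own BTF |
| |  *   if (!f) |
| |  *           return 0; |
| |  *   bpf_obj_drop(f); |
| |  */ |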
| 13591 | |
| 13592 | static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name); |
| 13593 | |
| 13594 | static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, |
| 13595 | int *insn_idx_p) |
| 13596 | { |
| 13597 | bool sleepable, rcu_lock, rcu_unlock, preempt_disable, preempt_enable; |
| 13598 | u32 i, nargs, ptr_type_id, release_ref_obj_id; |
| 13599 | struct bpf_reg_state *regs = cur_regs(env); |
| 13600 | const char *func_name, *ptr_type_name; |
| 13601 | const struct btf_type *t, *ptr_type; |
| 13602 | struct bpf_kfunc_call_arg_meta meta; |
| 13603 | struct bpf_insn_aux_data *insn_aux; |
| 13604 | int err, insn_idx = *insn_idx_p; |
| 13605 | const struct btf_param *args; |
| 13606 | struct btf *desc_btf; |
| 13607 | |
| 13608 | /* skip for now, but return error when we find this in fixup_kfunc_call */ |
| 13609 | if (!insn->imm) |
| 13610 | return 0; |
| 13611 | |
| 13612 | err = fetch_kfunc_meta(env, insn, &meta, &func_name); |
| 13613 | if (err == -EACCES && func_name) |
| 13614 | verbose(env, "calling kernel function %s is not allowed\n", func_name); |
| 13615 | if (err) |
| 13616 | return err; |
| 13617 | desc_btf = meta.btf; |
| 13618 | insn_aux = &env->insn_aux_data[insn_idx]; |
| 13619 | |
| 13620 | insn_aux->is_iter_next = is_iter_next_kfunc(&meta); |
| 13621 | |
| 13622 | if (!insn->off && |
| 13623 | (insn->imm == special_kfunc_list[KF_bpf_res_spin_lock] || |
| 13624 | insn->imm == special_kfunc_list[KF_bpf_res_spin_lock_irqsave])) { |
| 13625 | struct bpf_verifier_state *branch; |
| 13626 | struct bpf_reg_state *regs; |
| 13627 | |
| 13628 | branch = push_stack(env, env->insn_idx + 1, env->insn_idx, false); |
| 13629 | if (!branch) { |
| 13630 | verbose(env, "failed to push state for failed lock acquisition\n"); |
| 13631 | return -ENOMEM; |
| 13632 | } |
| 13633 | |
| 13634 | regs = branch->frame[branch->curframe]->regs; |
| 13635 | |
| 13636 | /* Clear r0-r5 registers in forked state */ |
| 13637 | for (i = 0; i < CALLER_SAVED_REGS; i++) |
| 13638 | mark_reg_not_init(env, regs, caller_saved[i]); |
| 13639 | |
| 13640 | mark_reg_unknown(env, regs, BPF_REG_0); |
| 13641 | err = __mark_reg_s32_range(env, regs, BPF_REG_0, -MAX_ERRNO, -1); |
| 13642 | if (err) { |
| 13643 | verbose(env, "failed to mark s32 range for retval in forked state for lock\n"); |
| 13644 | return err; |
| 13645 | } |
| 13646 | __mark_btf_func_reg_size(env, regs, BPF_REG_0, sizeof(u32)); |
| 13647 | } else if (!insn->off && insn->imm == special_kfunc_list[KF___bpf_trap]) { |
| 13648 | verbose(env, "unexpected __bpf_trap() due to uninitialized variable?\n"); |
| 13649 | return -EFAULT; |
| 13650 | } |
| 13651 | |
| 13652 | if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) { |
| 13653 | verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n"); |
| 13654 | return -EACCES; |
| 13655 | } |
| 13656 | |
| 13657 | sleepable = is_kfunc_sleepable(&meta); |
| 13658 | if (sleepable && !in_sleepable(env)) { |
| 13659 | verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name); |
| 13660 | return -EACCES; |
| 13661 | } |
| 13662 | |
| 13663 | /* Check the arguments */ |
| 13664 | err = check_kfunc_args(env, &meta, insn_idx); |
| 13665 | if (err < 0) |
| 13666 | return err; |
| 13667 | |
| 13668 | if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { |
| 13669 | err = push_callback_call(env, insn, insn_idx, meta.subprogno, |
| 13670 | set_rbtree_add_callback_state); |
| 13671 | if (err) { |
| 13672 | verbose(env, "kfunc %s#%d failed callback verification\n", |
| 13673 | func_name, meta.func_id); |
| 13674 | return err; |
| 13675 | } |
| 13676 | } |
| 13677 | |
| 13678 | if (meta.func_id == special_kfunc_list[KF_bpf_session_cookie]) { |
| 13679 | meta.r0_size = sizeof(u64); |
| 13680 | meta.r0_rdonly = false; |
| 13681 | } |
| 13682 | |
| 13683 | if (is_bpf_wq_set_callback_impl_kfunc(meta.func_id)) { |
| 13684 | err = push_callback_call(env, insn, insn_idx, meta.subprogno, |
| 13685 | set_timer_callback_state); |
| 13686 | if (err) { |
| 13687 | verbose(env, "kfunc %s#%d failed callback verification\n", |
| 13688 | func_name, meta.func_id); |
| 13689 | return err; |
| 13690 | } |
| 13691 | } |
| 13692 | |
| 13693 | rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta); |
| 13694 | rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta); |
| 13695 | |
| 13696 | preempt_disable = is_kfunc_bpf_preempt_disable(&meta); |
| 13697 | preempt_enable = is_kfunc_bpf_preempt_enable(&meta); |
| 13698 | |
| 13699 | if (env->cur_state->active_rcu_lock) { |
| 13700 | struct bpf_func_state *state; |
| 13701 | struct bpf_reg_state *reg; |
| 13702 | u32 clear_mask = (1 << STACK_SPILL) | (1 << STACK_ITER); |
| 13703 | |
| 13704 | if (in_rbtree_lock_required_cb(env) && (rcu_lock || rcu_unlock)) { |
| 13705 | verbose(env, "Calling bpf_rcu_read_{lock,unlock} in unnecessary rbtree callback\n"); |
| 13706 | return -EACCES; |
| 13707 | } |
| 13708 | |
| 13709 | if (rcu_lock) { |
| 13710 | verbose(env, "nested rcu read lock (kernel function %s)\n", func_name); |
| 13711 | return -EINVAL; |
| 13712 | } else if (rcu_unlock) { |
| 13713 | bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, clear_mask, ({ |
| 13714 | if (reg->type & MEM_RCU) { |
| 13715 | reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL); |
| 13716 | reg->type |= PTR_UNTRUSTED; |
| 13717 | } |
| 13718 | })); |
| 13719 | env->cur_state->active_rcu_lock = false; |
| 13720 | } else if (sleepable) { |
| 13721 | verbose(env, "kernel func %s is sleepable within rcu_read_lock region\n", func_name); |
| 13722 | return -EACCES; |
| 13723 | } |
| 13724 | } else if (rcu_lock) { |
| 13725 | env->cur_state->active_rcu_lock = true; |
| 13726 | } else if (rcu_unlock) { |
| 13727 | verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name); |
| 13728 | return -EINVAL; |
| 13729 | } |
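| | |
| | /* BPF-side sketch of the rcu_unlock conversion above; task->real_parent is |
| |  * used as an example of an RCU-tagged field: |
| |  * |
| |  *   bpf_rcu_read_lock(); |
| |  *   parent = task->real_parent;   // MEM_RCU while the lock is held |
| |  *   bpf_rcu_read_unlock();        // parent becomes PTR_UNTRUSTED here and can |
| |  *                                 // no longer be passed where a trusted or |
| |  *                                 // rcu pointer is required |
| |  */ |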
| 13730 | |
| 13731 | if (env->cur_state->active_preempt_locks) { |
| 13732 | if (preempt_disable) { |
| 13733 | env->cur_state->active_preempt_locks++; |
| 13734 | } else if (preempt_enable) { |
| 13735 | env->cur_state->active_preempt_locks--; |
| 13736 | } else if (sleepable) { |
| 13737 | verbose(env, "kernel func %s is sleepable within non-preemptible region\n", func_name); |
| 13738 | return -EACCES; |
| 13739 | } |
| 13740 | } else if (preempt_disable) { |
| 13741 | env->cur_state->active_preempt_locks++; |
| 13742 | } else if (preempt_enable) { |
| 13743 | verbose(env, "unmatched attempt to enable preemption (kernel function %s)\n", func_name); |
| 13744 | return -EINVAL; |
| 13745 | } |
| 13746 | |
| 13747 | if (env->cur_state->active_irq_id && sleepable) { |
| 13748 | verbose(env, "kernel func %s is sleepable within IRQ-disabled region\n", func_name); |
| 13749 | return -EACCES; |
| 13750 | } |
| 13751 | |
| 13752 | /* In the case of a release kfunc, we get the register number of the |
| 13753 |  * refcounted PTR_TO_BTF_ID in bpf_kfunc_call_arg_meta; do the release now. |
| 13754 |  */ |
| 13755 | if (meta.release_regno) { |
| 13756 | err = release_reference(env, regs[meta.release_regno].ref_obj_id); |
| 13757 | if (err) { |
| 13758 | verbose(env, "kfunc %s#%d reference has not been acquired before\n", |
| 13759 | func_name, meta.func_id); |
| 13760 | return err; |
| 13761 | } |
| 13762 | } |
| 13763 | |
| 13764 | if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || |
| 13765 | meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || |
| 13766 | meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { |
| 13767 | release_ref_obj_id = regs[BPF_REG_2].ref_obj_id; |
| 13768 | insn_aux->insert_off = regs[BPF_REG_2].off; |
| 13769 | insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id); |
| 13770 | err = ref_convert_owning_non_owning(env, release_ref_obj_id); |
| 13771 | if (err) { |
| 13772 | verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n", |
| 13773 | func_name, meta.func_id); |
| 13774 | return err; |
| 13775 | } |
| 13776 | |
| 13777 | err = release_reference(env, release_ref_obj_id); |
| 13778 | if (err) { |
| 13779 | verbose(env, "kfunc %s#%d reference has not been acquired before\n", |
| 13780 | func_name, meta.func_id); |
| 13781 | return err; |
| 13782 | } |
| 13783 | } |
| 13784 | |
| 13785 | if (meta.func_id == special_kfunc_list[KF_bpf_throw]) { |
| 13786 | if (!bpf_jit_supports_exceptions()) { |
| 13787 | verbose(env, "JIT does not support calling kfunc %s#%d\n", |
| 13788 | func_name, meta.func_id); |
| 13789 | return -ENOTSUPP; |
| 13790 | } |
| 13791 | env->seen_exception = true; |
| 13792 | |
| 13793 | /* In the case of the default callback, the cookie value passed |
| 13794 | * to bpf_throw becomes the return value of the program. |
| 13795 | */ |
| 13796 | if (!env->exception_callback_subprog) { |
| 13797 | err = check_return_code(env, BPF_REG_1, "R1"); |
| 13798 | if (err < 0) |
| 13799 | return err; |
| 13800 | } |
| 13801 | } |
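| | |
| | /* BPF-side sketch of the default-callback case; program shape is |
| |  * illustrative only: |
| |  * |
| |  *   SEC("tc") |
| |  *   int prog(struct __sk_buff *ctx) |
| |  *   { |
| |  *           if (bad_condition) |
| |  *                   bpf_throw(TC_ACT_SHOT);   // cookie becomes the prog's |
| |  *                                             // retval, hence the R1 check |
| |  *           return TC_ACT_OK; |
| |  *   } |
| |  */ |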
| 13802 | |
| 13803 | for (i = 0; i < CALLER_SAVED_REGS; i++) |
| 13804 | mark_reg_not_init(env, regs, caller_saved[i]); |
| 13805 | |
| 13806 | /* Check return type */ |
| 13807 | t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL); |
| 13808 | |
| 13809 | if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) { |
| 13810 | /* Only exceptions are bpf_obj_new_impl, bpf_percpu_obj_new_impl and bpf_refcount_acquire_impl */ |
| 13811 | if (meta.btf != btf_vmlinux || |
| 13812 | (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] && |
| 13813 | meta.func_id != special_kfunc_list[KF_bpf_percpu_obj_new_impl] && |
| 13814 | meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) { |
| 13815 | verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n"); |
| 13816 | return -EINVAL; |
| 13817 | } |
| 13818 | } |
| 13819 | |
| 13820 | if (btf_type_is_scalar(t)) { |
| 13821 | mark_reg_unknown(env, regs, BPF_REG_0); |
| 13822 | if (meta.btf == btf_vmlinux && (meta.func_id == special_kfunc_list[KF_bpf_res_spin_lock] || |
| 13823 | meta.func_id == special_kfunc_list[KF_bpf_res_spin_lock_irqsave])) |
| 13824 | __mark_reg_const_zero(env, &regs[BPF_REG_0]); |
| 13825 | mark_btf_func_reg_size(env, BPF_REG_0, t->size); |
| 13826 | } else if (btf_type_is_ptr(t)) { |
| 13827 | ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id); |
| 13828 | err = check_special_kfunc(env, &meta, regs, insn_aux, ptr_type, desc_btf); |
| 13829 | if (err) { |
| 13830 | if (err < 0) |
| 13831 | return err; |
| 13832 | } else if (btf_type_is_void(ptr_type)) { |
| 13833 | /* kfunc returning 'void *' is equivalent to returning scalar */ |
| 13834 | mark_reg_unknown(env, regs, BPF_REG_0); |
| 13835 | } else if (!__btf_type_is_struct(ptr_type)) { |
| 13836 | if (!meta.r0_size) { |
| 13837 | __u32 sz; |
| 13838 | |
| 13839 | if (!IS_ERR(btf_resolve_size(desc_btf, ptr_type, &sz))) { |
| 13840 | meta.r0_size = sz; |
| 13841 | meta.r0_rdonly = true; |
| 13842 | } |
| 13843 | } |
| 13844 | if (!meta.r0_size) { |
| 13845 | ptr_type_name = btf_name_by_offset(desc_btf, |
| 13846 | ptr_type->name_off); |
| 13847 | verbose(env, |
| 13848 | "kernel function %s returns pointer type %s %s is not supported\n", |
| 13849 | func_name, |
| 13850 | btf_type_str(ptr_type), |
| 13851 | ptr_type_name); |
| 13852 | return -EINVAL; |
| 13853 | } |
| 13854 | |
| 13855 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 13856 | regs[BPF_REG_0].type = PTR_TO_MEM; |
| 13857 | regs[BPF_REG_0].mem_size = meta.r0_size; |
| 13858 | |
| 13859 | if (meta.r0_rdonly) |
| 13860 | regs[BPF_REG_0].type |= MEM_RDONLY; |
| 13861 | |
| 13862 | /* Ensures we don't access the memory after a release_reference() */ |
| 13863 | if (meta.ref_obj_id) |
| 13864 | regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; |
| 13865 | } else { |
| 13866 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 13867 | regs[BPF_REG_0].btf = desc_btf; |
| 13868 | regs[BPF_REG_0].type = PTR_TO_BTF_ID; |
| 13869 | regs[BPF_REG_0].btf_id = ptr_type_id; |
| 13870 | |
| 13871 | if (meta.func_id == special_kfunc_list[KF_bpf_get_kmem_cache]) |
| 13872 | regs[BPF_REG_0].type |= PTR_UNTRUSTED; |
| 13873 | |
| 13874 | if (is_iter_next_kfunc(&meta)) { |
| 13875 | struct bpf_reg_state *cur_iter; |
| 13876 | |
| 13877 | cur_iter = get_iter_from_state(env->cur_state, &meta); |
| 13878 | |
| 13879 | if (cur_iter->type & MEM_RCU) /* KF_RCU_PROTECTED */ |
| 13880 | regs[BPF_REG_0].type |= MEM_RCU; |
| 13881 | else |
| 13882 | regs[BPF_REG_0].type |= PTR_TRUSTED; |
| 13883 | } |
| 13884 | } |
| 13885 | |
| 13886 | if (is_kfunc_ret_null(&meta)) { |
| 13887 | regs[BPF_REG_0].type |= PTR_MAYBE_NULL; |
| 13888 | /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */ |
| 13889 | regs[BPF_REG_0].id = ++env->id_gen; |
| 13890 | } |
| 13891 | mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *)); |
| 13892 | if (is_kfunc_acquire(&meta)) { |
| 13893 | int id = acquire_reference(env, insn_idx); |
| 13894 | |
| 13895 | if (id < 0) |
| 13896 | return id; |
| 13897 | if (is_kfunc_ret_null(&meta)) |
| 13898 | regs[BPF_REG_0].id = id; |
| 13899 | regs[BPF_REG_0].ref_obj_id = id; |
| 13900 | } else if (is_rbtree_node_type(ptr_type) || is_list_node_type(ptr_type)) { |
| 13901 | ref_set_non_owning(env, &regs[BPF_REG_0]); |
| 13902 | } |
| 13903 | |
| 13904 | if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id) |
| 13905 | regs[BPF_REG_0].id = ++env->id_gen; |
| 13906 | } else if (btf_type_is_void(t)) { |
| 13907 | if (meta.btf == btf_vmlinux) { |
| 13908 | if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || |
| 13909 | meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) { |
| 13910 | insn_aux->kptr_struct_meta = |
| 13911 | btf_find_struct_meta(meta.arg_btf, |
| 13912 | meta.arg_btf_id); |
| 13913 | } |
| 13914 | } |
| 13915 | } |
| 13916 | |
| 13917 | nargs = btf_type_vlen(meta.func_proto); |
| 13918 | args = (const struct btf_param *)(meta.func_proto + 1); |
| 13919 | for (i = 0; i < nargs; i++) { |
| 13920 | u32 regno = i + 1; |
| 13921 | |
| 13922 | t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL); |
| 13923 | if (btf_type_is_ptr(t)) |
| 13924 | mark_btf_func_reg_size(env, regno, sizeof(void *)); |
| 13925 | else |
| 13926 | /* scalar. ensured by btf_check_kfunc_arg_match() */ |
| 13927 | mark_btf_func_reg_size(env, regno, t->size); |
| 13928 | } |
| 13929 | |
| 13930 | if (is_iter_next_kfunc(&meta)) { |
| 13931 | err = process_iter_next_call(env, insn_idx, &meta); |
| 13932 | if (err) |
| 13933 | return err; |
| 13934 | } |
| 13935 | |
| 13936 | return 0; |
| 13937 | } |
| 13938 | |
| 13939 | static bool check_reg_sane_offset(struct bpf_verifier_env *env, |
| 13940 | const struct bpf_reg_state *reg, |
| 13941 | enum bpf_reg_type type) |
| 13942 | { |
| 13943 | bool known = tnum_is_const(reg->var_off); |
| 13944 | s64 val = reg->var_off.value; |
| 13945 | s64 smin = reg->smin_value; |
| 13946 | |
| 13947 | if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { |
| 13948 | verbose(env, "math between %s pointer and %lld is not allowed\n", |
| 13949 | reg_type_str(env, type), val); |
| 13950 | return false; |
| 13951 | } |
| 13952 | |
| 13953 | if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { |
| 13954 | verbose(env, "%s pointer offset %d is not allowed\n", |
| 13955 | reg_type_str(env, type), reg->off); |
| 13956 | return false; |
| 13957 | } |
| 13958 | |
| 13959 | if (smin == S64_MIN) { |
| 13960 | verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", |
| 13961 | reg_type_str(env, type)); |
| 13962 | return false; |
| 13963 | } |
| 13964 | |
| 13965 | if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { |
| 13966 | verbose(env, "value %lld makes %s pointer be out of bounds\n", |
| 13967 | smin, reg_type_str(env, type)); |
| 13968 | return false; |
| 13969 | } |
| 13970 | |
| 13971 | return true; |
| 13972 | } |
| 13973 | |
| 13974 | enum { |
| 13975 | REASON_BOUNDS = -1, |
| 13976 | REASON_TYPE = -2, |
| 13977 | REASON_PATHS = -3, |
| 13978 | REASON_LIMIT = -4, |
| 13979 | REASON_STACK = -5, |
| 13980 | }; |
| 13981 | |
| 13982 | static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, |
| 13983 | u32 *alu_limit, bool mask_to_left) |
| 13984 | { |
| 13985 | u32 max = 0, ptr_limit = 0; |
| 13986 | |
| 13987 | switch (ptr_reg->type) { |
| 13988 | case PTR_TO_STACK: |
| 13989 | /* Offset 0 is out-of-bounds, but acceptable start for the |
| 13990 | * left direction, see BPF_REG_FP. Also, unknown scalar |
| 13991 | * offset where we would need to deal with min/max bounds is |
| 13992 | * currently prohibited for unprivileged. |
| 13993 | */ |
| 13994 | max = MAX_BPF_STACK + mask_to_left; |
| 13995 | ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); |
| 13996 | break; |
| 13997 | case PTR_TO_MAP_VALUE: |
| 13998 | max = ptr_reg->map_ptr->value_size; |
| 13999 | ptr_limit = (mask_to_left ? |
| 14000 | ptr_reg->smin_value : |
| 14001 | ptr_reg->umax_value) + ptr_reg->off; |
| 14002 | break; |
| 14003 | default: |
| 14004 | return REASON_TYPE; |
| 14005 | } |
| 14006 | |
| 14007 | if (ptr_limit >= max) |
| 14008 | return REASON_LIMIT; |
| 14009 | *alu_limit = ptr_limit; |
| 14010 | return 0; |
| 14011 | } |
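| | |
| | /* Worked examples for the computation above (illustrative values): |
| |  * |
| |  *   PTR_TO_STACK with var_off.value == 0 and off == -16: |
| |  *           ptr_limit = -(0 + -16) = 16, which is below the stack maximum, |
| |  *           so *alu_limit becomes 16. |
| |  * |
| |  *   PTR_TO_MAP_VALUE with off == 0, umax_value == 8, value_size == 64 and |
| |  *   mask_to_left == false: |
| |  *           ptr_limit = 8 + 0 = 8 < 64, so *alu_limit becomes 8. |
| |  */ |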
| 14012 | |
| 14013 | static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, |
| 14014 | const struct bpf_insn *insn) |
| 14015 | { |
| 14016 | return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; |
| 14017 | } |
| 14018 | |
| 14019 | static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, |
| 14020 | u32 alu_state, u32 alu_limit) |
| 14021 | { |
| 14022 | /* If we arrived here from different branches with different |
| 14023 | * state or limits to sanitize, then this won't work. |
| 14024 | */ |
| 14025 | if (aux->alu_state && |
| 14026 | (aux->alu_state != alu_state || |
| 14027 | aux->alu_limit != alu_limit)) |
| 14028 | return REASON_PATHS; |
| 14029 | |
| 14030 | /* Corresponding fixup done in do_misc_fixups(). */ |
| 14031 | aux->alu_state = alu_state; |
| 14032 | aux->alu_limit = alu_limit; |
| 14033 | return 0; |
| 14034 | } |
| 14035 | |
| 14036 | static int sanitize_val_alu(struct bpf_verifier_env *env, |
| 14037 | struct bpf_insn *insn) |
| 14038 | { |
| 14039 | struct bpf_insn_aux_data *aux = cur_aux(env); |
| 14040 | |
| 14041 | if (can_skip_alu_sanitation(env, insn)) |
| 14042 | return 0; |
| 14043 | |
| 14044 | return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); |
| 14045 | } |
| 14046 | |
| 14047 | static bool sanitize_needed(u8 opcode) |
| 14048 | { |
| 14049 | return opcode == BPF_ADD || opcode == BPF_SUB; |
| 14050 | } |
| 14051 | |
| 14052 | struct bpf_sanitize_info { |
| 14053 | struct bpf_insn_aux_data aux; |
| 14054 | bool mask_to_left; |
| 14055 | }; |
| 14056 | |
| 14057 | static struct bpf_verifier_state * |
| 14058 | sanitize_speculative_path(struct bpf_verifier_env *env, |
| 14059 | const struct bpf_insn *insn, |
| 14060 | u32 next_idx, u32 curr_idx) |
| 14061 | { |
| 14062 | struct bpf_verifier_state *branch; |
| 14063 | struct bpf_reg_state *regs; |
| 14064 | |
| 14065 | branch = push_stack(env, next_idx, curr_idx, true); |
| 14066 | if (branch && insn) { |
| 14067 | regs = branch->frame[branch->curframe]->regs; |
| 14068 | if (BPF_SRC(insn->code) == BPF_K) { |
| 14069 | mark_reg_unknown(env, regs, insn->dst_reg); |
| 14070 | } else if (BPF_SRC(insn->code) == BPF_X) { |
| 14071 | mark_reg_unknown(env, regs, insn->dst_reg); |
| 14072 | mark_reg_unknown(env, regs, insn->src_reg); |
| 14073 | } |
| 14074 | } |
| 14075 | return branch; |
| 14076 | } |
| 14077 | |
| 14078 | static int sanitize_ptr_alu(struct bpf_verifier_env *env, |
| 14079 | struct bpf_insn *insn, |
| 14080 | const struct bpf_reg_state *ptr_reg, |
| 14081 | const struct bpf_reg_state *off_reg, |
| 14082 | struct bpf_reg_state *dst_reg, |
| 14083 | struct bpf_sanitize_info *info, |
| 14084 | const bool commit_window) |
| 14085 | { |
| 14086 | struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; |
| 14087 | struct bpf_verifier_state *vstate = env->cur_state; |
| 14088 | bool off_is_imm = tnum_is_const(off_reg->var_off); |
| 14089 | bool off_is_neg = off_reg->smin_value < 0; |
| 14090 | bool ptr_is_dst_reg = ptr_reg == dst_reg; |
| 14091 | u8 opcode = BPF_OP(insn->code); |
| 14092 | u32 alu_state, alu_limit; |
| 14093 | struct bpf_reg_state tmp; |
| 14094 | bool ret; |
| 14095 | int err; |
| 14096 | |
| 14097 | if (can_skip_alu_sanitation(env, insn)) |
| 14098 | return 0; |
| 14099 | |
| 14100 | /* We already marked aux for masking from non-speculative |
| 14101 | * paths, thus we got here in the first place. We only care |
| 14102 | * to explore bad access from here. |
| 14103 | */ |
| 14104 | if (vstate->speculative) |
| 14105 | goto do_sim; |
| 14106 | |
| 14107 | if (!commit_window) { |
| 14108 | if (!tnum_is_const(off_reg->var_off) && |
| 14109 | (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) |
| 14110 | return REASON_BOUNDS; |
| 14111 | |
| 14112 | info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || |
| 14113 | (opcode == BPF_SUB && !off_is_neg); |
| 14114 | } |
| 14115 | |
| 14116 | err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); |
| 14117 | if (err < 0) |
| 14118 | return err; |
| 14119 | |
| 14120 | if (commit_window) { |
| 14121 | /* In commit phase we narrow the masking window based on |
| 14122 | * the observed pointer move after the simulated operation. |
| 14123 | */ |
| 14124 | alu_state = info->aux.alu_state; |
| 14125 | alu_limit = abs(info->aux.alu_limit - alu_limit); |
| 14126 | } else { |
| 14127 | alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; |
| 14128 | alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; |
| 14129 | alu_state |= ptr_is_dst_reg ? |
| 14130 | BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; |
| 14131 | |
| 14132 | /* Limit pruning on unknown scalars to enable deep search for |
| 14133 | * potential masking differences from other program paths. |
| 14134 | */ |
| 14135 | if (!off_is_imm) |
| 14136 | env->explore_alu_limits = true; |
| 14137 | } |
| 14138 | |
| 14139 | err = update_alu_sanitation_state(aux, alu_state, alu_limit); |
| 14140 | if (err < 0) |
| 14141 | return err; |
| 14142 | do_sim: |
| 14143 | /* If we're in commit phase, we're done here given we already |
| 14144 | * pushed the truncated dst_reg into the speculative verification |
| 14145 | * stack. |
| 14146 | * |
| 14147 | * Also, when register is a known constant, we rewrite register-based |
| 14148 | * operation to immediate-based, and thus do not need masking (and as |
| 14149 | * a consequence, do not need to simulate the zero-truncation either). |
| 14150 | */ |
| 14151 | if (commit_window || off_is_imm) |
| 14152 | return 0; |
| 14153 | |
| 14154 | /* Simulate and find potential out-of-bounds access under |
| 14155 | * speculative execution from truncation as a result of |
| 14156 | * masking when off was not within expected range. If off |
| 14157 | * sits in dst, then we temporarily need to move ptr there |
| 14158 | * to simulate dst (== 0) +/-= ptr. Needed, for example, |
| 14159 | * for cases where we use K-based arithmetic in one direction |
| 14160 | * and truncated reg-based in the other in order to explore |
| 14161 | * bad access. |
| 14162 | */ |
| 14163 | if (!ptr_is_dst_reg) { |
| 14164 | tmp = *dst_reg; |
| 14165 | copy_register_state(dst_reg, ptr_reg); |
| 14166 | } |
| 14167 | ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, |
| 14168 | env->insn_idx); |
| 14169 | if (!ptr_is_dst_reg && ret) |
| 14170 | *dst_reg = tmp; |
| 14171 | return !ret ? REASON_STACK : 0; |
| 14172 | } |
| 14173 | |
| 14174 | static void sanitize_mark_insn_seen(struct bpf_verifier_env *env) |
| 14175 | { |
| 14176 | struct bpf_verifier_state *vstate = env->cur_state; |
| 14177 | |
| 14178 | /* If we simulate paths under speculation, we don't update the |
| 14179 | * insn as 'seen' such that when we verify unreachable paths in |
| 14180 | * the non-speculative domain, sanitize_dead_code() can still |
| 14181 | * rewrite/sanitize them. |
| 14182 | */ |
| 14183 | if (!vstate->speculative) |
| 14184 | env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; |
| 14185 | } |
| 14186 | |
| 14187 | static int sanitize_err(struct bpf_verifier_env *env, |
| 14188 | const struct bpf_insn *insn, int reason, |
| 14189 | const struct bpf_reg_state *off_reg, |
| 14190 | const struct bpf_reg_state *dst_reg) |
| 14191 | { |
| 14192 | static const char *err = "pointer arithmetic with it prohibited for !root"; |
| 14193 | const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub"; |
| 14194 | u32 dst = insn->dst_reg, src = insn->src_reg; |
| 14195 | |
| 14196 | switch (reason) { |
| 14197 | case REASON_BOUNDS: |
| 14198 | verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n", |
| 14199 | off_reg == dst_reg ? dst : src, err); |
| 14200 | break; |
| 14201 | case REASON_TYPE: |
| 14202 | verbose(env, "R%d has pointer with unsupported alu operation, %s\n", |
| 14203 | off_reg == dst_reg ? src : dst, err); |
| 14204 | break; |
| 14205 | case REASON_PATHS: |
| 14206 | verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n", |
| 14207 | dst, op, err); |
| 14208 | break; |
| 14209 | case REASON_LIMIT: |
| 14210 | verbose(env, "R%d tried to %s beyond pointer bounds, %s\n", |
| 14211 | dst, op, err); |
| 14212 | break; |
| 14213 | case REASON_STACK: |
| 14214 | verbose(env, "R%d could not be pushed for speculative verification, %s\n", |
| 14215 | dst, err); |
| 14216 | break; |
| 14217 | default: |
| 14218 | verbose(env, "verifier internal error: unknown reason (%d)\n", |
| 14219 | reason); |
| 14220 | break; |
| 14221 | } |
| 14222 | |
| 14223 | return -EACCES; |
| 14224 | } |
| 14225 | |
| 14226 | /* check that stack access falls within stack limits and that 'reg' doesn't |
| 14227 | * have a variable offset. |
| 14228 | * |
| 14229 | * Variable offset is prohibited for unprivileged mode for simplicity since it |
| 14230 | * requires corresponding support in Spectre masking for stack ALU. See also |
| 14231 | * retrieve_ptr_limit(). |
| 14232 |  * |
| 14234 | * 'off' includes 'reg->off'. |
| 14235 | */ |
| 14236 | static int check_stack_access_for_ptr_arithmetic( |
| 14237 | struct bpf_verifier_env *env, |
| 14238 | int regno, |
| 14239 | const struct bpf_reg_state *reg, |
| 14240 | int off) |
| 14241 | { |
| 14242 | if (!tnum_is_const(reg->var_off)) { |
| 14243 | char tn_buf[48]; |
| 14244 | |
| 14245 | tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); |
| 14246 | verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n", |
| 14247 | regno, tn_buf, off); |
| 14248 | return -EACCES; |
| 14249 | } |
| 14250 | |
| 14251 | if (off >= 0 || off < -MAX_BPF_STACK) { |
| 14252 | verbose(env, "R%d stack pointer arithmetic goes out of range, " |
| 14253 | "prohibited for !root; off=%d\n", regno, off); |
| 14254 | return -EACCES; |
| 14255 | } |
| 14256 | |
| 14257 | return 0; |
| 14258 | } |
| 14259 | |
| 14260 | static int sanitize_check_bounds(struct bpf_verifier_env *env, |
| 14261 | const struct bpf_insn *insn, |
| 14262 | const struct bpf_reg_state *dst_reg) |
| 14263 | { |
| 14264 | u32 dst = insn->dst_reg; |
| 14265 | |
| 14266 | /* For unprivileged we require that resulting offset must be in bounds |
| 14267 | * in order to be able to sanitize access later on. |
| 14268 | */ |
| 14269 | if (env->bypass_spec_v1) |
| 14270 | return 0; |
| 14271 | |
| 14272 | switch (dst_reg->type) { |
| 14273 | case PTR_TO_STACK: |
| 14274 | if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg, |
| 14275 | dst_reg->off + dst_reg->var_off.value)) |
| 14276 | return -EACCES; |
| 14277 | break; |
| 14278 | case PTR_TO_MAP_VALUE: |
| 14279 | if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) { |
| 14280 | verbose(env, "R%d pointer arithmetic of map value goes out of range, " |
| 14281 | "prohibited for !root\n", dst); |
| 14282 | return -EACCES; |
| 14283 | } |
| 14284 | break; |
| 14285 | default: |
| 14286 | break; |
| 14287 | } |
| 14288 | |
| 14289 | return 0; |
| 14290 | } |
| 14291 | |
| 14292 | /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. |
| 14293 | * Caller should also handle BPF_MOV case separately. |
| 14294 | * If we return -EACCES, caller may want to try again treating pointer as a |
| 14295 | * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. |
| 14296 | */ |
| 14297 | static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, |
| 14298 | struct bpf_insn *insn, |
| 14299 | const struct bpf_reg_state *ptr_reg, |
| 14300 | const struct bpf_reg_state *off_reg) |
| 14301 | { |
| 14302 | struct bpf_verifier_state *vstate = env->cur_state; |
| 14303 | struct bpf_func_state *state = vstate->frame[vstate->curframe]; |
| 14304 | struct bpf_reg_state *regs = state->regs, *dst_reg; |
| 14305 | bool known = tnum_is_const(off_reg->var_off); |
| 14306 | s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, |
| 14307 | smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; |
| 14308 | u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, |
| 14309 | umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; |
| 14310 | struct bpf_sanitize_info info = {}; |
| 14311 | u8 opcode = BPF_OP(insn->code); |
| 14312 | u32 dst = insn->dst_reg; |
| 14313 | int ret; |
| 14314 | |
| 14315 | 	dst_reg = &regs[dst];
| 14316 | |
| 14317 | if ((known && (smin_val != smax_val || umin_val != umax_val)) || |
| 14318 | smin_val > smax_val || umin_val > umax_val) { |
| 14319 | /* Taint dst register if offset had invalid bounds derived from |
| 14320 | * e.g. dead branches. |
| 14321 | */ |
| 14322 | __mark_reg_unknown(env, dst_reg); |
| 14323 | return 0; |
| 14324 | } |
| 14325 | |
| 14326 | if (BPF_CLASS(insn->code) != BPF_ALU64) { |
| 14327 | /* 32-bit ALU ops on pointers produce (meaningless) scalars */ |
| 14328 | if (opcode == BPF_SUB && env->allow_ptr_leaks) { |
| 14329 | __mark_reg_unknown(env, dst_reg); |
| 14330 | return 0; |
| 14331 | } |
| 14332 | |
| 14333 | verbose(env, |
| 14334 | "R%d 32-bit pointer arithmetic prohibited\n", |
| 14335 | dst); |
| 14336 | return -EACCES; |
| 14337 | } |
| 14338 | |
| 14339 | if (ptr_reg->type & PTR_MAYBE_NULL) { |
| 14340 | verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", |
| 14341 | dst, reg_type_str(env, ptr_reg->type)); |
| 14342 | return -EACCES; |
| 14343 | } |
| 14344 | |
| 14345 | switch (base_type(ptr_reg->type)) { |
| 14346 | case PTR_TO_CTX: |
| 14347 | case PTR_TO_MAP_VALUE: |
| 14348 | case PTR_TO_MAP_KEY: |
| 14349 | case PTR_TO_STACK: |
| 14350 | case PTR_TO_PACKET_META: |
| 14351 | case PTR_TO_PACKET: |
| 14352 | case PTR_TO_TP_BUFFER: |
| 14353 | case PTR_TO_BTF_ID: |
| 14354 | case PTR_TO_MEM: |
| 14355 | case PTR_TO_BUF: |
| 14356 | case PTR_TO_FUNC: |
| 14357 | case CONST_PTR_TO_DYNPTR: |
| 14358 | break; |
| 14359 | case PTR_TO_FLOW_KEYS: |
| 14360 | if (known) |
| 14361 | break; |
| 14362 | fallthrough; |
| 14363 | case CONST_PTR_TO_MAP: |
| 14364 | /* smin_val represents the known value */ |
| 14365 | if (known && smin_val == 0 && opcode == BPF_ADD) |
| 14366 | break; |
| 14367 | fallthrough; |
| 14368 | default: |
| 14369 | verbose(env, "R%d pointer arithmetic on %s prohibited\n", |
| 14370 | dst, reg_type_str(env, ptr_reg->type)); |
| 14371 | return -EACCES; |
| 14372 | } |
| 14373 | |
| 14374 | /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. |
| 14375 | * The id may be overwritten later if we create a new variable offset. |
| 14376 | */ |
| 14377 | dst_reg->type = ptr_reg->type; |
| 14378 | dst_reg->id = ptr_reg->id; |
| 14379 | |
| 14380 | if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || |
| 14381 | !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) |
| 14382 | return -EINVAL; |
| 14383 | |
| 14384 | /* pointer types do not carry 32-bit bounds at the moment. */ |
| 14385 | __mark_reg32_unbounded(dst_reg); |
| 14386 | |
| 14387 | if (sanitize_needed(opcode)) { |
| 14388 | ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, |
| 14389 | &info, false); |
| 14390 | if (ret < 0) |
| 14391 | return sanitize_err(env, insn, ret, off_reg, dst_reg); |
| 14392 | } |
| 14393 | |
| 14394 | switch (opcode) { |
| 14395 | case BPF_ADD: |
| 14396 | /* We can take a fixed offset as long as it doesn't overflow |
| 14397 | * the s32 'off' field |
| 14398 | */ |
| 14399 | if (known && (ptr_reg->off + smin_val == |
| 14400 | (s64)(s32)(ptr_reg->off + smin_val))) { |
| 14401 | /* pointer += K. Accumulate it into fixed offset */ |
| 14402 | dst_reg->smin_value = smin_ptr; |
| 14403 | dst_reg->smax_value = smax_ptr; |
| 14404 | dst_reg->umin_value = umin_ptr; |
| 14405 | dst_reg->umax_value = umax_ptr; |
| 14406 | dst_reg->var_off = ptr_reg->var_off; |
| 14407 | dst_reg->off = ptr_reg->off + smin_val; |
| 14408 | dst_reg->raw = ptr_reg->raw; |
| 14409 | break; |
| 14410 | } |
| 14411 | /* A new variable offset is created. Note that off_reg->off |
| 14412 | * == 0, since it's a scalar. |
| 14413 | 		 * dst_reg gets the pointer type and since an unknown scalar
| 14414 | 		 * value was added to the pointer, give it a new 'id'
| 14415 | 		 * if it's a PTR_TO_PACKET.
| 14416 | * this creates a new 'base' pointer, off_reg (variable) gets |
| 14417 | * added into the variable offset, and we copy the fixed offset |
| 14418 | * from ptr_reg. |
| 14419 | */ |
| 14420 | if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) || |
| 14421 | check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) { |
| 14422 | dst_reg->smin_value = S64_MIN; |
| 14423 | dst_reg->smax_value = S64_MAX; |
| 14424 | } |
| 14425 | if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) || |
| 14426 | check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) { |
| 14427 | dst_reg->umin_value = 0; |
| 14428 | dst_reg->umax_value = U64_MAX; |
| 14429 | } |
| 14430 | dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); |
| 14431 | dst_reg->off = ptr_reg->off; |
| 14432 | dst_reg->raw = ptr_reg->raw; |
| 14433 | if (reg_is_pkt_pointer(ptr_reg)) { |
| 14434 | dst_reg->id = ++env->id_gen; |
| 14435 | /* something was added to pkt_ptr, set range to zero */ |
| 14436 | memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); |
| 14437 | } |
| 14438 | break; |
| 14439 | case BPF_SUB: |
| 14440 | if (dst_reg == off_reg) { |
| 14441 | /* scalar -= pointer. Creates an unknown scalar */ |
| 14442 | verbose(env, "R%d tried to subtract pointer from scalar\n", |
| 14443 | dst); |
| 14444 | return -EACCES; |
| 14445 | } |
| 14446 | /* We don't allow subtraction from FP, because (according to |
| 14447 | 		 * the test_verifier.c test "invalid fp arithmetic") JITs might not
| 14448 | * be able to deal with it. |
| 14449 | */ |
| 14450 | if (ptr_reg->type == PTR_TO_STACK) { |
| 14451 | verbose(env, "R%d subtraction from stack pointer prohibited\n", |
| 14452 | dst); |
| 14453 | return -EACCES; |
| 14454 | } |
| 14455 | if (known && (ptr_reg->off - smin_val == |
| 14456 | (s64)(s32)(ptr_reg->off - smin_val))) { |
| 14457 | /* pointer -= K. Subtract it from fixed offset */ |
| 14458 | dst_reg->smin_value = smin_ptr; |
| 14459 | dst_reg->smax_value = smax_ptr; |
| 14460 | dst_reg->umin_value = umin_ptr; |
| 14461 | dst_reg->umax_value = umax_ptr; |
| 14462 | dst_reg->var_off = ptr_reg->var_off; |
| 14463 | dst_reg->id = ptr_reg->id; |
| 14464 | dst_reg->off = ptr_reg->off - smin_val; |
| 14465 | dst_reg->raw = ptr_reg->raw; |
| 14466 | break; |
| 14467 | } |
| 14468 | /* A new variable offset is created. If the subtrahend is known |
| 14469 | * nonnegative, then any reg->range we had before is still good. |
| 14470 | */ |
| 14471 | if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) || |
| 14472 | check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) { |
| 14473 | /* Overflow possible, we know nothing */ |
| 14474 | dst_reg->smin_value = S64_MIN; |
| 14475 | dst_reg->smax_value = S64_MAX; |
| 14476 | } |
| 14477 | if (umin_ptr < umax_val) { |
| 14478 | /* Overflow possible, we know nothing */ |
| 14479 | dst_reg->umin_value = 0; |
| 14480 | dst_reg->umax_value = U64_MAX; |
| 14481 | } else { |
| 14482 | /* Cannot overflow (as long as bounds are consistent) */ |
| 14483 | dst_reg->umin_value = umin_ptr - umax_val; |
| 14484 | dst_reg->umax_value = umax_ptr - umin_val; |
| 14485 | } |
| 14486 | dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); |
| 14487 | dst_reg->off = ptr_reg->off; |
| 14488 | dst_reg->raw = ptr_reg->raw; |
| 14489 | if (reg_is_pkt_pointer(ptr_reg)) { |
| 14490 | dst_reg->id = ++env->id_gen; |
| 14491 | 			/* a possibly negative value was subtracted, so pkt_ptr may have moved forward; clear the range */
| 14492 | if (smin_val < 0) |
| 14493 | memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); |
| 14494 | } |
| 14495 | break; |
| 14496 | case BPF_AND: |
| 14497 | case BPF_OR: |
| 14498 | case BPF_XOR: |
| 14499 | /* bitwise ops on pointers are troublesome, prohibit. */ |
| 14500 | verbose(env, "R%d bitwise operator %s on pointer prohibited\n", |
| 14501 | dst, bpf_alu_string[opcode >> 4]); |
| 14502 | return -EACCES; |
| 14503 | default: |
| 14504 | /* other operators (e.g. MUL,LSH) produce non-pointer results */ |
| 14505 | verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", |
| 14506 | dst, bpf_alu_string[opcode >> 4]); |
| 14507 | return -EACCES; |
| 14508 | } |
| 14509 | |
| 14510 | if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) |
| 14511 | return -EINVAL; |
| 14512 | reg_bounds_sync(dst_reg); |
| 14513 | if (sanitize_check_bounds(env, insn, dst_reg) < 0) |
| 14514 | return -EACCES; |
| 14515 | if (sanitize_needed(opcode)) { |
| 14516 | ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, |
| 14517 | &info, true); |
| 14518 | if (ret < 0) |
| 14519 | return sanitize_err(env, insn, ret, off_reg, dst_reg); |
| 14520 | } |
| 14521 | |
| 14522 | return 0; |
| 14523 | } |
| 14524 | |
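| | /*
| |  * Scalar bounds helpers for the individual ALU ops follow. As a sketch
| |  * with hypothetical values: adding dst in [10, 20] to src in [1, 5]
| |  * yields [11, 25]; whenever the signed or unsigned addition can
| |  * overflow, the corresponding bounds are reset to the full range.
| |  */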
| 14525 | static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, |
| 14526 | struct bpf_reg_state *src_reg) |
| 14527 | { |
| 14528 | s32 *dst_smin = &dst_reg->s32_min_value; |
| 14529 | s32 *dst_smax = &dst_reg->s32_max_value; |
| 14530 | u32 *dst_umin = &dst_reg->u32_min_value; |
| 14531 | u32 *dst_umax = &dst_reg->u32_max_value; |
| 14532 | |
| 14533 | if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) || |
| 14534 | check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) { |
| 14535 | *dst_smin = S32_MIN; |
| 14536 | *dst_smax = S32_MAX; |
| 14537 | } |
| 14538 | if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) || |
| 14539 | check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) { |
| 14540 | *dst_umin = 0; |
| 14541 | *dst_umax = U32_MAX; |
| 14542 | } |
| 14543 | } |
| 14544 | |
| 14545 | static void scalar_min_max_add(struct bpf_reg_state *dst_reg, |
| 14546 | struct bpf_reg_state *src_reg) |
| 14547 | { |
| 14548 | s64 *dst_smin = &dst_reg->smin_value; |
| 14549 | s64 *dst_smax = &dst_reg->smax_value; |
| 14550 | u64 *dst_umin = &dst_reg->umin_value; |
| 14551 | u64 *dst_umax = &dst_reg->umax_value; |
| 14552 | |
| 14553 | if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) || |
| 14554 | check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) { |
| 14555 | *dst_smin = S64_MIN; |
| 14556 | *dst_smax = S64_MAX; |
| 14557 | } |
| 14558 | if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) || |
| 14559 | check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) { |
| 14560 | *dst_umin = 0; |
| 14561 | *dst_umax = U64_MAX; |
| 14562 | } |
| 14563 | } |
| 14564 | |
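| | /*
| |  * Subtraction sketch (hypothetical values): dst in [10, 20] minus src in
| |  * [1, 5] gives signed bounds [10 - 5, 20 - 1] = [5, 19]. For the
| |  * unsigned bounds, if dst_umin < src_umax the result may wrap below
| |  * zero, so they are reset to [0, U32_MAX]/[0, U64_MAX] instead.
| |  */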
| 14565 | static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, |
| 14566 | struct bpf_reg_state *src_reg) |
| 14567 | { |
| 14568 | s32 *dst_smin = &dst_reg->s32_min_value; |
| 14569 | s32 *dst_smax = &dst_reg->s32_max_value; |
| 14570 | u32 umin_val = src_reg->u32_min_value; |
| 14571 | u32 umax_val = src_reg->u32_max_value; |
| 14572 | |
| 14573 | if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) || |
| 14574 | check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) { |
| 14575 | /* Overflow possible, we know nothing */ |
| 14576 | *dst_smin = S32_MIN; |
| 14577 | *dst_smax = S32_MAX; |
| 14578 | } |
| 14579 | if (dst_reg->u32_min_value < umax_val) { |
| 14580 | /* Overflow possible, we know nothing */ |
| 14581 | dst_reg->u32_min_value = 0; |
| 14582 | dst_reg->u32_max_value = U32_MAX; |
| 14583 | } else { |
| 14584 | /* Cannot overflow (as long as bounds are consistent) */ |
| 14585 | dst_reg->u32_min_value -= umax_val; |
| 14586 | dst_reg->u32_max_value -= umin_val; |
| 14587 | } |
| 14588 | } |
| 14589 | |
| 14590 | static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, |
| 14591 | struct bpf_reg_state *src_reg) |
| 14592 | { |
| 14593 | s64 *dst_smin = &dst_reg->smin_value; |
| 14594 | s64 *dst_smax = &dst_reg->smax_value; |
| 14595 | u64 umin_val = src_reg->umin_value; |
| 14596 | u64 umax_val = src_reg->umax_value; |
| 14597 | |
| 14598 | if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) || |
| 14599 | check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) { |
| 14600 | /* Overflow possible, we know nothing */ |
| 14601 | *dst_smin = S64_MIN; |
| 14602 | *dst_smax = S64_MAX; |
| 14603 | } |
| 14604 | if (dst_reg->umin_value < umax_val) { |
| 14605 | /* Overflow possible, we know nothing */ |
| 14606 | dst_reg->umin_value = 0; |
| 14607 | dst_reg->umax_value = U64_MAX; |
| 14608 | } else { |
| 14609 | /* Cannot overflow (as long as bounds are consistent) */ |
| 14610 | dst_reg->umin_value -= umax_val; |
| 14611 | dst_reg->umax_value -= umin_val; |
| 14612 | } |
| 14613 | } |
| 14614 | |
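| | /*
| |  * Multiplication sketch (hypothetical values): the signed bounds are the
| |  * extremes of the four corner products, e.g. dst in [-3, 2] times src in
| |  * [4, 5] gives candidates {-15, -12, 8, 10}, hence new signed bounds
| |  * [-15, 10]; any overflow among the candidates resets the bounds.
| |  */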
| 14615 | static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg, |
| 14616 | struct bpf_reg_state *src_reg) |
| 14617 | { |
| 14618 | s32 *dst_smin = &dst_reg->s32_min_value; |
| 14619 | s32 *dst_smax = &dst_reg->s32_max_value; |
| 14620 | u32 *dst_umin = &dst_reg->u32_min_value; |
| 14621 | u32 *dst_umax = &dst_reg->u32_max_value; |
| 14622 | s32 tmp_prod[4]; |
| 14623 | |
| 14624 | if (check_mul_overflow(*dst_umax, src_reg->u32_max_value, dst_umax) || |
| 14625 | check_mul_overflow(*dst_umin, src_reg->u32_min_value, dst_umin)) { |
| 14626 | /* Overflow possible, we know nothing */ |
| 14627 | *dst_umin = 0; |
| 14628 | *dst_umax = U32_MAX; |
| 14629 | } |
| 14630 | if (check_mul_overflow(*dst_smin, src_reg->s32_min_value, &tmp_prod[0]) || |
| 14631 | check_mul_overflow(*dst_smin, src_reg->s32_max_value, &tmp_prod[1]) || |
| 14632 | check_mul_overflow(*dst_smax, src_reg->s32_min_value, &tmp_prod[2]) || |
| 14633 | check_mul_overflow(*dst_smax, src_reg->s32_max_value, &tmp_prod[3])) { |
| 14634 | /* Overflow possible, we know nothing */ |
| 14635 | *dst_smin = S32_MIN; |
| 14636 | *dst_smax = S32_MAX; |
| 14637 | } else { |
| 14638 | *dst_smin = min_array(tmp_prod, 4); |
| 14639 | *dst_smax = max_array(tmp_prod, 4); |
| 14640 | } |
| 14641 | } |
| 14642 | |
| 14643 | static void scalar_min_max_mul(struct bpf_reg_state *dst_reg, |
| 14644 | struct bpf_reg_state *src_reg) |
| 14645 | { |
| 14646 | s64 *dst_smin = &dst_reg->smin_value; |
| 14647 | s64 *dst_smax = &dst_reg->smax_value; |
| 14648 | u64 *dst_umin = &dst_reg->umin_value; |
| 14649 | u64 *dst_umax = &dst_reg->umax_value; |
| 14650 | s64 tmp_prod[4]; |
| 14651 | |
| 14652 | if (check_mul_overflow(*dst_umax, src_reg->umax_value, dst_umax) || |
| 14653 | check_mul_overflow(*dst_umin, src_reg->umin_value, dst_umin)) { |
| 14654 | /* Overflow possible, we know nothing */ |
| 14655 | *dst_umin = 0; |
| 14656 | *dst_umax = U64_MAX; |
| 14657 | } |
| 14658 | if (check_mul_overflow(*dst_smin, src_reg->smin_value, &tmp_prod[0]) || |
| 14659 | check_mul_overflow(*dst_smin, src_reg->smax_value, &tmp_prod[1]) || |
| 14660 | check_mul_overflow(*dst_smax, src_reg->smin_value, &tmp_prod[2]) || |
| 14661 | check_mul_overflow(*dst_smax, src_reg->smax_value, &tmp_prod[3])) { |
| 14662 | /* Overflow possible, we know nothing */ |
| 14663 | *dst_smin = S64_MIN; |
| 14664 | *dst_smax = S64_MAX; |
| 14665 | } else { |
| 14666 | *dst_smin = min_array(tmp_prod, 4); |
| 14667 | *dst_smax = max_array(tmp_prod, 4); |
| 14668 | } |
| 14669 | } |
| 14670 | |
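| | /*
| |  * Bitwise AND sketch (hypothetical values): dst with umax 0xff AND src
| |  * with umax 0x0f can never exceed min(0xff, 0x0f) = 0x0f, while the bits
| |  * known to be set in the result tnum give the minimum (often 0).
| |  */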
| 14671 | static void scalar32_min_max_and(struct bpf_reg_state *dst_reg, |
| 14672 | struct bpf_reg_state *src_reg) |
| 14673 | { |
| 14674 | bool src_known = tnum_subreg_is_const(src_reg->var_off); |
| 14675 | bool dst_known = tnum_subreg_is_const(dst_reg->var_off); |
| 14676 | struct tnum var32_off = tnum_subreg(dst_reg->var_off); |
| 14677 | u32 umax_val = src_reg->u32_max_value; |
| 14678 | |
| 14679 | if (src_known && dst_known) { |
| 14680 | __mark_reg32_known(dst_reg, var32_off.value); |
| 14681 | return; |
| 14682 | } |
| 14683 | |
| 14684 | /* We get our minimum from the var_off, since that's inherently |
| 14685 | * bitwise. Our maximum is the minimum of the operands' maxima. |
| 14686 | */ |
| 14687 | dst_reg->u32_min_value = var32_off.value; |
| 14688 | dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); |
| 14689 | |
| 14690 | /* Safe to set s32 bounds by casting u32 result into s32 when u32 |
| 14691 | * doesn't cross sign boundary. Otherwise set s32 bounds to unbounded. |
| 14692 | */ |
| 14693 | if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) { |
| 14694 | dst_reg->s32_min_value = dst_reg->u32_min_value; |
| 14695 | dst_reg->s32_max_value = dst_reg->u32_max_value; |
| 14696 | } else { |
| 14697 | dst_reg->s32_min_value = S32_MIN; |
| 14698 | dst_reg->s32_max_value = S32_MAX; |
| 14699 | } |
| 14700 | } |
| 14701 | |
| 14702 | static void scalar_min_max_and(struct bpf_reg_state *dst_reg, |
| 14703 | struct bpf_reg_state *src_reg) |
| 14704 | { |
| 14705 | bool src_known = tnum_is_const(src_reg->var_off); |
| 14706 | bool dst_known = tnum_is_const(dst_reg->var_off); |
| 14707 | u64 umax_val = src_reg->umax_value; |
| 14708 | |
| 14709 | if (src_known && dst_known) { |
| 14710 | __mark_reg_known(dst_reg, dst_reg->var_off.value); |
| 14711 | return; |
| 14712 | } |
| 14713 | |
| 14714 | /* We get our minimum from the var_off, since that's inherently |
| 14715 | * bitwise. Our maximum is the minimum of the operands' maxima. |
| 14716 | */ |
| 14717 | dst_reg->umin_value = dst_reg->var_off.value; |
| 14718 | dst_reg->umax_value = min(dst_reg->umax_value, umax_val); |
| 14719 | |
| 14720 | /* Safe to set s64 bounds by casting u64 result into s64 when u64 |
| 14721 | * doesn't cross sign boundary. Otherwise set s64 bounds to unbounded. |
| 14722 | */ |
| 14723 | if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) { |
| 14724 | dst_reg->smin_value = dst_reg->umin_value; |
| 14725 | dst_reg->smax_value = dst_reg->umax_value; |
| 14726 | } else { |
| 14727 | dst_reg->smin_value = S64_MIN; |
| 14728 | dst_reg->smax_value = S64_MAX; |
| 14729 | } |
| 14730 | /* We may learn something more from the var_off */ |
| 14731 | __update_reg_bounds(dst_reg); |
| 14732 | } |
| 14733 | |
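| | /*
| |  * Bitwise OR sketch (hypothetical values): the result is at least the
| |  * larger of the operands' minima and at most value | mask of the result
| |  * tnum, e.g. OR-ing in a constant 0x8 guarantees umin >= 8.
| |  */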
| 14734 | static void scalar32_min_max_or(struct bpf_reg_state *dst_reg, |
| 14735 | struct bpf_reg_state *src_reg) |
| 14736 | { |
| 14737 | bool src_known = tnum_subreg_is_const(src_reg->var_off); |
| 14738 | bool dst_known = tnum_subreg_is_const(dst_reg->var_off); |
| 14739 | struct tnum var32_off = tnum_subreg(dst_reg->var_off); |
| 14740 | u32 umin_val = src_reg->u32_min_value; |
| 14741 | |
| 14742 | if (src_known && dst_known) { |
| 14743 | __mark_reg32_known(dst_reg, var32_off.value); |
| 14744 | return; |
| 14745 | } |
| 14746 | |
| 14747 | /* We get our maximum from the var_off, and our minimum is the |
| 14748 | * maximum of the operands' minima |
| 14749 | */ |
| 14750 | dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); |
| 14751 | dst_reg->u32_max_value = var32_off.value | var32_off.mask; |
| 14752 | |
| 14753 | /* Safe to set s32 bounds by casting u32 result into s32 when u32 |
| 14754 | * doesn't cross sign boundary. Otherwise set s32 bounds to unbounded. |
| 14755 | */ |
| 14756 | if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) { |
| 14757 | dst_reg->s32_min_value = dst_reg->u32_min_value; |
| 14758 | dst_reg->s32_max_value = dst_reg->u32_max_value; |
| 14759 | } else { |
| 14760 | dst_reg->s32_min_value = S32_MIN; |
| 14761 | dst_reg->s32_max_value = S32_MAX; |
| 14762 | } |
| 14763 | } |
| 14764 | |
| 14765 | static void scalar_min_max_or(struct bpf_reg_state *dst_reg, |
| 14766 | struct bpf_reg_state *src_reg) |
| 14767 | { |
| 14768 | bool src_known = tnum_is_const(src_reg->var_off); |
| 14769 | bool dst_known = tnum_is_const(dst_reg->var_off); |
| 14770 | u64 umin_val = src_reg->umin_value; |
| 14771 | |
| 14772 | if (src_known && dst_known) { |
| 14773 | __mark_reg_known(dst_reg, dst_reg->var_off.value); |
| 14774 | return; |
| 14775 | } |
| 14776 | |
| 14777 | /* We get our maximum from the var_off, and our minimum is the |
| 14778 | * maximum of the operands' minima |
| 14779 | */ |
| 14780 | dst_reg->umin_value = max(dst_reg->umin_value, umin_val); |
| 14781 | dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; |
| 14782 | |
| 14783 | /* Safe to set s64 bounds by casting u64 result into s64 when u64 |
| 14784 | * doesn't cross sign boundary. Otherwise set s64 bounds to unbounded. |
| 14785 | */ |
| 14786 | if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) { |
| 14787 | dst_reg->smin_value = dst_reg->umin_value; |
| 14788 | dst_reg->smax_value = dst_reg->umax_value; |
| 14789 | } else { |
| 14790 | dst_reg->smin_value = S64_MIN; |
| 14791 | dst_reg->smax_value = S64_MAX; |
| 14792 | } |
| 14793 | /* We may learn something more from the var_off */ |
| 14794 | __update_reg_bounds(dst_reg); |
| 14795 | } |
| 14796 | |
| 14797 | static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg, |
| 14798 | struct bpf_reg_state *src_reg) |
| 14799 | { |
| 14800 | bool src_known = tnum_subreg_is_const(src_reg->var_off); |
| 14801 | bool dst_known = tnum_subreg_is_const(dst_reg->var_off); |
| 14802 | struct tnum var32_off = tnum_subreg(dst_reg->var_off); |
| 14803 | |
| 14804 | if (src_known && dst_known) { |
| 14805 | __mark_reg32_known(dst_reg, var32_off.value); |
| 14806 | return; |
| 14807 | } |
| 14808 | |
| 14809 | /* We get both minimum and maximum from the var32_off. */ |
| 14810 | dst_reg->u32_min_value = var32_off.value; |
| 14811 | dst_reg->u32_max_value = var32_off.value | var32_off.mask; |
| 14812 | |
| 14813 | /* Safe to set s32 bounds by casting u32 result into s32 when u32 |
| 14814 | * doesn't cross sign boundary. Otherwise set s32 bounds to unbounded. |
| 14815 | */ |
| 14816 | if ((s32)dst_reg->u32_min_value <= (s32)dst_reg->u32_max_value) { |
| 14817 | dst_reg->s32_min_value = dst_reg->u32_min_value; |
| 14818 | dst_reg->s32_max_value = dst_reg->u32_max_value; |
| 14819 | } else { |
| 14820 | dst_reg->s32_min_value = S32_MIN; |
| 14821 | dst_reg->s32_max_value = S32_MAX; |
| 14822 | } |
| 14823 | } |
| 14824 | |
| 14825 | static void scalar_min_max_xor(struct bpf_reg_state *dst_reg, |
| 14826 | struct bpf_reg_state *src_reg) |
| 14827 | { |
| 14828 | bool src_known = tnum_is_const(src_reg->var_off); |
| 14829 | bool dst_known = tnum_is_const(dst_reg->var_off); |
| 14830 | |
| 14831 | if (src_known && dst_known) { |
| 14832 | /* dst_reg->var_off.value has been updated earlier */ |
| 14833 | __mark_reg_known(dst_reg, dst_reg->var_off.value); |
| 14834 | return; |
| 14835 | } |
| 14836 | |
| 14837 | /* We get both minimum and maximum from the var_off. */ |
| 14838 | dst_reg->umin_value = dst_reg->var_off.value; |
| 14839 | dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; |
| 14840 | |
| 14841 | /* Safe to set s64 bounds by casting u64 result into s64 when u64 |
| 14842 | * doesn't cross sign boundary. Otherwise set s64 bounds to unbounded. |
| 14843 | */ |
| 14844 | if ((s64)dst_reg->umin_value <= (s64)dst_reg->umax_value) { |
| 14845 | dst_reg->smin_value = dst_reg->umin_value; |
| 14846 | dst_reg->smax_value = dst_reg->umax_value; |
| 14847 | } else { |
| 14848 | dst_reg->smin_value = S64_MIN; |
| 14849 | dst_reg->smax_value = S64_MAX; |
| 14850 | } |
| 14851 | |
| 14852 | __update_reg_bounds(dst_reg); |
| 14853 | } |
| 14854 | |
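| | /*
| |  * Left shift sketch (hypothetical values): u32 dst in [1, 3] shifted
| |  * left by a constant 4 gives [16, 48]; but if the top bit might be
| |  * shifted out (e.g. umax 0x40000000 << 2), the unsigned bounds are
| |  * reset to the full range.
| |  */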
| 14855 | static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, |
| 14856 | u64 umin_val, u64 umax_val) |
| 14857 | { |
| 14858 | /* We lose all sign bit information (except what we can pick |
| 14859 | * up from var_off) |
| 14860 | */ |
| 14861 | dst_reg->s32_min_value = S32_MIN; |
| 14862 | dst_reg->s32_max_value = S32_MAX; |
| 14863 | /* If we might shift our top bit out, then we know nothing */ |
| 14864 | if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { |
| 14865 | dst_reg->u32_min_value = 0; |
| 14866 | dst_reg->u32_max_value = U32_MAX; |
| 14867 | } else { |
| 14868 | dst_reg->u32_min_value <<= umin_val; |
| 14869 | dst_reg->u32_max_value <<= umax_val; |
| 14870 | } |
| 14871 | } |
| 14872 | |
| 14873 | static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, |
| 14874 | struct bpf_reg_state *src_reg) |
| 14875 | { |
| 14876 | u32 umax_val = src_reg->u32_max_value; |
| 14877 | u32 umin_val = src_reg->u32_min_value; |
| 14878 | /* u32 alu operation will zext upper bits */ |
| 14879 | struct tnum subreg = tnum_subreg(dst_reg->var_off); |
| 14880 | |
| 14881 | __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); |
| 14882 | dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); |
| 14883 | 	/* Not strictly required, but to be careful mark the reg64 bounds as
| 14884 | 	 * unknown so that we are forced to pick them up from the tnum and zext
| 14885 | 	 * later; if some path skips this step we are still safe.
| 14886 | */ |
| 14887 | __mark_reg64_unbounded(dst_reg); |
| 14888 | __update_reg32_bounds(dst_reg); |
| 14889 | } |
| 14890 | |
| 14891 | static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg, |
| 14892 | u64 umin_val, u64 umax_val) |
| 14893 | { |
| 14894 | /* Special case <<32 because it is a common compiler pattern to sign |
| 14895 | * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are |
| 14896 | * positive we know this shift will also be positive so we can track |
| 14897 | * bounds correctly. Otherwise we lose all sign bit information except |
| 14898 | * what we can pick up from var_off. Perhaps we can generalize this |
| 14899 | * later to shifts of any length. |
| 14900 | */ |
| 14901 | if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) |
| 14902 | dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; |
| 14903 | else |
| 14904 | dst_reg->smax_value = S64_MAX; |
| 14905 | |
| 14906 | if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) |
| 14907 | dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; |
| 14908 | else |
| 14909 | dst_reg->smin_value = S64_MIN; |
| 14910 | |
| 14911 | /* If we might shift our top bit out, then we know nothing */ |
| 14912 | if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { |
| 14913 | dst_reg->umin_value = 0; |
| 14914 | dst_reg->umax_value = U64_MAX; |
| 14915 | } else { |
| 14916 | dst_reg->umin_value <<= umin_val; |
| 14917 | dst_reg->umax_value <<= umax_val; |
| 14918 | } |
| 14919 | } |
| 14920 | |
| 14921 | static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg, |
| 14922 | struct bpf_reg_state *src_reg) |
| 14923 | { |
| 14924 | u64 umax_val = src_reg->umax_value; |
| 14925 | u64 umin_val = src_reg->umin_value; |
| 14926 | |
| 14927 | /* scalar64 calc uses 32bit unshifted bounds so must be called first */ |
| 14928 | __scalar64_min_max_lsh(dst_reg, umin_val, umax_val); |
| 14929 | __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); |
| 14930 | |
| 14931 | dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); |
| 14932 | /* We may learn something more from the var_off */ |
| 14933 | __update_reg_bounds(dst_reg); |
| 14934 | } |
| 14935 | |
| 14936 | static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg, |
| 14937 | struct bpf_reg_state *src_reg) |
| 14938 | { |
| 14939 | struct tnum subreg = tnum_subreg(dst_reg->var_off); |
| 14940 | u32 umax_val = src_reg->u32_max_value; |
| 14941 | u32 umin_val = src_reg->u32_min_value; |
| 14942 | |
| 14943 | /* BPF_RSH is an unsigned shift. If the value in dst_reg might |
| 14944 | * be negative, then either: |
| 14945 | * 1) src_reg might be zero, so the sign bit of the result is |
| 14946 | * unknown, so we lose our signed bounds |
| 14947 | * 2) it's known negative, thus the unsigned bounds capture the |
| 14948 | * signed bounds |
| 14949 | * 3) the signed bounds cross zero, so they tell us nothing |
| 14950 | * about the result |
| 14951 | * If the value in dst_reg is known nonnegative, then again the |
| 14952 | * unsigned bounds capture the signed bounds. |
| 14953 | * Thus, in all cases it suffices to blow away our signed bounds |
| 14954 | * and rely on inferring new ones from the unsigned bounds and |
| 14955 | * var_off of the result. |
| 14956 | */ |
| 14957 | dst_reg->s32_min_value = S32_MIN; |
| 14958 | dst_reg->s32_max_value = S32_MAX; |
| 14959 | |
| 14960 | dst_reg->var_off = tnum_rshift(subreg, umin_val); |
| 14961 | dst_reg->u32_min_value >>= umax_val; |
| 14962 | dst_reg->u32_max_value >>= umin_val; |
| 14963 | |
| 14964 | __mark_reg64_unbounded(dst_reg); |
| 14965 | __update_reg32_bounds(dst_reg); |
| 14966 | } |
| 14967 | |
| 14968 | static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg, |
| 14969 | struct bpf_reg_state *src_reg) |
| 14970 | { |
| 14971 | u64 umax_val = src_reg->umax_value; |
| 14972 | u64 umin_val = src_reg->umin_value; |
| 14973 | |
| 14974 | /* BPF_RSH is an unsigned shift. If the value in dst_reg might |
| 14975 | * be negative, then either: |
| 14976 | * 1) src_reg might be zero, so the sign bit of the result is |
| 14977 | * unknown, so we lose our signed bounds |
| 14978 | * 2) it's known negative, thus the unsigned bounds capture the |
| 14979 | * signed bounds |
| 14980 | * 3) the signed bounds cross zero, so they tell us nothing |
| 14981 | * about the result |
| 14982 | * If the value in dst_reg is known nonnegative, then again the |
| 14983 | * unsigned bounds capture the signed bounds. |
| 14984 | * Thus, in all cases it suffices to blow away our signed bounds |
| 14985 | * and rely on inferring new ones from the unsigned bounds and |
| 14986 | * var_off of the result. |
| 14987 | */ |
| 14988 | dst_reg->smin_value = S64_MIN; |
| 14989 | dst_reg->smax_value = S64_MAX; |
| 14990 | dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); |
| 14991 | dst_reg->umin_value >>= umax_val; |
| 14992 | dst_reg->umax_value >>= umin_val; |
| 14993 | |
| 14994 | 	/* It's not easy to operate on alu32 bounds here because it depends
| 14995 | 	 * on bits being shifted in. Take the easy way out and mark unbounded
| 14996 | * so we can recalculate later from tnum. |
| 14997 | */ |
| 14998 | __mark_reg32_unbounded(dst_reg); |
| 14999 | __update_reg_bounds(dst_reg); |
| 15000 | } |
| 15001 | |
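| | /*
| |  * Arithmetic right shift sketch (hypothetical values): s32 dst in
| |  * [-16, 8] shifted right by a constant 2 gives [-4, 2]; the unsigned
| |  * bounds are discarded and later recomputed from the shifted tnum.
| |  */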
| 15002 | static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg, |
| 15003 | struct bpf_reg_state *src_reg) |
| 15004 | { |
| 15005 | u64 umin_val = src_reg->u32_min_value; |
| 15006 | |
| 15007 | /* Upon reaching here, src_known is true and |
| 15008 | * umax_val is equal to umin_val. |
| 15009 | */ |
| 15010 | dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); |
| 15011 | dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); |
| 15012 | |
| 15013 | dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); |
| 15014 | |
| 15015 | /* blow away the dst_reg umin_value/umax_value and rely on |
| 15016 | * dst_reg var_off to refine the result. |
| 15017 | */ |
| 15018 | dst_reg->u32_min_value = 0; |
| 15019 | dst_reg->u32_max_value = U32_MAX; |
| 15020 | |
| 15021 | __mark_reg64_unbounded(dst_reg); |
| 15022 | __update_reg32_bounds(dst_reg); |
| 15023 | } |
| 15024 | |
| 15025 | static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg, |
| 15026 | struct bpf_reg_state *src_reg) |
| 15027 | { |
| 15028 | u64 umin_val = src_reg->umin_value; |
| 15029 | |
| 15030 | /* Upon reaching here, src_known is true and umax_val is equal |
| 15031 | * to umin_val. |
| 15032 | */ |
| 15033 | dst_reg->smin_value >>= umin_val; |
| 15034 | dst_reg->smax_value >>= umin_val; |
| 15035 | |
| 15036 | dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); |
| 15037 | |
| 15038 | /* blow away the dst_reg umin_value/umax_value and rely on |
| 15039 | * dst_reg var_off to refine the result. |
| 15040 | */ |
| 15041 | dst_reg->umin_value = 0; |
| 15042 | dst_reg->umax_value = U64_MAX; |
| 15043 | |
| 15044 | 	/* It's not easy to operate on alu32 bounds here because it depends
| 15045 | 	 * on bits being shifted in from the upper 32 bits. Take the easy way out
| 15046 | * and mark unbounded so we can recalculate later from tnum. |
| 15047 | */ |
| 15048 | __mark_reg32_unbounded(dst_reg); |
| 15049 | __update_reg_bounds(dst_reg); |
| 15050 | } |
| 15051 | |
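| | /*
| |  * For example, a BPF_LSH whose shift amount register is not a known
| |  * constant, or is a constant >= the operand width (e.g. a 64-bit shift
| |  * by 64 or more), cannot have its destination range tracked, while
| |  * BPF_ADD/BPF_SUB/BPF_AND/BPF_XOR/BPF_OR/BPF_MUL always can.
| |  */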
| 15052 | static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn, |
| 15053 | const struct bpf_reg_state *src_reg) |
| 15054 | { |
| 15055 | bool src_is_const = false; |
| 15056 | u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; |
| 15057 | |
| 15058 | if (insn_bitness == 32) { |
| 15059 | if (tnum_subreg_is_const(src_reg->var_off) |
| 15060 | && src_reg->s32_min_value == src_reg->s32_max_value |
| 15061 | && src_reg->u32_min_value == src_reg->u32_max_value) |
| 15062 | src_is_const = true; |
| 15063 | } else { |
| 15064 | if (tnum_is_const(src_reg->var_off) |
| 15065 | && src_reg->smin_value == src_reg->smax_value |
| 15066 | && src_reg->umin_value == src_reg->umax_value) |
| 15067 | src_is_const = true; |
| 15068 | } |
| 15069 | |
| 15070 | switch (BPF_OP(insn->code)) { |
| 15071 | case BPF_ADD: |
| 15072 | case BPF_SUB: |
| 15073 | case BPF_AND: |
| 15074 | case BPF_XOR: |
| 15075 | case BPF_OR: |
| 15076 | case BPF_MUL: |
| 15077 | return true; |
| 15078 | |
| 15079 | 	/* The range of shift operators is only computable if the shift amount
| 15080 | 	 * operand is a constant. Shifts greater than 31 (alu32) or 63 (alu64)
| 15081 | 	 * are undefined; this includes shifts by a negative number.
| 15082 | */ |
| 15083 | case BPF_LSH: |
| 15084 | case BPF_RSH: |
| 15085 | case BPF_ARSH: |
| 15086 | return (src_is_const && src_reg->umax_value < insn_bitness); |
| 15087 | default: |
| 15088 | return false; |
| 15089 | } |
| 15090 | } |
| 15091 | |
| 15092 | /* WARNING: This function does calculations on 64-bit values, but the actual |
| 15093 | * execution may occur on 32-bit values. Therefore, things like bitshifts |
| 15094 | * need extra checks in the 32-bit case. |
| 15095 | */ |
| 15096 | static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, |
| 15097 | struct bpf_insn *insn, |
| 15098 | struct bpf_reg_state *dst_reg, |
| 15099 | struct bpf_reg_state src_reg) |
| 15100 | { |
| 15101 | u8 opcode = BPF_OP(insn->code); |
| 15102 | bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); |
| 15103 | int ret; |
| 15104 | |
| 15105 | if (!is_safe_to_compute_dst_reg_range(insn, &src_reg)) { |
| 15106 | __mark_reg_unknown(env, dst_reg); |
| 15107 | return 0; |
| 15108 | } |
| 15109 | |
| 15110 | if (sanitize_needed(opcode)) { |
| 15111 | ret = sanitize_val_alu(env, insn); |
| 15112 | if (ret < 0) |
| 15113 | return sanitize_err(env, insn, ret, NULL, NULL); |
| 15114 | } |
| 15115 | |
| 15116 | /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops. |
| 15117 | 	 * There are two classes of instructions: For the first class we track
| 15118 | 	 * both alu32 and alu64 sign/unsigned bounds independently; this provides
| 15119 | 	 * the greatest amount of precision when alu operations are mixed with
| 15120 | 	 * jmp32 operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL,
| 15121 | 	 * BPF_AND, BPF_OR and BPF_XOR. This is possible because these ops have
| 15122 | 	 * fairly easy to understand and calculate behavior in both 32-bit and 64-bit alu ops.
| 15123 | * See alu32 verifier tests for examples. The second class of |
| 15124 | * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however are not so easy |
| 15125 | * with regards to tracking sign/unsigned bounds because the bits may |
| 15126 | * cross subreg boundaries in the alu64 case. When this happens we mark |
| 15127 | * the reg unbounded in the subreg bound space and use the resulting |
| 15128 | * tnum to calculate an approximation of the sign/unsigned bounds. |
| 15129 | */ |
| 15130 | switch (opcode) { |
| 15131 | case BPF_ADD: |
| 15132 | scalar32_min_max_add(dst_reg, &src_reg); |
| 15133 | scalar_min_max_add(dst_reg, &src_reg); |
| 15134 | dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); |
| 15135 | break; |
| 15136 | case BPF_SUB: |
| 15137 | scalar32_min_max_sub(dst_reg, &src_reg); |
| 15138 | scalar_min_max_sub(dst_reg, &src_reg); |
| 15139 | dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); |
| 15140 | break; |
| 15141 | case BPF_MUL: |
| 15142 | dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); |
| 15143 | scalar32_min_max_mul(dst_reg, &src_reg); |
| 15144 | scalar_min_max_mul(dst_reg, &src_reg); |
| 15145 | break; |
| 15146 | case BPF_AND: |
| 15147 | dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); |
| 15148 | scalar32_min_max_and(dst_reg, &src_reg); |
| 15149 | scalar_min_max_and(dst_reg, &src_reg); |
| 15150 | break; |
| 15151 | case BPF_OR: |
| 15152 | dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); |
| 15153 | scalar32_min_max_or(dst_reg, &src_reg); |
| 15154 | scalar_min_max_or(dst_reg, &src_reg); |
| 15155 | break; |
| 15156 | case BPF_XOR: |
| 15157 | dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); |
| 15158 | scalar32_min_max_xor(dst_reg, &src_reg); |
| 15159 | scalar_min_max_xor(dst_reg, &src_reg); |
| 15160 | break; |
| 15161 | case BPF_LSH: |
| 15162 | if (alu32) |
| 15163 | scalar32_min_max_lsh(dst_reg, &src_reg); |
| 15164 | else |
| 15165 | scalar_min_max_lsh(dst_reg, &src_reg); |
| 15166 | break; |
| 15167 | case BPF_RSH: |
| 15168 | if (alu32) |
| 15169 | scalar32_min_max_rsh(dst_reg, &src_reg); |
| 15170 | else |
| 15171 | scalar_min_max_rsh(dst_reg, &src_reg); |
| 15172 | break; |
| 15173 | case BPF_ARSH: |
| 15174 | if (alu32) |
| 15175 | scalar32_min_max_arsh(dst_reg, &src_reg); |
| 15176 | else |
| 15177 | scalar_min_max_arsh(dst_reg, &src_reg); |
| 15178 | break; |
| 15179 | default: |
| 15180 | break; |
| 15181 | } |
| 15182 | |
| 15183 | /* ALU32 ops are zero extended into 64bit register */ |
| 15184 | if (alu32) |
| 15185 | zext_32_to_64(dst_reg); |
| 15186 | reg_bounds_sync(dst_reg); |
| 15187 | return 0; |
| 15188 | } |
| 15189 | |
| 15190 | /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max |
| 15191 | * and var_off. |
| 15192 | */ |
| 15193 | static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, |
| 15194 | struct bpf_insn *insn) |
| 15195 | { |
| 15196 | struct bpf_verifier_state *vstate = env->cur_state; |
| 15197 | struct bpf_func_state *state = vstate->frame[vstate->curframe]; |
| 15198 | struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; |
| 15199 | struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; |
| 15200 | bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); |
| 15201 | u8 opcode = BPF_OP(insn->code); |
| 15202 | int err; |
| 15203 | |
| 15204 | 	dst_reg = &regs[insn->dst_reg];
| 15205 | src_reg = NULL; |
| 15206 | |
| 15207 | if (dst_reg->type == PTR_TO_ARENA) { |
| 15208 | struct bpf_insn_aux_data *aux = cur_aux(env); |
| 15209 | |
| 15210 | if (BPF_CLASS(insn->code) == BPF_ALU64) |
| 15211 | /* |
| 15212 | * 32-bit operations zero upper bits automatically. |
| 15213 | * 64-bit operations need to be converted to 32. |
| 15214 | */ |
| 15215 | aux->needs_zext = true; |
| 15216 | |
| 15217 | /* Any arithmetic operations are allowed on arena pointers */ |
| 15218 | return 0; |
| 15219 | } |
| 15220 | |
| 15221 | if (dst_reg->type != SCALAR_VALUE) |
| 15222 | ptr_reg = dst_reg; |
| 15223 | |
| 15224 | if (BPF_SRC(insn->code) == BPF_X) { |
| 15225 | 		src_reg = &regs[insn->src_reg];
| 15226 | if (src_reg->type != SCALAR_VALUE) { |
| 15227 | if (dst_reg->type != SCALAR_VALUE) { |
| 15228 | /* Combining two pointers by any ALU op yields |
| 15229 | * an arbitrary scalar. Disallow all math except |
| 15230 | * pointer subtraction |
| 15231 | */ |
| 15232 | if (opcode == BPF_SUB && env->allow_ptr_leaks) { |
| 15233 | mark_reg_unknown(env, regs, insn->dst_reg); |
| 15234 | return 0; |
| 15235 | } |
| 15236 | verbose(env, "R%d pointer %s pointer prohibited\n", |
| 15237 | insn->dst_reg, |
| 15238 | bpf_alu_string[opcode >> 4]); |
| 15239 | return -EACCES; |
| 15240 | } else { |
| 15241 | /* scalar += pointer |
| 15242 | * This is legal, but we have to reverse our |
| 15243 | * src/dest handling in computing the range |
| 15244 | */ |
| 15245 | err = mark_chain_precision(env, insn->dst_reg); |
| 15246 | if (err) |
| 15247 | return err; |
| 15248 | return adjust_ptr_min_max_vals(env, insn, |
| 15249 | src_reg, dst_reg); |
| 15250 | } |
| 15251 | } else if (ptr_reg) { |
| 15252 | /* pointer += scalar */ |
| 15253 | err = mark_chain_precision(env, insn->src_reg); |
| 15254 | if (err) |
| 15255 | return err; |
| 15256 | return adjust_ptr_min_max_vals(env, insn, |
| 15257 | dst_reg, src_reg); |
| 15258 | } else if (dst_reg->precise) { |
| 15259 | /* if dst_reg is precise, src_reg should be precise as well */ |
| 15260 | err = mark_chain_precision(env, insn->src_reg); |
| 15261 | if (err) |
| 15262 | return err; |
| 15263 | } |
| 15264 | } else { |
| 15265 | /* Pretend the src is a reg with a known value, since we only |
| 15266 | * need to be able to read from this state. |
| 15267 | */ |
| 15268 | off_reg.type = SCALAR_VALUE; |
| 15269 | __mark_reg_known(&off_reg, insn->imm); |
| 15270 | src_reg = &off_reg; |
| 15271 | if (ptr_reg) /* pointer += K */ |
| 15272 | return adjust_ptr_min_max_vals(env, insn, |
| 15273 | ptr_reg, src_reg); |
| 15274 | } |
| 15275 | |
| 15276 | /* Got here implies adding two SCALAR_VALUEs */ |
| 15277 | if (WARN_ON_ONCE(ptr_reg)) { |
| 15278 | print_verifier_state(env, vstate, vstate->curframe, true); |
| 15279 | verbose(env, "verifier internal error: unexpected ptr_reg\n"); |
| 15280 | return -EINVAL; |
| 15281 | } |
| 15282 | if (WARN_ON(!src_reg)) { |
| 15283 | print_verifier_state(env, vstate, vstate->curframe, true); |
| 15284 | verbose(env, "verifier internal error: no src_reg\n"); |
| 15285 | return -EINVAL; |
| 15286 | } |
| 15287 | err = adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); |
| 15288 | if (err) |
| 15289 | return err; |
| 15290 | /* |
| 15291 | * Compilers can generate the code |
| 15292 | * r1 = r2 |
| 15293 | * r1 += 0x1 |
| 15294 | * if r2 < 1000 goto ... |
| 15295 | * use r1 in memory access |
| 15296 | 	 * So, for 64-bit alu, remember the constant delta between r2 and r1
| 15297 | 	 * and update r1 after the 'if' condition.
| 15298 | */ |
| 15299 | if (env->bpf_capable && |
| 15300 | BPF_OP(insn->code) == BPF_ADD && !alu32 && |
| 15301 | dst_reg->id && is_reg_const(src_reg, false)) { |
| 15302 | u64 val = reg_const_value(src_reg, false); |
| 15303 | |
| 15304 | if ((dst_reg->id & BPF_ADD_CONST) || |
| 15305 | /* prevent overflow in sync_linked_regs() later */ |
| 15306 | val > (u32)S32_MAX) { |
| 15307 | /* |
| 15308 | * If the register already went through rX += val |
| 15309 | * we cannot accumulate another val into rx->off. |
| 15310 | */ |
| 15311 | dst_reg->off = 0; |
| 15312 | dst_reg->id = 0; |
| 15313 | } else { |
| 15314 | dst_reg->id |= BPF_ADD_CONST; |
| 15315 | dst_reg->off = val; |
| 15316 | } |
| 15317 | } else { |
| 15318 | /* |
| 15319 | * Make sure ID is cleared otherwise dst_reg min/max could be |
| 15320 | * incorrectly propagated into other registers by sync_linked_regs() |
| 15321 | */ |
| 15322 | dst_reg->id = 0; |
| 15323 | } |
| 15324 | return 0; |
| 15325 | } |
| 15326 | |
| 15327 | /* check validity of 32-bit and 64-bit arithmetic operations */ |
| 15328 | static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) |
| 15329 | { |
| 15330 | struct bpf_reg_state *regs = cur_regs(env); |
| 15331 | u8 opcode = BPF_OP(insn->code); |
| 15332 | int err; |
| 15333 | |
| 15334 | if (opcode == BPF_END || opcode == BPF_NEG) { |
| 15335 | if (opcode == BPF_NEG) { |
| 15336 | if (BPF_SRC(insn->code) != BPF_K || |
| 15337 | insn->src_reg != BPF_REG_0 || |
| 15338 | insn->off != 0 || insn->imm != 0) { |
| 15339 | verbose(env, "BPF_NEG uses reserved fields\n"); |
| 15340 | return -EINVAL; |
| 15341 | } |
| 15342 | } else { |
| 15343 | if (insn->src_reg != BPF_REG_0 || insn->off != 0 || |
| 15344 | (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || |
| 15345 | (BPF_CLASS(insn->code) == BPF_ALU64 && |
| 15346 | BPF_SRC(insn->code) != BPF_TO_LE)) { |
| 15347 | verbose(env, "BPF_END uses reserved fields\n"); |
| 15348 | return -EINVAL; |
| 15349 | } |
| 15350 | } |
| 15351 | |
| 15352 | /* check src operand */ |
| 15353 | err = check_reg_arg(env, insn->dst_reg, SRC_OP); |
| 15354 | if (err) |
| 15355 | return err; |
| 15356 | |
| 15357 | if (is_pointer_value(env, insn->dst_reg)) { |
| 15358 | verbose(env, "R%d pointer arithmetic prohibited\n", |
| 15359 | insn->dst_reg); |
| 15360 | return -EACCES; |
| 15361 | } |
| 15362 | |
| 15363 | /* check dest operand */ |
| 15364 | err = check_reg_arg(env, insn->dst_reg, DST_OP); |
| 15365 | if (err) |
| 15366 | return err; |
| 15367 | |
| 15368 | } else if (opcode == BPF_MOV) { |
| 15369 | |
| 15370 | if (BPF_SRC(insn->code) == BPF_X) { |
| 15371 | if (BPF_CLASS(insn->code) == BPF_ALU) { |
| 15372 | if ((insn->off != 0 && insn->off != 8 && insn->off != 16) || |
| 15373 | insn->imm) { |
| 15374 | verbose(env, "BPF_MOV uses reserved fields\n"); |
| 15375 | return -EINVAL; |
| 15376 | } |
| 15377 | } else if (insn->off == BPF_ADDR_SPACE_CAST) { |
| 15378 | if (insn->imm != 1 && insn->imm != 1u << 16) { |
| 15379 | verbose(env, "addr_space_cast insn can only convert between address space 1 and 0\n"); |
| 15380 | return -EINVAL; |
| 15381 | } |
| 15382 | if (!env->prog->aux->arena) { |
| 15383 | verbose(env, "addr_space_cast insn can only be used in a program that has an associated arena\n"); |
| 15384 | return -EINVAL; |
| 15385 | } |
| 15386 | } else { |
| 15387 | if ((insn->off != 0 && insn->off != 8 && insn->off != 16 && |
| 15388 | insn->off != 32) || insn->imm) { |
| 15389 | verbose(env, "BPF_MOV uses reserved fields\n"); |
| 15390 | return -EINVAL; |
| 15391 | } |
| 15392 | } |
| 15393 | |
| 15394 | /* check src operand */ |
| 15395 | err = check_reg_arg(env, insn->src_reg, SRC_OP); |
| 15396 | if (err) |
| 15397 | return err; |
| 15398 | } else { |
| 15399 | if (insn->src_reg != BPF_REG_0 || insn->off != 0) { |
| 15400 | verbose(env, "BPF_MOV uses reserved fields\n"); |
| 15401 | return -EINVAL; |
| 15402 | } |
| 15403 | } |
| 15404 | |
| 15405 | /* check dest operand, mark as required later */ |
| 15406 | err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); |
| 15407 | if (err) |
| 15408 | return err; |
| 15409 | |
| 15410 | if (BPF_SRC(insn->code) == BPF_X) { |
| 15411 | struct bpf_reg_state *src_reg = regs + insn->src_reg; |
| 15412 | struct bpf_reg_state *dst_reg = regs + insn->dst_reg; |
| 15413 | |
| 15414 | if (BPF_CLASS(insn->code) == BPF_ALU64) { |
| 15415 | if (insn->imm) { |
| 15416 | /* off == BPF_ADDR_SPACE_CAST */ |
| 15417 | mark_reg_unknown(env, regs, insn->dst_reg); |
| 15418 | if (insn->imm == 1) { /* cast from as(1) to as(0) */ |
| 15419 | dst_reg->type = PTR_TO_ARENA; |
| 15420 | /* PTR_TO_ARENA is 32-bit */ |
| 15421 | dst_reg->subreg_def = env->insn_idx + 1; |
| 15422 | } |
| 15423 | } else if (insn->off == 0) { |
| 15424 | /* case: R1 = R2 |
| 15425 | * copy register state to dest reg |
| 15426 | */ |
| 15427 | assign_scalar_id_before_mov(env, src_reg); |
| 15428 | copy_register_state(dst_reg, src_reg); |
| 15429 | dst_reg->live |= REG_LIVE_WRITTEN; |
| 15430 | dst_reg->subreg_def = DEF_NOT_SUBREG; |
| 15431 | } else { |
| 15432 | 				/* case: R1 = (s8, s16, s32)R2 */
| 15433 | if (is_pointer_value(env, insn->src_reg)) { |
| 15434 | verbose(env, |
| 15435 | "R%d sign-extension part of pointer\n", |
| 15436 | insn->src_reg); |
| 15437 | return -EACCES; |
| 15438 | } else if (src_reg->type == SCALAR_VALUE) { |
| 15439 | bool no_sext; |
| 15440 | |
| 15441 | no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); |
| 15442 | if (no_sext) |
| 15443 | assign_scalar_id_before_mov(env, src_reg); |
| 15444 | copy_register_state(dst_reg, src_reg); |
| 15445 | if (!no_sext) |
| 15446 | dst_reg->id = 0; |
| 15447 | coerce_reg_to_size_sx(dst_reg, insn->off >> 3); |
| 15448 | dst_reg->live |= REG_LIVE_WRITTEN; |
| 15449 | dst_reg->subreg_def = DEF_NOT_SUBREG; |
| 15450 | } else { |
| 15451 | mark_reg_unknown(env, regs, insn->dst_reg); |
| 15452 | } |
| 15453 | } |
| 15454 | } else { |
| 15455 | /* R1 = (u32) R2 */ |
| 15456 | if (is_pointer_value(env, insn->src_reg)) { |
| 15457 | verbose(env, |
| 15458 | "R%d partial copy of pointer\n", |
| 15459 | insn->src_reg); |
| 15460 | return -EACCES; |
| 15461 | } else if (src_reg->type == SCALAR_VALUE) { |
| 15462 | if (insn->off == 0) { |
| 15463 | bool is_src_reg_u32 = get_reg_width(src_reg) <= 32; |
| 15464 | |
| 15465 | if (is_src_reg_u32) |
| 15466 | assign_scalar_id_before_mov(env, src_reg); |
| 15467 | copy_register_state(dst_reg, src_reg); |
| 15468 | /* Make sure ID is cleared if src_reg is not in u32 |
| 15469 | * range otherwise dst_reg min/max could be incorrectly |
| 15470 | * propagated into src_reg by sync_linked_regs() |
| 15471 | */ |
| 15472 | if (!is_src_reg_u32) |
| 15473 | dst_reg->id = 0; |
| 15474 | dst_reg->live |= REG_LIVE_WRITTEN; |
| 15475 | dst_reg->subreg_def = env->insn_idx + 1; |
| 15476 | } else { |
| 15477 | /* case: W1 = (s8, s16)W2 */ |
| 15478 | bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); |
| 15479 | |
| 15480 | if (no_sext) |
| 15481 | assign_scalar_id_before_mov(env, src_reg); |
| 15482 | copy_register_state(dst_reg, src_reg); |
| 15483 | if (!no_sext) |
| 15484 | dst_reg->id = 0; |
| 15485 | dst_reg->live |= REG_LIVE_WRITTEN; |
| 15486 | dst_reg->subreg_def = env->insn_idx + 1; |
| 15487 | coerce_subreg_to_size_sx(dst_reg, insn->off >> 3); |
| 15488 | } |
| 15489 | } else { |
| 15490 | mark_reg_unknown(env, regs, |
| 15491 | insn->dst_reg); |
| 15492 | } |
| 15493 | zext_32_to_64(dst_reg); |
| 15494 | reg_bounds_sync(dst_reg); |
| 15495 | } |
| 15496 | } else { |
| 15497 | /* case: R = imm |
| 15498 | * remember the value we stored into this reg |
| 15499 | */ |
| 15500 | /* clear any state __mark_reg_known doesn't set */ |
| 15501 | mark_reg_unknown(env, regs, insn->dst_reg); |
| 15502 | regs[insn->dst_reg].type = SCALAR_VALUE; |
| 15503 | if (BPF_CLASS(insn->code) == BPF_ALU64) { |
| 15504 | __mark_reg_known(regs + insn->dst_reg, |
| 15505 | insn->imm); |
| 15506 | } else { |
| 15507 | __mark_reg_known(regs + insn->dst_reg, |
| 15508 | (u32)insn->imm); |
| 15509 | } |
| 15510 | } |
| 15511 | |
| 15512 | } else if (opcode > BPF_END) { |
| 15513 | verbose(env, "invalid BPF_ALU opcode %x\n", opcode); |
| 15514 | return -EINVAL; |
| 15515 | |
| 15516 | } else { /* all other ALU ops: and, sub, xor, add, ... */ |
| 15517 | |
| 15518 | if (BPF_SRC(insn->code) == BPF_X) { |
| 15519 | if (insn->imm != 0 || insn->off > 1 || |
| 15520 | (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { |
| 15521 | verbose(env, "BPF_ALU uses reserved fields\n"); |
| 15522 | return -EINVAL; |
| 15523 | } |
| 15524 | /* check src1 operand */ |
| 15525 | err = check_reg_arg(env, insn->src_reg, SRC_OP); |
| 15526 | if (err) |
| 15527 | return err; |
| 15528 | } else { |
| 15529 | if (insn->src_reg != BPF_REG_0 || insn->off > 1 || |
| 15530 | (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { |
| 15531 | verbose(env, "BPF_ALU uses reserved fields\n"); |
| 15532 | return -EINVAL; |
| 15533 | } |
| 15534 | } |
| 15535 | |
| 15536 | /* check src2 operand */ |
| 15537 | err = check_reg_arg(env, insn->dst_reg, SRC_OP); |
| 15538 | if (err) |
| 15539 | return err; |
| 15540 | |
| 15541 | if ((opcode == BPF_MOD || opcode == BPF_DIV) && |
| 15542 | BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { |
| 15543 | verbose(env, "div by zero\n"); |
| 15544 | return -EINVAL; |
| 15545 | } |
| 15546 | |
| 15547 | if ((opcode == BPF_LSH || opcode == BPF_RSH || |
| 15548 | opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { |
| 15549 | int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; |
| 15550 | |
| 15551 | if (insn->imm < 0 || insn->imm >= size) { |
| 15552 | verbose(env, "invalid shift %d\n", insn->imm); |
| 15553 | return -EINVAL; |
| 15554 | } |
| 15555 | } |
| 15556 | |
| 15557 | /* check dest operand */ |
| 15558 | err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); |
| 15559 | err = err ?: adjust_reg_min_max_vals(env, insn); |
| 15560 | if (err) |
| 15561 | return err; |
| 15562 | } |
| 15563 | |
| 15564 | 	return reg_bounds_sanity_check(env, &regs[insn->dst_reg], "alu");
| 15565 | } |
| 15566 | |
| 15567 | static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, |
| 15568 | struct bpf_reg_state *dst_reg, |
| 15569 | enum bpf_reg_type type, |
| 15570 | bool range_right_open) |
| 15571 | { |
| 15572 | struct bpf_func_state *state; |
| 15573 | struct bpf_reg_state *reg; |
| 15574 | int new_range; |
| 15575 | |
| 15576 | if (dst_reg->off < 0 || |
| 15577 | (dst_reg->off == 0 && range_right_open)) |
| 15578 | /* This doesn't give us any range */ |
| 15579 | return; |
| 15580 | |
| 15581 | if (dst_reg->umax_value > MAX_PACKET_OFF || |
| 15582 | dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) |
| 15583 | /* Risk of overflow. For instance, ptr + (1<<63) may be less |
| 15584 | * than pkt_end, but that's because it's also less than pkt. |
| 15585 | */ |
| 15586 | return; |
| 15587 | |
| 15588 | new_range = dst_reg->off; |
| 15589 | if (range_right_open) |
| 15590 | new_range++; |
| 15591 | |
| 15592 | /* Examples for register markings: |
| 15593 | * |
| 15594 | * pkt_data in dst register: |
| 15595 | * |
| 15596 | * r2 = r3; |
| 15597 | * r2 += 8; |
| 15598 | * if (r2 > pkt_end) goto <handle exception> |
| 15599 | * <access okay> |
| 15600 | * |
| 15601 | * r2 = r3; |
| 15602 | * r2 += 8; |
| 15603 | * if (r2 < pkt_end) goto <access okay> |
| 15604 | * <handle exception> |
| 15605 | * |
| 15606 | * Where: |
| 15607 | * r2 == dst_reg, pkt_end == src_reg |
| 15608 | * r2=pkt(id=n,off=8,r=0) |
| 15609 | * r3=pkt(id=n,off=0,r=0) |
| 15610 | * |
| 15611 | * pkt_data in src register: |
| 15612 | * |
| 15613 | * r2 = r3; |
| 15614 | * r2 += 8; |
| 15615 | * if (pkt_end >= r2) goto <access okay> |
| 15616 | * <handle exception> |
| 15617 | * |
| 15618 | * r2 = r3; |
| 15619 | * r2 += 8; |
| 15620 | * if (pkt_end <= r2) goto <handle exception> |
| 15621 | * <access okay> |
| 15622 | * |
| 15623 | * Where: |
| 15624 | * pkt_end == dst_reg, r2 == src_reg |
| 15625 | * r2=pkt(id=n,off=8,r=0) |
| 15626 | * r3=pkt(id=n,off=0,r=0) |
| 15627 | * |
| 15628 | * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) |
| 15629 | * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) |
| 15630 | * and [r3, r3 + 8-1) respectively is safe to access depending on |
| 15631 | * the check. |
| 15632 | */ |
| 15633 | |
| 15634 | /* If our ids match, then we must have the same max_value. And we |
| 15635 | * don't care about the other reg's fixed offset, since if it's too big |
| 15636 | * the range won't allow anything. |
| 15637 | * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. |
| 15638 | */ |
| 15639 | bpf_for_each_reg_in_vstate(vstate, state, reg, ({ |
| 15640 | if (reg->type == type && reg->id == dst_reg->id) |
| 15641 | /* keep the maximum range already checked */ |
| 15642 | reg->range = max(reg->range, new_range); |
| 15643 | })); |
| 15644 | } |
| 15645 | |
| 15646 | /* |
| 15647 | * <reg1> <op> <reg2>, currently assuming reg2 is a constant |
| 15648 | */ |
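| | /*
| |  * Returns 1 if the branch is always taken, 0 if it is never taken and
| |  * -1 if the outcome is unknown. For example (hypothetical bounds),
| |  * BPF_JGT with reg1 in [100, 200] and reg2 a constant 50 is always
| |  * taken (umin1 > umax2); with reg2 == 150 the outcome is unknown.
| |  */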
| 15649 | static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2, |
| 15650 | u8 opcode, bool is_jmp32) |
| 15651 | { |
| 15652 | struct tnum t1 = is_jmp32 ? tnum_subreg(reg1->var_off) : reg1->var_off; |
| 15653 | struct tnum t2 = is_jmp32 ? tnum_subreg(reg2->var_off) : reg2->var_off; |
| 15654 | u64 umin1 = is_jmp32 ? (u64)reg1->u32_min_value : reg1->umin_value; |
| 15655 | u64 umax1 = is_jmp32 ? (u64)reg1->u32_max_value : reg1->umax_value; |
| 15656 | s64 smin1 = is_jmp32 ? (s64)reg1->s32_min_value : reg1->smin_value; |
| 15657 | s64 smax1 = is_jmp32 ? (s64)reg1->s32_max_value : reg1->smax_value; |
| 15658 | u64 umin2 = is_jmp32 ? (u64)reg2->u32_min_value : reg2->umin_value; |
| 15659 | u64 umax2 = is_jmp32 ? (u64)reg2->u32_max_value : reg2->umax_value; |
| 15660 | s64 smin2 = is_jmp32 ? (s64)reg2->s32_min_value : reg2->smin_value; |
| 15661 | s64 smax2 = is_jmp32 ? (s64)reg2->s32_max_value : reg2->smax_value; |
| 15662 | |
| 15663 | switch (opcode) { |
| 15664 | case BPF_JEQ: |
| 15665 | /* constants, umin/umax and smin/smax checks would be |
| 15666 | * redundant in this case because they all should match |
| 15667 | */ |
| 15668 | if (tnum_is_const(t1) && tnum_is_const(t2)) |
| 15669 | return t1.value == t2.value; |
| 15670 | /* non-overlapping ranges */ |
| 15671 | if (umin1 > umax2 || umax1 < umin2) |
| 15672 | return 0; |
| 15673 | if (smin1 > smax2 || smax1 < smin2) |
| 15674 | return 0; |
| 15675 | if (!is_jmp32) { |
| 15676 | /* if 64-bit ranges are inconclusive, see if we can |
| 15677 | * utilize 32-bit subrange knowledge to eliminate |
| 15678 | * branches that can't be taken a priori |
| 15679 | */ |
| 15680 | if (reg1->u32_min_value > reg2->u32_max_value || |
| 15681 | reg1->u32_max_value < reg2->u32_min_value) |
| 15682 | return 0; |
| 15683 | if (reg1->s32_min_value > reg2->s32_max_value || |
| 15684 | reg1->s32_max_value < reg2->s32_min_value) |
| 15685 | return 0; |
| 15686 | } |
| 15687 | break; |
| 15688 | case BPF_JNE: |
| 15689 | /* constants, umin/umax and smin/smax checks would be |
| 15690 | * redundant in this case because they all should match |
| 15691 | */ |
| 15692 | if (tnum_is_const(t1) && tnum_is_const(t2)) |
| 15693 | return t1.value != t2.value; |
| 15694 | /* non-overlapping ranges */ |
| 15695 | if (umin1 > umax2 || umax1 < umin2) |
| 15696 | return 1; |
| 15697 | if (smin1 > smax2 || smax1 < smin2) |
| 15698 | return 1; |
| 15699 | if (!is_jmp32) { |
| 15700 | /* if 64-bit ranges are inconclusive, see if we can |
| 15701 | * utilize 32-bit subrange knowledge to eliminate |
| 15702 | * branches that can't be taken a priori |
| 15703 | */ |
| 15704 | if (reg1->u32_min_value > reg2->u32_max_value || |
| 15705 | reg1->u32_max_value < reg2->u32_min_value) |
| 15706 | return 1; |
| 15707 | if (reg1->s32_min_value > reg2->s32_max_value || |
| 15708 | reg1->s32_max_value < reg2->s32_min_value) |
| 15709 | return 1; |
| 15710 | } |
| 15711 | break; |
| 15712 | case BPF_JSET: |
| 15713 | if (!is_reg_const(reg2, is_jmp32)) { |
| 15714 | swap(reg1, reg2); |
| 15715 | swap(t1, t2); |
| 15716 | } |
| 15717 | if (!is_reg_const(reg2, is_jmp32)) |
| 15718 | return -1; |
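		/* Bits known to be set in reg1 (value bits outside the tnum's
		 * unknown mask) overlapping the constant mean the AND is
		 * guaranteed non-zero, i.e. the branch is always taken.
		 */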
| 15719 | if ((~t1.mask & t1.value) & t2.value) |
| 15720 | return 1; |
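		/* If no possibly-set bit of reg1 (known-1 or unknown bits)
		 * overlaps the constant, the AND is guaranteed zero and the
		 * branch is never taken.
		 */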
| 15721 | if (!((t1.mask | t1.value) & t2.value)) |
| 15722 | return 0; |
| 15723 | break; |
| 15724 | case BPF_JGT: |
| 15725 | if (umin1 > umax2) |
| 15726 | return 1; |
| 15727 | else if (umax1 <= umin2) |
| 15728 | return 0; |
| 15729 | break; |
| 15730 | case BPF_JSGT: |
| 15731 | if (smin1 > smax2) |
| 15732 | return 1; |
| 15733 | else if (smax1 <= smin2) |
| 15734 | return 0; |
| 15735 | break; |
| 15736 | case BPF_JLT: |
| 15737 | if (umax1 < umin2) |
| 15738 | return 1; |
| 15739 | else if (umin1 >= umax2) |
| 15740 | return 0; |
| 15741 | break; |
| 15742 | case BPF_JSLT: |
| 15743 | if (smax1 < smin2) |
| 15744 | return 1; |
| 15745 | else if (smin1 >= smax2) |
| 15746 | return 0; |
| 15747 | break; |
| 15748 | case BPF_JGE: |
| 15749 | if (umin1 >= umax2) |
| 15750 | return 1; |
| 15751 | else if (umax1 < umin2) |
| 15752 | return 0; |
| 15753 | break; |
| 15754 | case BPF_JSGE: |
| 15755 | if (smin1 >= smax2) |
| 15756 | return 1; |
| 15757 | else if (smax1 < smin2) |
| 15758 | return 0; |
| 15759 | break; |
| 15760 | case BPF_JLE: |
| 15761 | if (umax1 <= umin2) |
| 15762 | return 1; |
| 15763 | else if (umin1 > umax2) |
| 15764 | return 0; |
| 15765 | break; |
| 15766 | case BPF_JSLE: |
| 15767 | if (smax1 <= smin2) |
| 15768 | return 1; |
| 15769 | else if (smin1 > smax2) |
| 15770 | return 0; |
| 15771 | break; |
| 15772 | } |
| 15773 | |
| 15774 | return -1; |
| 15775 | } |
| 15776 | |
| 15777 | static int flip_opcode(u32 opcode) |
| 15778 | { |
| 15779 | /* How can we transform "a <op> b" into "b <op> a"? */ |
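	/* E.g., "a < b" holds exactly when "b > a" does, so BPF_JLT maps to
	 * BPF_JGT; equality, inequality and bit-test opcodes are symmetric
	 * and map to themselves.
	 */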
| 15780 | static const u8 opcode_flip[16] = { |
| 15781 | /* these stay the same */ |
| 15782 | [BPF_JEQ >> 4] = BPF_JEQ, |
| 15783 | [BPF_JNE >> 4] = BPF_JNE, |
| 15784 | [BPF_JSET >> 4] = BPF_JSET, |
| 15785 | /* these swap "lesser" and "greater" (L and G in the opcodes) */ |
| 15786 | [BPF_JGE >> 4] = BPF_JLE, |
| 15787 | [BPF_JGT >> 4] = BPF_JLT, |
| 15788 | [BPF_JLE >> 4] = BPF_JGE, |
| 15789 | [BPF_JLT >> 4] = BPF_JGT, |
| 15790 | [BPF_JSGE >> 4] = BPF_JSLE, |
| 15791 | [BPF_JSGT >> 4] = BPF_JSLT, |
| 15792 | [BPF_JSLE >> 4] = BPF_JSGE, |
| 15793 | [BPF_JSLT >> 4] = BPF_JSGT |
| 15794 | }; |
| 15795 | return opcode_flip[opcode >> 4]; |
| 15796 | } |
| 15797 | |
| 15798 | static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg, |
| 15799 | struct bpf_reg_state *src_reg, |
| 15800 | u8 opcode) |
| 15801 | { |
| 15802 | struct bpf_reg_state *pkt; |
| 15803 | |
| 15804 | if (src_reg->type == PTR_TO_PACKET_END) { |
| 15805 | pkt = dst_reg; |
| 15806 | } else if (dst_reg->type == PTR_TO_PACKET_END) { |
| 15807 | pkt = src_reg; |
| 15808 | opcode = flip_opcode(opcode); |
| 15809 | } else { |
| 15810 | return -1; |
| 15811 | } |
| 15812 | |
| 15813 | if (pkt->range >= 0) |
| 15814 | return -1; |
| 15815 | |
| 15816 | switch (opcode) { |
| 15817 | case BPF_JLE: |
| 15818 | /* pkt <= pkt_end */ |
| 15819 | fallthrough; |
| 15820 | case BPF_JGT: |
| 15821 | /* pkt > pkt_end */ |
| 15822 | if (pkt->range == BEYOND_PKT_END) |
			/* pkt has at least one extra byte beyond pkt_end */
| 15824 | return opcode == BPF_JGT; |
| 15825 | break; |
| 15826 | case BPF_JLT: |
| 15827 | /* pkt < pkt_end */ |
| 15828 | fallthrough; |
| 15829 | case BPF_JGE: |
| 15830 | /* pkt >= pkt_end */ |
| 15831 | if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END) |
| 15832 | return opcode == BPF_JGE; |
| 15833 | break; |
| 15834 | } |
| 15835 | return -1; |
| 15836 | } |
| 15837 | |
| 15838 | /* compute branch direction of the expression "if (<reg1> opcode <reg2>) goto target;" |
| 15839 | * and return: |
| 15840 | * 1 - branch will be taken and "goto target" will be executed |
| 15841 | * 0 - branch will not be taken and fall-through to next insn |
 * -1 - unknown. Example: "if (reg1 < 5)" is unknown when reg1's value
 *      range is [0,10]
| 15844 | */ |
| 15845 | static int is_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2, |
| 15846 | u8 opcode, bool is_jmp32) |
| 15847 | { |
| 15848 | if (reg_is_pkt_pointer_any(reg1) && reg_is_pkt_pointer_any(reg2) && !is_jmp32) |
| 15849 | return is_pkt_ptr_branch_taken(reg1, reg2, opcode); |
| 15850 | |
| 15851 | if (__is_pointer_value(false, reg1) || __is_pointer_value(false, reg2)) { |
| 15852 | u64 val; |
| 15853 | |
| 15854 | /* arrange that reg2 is a scalar, and reg1 is a pointer */ |
| 15855 | if (!is_reg_const(reg2, is_jmp32)) { |
| 15856 | opcode = flip_opcode(opcode); |
| 15857 | swap(reg1, reg2); |
| 15858 | } |
| 15859 | /* and ensure that reg2 is a constant */ |
| 15860 | if (!is_reg_const(reg2, is_jmp32)) |
| 15861 | return -1; |
| 15862 | |
| 15863 | if (!reg_not_null(reg1)) |
| 15864 | return -1; |
| 15865 | |
		/* A valid (non-NULL) pointer never compares equal to zero,
		 * so a test against 0 tells us which branch is taken.
		 */
| 15869 | val = reg_const_value(reg2, is_jmp32); |
| 15870 | if (val != 0) |
| 15871 | return -1; |
| 15872 | |
| 15873 | switch (opcode) { |
| 15874 | case BPF_JEQ: |
| 15875 | return 0; |
| 15876 | case BPF_JNE: |
| 15877 | return 1; |
| 15878 | default: |
| 15879 | return -1; |
| 15880 | } |
| 15881 | } |
| 15882 | |
| 15883 | /* now deal with two scalars, but not necessarily constants */ |
| 15884 | return is_scalar_branch_taken(reg1, reg2, opcode, is_jmp32); |
| 15885 | } |
| 15886 | |
| 15887 | /* Opcode that corresponds to a *false* branch condition. |
| 15888 | * E.g., if r1 < r2, then reverse (false) condition is r1 >= r2 |
| 15889 | */ |
| 15890 | static u8 rev_opcode(u8 opcode) |
| 15891 | { |
| 15892 | switch (opcode) { |
| 15893 | case BPF_JEQ: return BPF_JNE; |
| 15894 | case BPF_JNE: return BPF_JEQ; |
	/* JSET doesn't have its reverse opcode in BPF, so add
| 15896 | * BPF_X flag to denote the reverse of that operation |
| 15897 | */ |
| 15898 | case BPF_JSET: return BPF_JSET | BPF_X; |
| 15899 | case BPF_JSET | BPF_X: return BPF_JSET; |
| 15900 | case BPF_JGE: return BPF_JLT; |
| 15901 | case BPF_JGT: return BPF_JLE; |
| 15902 | case BPF_JLE: return BPF_JGT; |
| 15903 | case BPF_JLT: return BPF_JGE; |
| 15904 | case BPF_JSGE: return BPF_JSLT; |
| 15905 | case BPF_JSGT: return BPF_JSLE; |
| 15906 | case BPF_JSLE: return BPF_JSGT; |
| 15907 | case BPF_JSLT: return BPF_JSGE; |
| 15908 | default: return 0; |
| 15909 | } |
| 15910 | } |
| 15911 | |
/* Refine range knowledge for the <reg1> <op> <reg2> conditional operation. */
| 15913 | static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2, |
| 15914 | u8 opcode, bool is_jmp32) |
| 15915 | { |
| 15916 | struct tnum t; |
| 15917 | u64 val; |
| 15918 | |
	/* In case of GE/GT/SGE/SGT, reuse LE/LT/SLE/SLT logic from below */
| 15920 | switch (opcode) { |
| 15921 | case BPF_JGE: |
| 15922 | case BPF_JGT: |
| 15923 | case BPF_JSGE: |
| 15924 | case BPF_JSGT: |
| 15925 | opcode = flip_opcode(opcode); |
| 15926 | swap(reg1, reg2); |
| 15927 | break; |
| 15928 | default: |
| 15929 | break; |
| 15930 | } |
| 15931 | |
| 15932 | switch (opcode) { |
| 15933 | case BPF_JEQ: |
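		/* In the taken branch both registers hold the same value, so
		 * each one's bounds and tnum can be narrowed to the
		 * intersection of the two.
		 */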
| 15934 | if (is_jmp32) { |
| 15935 | reg1->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value); |
| 15936 | reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value); |
| 15937 | reg1->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value); |
| 15938 | reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value); |
| 15939 | reg2->u32_min_value = reg1->u32_min_value; |
| 15940 | reg2->u32_max_value = reg1->u32_max_value; |
| 15941 | reg2->s32_min_value = reg1->s32_min_value; |
| 15942 | reg2->s32_max_value = reg1->s32_max_value; |
| 15943 | |
| 15944 | t = tnum_intersect(tnum_subreg(reg1->var_off), tnum_subreg(reg2->var_off)); |
| 15945 | reg1->var_off = tnum_with_subreg(reg1->var_off, t); |
| 15946 | reg2->var_off = tnum_with_subreg(reg2->var_off, t); |
| 15947 | } else { |
| 15948 | reg1->umin_value = max(reg1->umin_value, reg2->umin_value); |
| 15949 | reg1->umax_value = min(reg1->umax_value, reg2->umax_value); |
| 15950 | reg1->smin_value = max(reg1->smin_value, reg2->smin_value); |
| 15951 | reg1->smax_value = min(reg1->smax_value, reg2->smax_value); |
| 15952 | reg2->umin_value = reg1->umin_value; |
| 15953 | reg2->umax_value = reg1->umax_value; |
| 15954 | reg2->smin_value = reg1->smin_value; |
| 15955 | reg2->smax_value = reg1->smax_value; |
| 15956 | |
| 15957 | reg1->var_off = tnum_intersect(reg1->var_off, reg2->var_off); |
| 15958 | reg2->var_off = reg1->var_off; |
| 15959 | } |
| 15960 | break; |
| 15961 | case BPF_JNE: |
| 15962 | if (!is_reg_const(reg2, is_jmp32)) |
| 15963 | swap(reg1, reg2); |
| 15964 | if (!is_reg_const(reg2, is_jmp32)) |
| 15965 | break; |
| 15966 | |
| 15967 | /* try to recompute the bound of reg1 if reg2 is a const and |
| 15968 | * is exactly the edge of reg1. |
| 15969 | */ |
| 15970 | val = reg_const_value(reg2, is_jmp32); |
| 15971 | if (is_jmp32) { |
			/* u32_min_value cannot be 0xffffffff at this point:
			 * otherwise u32_max_value would be 0xffffffff as well,
			 * both reg1 and reg2 would be constants, the jump would
			 * have been predicted, and reg_set_min_max() would not
			 * be called.
| 15977 | * |
| 15978 | * Same reasoning works for all {u,s}{min,max}{32,64} cases |
| 15979 | * below. |
| 15980 | */ |
| 15981 | if (reg1->u32_min_value == (u32)val) |
| 15982 | reg1->u32_min_value++; |
| 15983 | if (reg1->u32_max_value == (u32)val) |
| 15984 | reg1->u32_max_value--; |
| 15985 | if (reg1->s32_min_value == (s32)val) |
| 15986 | reg1->s32_min_value++; |
| 15987 | if (reg1->s32_max_value == (s32)val) |
| 15988 | reg1->s32_max_value--; |
| 15989 | } else { |
| 15990 | if (reg1->umin_value == (u64)val) |
| 15991 | reg1->umin_value++; |
| 15992 | if (reg1->umax_value == (u64)val) |
| 15993 | reg1->umax_value--; |
| 15994 | if (reg1->smin_value == (s64)val) |
| 15995 | reg1->smin_value++; |
| 15996 | if (reg1->smax_value == (s64)val) |
| 15997 | reg1->smax_value--; |
| 15998 | } |
| 15999 | break; |
| 16000 | case BPF_JSET: |
| 16001 | if (!is_reg_const(reg2, is_jmp32)) |
| 16002 | swap(reg1, reg2); |
| 16003 | if (!is_reg_const(reg2, is_jmp32)) |
| 16004 | break; |
| 16005 | val = reg_const_value(reg2, is_jmp32); |
| 16006 | /* BPF_JSET (i.e., TRUE branch, *not* BPF_JSET | BPF_X) |
		 * requires a single bit to learn something useful. E.g., if we
| 16008 | * know that `r1 & 0x3` is true, then which bits (0, 1, or both) |
| 16009 | * are actually set? We can learn something definite only if |
| 16010 | * it's a single-bit value to begin with. |
| 16011 | * |
| 16012 | * BPF_JSET | BPF_X (i.e., negation of BPF_JSET) doesn't have |
| 16013 | * this restriction. I.e., !(r1 & 0x3) means neither bit 0 nor |
| 16014 | * bit 1 is set, which we can readily use in adjustments. |
| 16015 | */ |
| 16016 | if (!is_power_of_2(val)) |
| 16017 | break; |
| 16018 | if (is_jmp32) { |
| 16019 | t = tnum_or(tnum_subreg(reg1->var_off), tnum_const(val)); |
| 16020 | reg1->var_off = tnum_with_subreg(reg1->var_off, t); |
| 16021 | } else { |
| 16022 | reg1->var_off = tnum_or(reg1->var_off, tnum_const(val)); |
| 16023 | } |
| 16024 | break; |
| 16025 | case BPF_JSET | BPF_X: /* reverse of BPF_JSET, see rev_opcode() */ |
| 16026 | if (!is_reg_const(reg2, is_jmp32)) |
| 16027 | swap(reg1, reg2); |
| 16028 | if (!is_reg_const(reg2, is_jmp32)) |
| 16029 | break; |
| 16030 | val = reg_const_value(reg2, is_jmp32); |
| 16031 | if (is_jmp32) { |
| 16032 | t = tnum_and(tnum_subreg(reg1->var_off), tnum_const(~val)); |
| 16033 | reg1->var_off = tnum_with_subreg(reg1->var_off, t); |
| 16034 | } else { |
| 16035 | reg1->var_off = tnum_and(reg1->var_off, tnum_const(~val)); |
| 16036 | } |
| 16037 | break; |
| 16038 | case BPF_JLE: |
| 16039 | if (is_jmp32) { |
| 16040 | reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value); |
| 16041 | reg2->u32_min_value = max(reg1->u32_min_value, reg2->u32_min_value); |
| 16042 | } else { |
| 16043 | reg1->umax_value = min(reg1->umax_value, reg2->umax_value); |
| 16044 | reg2->umin_value = max(reg1->umin_value, reg2->umin_value); |
| 16045 | } |
| 16046 | break; |
| 16047 | case BPF_JLT: |
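		/* In the taken branch of "r1 < r2", r1 cannot exceed r2's max
		 * minus one, and r2 must be at least r1's min plus one. E.g.,
		 * r1 in [0, 100] and r2 in [10, 20] refine to r1 in [0, 19]
		 * and r2 in [10, 20].
		 */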
| 16048 | if (is_jmp32) { |
| 16049 | reg1->u32_max_value = min(reg1->u32_max_value, reg2->u32_max_value - 1); |
| 16050 | reg2->u32_min_value = max(reg1->u32_min_value + 1, reg2->u32_min_value); |
| 16051 | } else { |
| 16052 | reg1->umax_value = min(reg1->umax_value, reg2->umax_value - 1); |
| 16053 | reg2->umin_value = max(reg1->umin_value + 1, reg2->umin_value); |
| 16054 | } |
| 16055 | break; |
| 16056 | case BPF_JSLE: |
| 16057 | if (is_jmp32) { |
| 16058 | reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value); |
| 16059 | reg2->s32_min_value = max(reg1->s32_min_value, reg2->s32_min_value); |
| 16060 | } else { |
| 16061 | reg1->smax_value = min(reg1->smax_value, reg2->smax_value); |
| 16062 | reg2->smin_value = max(reg1->smin_value, reg2->smin_value); |
| 16063 | } |
| 16064 | break; |
| 16065 | case BPF_JSLT: |
| 16066 | if (is_jmp32) { |
| 16067 | reg1->s32_max_value = min(reg1->s32_max_value, reg2->s32_max_value - 1); |
| 16068 | reg2->s32_min_value = max(reg1->s32_min_value + 1, reg2->s32_min_value); |
| 16069 | } else { |
| 16070 | reg1->smax_value = min(reg1->smax_value, reg2->smax_value - 1); |
| 16071 | reg2->smin_value = max(reg1->smin_value + 1, reg2->smin_value); |
| 16072 | } |
| 16073 | break; |
| 16074 | default: |
| 16075 | return; |
| 16076 | } |
| 16077 | } |
| 16078 | |
| 16079 | /* Adjusts the register min/max values in the case that the dst_reg and |
| 16080 | * src_reg are both SCALAR_VALUE registers (or we are simply doing a BPF_K |
| 16081 | * check, in which case we have a fake SCALAR_VALUE representing insn->imm). |
| 16082 | * Technically we can do similar adjustments for pointers to the same object, |
| 16083 | * but we don't support that right now. |
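 *
 * Both branch states are refined here: the TRUE (jump) branch under
 * 'opcode' and the FALSE (fallthrough) branch under rev_opcode(opcode).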
| 16084 | */ |
| 16085 | static int reg_set_min_max(struct bpf_verifier_env *env, |
| 16086 | struct bpf_reg_state *true_reg1, |
| 16087 | struct bpf_reg_state *true_reg2, |
| 16088 | struct bpf_reg_state *false_reg1, |
| 16089 | struct bpf_reg_state *false_reg2, |
| 16090 | u8 opcode, bool is_jmp32) |
| 16091 | { |
| 16092 | int err; |
| 16093 | |
| 16094 | /* If either register is a pointer, we can't learn anything about its |
	 * variable offset from the compare (unless they were pointers into
| 16096 | * the same object, but we don't bother with that). |
| 16097 | */ |
| 16098 | if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE) |
| 16099 | return 0; |
| 16100 | |
| 16101 | /* fallthrough (FALSE) branch */ |
| 16102 | regs_refine_cond_op(false_reg1, false_reg2, rev_opcode(opcode), is_jmp32); |
| 16103 | reg_bounds_sync(false_reg1); |
| 16104 | reg_bounds_sync(false_reg2); |
| 16105 | |
| 16106 | /* jump (TRUE) branch */ |
| 16107 | regs_refine_cond_op(true_reg1, true_reg2, opcode, is_jmp32); |
| 16108 | reg_bounds_sync(true_reg1); |
| 16109 | reg_bounds_sync(true_reg2); |
| 16110 | |
| 16111 | err = reg_bounds_sanity_check(env, true_reg1, "true_reg1"); |
| 16112 | err = err ?: reg_bounds_sanity_check(env, true_reg2, "true_reg2"); |
| 16113 | err = err ?: reg_bounds_sanity_check(env, false_reg1, "false_reg1"); |
| 16114 | err = err ?: reg_bounds_sanity_check(env, false_reg2, "false_reg2"); |
| 16115 | return err; |
| 16116 | } |
| 16117 | |
| 16118 | static void mark_ptr_or_null_reg(struct bpf_func_state *state, |
| 16119 | struct bpf_reg_state *reg, u32 id, |
| 16120 | bool is_null) |
| 16121 | { |
| 16122 | if (type_may_be_null(reg->type) && reg->id == id && |
| 16123 | (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) { |
| 16124 | /* Old offset (both fixed and variable parts) should have been |
| 16125 | * known-zero, because we don't allow pointer arithmetic on |
| 16126 | * pointers that might be NULL. If we see this happening, don't |
| 16127 | * convert the register. |
| 16128 | * |
| 16129 | * But in some cases, some helpers that return local kptrs |
| 16130 | * advance offset for the returned pointer. In those cases, it |
| 16131 | * is fine to expect to see reg->off. |
| 16132 | */ |
| 16133 | if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0))) |
| 16134 | return; |
| 16135 | if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) && |
| 16136 | WARN_ON_ONCE(reg->off)) |
| 16137 | return; |
| 16138 | |
| 16139 | if (is_null) { |
| 16140 | reg->type = SCALAR_VALUE; |
			/* We don't need id and ref_obj_id from this point
			 * onwards anymore, so reset them to give state pruning
			 * a chance to take effect.
			 */
| 16145 | reg->id = 0; |
| 16146 | reg->ref_obj_id = 0; |
| 16147 | |
| 16148 | return; |
| 16149 | } |
| 16150 | |
| 16151 | mark_ptr_not_null_reg(reg); |
| 16152 | |
| 16153 | if (!reg_may_point_to_spin_lock(reg)) { |
| 16154 | /* For not-NULL ptr, reg->ref_obj_id will be reset |
| 16155 | * in release_reference(). |
| 16156 | * |
| 16157 | * reg->id is still used by spin_lock ptr. Other |
| 16158 | * than spin_lock ptr type, reg->id can be reset. |
| 16159 | */ |
| 16160 | reg->id = 0; |
| 16161 | } |
| 16162 | } |
| 16163 | } |
| 16164 | |
| 16165 | /* The logic is similar to find_good_pkt_pointers(), both could eventually |
| 16166 | * be folded together at some point. |
| 16167 | */ |
| 16168 | static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, |
| 16169 | bool is_null) |
| 16170 | { |
| 16171 | struct bpf_func_state *state = vstate->frame[vstate->curframe]; |
| 16172 | struct bpf_reg_state *regs = state->regs, *reg; |
| 16173 | u32 ref_obj_id = regs[regno].ref_obj_id; |
| 16174 | u32 id = regs[regno].id; |
| 16175 | |
| 16176 | if (ref_obj_id && ref_obj_id == id && is_null) |
| 16177 | /* regs[regno] is in the " == NULL" branch. |
| 16178 | * No one could have freed the reference state before |
| 16179 | * doing the NULL check. |
| 16180 | */ |
| 16181 | WARN_ON_ONCE(release_reference_nomark(vstate, id)); |
| 16182 | |
| 16183 | bpf_for_each_reg_in_vstate(vstate, state, reg, ({ |
| 16184 | mark_ptr_or_null_reg(state, reg, id, is_null); |
| 16185 | })); |
| 16186 | } |
| 16187 | |
| 16188 | static bool try_match_pkt_pointers(const struct bpf_insn *insn, |
| 16189 | struct bpf_reg_state *dst_reg, |
| 16190 | struct bpf_reg_state *src_reg, |
| 16191 | struct bpf_verifier_state *this_branch, |
| 16192 | struct bpf_verifier_state *other_branch) |
| 16193 | { |
| 16194 | if (BPF_SRC(insn->code) != BPF_X) |
| 16195 | return false; |
| 16196 | |
| 16197 | /* Pointers are always 64-bit. */ |
| 16198 | if (BPF_CLASS(insn->code) == BPF_JMP32) |
| 16199 | return false; |
| 16200 | |
| 16201 | switch (BPF_OP(insn->code)) { |
| 16202 | case BPF_JGT: |
| 16203 | if ((dst_reg->type == PTR_TO_PACKET && |
| 16204 | src_reg->type == PTR_TO_PACKET_END) || |
| 16205 | (dst_reg->type == PTR_TO_PACKET_META && |
| 16206 | reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { |
| 16207 | /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ |
| 16208 | find_good_pkt_pointers(this_branch, dst_reg, |
| 16209 | dst_reg->type, false); |
| 16210 | mark_pkt_end(other_branch, insn->dst_reg, true); |
| 16211 | } else if ((dst_reg->type == PTR_TO_PACKET_END && |
| 16212 | src_reg->type == PTR_TO_PACKET) || |
| 16213 | (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && |
| 16214 | src_reg->type == PTR_TO_PACKET_META)) { |
| 16215 | /* pkt_end > pkt_data', pkt_data > pkt_meta' */ |
| 16216 | find_good_pkt_pointers(other_branch, src_reg, |
| 16217 | src_reg->type, true); |
| 16218 | mark_pkt_end(this_branch, insn->src_reg, false); |
| 16219 | } else { |
| 16220 | return false; |
| 16221 | } |
| 16222 | break; |
| 16223 | case BPF_JLT: |
| 16224 | if ((dst_reg->type == PTR_TO_PACKET && |
| 16225 | src_reg->type == PTR_TO_PACKET_END) || |
| 16226 | (dst_reg->type == PTR_TO_PACKET_META && |
| 16227 | reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { |
| 16228 | /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ |
| 16229 | find_good_pkt_pointers(other_branch, dst_reg, |
| 16230 | dst_reg->type, true); |
| 16231 | mark_pkt_end(this_branch, insn->dst_reg, false); |
| 16232 | } else if ((dst_reg->type == PTR_TO_PACKET_END && |
| 16233 | src_reg->type == PTR_TO_PACKET) || |
| 16234 | (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && |
| 16235 | src_reg->type == PTR_TO_PACKET_META)) { |
| 16236 | /* pkt_end < pkt_data', pkt_data > pkt_meta' */ |
| 16237 | find_good_pkt_pointers(this_branch, src_reg, |
| 16238 | src_reg->type, false); |
| 16239 | mark_pkt_end(other_branch, insn->src_reg, true); |
| 16240 | } else { |
| 16241 | return false; |
| 16242 | } |
| 16243 | break; |
| 16244 | case BPF_JGE: |
| 16245 | if ((dst_reg->type == PTR_TO_PACKET && |
| 16246 | src_reg->type == PTR_TO_PACKET_END) || |
| 16247 | (dst_reg->type == PTR_TO_PACKET_META && |
| 16248 | reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { |
| 16249 | /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ |
| 16250 | find_good_pkt_pointers(this_branch, dst_reg, |
| 16251 | dst_reg->type, true); |
| 16252 | mark_pkt_end(other_branch, insn->dst_reg, false); |
| 16253 | } else if ((dst_reg->type == PTR_TO_PACKET_END && |
| 16254 | src_reg->type == PTR_TO_PACKET) || |
| 16255 | (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && |
| 16256 | src_reg->type == PTR_TO_PACKET_META)) { |
| 16257 | /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ |
| 16258 | find_good_pkt_pointers(other_branch, src_reg, |
| 16259 | src_reg->type, false); |
| 16260 | mark_pkt_end(this_branch, insn->src_reg, true); |
| 16261 | } else { |
| 16262 | return false; |
| 16263 | } |
| 16264 | break; |
| 16265 | case BPF_JLE: |
| 16266 | if ((dst_reg->type == PTR_TO_PACKET && |
| 16267 | src_reg->type == PTR_TO_PACKET_END) || |
| 16268 | (dst_reg->type == PTR_TO_PACKET_META && |
| 16269 | reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { |
| 16270 | /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ |
| 16271 | find_good_pkt_pointers(other_branch, dst_reg, |
| 16272 | dst_reg->type, false); |
| 16273 | mark_pkt_end(this_branch, insn->dst_reg, true); |
| 16274 | } else if ((dst_reg->type == PTR_TO_PACKET_END && |
| 16275 | src_reg->type == PTR_TO_PACKET) || |
| 16276 | (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && |
| 16277 | src_reg->type == PTR_TO_PACKET_META)) { |
| 16278 | /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ |
| 16279 | find_good_pkt_pointers(this_branch, src_reg, |
| 16280 | src_reg->type, true); |
| 16281 | mark_pkt_end(other_branch, insn->src_reg, false); |
| 16282 | } else { |
| 16283 | return false; |
| 16284 | } |
| 16285 | break; |
| 16286 | default: |
| 16287 | return false; |
| 16288 | } |
| 16289 | |
| 16290 | return true; |
| 16291 | } |
| 16292 | |
| 16293 | static void __collect_linked_regs(struct linked_regs *reg_set, struct bpf_reg_state *reg, |
| 16294 | u32 id, u32 frameno, u32 spi_or_reg, bool is_reg) |
| 16295 | { |
| 16296 | struct linked_reg *e; |
| 16297 | |
| 16298 | if (reg->type != SCALAR_VALUE || (reg->id & ~BPF_ADD_CONST) != id) |
| 16299 | return; |
| 16300 | |
| 16301 | e = linked_regs_push(reg_set); |
| 16302 | if (e) { |
| 16303 | e->frameno = frameno; |
| 16304 | e->is_reg = is_reg; |
| 16305 | e->regno = spi_or_reg; |
| 16306 | } else { |
| 16307 | reg->id = 0; |
| 16308 | } |
| 16309 | } |
| 16310 | |
/* For every scalar register or spilled scalar register R in the verifier
 * state, save R in linked_regs if R->id == id.
 * If too many Rs share the same id, reset the id of the leftover Rs.
| 16314 | */ |
| 16315 | static void collect_linked_regs(struct bpf_verifier_state *vstate, u32 id, |
| 16316 | struct linked_regs *linked_regs) |
| 16317 | { |
| 16318 | struct bpf_func_state *func; |
| 16319 | struct bpf_reg_state *reg; |
| 16320 | int i, j; |
| 16321 | |
| 16322 | id = id & ~BPF_ADD_CONST; |
| 16323 | for (i = vstate->curframe; i >= 0; i--) { |
| 16324 | func = vstate->frame[i]; |
| 16325 | for (j = 0; j < BPF_REG_FP; j++) { |
| 16326 | reg = &func->regs[j]; |
| 16327 | __collect_linked_regs(linked_regs, reg, id, i, j, true); |
| 16328 | } |
| 16329 | for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { |
| 16330 | if (!is_spilled_reg(&func->stack[j])) |
| 16331 | continue; |
| 16332 | reg = &func->stack[j].spilled_ptr; |
| 16333 | __collect_linked_regs(linked_regs, reg, id, i, j, false); |
| 16334 | } |
| 16335 | } |
| 16336 | } |
| 16337 | |
| 16338 | /* For all R in linked_regs, copy known_reg range into R |
| 16339 | * if R->id == known_reg->id. |
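 *
 * E.g., if r6 was tracked as "r7 + 4" (same base id with BPF_ADD_CONST set
 * and reg->off == 4), then learning that r7 == 10 lets sync_linked_regs()
 * conclude that r6 == 14.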
| 16340 | */ |
| 16341 | static void sync_linked_regs(struct bpf_verifier_state *vstate, struct bpf_reg_state *known_reg, |
| 16342 | struct linked_regs *linked_regs) |
| 16343 | { |
| 16344 | struct bpf_reg_state fake_reg; |
| 16345 | struct bpf_reg_state *reg; |
| 16346 | struct linked_reg *e; |
| 16347 | int i; |
| 16348 | |
| 16349 | for (i = 0; i < linked_regs->cnt; ++i) { |
| 16350 | e = &linked_regs->entries[i]; |
| 16351 | reg = e->is_reg ? &vstate->frame[e->frameno]->regs[e->regno] |
| 16352 | : &vstate->frame[e->frameno]->stack[e->spi].spilled_ptr; |
| 16353 | if (reg->type != SCALAR_VALUE || reg == known_reg) |
| 16354 | continue; |
| 16355 | if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST)) |
| 16356 | continue; |
| 16357 | if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) || |
| 16358 | reg->off == known_reg->off) { |
| 16359 | s32 saved_subreg_def = reg->subreg_def; |
| 16360 | |
| 16361 | copy_register_state(reg, known_reg); |
| 16362 | reg->subreg_def = saved_subreg_def; |
| 16363 | } else { |
| 16364 | s32 saved_subreg_def = reg->subreg_def; |
| 16365 | s32 saved_off = reg->off; |
| 16366 | |
| 16367 | fake_reg.type = SCALAR_VALUE; |
| 16368 | __mark_reg_known(&fake_reg, (s32)reg->off - (s32)known_reg->off); |
| 16369 | |
| 16370 | /* reg = known_reg; reg += delta */ |
| 16371 | copy_register_state(reg, known_reg); |
| 16372 | /* |
| 16373 | * Must preserve off, id and add_const flag, |
| 16374 | * otherwise another sync_linked_regs() will be incorrect. |
| 16375 | */ |
| 16376 | reg->off = saved_off; |
| 16377 | reg->subreg_def = saved_subreg_def; |
| 16378 | |
| 16379 | scalar32_min_max_add(reg, &fake_reg); |
| 16380 | scalar_min_max_add(reg, &fake_reg); |
| 16381 | reg->var_off = tnum_add(reg->var_off, fake_reg.var_off); |
| 16382 | } |
| 16383 | } |
| 16384 | } |
| 16385 | |
| 16386 | static int check_cond_jmp_op(struct bpf_verifier_env *env, |
| 16387 | struct bpf_insn *insn, int *insn_idx) |
| 16388 | { |
| 16389 | struct bpf_verifier_state *this_branch = env->cur_state; |
| 16390 | struct bpf_verifier_state *other_branch; |
| 16391 | struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; |
| 16392 | struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; |
| 16393 | struct bpf_reg_state *eq_branch_regs; |
| 16394 | struct linked_regs linked_regs = {}; |
| 16395 | u8 opcode = BPF_OP(insn->code); |
| 16396 | int insn_flags = 0; |
| 16397 | bool is_jmp32; |
| 16398 | int pred = -1; |
| 16399 | int err; |
| 16400 | |
| 16401 | /* Only conditional jumps are expected to reach here. */ |
| 16402 | if (opcode == BPF_JA || opcode > BPF_JCOND) { |
| 16403 | verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); |
| 16404 | return -EINVAL; |
| 16405 | } |
| 16406 | |
| 16407 | if (opcode == BPF_JCOND) { |
| 16408 | struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; |
| 16409 | int idx = *insn_idx; |
| 16410 | |
| 16411 | if (insn->code != (BPF_JMP | BPF_JCOND) || |
| 16412 | insn->src_reg != BPF_MAY_GOTO || |
| 16413 | insn->dst_reg || insn->imm) { |
| 16414 | verbose(env, "invalid may_goto imm %d\n", insn->imm); |
| 16415 | return -EINVAL; |
| 16416 | } |
| 16417 | prev_st = find_prev_entry(env, cur_st->parent, idx); |
| 16418 | |
| 16419 | /* branch out 'fallthrough' insn as a new state to explore */ |
| 16420 | queued_st = push_stack(env, idx + 1, idx, false); |
| 16421 | if (!queued_st) |
| 16422 | return -ENOMEM; |
| 16423 | |
| 16424 | queued_st->may_goto_depth++; |
| 16425 | if (prev_st) |
| 16426 | widen_imprecise_scalars(env, prev_st, queued_st); |
| 16427 | *insn_idx += insn->off; |
| 16428 | return 0; |
| 16429 | } |
| 16430 | |
| 16431 | /* check src2 operand */ |
| 16432 | err = check_reg_arg(env, insn->dst_reg, SRC_OP); |
| 16433 | if (err) |
| 16434 | return err; |
| 16435 | |
| 16436 | dst_reg = ®s[insn->dst_reg]; |
| 16437 | if (BPF_SRC(insn->code) == BPF_X) { |
| 16438 | if (insn->imm != 0) { |
| 16439 | verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); |
| 16440 | return -EINVAL; |
| 16441 | } |
| 16442 | |
| 16443 | /* check src1 operand */ |
| 16444 | err = check_reg_arg(env, insn->src_reg, SRC_OP); |
| 16445 | if (err) |
| 16446 | return err; |
| 16447 | |
| 16448 | src_reg = ®s[insn->src_reg]; |
| 16449 | if (!(reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg)) && |
| 16450 | is_pointer_value(env, insn->src_reg)) { |
| 16451 | verbose(env, "R%d pointer comparison prohibited\n", |
| 16452 | insn->src_reg); |
| 16453 | return -EACCES; |
| 16454 | } |
| 16455 | |
| 16456 | if (src_reg->type == PTR_TO_STACK) |
| 16457 | insn_flags |= INSN_F_SRC_REG_STACK; |
| 16458 | if (dst_reg->type == PTR_TO_STACK) |
| 16459 | insn_flags |= INSN_F_DST_REG_STACK; |
| 16460 | } else { |
| 16461 | if (insn->src_reg != BPF_REG_0) { |
| 16462 | verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); |
| 16463 | return -EINVAL; |
| 16464 | } |
| 16465 | src_reg = &env->fake_reg[0]; |
| 16466 | memset(src_reg, 0, sizeof(*src_reg)); |
| 16467 | src_reg->type = SCALAR_VALUE; |
| 16468 | __mark_reg_known(src_reg, insn->imm); |
| 16469 | |
| 16470 | if (dst_reg->type == PTR_TO_STACK) |
| 16471 | insn_flags |= INSN_F_DST_REG_STACK; |
| 16472 | } |
| 16473 | |
| 16474 | if (insn_flags) { |
| 16475 | err = push_insn_history(env, this_branch, insn_flags, 0); |
| 16476 | if (err) |
| 16477 | return err; |
| 16478 | } |
| 16479 | |
| 16480 | is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; |
| 16481 | pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32); |
| 16482 | if (pred >= 0) { |
| 16483 | /* If we get here with a dst_reg pointer type it is because |
| 16484 | * above is_branch_taken() special cased the 0 comparison. |
| 16485 | */ |
| 16486 | if (!__is_pointer_value(false, dst_reg)) |
| 16487 | err = mark_chain_precision(env, insn->dst_reg); |
| 16488 | if (BPF_SRC(insn->code) == BPF_X && !err && |
| 16489 | !__is_pointer_value(false, src_reg)) |
| 16490 | err = mark_chain_precision(env, insn->src_reg); |
| 16491 | if (err) |
| 16492 | return err; |
| 16493 | } |
| 16494 | |
| 16495 | if (pred == 1) { |
| 16496 | /* Only follow the goto, ignore fall-through. If needed, push |
| 16497 | * the fall-through branch for simulation under speculative |
| 16498 | * execution. |
| 16499 | */ |
| 16500 | if (!env->bypass_spec_v1 && |
| 16501 | !sanitize_speculative_path(env, insn, *insn_idx + 1, |
| 16502 | *insn_idx)) |
| 16503 | return -EFAULT; |
| 16504 | if (env->log.level & BPF_LOG_LEVEL) |
| 16505 | print_insn_state(env, this_branch, this_branch->curframe); |
| 16506 | *insn_idx += insn->off; |
| 16507 | return 0; |
| 16508 | } else if (pred == 0) { |
| 16509 | /* Only follow the fall-through branch, since that's where the |
| 16510 | * program will go. If needed, push the goto branch for |
| 16511 | * simulation under speculative execution. |
| 16512 | */ |
| 16513 | if (!env->bypass_spec_v1 && |
| 16514 | !sanitize_speculative_path(env, insn, |
| 16515 | *insn_idx + insn->off + 1, |
| 16516 | *insn_idx)) |
| 16517 | return -EFAULT; |
| 16518 | if (env->log.level & BPF_LOG_LEVEL) |
| 16519 | print_insn_state(env, this_branch, this_branch->curframe); |
| 16520 | return 0; |
| 16521 | } |
| 16522 | |
| 16523 | /* Push scalar registers sharing same ID to jump history, |
| 16524 | * do this before creating 'other_branch', so that both |
| 16525 | * 'this_branch' and 'other_branch' share this history |
| 16526 | * if parent state is created. |
| 16527 | */ |
| 16528 | if (BPF_SRC(insn->code) == BPF_X && src_reg->type == SCALAR_VALUE && src_reg->id) |
| 16529 | collect_linked_regs(this_branch, src_reg->id, &linked_regs); |
| 16530 | if (dst_reg->type == SCALAR_VALUE && dst_reg->id) |
| 16531 | collect_linked_regs(this_branch, dst_reg->id, &linked_regs); |
| 16532 | if (linked_regs.cnt > 1) { |
| 16533 | err = push_insn_history(env, this_branch, 0, linked_regs_pack(&linked_regs)); |
| 16534 | if (err) |
| 16535 | return err; |
| 16536 | } |
| 16537 | |
| 16538 | other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, |
| 16539 | false); |
| 16540 | if (!other_branch) |
| 16541 | return -EFAULT; |
| 16542 | other_branch_regs = other_branch->frame[other_branch->curframe]->regs; |
| 16543 | |
| 16544 | if (BPF_SRC(insn->code) == BPF_X) { |
| 16545 | err = reg_set_min_max(env, |
| 16546 | &other_branch_regs[insn->dst_reg], |
| 16547 | &other_branch_regs[insn->src_reg], |
| 16548 | dst_reg, src_reg, opcode, is_jmp32); |
| 16549 | } else /* BPF_SRC(insn->code) == BPF_K */ { |
		/* reg_set_min_max() can mangle the fake_reg. Make a copy
		 * so that these are two different memory locations. The
		 * src_reg is not used beyond this point in the BPF_K case.
		 */
| 16554 | memcpy(&env->fake_reg[1], &env->fake_reg[0], |
| 16555 | sizeof(env->fake_reg[0])); |
| 16556 | err = reg_set_min_max(env, |
| 16557 | &other_branch_regs[insn->dst_reg], |
| 16558 | &env->fake_reg[0], |
| 16559 | dst_reg, &env->fake_reg[1], |
| 16560 | opcode, is_jmp32); |
| 16561 | } |
| 16562 | if (err) |
| 16563 | return err; |
| 16564 | |
| 16565 | if (BPF_SRC(insn->code) == BPF_X && |
| 16566 | src_reg->type == SCALAR_VALUE && src_reg->id && |
| 16567 | !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { |
| 16568 | sync_linked_regs(this_branch, src_reg, &linked_regs); |
| 16569 | sync_linked_regs(other_branch, &other_branch_regs[insn->src_reg], &linked_regs); |
| 16570 | } |
| 16571 | if (dst_reg->type == SCALAR_VALUE && dst_reg->id && |
| 16572 | !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) { |
| 16573 | sync_linked_regs(this_branch, dst_reg, &linked_regs); |
| 16574 | sync_linked_regs(other_branch, &other_branch_regs[insn->dst_reg], &linked_regs); |
| 16575 | } |
| 16576 | |
| 16577 | /* if one pointer register is compared to another pointer |
| 16578 | * register check if PTR_MAYBE_NULL could be lifted. |
| 16579 | * E.g. register A - maybe null |
| 16580 | * register B - not null |
| 16581 | * for JNE A, B, ... - A is not null in the false branch; |
| 16582 | * for JEQ A, B, ... - A is not null in the true branch. |
| 16583 | * |
	 * Since a PTR_TO_BTF_ID points to a kernel struct that does
	 * not need to be null checked by the BPF program (i.e., it
	 * could be null even without the PTR_MAYBE_NULL marking),
	 * only propagate nullness when neither reg is of that type.
| 16588 | */ |
| 16589 | if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X && |
| 16590 | __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) && |
| 16591 | type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) && |
| 16592 | base_type(src_reg->type) != PTR_TO_BTF_ID && |
| 16593 | base_type(dst_reg->type) != PTR_TO_BTF_ID) { |
| 16594 | eq_branch_regs = NULL; |
| 16595 | switch (opcode) { |
| 16596 | case BPF_JEQ: |
| 16597 | eq_branch_regs = other_branch_regs; |
| 16598 | break; |
| 16599 | case BPF_JNE: |
| 16600 | eq_branch_regs = regs; |
| 16601 | break; |
| 16602 | default: |
| 16603 | /* do nothing */ |
| 16604 | break; |
| 16605 | } |
| 16606 | if (eq_branch_regs) { |
| 16607 | if (type_may_be_null(src_reg->type)) |
| 16608 | mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]); |
| 16609 | else |
| 16610 | mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]); |
| 16611 | } |
| 16612 | } |
| 16613 | |
| 16614 | /* detect if R == 0 where R is returned from bpf_map_lookup_elem(). |
	 * NOTE: these optimizations below are related to pointer comparison,
| 16616 | * which will never be JMP32. |
| 16617 | */ |
| 16618 | if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && |
| 16619 | insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && |
| 16620 | type_may_be_null(dst_reg->type)) { |
		/* Mark all identical registers in each branch as either
		 * safe or unknown, depending on whether the R == 0 or
		 * R != 0 condition holds.
		 */
| 16624 | mark_ptr_or_null_regs(this_branch, insn->dst_reg, |
| 16625 | opcode == BPF_JNE); |
| 16626 | mark_ptr_or_null_regs(other_branch, insn->dst_reg, |
| 16627 | opcode == BPF_JEQ); |
| 16628 | } else if (!try_match_pkt_pointers(insn, dst_reg, ®s[insn->src_reg], |
| 16629 | this_branch, other_branch) && |
| 16630 | is_pointer_value(env, insn->dst_reg)) { |
| 16631 | verbose(env, "R%d pointer comparison prohibited\n", |
| 16632 | insn->dst_reg); |
| 16633 | return -EACCES; |
| 16634 | } |
| 16635 | if (env->log.level & BPF_LOG_LEVEL) |
| 16636 | print_insn_state(env, this_branch, this_branch->curframe); |
| 16637 | return 0; |
| 16638 | } |
| 16639 | |
| 16640 | /* verify BPF_LD_IMM64 instruction */ |
| 16641 | static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) |
| 16642 | { |
| 16643 | struct bpf_insn_aux_data *aux = cur_aux(env); |
| 16644 | struct bpf_reg_state *regs = cur_regs(env); |
| 16645 | struct bpf_reg_state *dst_reg; |
| 16646 | struct bpf_map *map; |
| 16647 | int err; |
| 16648 | |
| 16649 | if (BPF_SIZE(insn->code) != BPF_DW) { |
| 16650 | verbose(env, "invalid BPF_LD_IMM insn\n"); |
| 16651 | return -EINVAL; |
| 16652 | } |
| 16653 | if (insn->off != 0) { |
| 16654 | verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); |
| 16655 | return -EINVAL; |
| 16656 | } |
| 16657 | |
| 16658 | err = check_reg_arg(env, insn->dst_reg, DST_OP); |
| 16659 | if (err) |
| 16660 | return err; |
| 16661 | |
| 16662 | dst_reg = ®s[insn->dst_reg]; |
| 16663 | if (insn->src_reg == 0) { |
| 16664 | u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; |
| 16665 | |
| 16666 | dst_reg->type = SCALAR_VALUE; |
| 16667 | __mark_reg_known(®s[insn->dst_reg], imm); |
| 16668 | return 0; |
| 16669 | } |
| 16670 | |
| 16671 | /* All special src_reg cases are listed below. From this point onwards |
| 16672 | * we either succeed and assign a corresponding dst_reg->type after |
| 16673 | * zeroing the offset, or fail and reject the program. |
| 16674 | */ |
| 16675 | mark_reg_known_zero(env, regs, insn->dst_reg); |
| 16676 | |
| 16677 | if (insn->src_reg == BPF_PSEUDO_BTF_ID) { |
| 16678 | dst_reg->type = aux->btf_var.reg_type; |
| 16679 | switch (base_type(dst_reg->type)) { |
| 16680 | case PTR_TO_MEM: |
| 16681 | dst_reg->mem_size = aux->btf_var.mem_size; |
| 16682 | break; |
| 16683 | case PTR_TO_BTF_ID: |
| 16684 | dst_reg->btf = aux->btf_var.btf; |
| 16685 | dst_reg->btf_id = aux->btf_var.btf_id; |
| 16686 | break; |
| 16687 | default: |
| 16688 | verbose(env, "bpf verifier is misconfigured\n"); |
| 16689 | return -EFAULT; |
| 16690 | } |
| 16691 | return 0; |
| 16692 | } |
| 16693 | |
| 16694 | if (insn->src_reg == BPF_PSEUDO_FUNC) { |
| 16695 | struct bpf_prog_aux *aux = env->prog->aux; |
| 16696 | u32 subprogno = find_subprog(env, |
| 16697 | env->insn_idx + insn->imm + 1); |
| 16698 | |
| 16699 | if (!aux->func_info) { |
| 16700 | verbose(env, "missing btf func_info\n"); |
| 16701 | return -EINVAL; |
| 16702 | } |
| 16703 | if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) { |
| 16704 | verbose(env, "callback function not static\n"); |
| 16705 | return -EINVAL; |
| 16706 | } |
| 16707 | |
| 16708 | dst_reg->type = PTR_TO_FUNC; |
| 16709 | dst_reg->subprogno = subprogno; |
| 16710 | return 0; |
| 16711 | } |
| 16712 | |
| 16713 | map = env->used_maps[aux->map_index]; |
| 16714 | dst_reg->map_ptr = map; |
| 16715 | |
| 16716 | if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || |
| 16717 | insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { |
| 16718 | if (map->map_type == BPF_MAP_TYPE_ARENA) { |
| 16719 | __mark_reg_unknown(env, dst_reg); |
| 16720 | return 0; |
| 16721 | } |
| 16722 | dst_reg->type = PTR_TO_MAP_VALUE; |
| 16723 | dst_reg->off = aux->map_off; |
| 16724 | WARN_ON_ONCE(map->max_entries != 1); |
		/* We want reg->id to remain 0, since this map value is not distinct */
| 16726 | } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || |
| 16727 | insn->src_reg == BPF_PSEUDO_MAP_IDX) { |
| 16728 | dst_reg->type = CONST_PTR_TO_MAP; |
| 16729 | } else { |
| 16730 | verbose(env, "bpf verifier is misconfigured\n"); |
| 16731 | return -EINVAL; |
| 16732 | } |
| 16733 | |
| 16734 | return 0; |
| 16735 | } |
| 16736 | |
| 16737 | static bool may_access_skb(enum bpf_prog_type type) |
| 16738 | { |
| 16739 | switch (type) { |
| 16740 | case BPF_PROG_TYPE_SOCKET_FILTER: |
| 16741 | case BPF_PROG_TYPE_SCHED_CLS: |
| 16742 | case BPF_PROG_TYPE_SCHED_ACT: |
| 16743 | return true; |
| 16744 | default: |
| 16745 | return false; |
| 16746 | } |
| 16747 | } |
| 16748 | |
| 16749 | /* verify safety of LD_ABS|LD_IND instructions: |
| 16750 | * - they can only appear in the programs where ctx == skb |
| 16751 | * - since they are wrappers of function calls, they scratch R1-R5 registers, |
| 16752 | * preserve R6-R9, and store return value into R0 |
| 16753 | * |
| 16754 | * Implicit input: |
| 16755 | * ctx == skb == R6 == CTX |
| 16756 | * |
| 16757 | * Explicit input: |
| 16758 | * SRC == any register |
| 16759 | * IMM == 32-bit immediate |
| 16760 | * |
| 16761 | * Output: |
| 16762 | * R0 - 8/16/32-bit skb data converted to cpu endianness |
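 *
 * E.g., BPF_LD | BPF_ABS | BPF_H conceptually performs
 * R0 = ntohs(*(u16 *)(skb->data + imm32)), while the BPF_IND variant also
 * adds the SRC register to the offset.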
| 16763 | */ |
| 16764 | static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) |
| 16765 | { |
| 16766 | struct bpf_reg_state *regs = cur_regs(env); |
| 16767 | static const int ctx_reg = BPF_REG_6; |
| 16768 | u8 mode = BPF_MODE(insn->code); |
| 16769 | int i, err; |
| 16770 | |
| 16771 | if (!may_access_skb(resolve_prog_type(env->prog))) { |
| 16772 | verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); |
| 16773 | return -EINVAL; |
| 16774 | } |
| 16775 | |
| 16776 | if (!env->ops->gen_ld_abs) { |
| 16777 | verbose(env, "bpf verifier is misconfigured\n"); |
| 16778 | return -EINVAL; |
| 16779 | } |
| 16780 | |
| 16781 | if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || |
| 16782 | BPF_SIZE(insn->code) == BPF_DW || |
| 16783 | (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { |
| 16784 | verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); |
| 16785 | return -EINVAL; |
| 16786 | } |
| 16787 | |
| 16788 | /* check whether implicit source operand (register R6) is readable */ |
| 16789 | err = check_reg_arg(env, ctx_reg, SRC_OP); |
| 16790 | if (err) |
| 16791 | return err; |
| 16792 | |
| 16793 | /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as |
| 16794 | * gen_ld_abs() may terminate the program at runtime, leading to |
| 16795 | * reference leak. |
| 16796 | */ |
| 16797 | err = check_resource_leak(env, false, true, "BPF_LD_[ABS|IND]"); |
| 16798 | if (err) |
| 16799 | return err; |
| 16800 | |
| 16801 | if (regs[ctx_reg].type != PTR_TO_CTX) { |
| 16802 | verbose(env, |
| 16803 | "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); |
| 16804 | return -EINVAL; |
| 16805 | } |
| 16806 | |
| 16807 | if (mode == BPF_IND) { |
| 16808 | /* check explicit source operand */ |
| 16809 | err = check_reg_arg(env, insn->src_reg, SRC_OP); |
| 16810 | if (err) |
| 16811 | return err; |
| 16812 | } |
| 16813 | |
| 16814 | err = check_ptr_off_reg(env, ®s[ctx_reg], ctx_reg); |
| 16815 | if (err < 0) |
| 16816 | return err; |
| 16817 | |
| 16818 | /* reset caller saved regs to unreadable */ |
| 16819 | for (i = 0; i < CALLER_SAVED_REGS; i++) { |
| 16820 | mark_reg_not_init(env, regs, caller_saved[i]); |
| 16821 | check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); |
| 16822 | } |
| 16823 | |
| 16824 | /* mark destination R0 register as readable, since it contains |
| 16825 | * the value fetched from the packet. |
| 16826 | * Already marked as written above. |
| 16827 | */ |
| 16828 | mark_reg_unknown(env, regs, BPF_REG_0); |
	/* ld_abs loads up to 32 bits of skb data. */
| 16830 | regs[BPF_REG_0].subreg_def = env->insn_idx + 1; |
| 16831 | return 0; |
| 16832 | } |
| 16833 | |
| 16834 | static int check_return_code(struct bpf_verifier_env *env, int regno, const char *reg_name) |
| 16835 | { |
| 16836 | const char *exit_ctx = "At program exit"; |
| 16837 | struct tnum enforce_attach_type_range = tnum_unknown; |
| 16838 | const struct bpf_prog *prog = env->prog; |
| 16839 | struct bpf_reg_state *reg = reg_state(env, regno); |
| 16840 | struct bpf_retval_range range = retval_range(0, 1); |
| 16841 | enum bpf_prog_type prog_type = resolve_prog_type(env->prog); |
| 16842 | int err; |
| 16843 | struct bpf_func_state *frame = env->cur_state->frame[0]; |
| 16844 | const bool is_subprog = frame->subprogno; |
| 16845 | bool return_32bit = false; |
| 16846 | const struct btf_type *reg_type, *ret_type = NULL; |
| 16847 | |
| 16848 | /* LSM and struct_ops func-ptr's return type could be "void" */ |
| 16849 | if (!is_subprog || frame->in_exception_callback_fn) { |
| 16850 | switch (prog_type) { |
| 16851 | case BPF_PROG_TYPE_LSM: |
| 16852 | if (prog->expected_attach_type == BPF_LSM_CGROUP) |
| 16853 | /* See below, can be 0 or 0-1 depending on hook. */ |
| 16854 | break; |
| 16855 | if (!prog->aux->attach_func_proto->type) |
| 16856 | return 0; |
| 16857 | break; |
| 16858 | case BPF_PROG_TYPE_STRUCT_OPS: |
| 16859 | if (!prog->aux->attach_func_proto->type) |
| 16860 | return 0; |
| 16861 | |
| 16862 | if (frame->in_exception_callback_fn) |
| 16863 | break; |
| 16864 | |
| 16865 | /* Allow a struct_ops program to return a referenced kptr if it |
| 16866 | * matches the operator's return type and is in its unmodified |
| 16867 | * form. A scalar zero (i.e., a null pointer) is also allowed. |
| 16868 | */ |
| 16869 | reg_type = reg->btf ? btf_type_by_id(reg->btf, reg->btf_id) : NULL; |
| 16870 | ret_type = btf_type_resolve_ptr(prog->aux->attach_btf, |
| 16871 | prog->aux->attach_func_proto->type, |
| 16872 | NULL); |
| 16873 | if (ret_type && ret_type == reg_type && reg->ref_obj_id) |
| 16874 | return __check_ptr_off_reg(env, reg, regno, false); |
| 16875 | break; |
| 16876 | default: |
| 16877 | break; |
| 16878 | } |
| 16879 | } |
| 16880 | |
	/* The eBPF calling convention uses R0 to return the value from
	 * an eBPF program. Make sure that it is readable at the time of
	 * bpf_exit, which means the program wrote something into it
	 * earlier.
| 16886 | */ |
| 16887 | err = check_reg_arg(env, regno, SRC_OP); |
| 16888 | if (err) |
| 16889 | return err; |
| 16890 | |
| 16891 | if (is_pointer_value(env, regno)) { |
| 16892 | verbose(env, "R%d leaks addr as return value\n", regno); |
| 16893 | return -EACCES; |
| 16894 | } |
| 16895 | |
| 16896 | if (frame->in_async_callback_fn) { |
| 16897 | /* enforce return zero from async callbacks like timer */ |
| 16898 | exit_ctx = "At async callback return"; |
| 16899 | range = retval_range(0, 0); |
| 16900 | goto enforce_retval; |
| 16901 | } |
| 16902 | |
| 16903 | if (is_subprog && !frame->in_exception_callback_fn) { |
| 16904 | if (reg->type != SCALAR_VALUE) { |
| 16905 | verbose(env, "At subprogram exit the register R%d is not a scalar value (%s)\n", |
| 16906 | regno, reg_type_str(env, reg->type)); |
| 16907 | return -EINVAL; |
| 16908 | } |
| 16909 | return 0; |
| 16910 | } |
| 16911 | |
| 16912 | switch (prog_type) { |
| 16913 | case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: |
| 16914 | if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || |
| 16915 | env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || |
| 16916 | env->prog->expected_attach_type == BPF_CGROUP_UNIX_RECVMSG || |
| 16917 | env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || |
| 16918 | env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || |
| 16919 | env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETPEERNAME || |
| 16920 | env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || |
| 16921 | env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME || |
| 16922 | env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETSOCKNAME) |
| 16923 | range = retval_range(1, 1); |
| 16924 | if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || |
| 16925 | env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) |
| 16926 | range = retval_range(0, 3); |
| 16927 | break; |
| 16928 | case BPF_PROG_TYPE_CGROUP_SKB: |
| 16929 | if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { |
| 16930 | range = retval_range(0, 3); |
| 16931 | enforce_attach_type_range = tnum_range(2, 3); |
| 16932 | } |
| 16933 | break; |
| 16934 | case BPF_PROG_TYPE_CGROUP_SOCK: |
| 16935 | case BPF_PROG_TYPE_SOCK_OPS: |
| 16936 | case BPF_PROG_TYPE_CGROUP_DEVICE: |
| 16937 | case BPF_PROG_TYPE_CGROUP_SYSCTL: |
| 16938 | case BPF_PROG_TYPE_CGROUP_SOCKOPT: |
| 16939 | break; |
| 16940 | case BPF_PROG_TYPE_RAW_TRACEPOINT: |
| 16941 | if (!env->prog->aux->attach_btf_id) |
| 16942 | return 0; |
| 16943 | range = retval_range(0, 0); |
| 16944 | break; |
| 16945 | case BPF_PROG_TYPE_TRACING: |
| 16946 | switch (env->prog->expected_attach_type) { |
| 16947 | case BPF_TRACE_FENTRY: |
| 16948 | case BPF_TRACE_FEXIT: |
| 16949 | range = retval_range(0, 0); |
| 16950 | break; |
| 16951 | case BPF_TRACE_RAW_TP: |
| 16952 | case BPF_MODIFY_RETURN: |
| 16953 | return 0; |
| 16954 | case BPF_TRACE_ITER: |
| 16955 | break; |
| 16956 | default: |
| 16957 | return -ENOTSUPP; |
| 16958 | } |
| 16959 | break; |
| 16960 | case BPF_PROG_TYPE_KPROBE: |
| 16961 | switch (env->prog->expected_attach_type) { |
| 16962 | case BPF_TRACE_KPROBE_SESSION: |
| 16963 | case BPF_TRACE_UPROBE_SESSION: |
| 16964 | range = retval_range(0, 1); |
| 16965 | break; |
| 16966 | default: |
| 16967 | return 0; |
| 16968 | } |
| 16969 | break; |
| 16970 | case BPF_PROG_TYPE_SK_LOOKUP: |
| 16971 | range = retval_range(SK_DROP, SK_PASS); |
| 16972 | break; |
| 16973 | |
| 16974 | case BPF_PROG_TYPE_LSM: |
| 16975 | if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { |
| 16976 | /* no range found, any return value is allowed */ |
| 16977 | if (!get_func_retval_range(env->prog, &range)) |
| 16978 | return 0; |
| 16979 | /* no restricted range, any return value is allowed */ |
| 16980 | if (range.minval == S32_MIN && range.maxval == S32_MAX) |
| 16981 | return 0; |
| 16982 | return_32bit = true; |
| 16983 | } else if (!env->prog->aux->attach_func_proto->type) { |
| 16984 | /* Make sure programs that attach to void |
| 16985 | * hooks don't try to modify return value. |
| 16986 | */ |
| 16987 | range = retval_range(1, 1); |
| 16988 | } |
| 16989 | break; |
| 16990 | |
| 16991 | case BPF_PROG_TYPE_NETFILTER: |
| 16992 | range = retval_range(NF_DROP, NF_ACCEPT); |
| 16993 | break; |
| 16994 | case BPF_PROG_TYPE_STRUCT_OPS: |
| 16995 | if (!ret_type) |
| 16996 | return 0; |
| 16997 | range = retval_range(0, 0); |
| 16998 | break; |
| 16999 | case BPF_PROG_TYPE_EXT: |
| 17000 | /* freplace program can return anything as its return value |
| 17001 | * depends on the to-be-replaced kernel func or bpf program. |
| 17002 | */ |
| 17003 | default: |
| 17004 | return 0; |
| 17005 | } |
| 17006 | |
| 17007 | enforce_retval: |
| 17008 | if (reg->type != SCALAR_VALUE) { |
| 17009 | verbose(env, "%s the register R%d is not a known value (%s)\n", |
| 17010 | exit_ctx, regno, reg_type_str(env, reg->type)); |
| 17011 | return -EINVAL; |
| 17012 | } |
| 17013 | |
| 17014 | err = mark_chain_precision(env, regno); |
| 17015 | if (err) |
| 17016 | return err; |
| 17017 | |
| 17018 | if (!retval_range_within(range, reg, return_32bit)) { |
| 17019 | verbose_invalid_scalar(env, reg, range, exit_ctx, reg_name); |
| 17020 | if (!is_subprog && |
| 17021 | prog->expected_attach_type == BPF_LSM_CGROUP && |
| 17022 | prog_type == BPF_PROG_TYPE_LSM && |
| 17023 | !prog->aux->attach_func_proto->type) |
| 17024 | verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n"); |
| 17025 | return -EINVAL; |
| 17026 | } |
| 17027 | |
| 17028 | if (!tnum_is_unknown(enforce_attach_type_range) && |
| 17029 | tnum_in(enforce_attach_type_range, reg->var_off)) |
| 17030 | env->prog->enforce_expected_attach_type = 1; |
| 17031 | return 0; |
| 17032 | } |
| 17033 | |
| 17034 | static void mark_subprog_changes_pkt_data(struct bpf_verifier_env *env, int off) |
| 17035 | { |
| 17036 | struct bpf_subprog_info *subprog; |
| 17037 | |
| 17038 | subprog = find_containing_subprog(env, off); |
| 17039 | subprog->changes_pkt_data = true; |
| 17040 | } |
| 17041 | |
| 17042 | static void mark_subprog_might_sleep(struct bpf_verifier_env *env, int off) |
| 17043 | { |
| 17044 | struct bpf_subprog_info *subprog; |
| 17045 | |
| 17046 | subprog = find_containing_subprog(env, off); |
| 17047 | subprog->might_sleep = true; |
| 17048 | } |
| 17049 | |
| 17050 | /* 't' is an index of a call-site. |
| 17051 | * 'w' is a callee entry point. |
 * This function is eventually called when env->cfg.insn_state[w] == EXPLORED.
 * Rely on DFS traversal order and the absence of recursive calls to guarantee
 * that the callee's changes_pkt_data and might_sleep marks are correct at that
 * moment.
| 17055 | */ |
| 17056 | static void merge_callee_effects(struct bpf_verifier_env *env, int t, int w) |
| 17057 | { |
| 17058 | struct bpf_subprog_info *caller, *callee; |
| 17059 | |
| 17060 | caller = find_containing_subprog(env, t); |
| 17061 | callee = find_containing_subprog(env, w); |
| 17062 | caller->changes_pkt_data |= callee->changes_pkt_data; |
| 17063 | caller->might_sleep |= callee->might_sleep; |
| 17064 | } |
| 17065 | |
| 17066 | /* non-recursive DFS pseudo code |
| 17067 | * 1 procedure DFS-iterative(G,v): |
| 17068 | * 2 label v as discovered |
| 17069 | * 3 let S be a stack |
| 17070 | * 4 S.push(v) |
| 17071 | * 5 while S is not empty |
| 17072 | * 6 t <- S.peek() |
| 17073 | * 7 if t is what we're looking for: |
| 17074 | * 8 return t |
| 17075 | * 9 for all edges e in G.adjacentEdges(t) do |
| 17076 | * 10 if edge e is already labelled |
| 17077 | * 11 continue with the next edge |
| 17078 | * 12 w <- G.adjacentVertex(t,e) |
| 17079 | * 13 if vertex w is not discovered and not explored |
| 17080 | * 14 label e as tree-edge |
| 17081 | * 15 label w as discovered |
| 17082 | * 16 S.push(w) |
| 17083 | * 17 continue at 5 |
| 17084 | * 18 else if vertex w is discovered |
| 17085 | * 19 label e as back-edge |
| 17086 | * 20 else |
| 17087 | * 21 // vertex w is explored |
| 17088 | * 22 label e as forward- or cross-edge |
| 17089 | * 23 label t as explored |
| 17090 | * 24 S.pop() |
| 17091 | * |
| 17092 | * convention: |
| 17093 | * 0x10 - discovered |
| 17094 | * 0x11 - discovered and fall-through edge labelled |
| 17095 | * 0x12 - discovered and fall-through and branch edges labelled |
| 17096 | * 0x20 - explored |
| 17097 | */ |
| 17098 | |
| 17099 | enum { |
| 17100 | DISCOVERED = 0x10, |
| 17101 | EXPLORED = 0x20, |
| 17102 | FALLTHROUGH = 1, |
| 17103 | BRANCH = 2, |
| 17104 | }; |
| 17105 | |
| 17106 | static void mark_prune_point(struct bpf_verifier_env *env, int idx) |
| 17107 | { |
| 17108 | env->insn_aux_data[idx].prune_point = true; |
| 17109 | } |
| 17110 | |
| 17111 | static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx) |
| 17112 | { |
| 17113 | return env->insn_aux_data[insn_idx].prune_point; |
| 17114 | } |
| 17115 | |
| 17116 | static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx) |
| 17117 | { |
| 17118 | env->insn_aux_data[idx].force_checkpoint = true; |
| 17119 | } |
| 17120 | |
| 17121 | static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx) |
| 17122 | { |
| 17123 | return env->insn_aux_data[insn_idx].force_checkpoint; |
| 17124 | } |
| 17125 | |
| 17126 | static void mark_calls_callback(struct bpf_verifier_env *env, int idx) |
| 17127 | { |
| 17128 | env->insn_aux_data[idx].calls_callback = true; |
| 17129 | } |
| 17130 | |
| 17131 | static bool calls_callback(struct bpf_verifier_env *env, int insn_idx) |
| 17132 | { |
| 17133 | return env->insn_aux_data[insn_idx].calls_callback; |
| 17134 | } |
| 17135 | |
| 17136 | enum { |
| 17137 | DONE_EXPLORING = 0, |
| 17138 | KEEP_EXPLORING = 1, |
| 17139 | }; |
| 17140 | |
| 17141 | /* t, w, e - match pseudo-code above: |
| 17142 | * t - index of current instruction |
| 17143 | * w - next instruction |
| 17144 | * e - edge |
| 17145 | */ |
| 17146 | static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) |
| 17147 | { |
| 17148 | int *insn_stack = env->cfg.insn_stack; |
| 17149 | int *insn_state = env->cfg.insn_state; |
| 17150 | |
| 17151 | if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) |
| 17152 | return DONE_EXPLORING; |
| 17153 | |
| 17154 | if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) |
| 17155 | return DONE_EXPLORING; |
| 17156 | |
| 17157 | if (w < 0 || w >= env->prog->len) { |
| 17158 | verbose_linfo(env, t, "%d: ", t); |
| 17159 | verbose(env, "jump out of range from insn %d to %d\n", t, w); |
| 17160 | return -EINVAL; |
| 17161 | } |
| 17162 | |
| 17163 | if (e == BRANCH) { |
| 17164 | /* mark branch target for state pruning */ |
| 17165 | mark_prune_point(env, w); |
| 17166 | mark_jmp_point(env, w); |
| 17167 | } |
| 17168 | |
| 17169 | if (insn_state[w] == 0) { |
| 17170 | /* tree-edge */ |
| 17171 | insn_state[t] = DISCOVERED | e; |
| 17172 | insn_state[w] = DISCOVERED; |
| 17173 | if (env->cfg.cur_stack >= env->prog->len) |
| 17174 | return -E2BIG; |
| 17175 | insn_stack[env->cfg.cur_stack++] = w; |
| 17176 | return KEEP_EXPLORING; |
| 17177 | } else if ((insn_state[w] & 0xF0) == DISCOVERED) { |
| 17178 | if (env->bpf_capable) |
| 17179 | return DONE_EXPLORING; |
| 17180 | verbose_linfo(env, t, "%d: ", t); |
| 17181 | verbose_linfo(env, w, "%d: ", w); |
| 17182 | verbose(env, "back-edge from insn %d to %d\n", t, w); |
| 17183 | return -EINVAL; |
| 17184 | } else if (insn_state[w] == EXPLORED) { |
| 17185 | /* forward- or cross-edge */ |
| 17186 | insn_state[t] = DISCOVERED | e; |
| 17187 | } else { |
| 17188 | verbose(env, "insn state internal bug\n"); |
| 17189 | return -EFAULT; |
| 17190 | } |
| 17191 | return DONE_EXPLORING; |
| 17192 | } |
| 17193 | |
| 17194 | static int visit_func_call_insn(int t, struct bpf_insn *insns, |
| 17195 | struct bpf_verifier_env *env, |
| 17196 | bool visit_callee) |
| 17197 | { |
| 17198 | int ret, insn_sz; |
| 17199 | int w; |
| 17200 | |
| 17201 | insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1; |
| 17202 | ret = push_insn(t, t + insn_sz, FALLTHROUGH, env); |
| 17203 | if (ret) |
| 17204 | return ret; |
| 17205 | |
| 17206 | mark_prune_point(env, t + insn_sz); |
| 17207 | /* when we exit from subprog, we need to record non-linear history */ |
| 17208 | mark_jmp_point(env, t + insn_sz); |
| 17209 | |
| 17210 | if (visit_callee) { |
| 17211 | w = t + insns[t].imm + 1; |
| 17212 | mark_prune_point(env, t); |
| 17213 | merge_callee_effects(env, t, w); |
| 17214 | ret = push_insn(t, w, BRANCH, env); |
| 17215 | } |
| 17216 | return ret; |
| 17217 | } |
| 17218 | |
| 17219 | /* Bitmask with 1s for all caller saved registers */ |
| 17220 | #define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1) |
| 17221 | |
/* True if do_misc_fixups() replaces calls to helper number 'imm';
* the replacement patch is presumed to follow the bpf_fastcall contract
| 17224 | * (see mark_fastcall_pattern_for_call() below). |
| 17225 | */ |
| 17226 | static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm) |
| 17227 | { |
| 17228 | switch (imm) { |
| 17229 | #ifdef CONFIG_X86_64 |
| 17230 | case BPF_FUNC_get_smp_processor_id: |
| 17231 | return env->prog->jit_requested && bpf_jit_supports_percpu_insn(); |
| 17232 | #endif |
| 17233 | default: |
| 17234 | return false; |
| 17235 | } |
| 17236 | } |
| 17237 | |
| 17238 | struct call_summary { |
| 17239 | u8 num_params; |
| 17240 | bool is_void; |
| 17241 | bool fastcall; |
| 17242 | }; |
| 17243 | |
| 17244 | /* If @call is a kfunc or helper call, fills @cs and returns true, |
| 17245 | * otherwise returns false. |
| 17246 | */ |
| 17247 | static bool get_call_summary(struct bpf_verifier_env *env, struct bpf_insn *call, |
| 17248 | struct call_summary *cs) |
| 17249 | { |
| 17250 | struct bpf_kfunc_call_arg_meta meta; |
| 17251 | const struct bpf_func_proto *fn; |
| 17252 | int i; |
| 17253 | |
| 17254 | if (bpf_helper_call(call)) { |
| 17255 | |
| 17256 | if (get_helper_proto(env, call->imm, &fn) < 0) |
| 17257 | /* error would be reported later */ |
| 17258 | return false; |
| 17259 | cs->fastcall = fn->allow_fastcall && |
| 17260 | (verifier_inlines_helper_call(env, call->imm) || |
| 17261 | bpf_jit_inlines_helper_call(call->imm)); |
| 17262 | cs->is_void = fn->ret_type == RET_VOID; |
| 17263 | cs->num_params = 0; |
| 17264 | for (i = 0; i < ARRAY_SIZE(fn->arg_type); ++i) { |
| 17265 | if (fn->arg_type[i] == ARG_DONTCARE) |
| 17266 | break; |
| 17267 | cs->num_params++; |
| 17268 | } |
| 17269 | return true; |
| 17270 | } |
| 17271 | |
| 17272 | if (bpf_pseudo_kfunc_call(call)) { |
| 17273 | int err; |
| 17274 | |
| 17275 | err = fetch_kfunc_meta(env, call, &meta, NULL); |
| 17276 | if (err < 0) |
| 17277 | /* error would be reported later */ |
| 17278 | return false; |
| 17279 | cs->num_params = btf_type_vlen(meta.func_proto); |
| 17280 | cs->fastcall = meta.kfunc_flags & KF_FASTCALL; |
| 17281 | cs->is_void = btf_type_is_void(btf_type_by_id(meta.btf, meta.func_proto->type)); |
| 17282 | return true; |
| 17283 | } |
| 17284 | |
| 17285 | return false; |
| 17286 | } |
| 17287 | |
/* LLVM defines a bpf_fastcall function attribute.
* This attribute means that the function scratches only some of
* the caller-saved registers defined by the ABI.
| 17291 | * For BPF the set of such registers could be defined as follows: |
| 17292 | * - R0 is scratched only if function is non-void; |
| 17293 | * - R1-R5 are scratched only if corresponding parameter type is defined |
| 17294 | * in the function prototype. |
| 17295 | * |
* The contract between the kernel and clang allows such functions to be
* used while maintaining backwards compatibility with old kernels that
* don't understand bpf_fastcall calls:
| 17299 | * |
| 17300 | * - for bpf_fastcall calls clang allocates registers as-if relevant r0-r5 |
| 17301 | * registers are not scratched by the call; |
| 17302 | * |
| 17303 | * - as a post-processing step, clang visits each bpf_fastcall call and adds |
| 17304 | * spill/fill for every live r0-r5; |
| 17305 | * |
| 17306 | * - stack offsets used for the spill/fill are allocated as lowest |
| 17307 | * stack offsets in whole function and are not used for any other |
| 17308 | * purposes; |
| 17309 | * |
* - when the kernel loads a program, it looks for such patterns
*   (bpf_fastcall function surrounded by spills/fills) and checks if
*   spill/fill stack offsets are used exclusively in fastcall patterns;
*
* - if so, and if the verifier or the current JIT inlines the call to the
*   bpf_fastcall function (e.g. a helper call), the kernel removes the
*   unnecessary spill/fill pairs;
*
* - when an old kernel loads a program, the presence of spill/fill pairs
*   keeps the BPF program valid, albeit slightly less efficient.
| 17320 | * |
| 17321 | * For example: |
| 17322 | * |
| 17323 | * r1 = 1; |
| 17324 | * r2 = 2; |
| 17325 | * *(u64 *)(r10 - 8) = r1; r1 = 1; |
| 17326 | * *(u64 *)(r10 - 16) = r2; r2 = 2; |
| 17327 | * call %[to_be_inlined] --> call %[to_be_inlined] |
| 17328 | * r2 = *(u64 *)(r10 - 16); r0 = r1; |
| 17329 | * r1 = *(u64 *)(r10 - 8); r0 += r2; |
| 17330 | * r0 = r1; exit; |
| 17331 | * r0 += r2; |
| 17332 | * exit; |
| 17333 | * |
| 17334 | * The purpose of mark_fastcall_pattern_for_call is to: |
| 17335 | * - look for such patterns; |
| 17336 | * - mark spill and fill instructions in env->insn_aux_data[*].fastcall_pattern; |
* - set env->insn_aux_data[*].fastcall_spills_num for the call instruction;
| 17338 | * - update env->subprog_info[*]->fastcall_stack_off to find an offset |
| 17339 | * at which bpf_fastcall spill/fill stack slots start; |
| 17340 | * - update env->subprog_info[*]->keep_fastcall_stack. |
| 17341 | * |
| 17342 | * The .fastcall_pattern and .fastcall_stack_off are used by |
| 17343 | * check_fastcall_stack_contract() to check if every stack access to |
* a fastcall spill/fill stack slot originates from spill/fill
* instructions that are members of fastcall patterns.
| 17346 | * |
* If this condition holds for a subprogram, fastcall patterns can
| 17348 | * be rewritten by remove_fastcall_spills_fills(). |
| 17349 | * Otherwise bpf_fastcall patterns are not changed in the subprogram |
| 17350 | * (code, presumably, generated by an older clang version). |
| 17351 | * |
| 17352 | * For example, it is *not* safe to remove spill/fill below: |
| 17353 | * |
| 17354 | * r1 = 1; |
| 17355 | * *(u64 *)(r10 - 8) = r1; r1 = 1; |
| 17356 | * call %[to_be_inlined] --> call %[to_be_inlined] |
| 17357 | * r1 = *(u64 *)(r10 - 8); r0 = *(u64 *)(r10 - 8); <---- wrong !!! |
| 17358 | * r0 = *(u64 *)(r10 - 8); r0 += r1; |
| 17359 | * r0 += r1; exit; |
| 17360 | * exit; |
| 17361 | */ |
| 17362 | static void mark_fastcall_pattern_for_call(struct bpf_verifier_env *env, |
| 17363 | struct bpf_subprog_info *subprog, |
| 17364 | int insn_idx, s16 lowest_off) |
| 17365 | { |
| 17366 | struct bpf_insn *insns = env->prog->insnsi, *stx, *ldx; |
| 17367 | struct bpf_insn *call = &env->prog->insnsi[insn_idx]; |
| 17368 | u32 clobbered_regs_mask; |
| 17369 | struct call_summary cs; |
| 17370 | u32 expected_regs_mask; |
| 17371 | s16 off; |
| 17372 | int i; |
| 17373 | |
| 17374 | if (!get_call_summary(env, call, &cs)) |
| 17375 | return; |
| 17376 | |
/* A bitmask specifying which caller-saved registers are clobbered
* by a call to a helper/kfunc *as if* this helper/kfunc follows
* the bpf_fastcall contract:
* - includes R0 if the function is non-void;
* - includes R1-R5 if the corresponding parameter is described
*   in the function prototype.
| 17383 | */ |
| 17384 | clobbered_regs_mask = GENMASK(cs.num_params, cs.is_void ? 1 : 0); |
| 17385 | /* e.g. if helper call clobbers r{0,1}, expect r{2,3,4,5} in the pattern */ |
| 17386 | expected_regs_mask = ~clobbered_regs_mask & ALL_CALLER_SAVED_REGS; |
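/* For illustration: a non-void helper with two parameters yields
* clobbered_regs_mask == GENMASK(2, 0) == 0b000111 (r0-r2) and thus
* expected_regs_mask == 0b111000 (r3-r5); a void helper with two
* parameters yields GENMASK(2, 1) == 0b000110 (r1-r2), leaving r0 and
* r3-r5 as candidates for the spill/fill pattern.
*/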
| 17387 | |
| 17388 | /* match pairs of form: |
| 17389 | * |
| 17390 | * *(u64 *)(r10 - Y) = rX (where Y % 8 == 0) |
| 17391 | * ... |
| 17392 | * call %[to_be_inlined] |
| 17393 | * ... |
| 17394 | * rX = *(u64 *)(r10 - Y) |
| 17395 | */ |
| 17396 | for (i = 1, off = lowest_off; i <= ARRAY_SIZE(caller_saved); ++i, off += BPF_REG_SIZE) { |
| 17397 | if (insn_idx - i < 0 || insn_idx + i >= env->prog->len) |
| 17398 | break; |
| 17399 | stx = &insns[insn_idx - i]; |
| 17400 | ldx = &insns[insn_idx + i]; |
| 17401 | /* must be a stack spill/fill pair */ |
| 17402 | if (stx->code != (BPF_STX | BPF_MEM | BPF_DW) || |
| 17403 | ldx->code != (BPF_LDX | BPF_MEM | BPF_DW) || |
| 17404 | stx->dst_reg != BPF_REG_10 || |
| 17405 | ldx->src_reg != BPF_REG_10) |
| 17406 | break; |
| 17407 | /* must be a spill/fill for the same reg */ |
| 17408 | if (stx->src_reg != ldx->dst_reg) |
| 17409 | break; |
| 17410 | /* must be one of the previously unseen registers */ |
| 17411 | if ((BIT(stx->src_reg) & expected_regs_mask) == 0) |
| 17412 | break; |
| 17413 | /* must be a spill/fill for the same expected offset, |
| 17414 | * no need to check offset alignment, BPF_DW stack access |
| 17415 | * is always 8-byte aligned. |
| 17416 | */ |
| 17417 | if (stx->off != off || ldx->off != off) |
| 17418 | break; |
| 17419 | expected_regs_mask &= ~BIT(stx->src_reg); |
| 17420 | env->insn_aux_data[insn_idx - i].fastcall_pattern = 1; |
| 17421 | env->insn_aux_data[insn_idx + i].fastcall_pattern = 1; |
| 17422 | } |
| 17423 | if (i == 1) |
| 17424 | return; |
| 17425 | |
/* Conditionally set 'fastcall_spills_num' to allow forward
* compatibility when more helper functions are marked as
* bpf_fastcall at compile time than the current kernel supports, e.g.:
| 17429 | * |
| 17430 | * 1: *(u64 *)(r10 - 8) = r1 |
| 17431 | * 2: call A ;; assume A is bpf_fastcall for current kernel |
| 17432 | * 3: r1 = *(u64 *)(r10 - 8) |
| 17433 | * 4: *(u64 *)(r10 - 8) = r1 |
| 17434 | * 5: call B ;; assume B is not bpf_fastcall for current kernel |
| 17435 | * 6: r1 = *(u64 *)(r10 - 8) |
| 17436 | * |
* There is no need to block the bpf_fastcall rewrite for such a program.
* Set 'fastcall_pattern' for both calls to keep check_fastcall_stack_contract() happy,
* but don't set 'fastcall_spills_num' for call B so that remove_fastcall_spills_fills()
| 17440 | * does not remove spill/fill pair {4,6}. |
| 17441 | */ |
| 17442 | if (cs.fastcall) |
| 17443 | env->insn_aux_data[insn_idx].fastcall_spills_num = i - 1; |
| 17444 | else |
| 17445 | subprog->keep_fastcall_stack = 1; |
| 17446 | subprog->fastcall_stack_off = min(subprog->fastcall_stack_off, off); |
| 17447 | } |
| 17448 | |
| 17449 | static int mark_fastcall_patterns(struct bpf_verifier_env *env) |
| 17450 | { |
| 17451 | struct bpf_subprog_info *subprog = env->subprog_info; |
| 17452 | struct bpf_insn *insn; |
| 17453 | s16 lowest_off; |
| 17454 | int s, i; |
| 17455 | |
| 17456 | for (s = 0; s < env->subprog_cnt; ++s, ++subprog) { |
| 17457 | /* find lowest stack spill offset used in this subprog */ |
| 17458 | lowest_off = 0; |
| 17459 | for (i = subprog->start; i < (subprog + 1)->start; ++i) { |
| 17460 | insn = env->prog->insnsi + i; |
| 17461 | if (insn->code != (BPF_STX | BPF_MEM | BPF_DW) || |
| 17462 | insn->dst_reg != BPF_REG_10) |
| 17463 | continue; |
| 17464 | lowest_off = min(lowest_off, insn->off); |
| 17465 | } |
| 17466 | /* use this offset to find fastcall patterns */ |
| 17467 | for (i = subprog->start; i < (subprog + 1)->start; ++i) { |
| 17468 | insn = env->prog->insnsi + i; |
| 17469 | if (insn->code != (BPF_JMP | BPF_CALL)) |
| 17470 | continue; |
| 17471 | mark_fastcall_pattern_for_call(env, subprog, i, lowest_off); |
| 17472 | } |
| 17473 | } |
| 17474 | return 0; |
| 17475 | } |
| 17476 | |
| 17477 | /* Visits the instruction at index t and returns one of the following: |
| 17478 | * < 0 - an error occurred |
| 17479 | * DONE_EXPLORING - the instruction was fully explored |
| 17480 | * KEEP_EXPLORING - there is still work to be done before it is fully explored |
| 17481 | */ |
| 17482 | static int visit_insn(int t, struct bpf_verifier_env *env) |
| 17483 | { |
| 17484 | struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t]; |
| 17485 | int ret, off, insn_sz; |
| 17486 | |
| 17487 | if (bpf_pseudo_func(insn)) |
| 17488 | return visit_func_call_insn(t, insns, env, true); |
| 17489 | |
| 17490 | /* All non-branch instructions have a single fall-through edge. */ |
| 17491 | if (BPF_CLASS(insn->code) != BPF_JMP && |
| 17492 | BPF_CLASS(insn->code) != BPF_JMP32) { |
| 17493 | insn_sz = bpf_is_ldimm64(insn) ? 2 : 1; |
| 17494 | return push_insn(t, t + insn_sz, FALLTHROUGH, env); |
| 17495 | } |
| 17496 | |
| 17497 | switch (BPF_OP(insn->code)) { |
| 17498 | case BPF_EXIT: |
| 17499 | return DONE_EXPLORING; |
| 17500 | |
| 17501 | case BPF_CALL: |
| 17502 | if (is_async_callback_calling_insn(insn)) |
| 17503 | /* Mark this call insn as a prune point to trigger |
| 17504 | * is_state_visited() check before call itself is |
| 17505 | * processed by __check_func_call(). Otherwise new |
| 17506 | * async state will be pushed for further exploration. |
| 17507 | */ |
| 17508 | mark_prune_point(env, t); |
/* For functions that invoke callbacks it is not known how many times
* the callback would be called. The verifier models callback-calling
* functions by repeatedly visiting the callback body and returning to
* the original call instruction.
* In order to stop such iteration the verifier needs to identify when a
* state identical to some state from a previous iteration is reached.
* The check below forces creation of a checkpoint before the callback
* calling instruction to allow the search for such identical states.
| 17517 | */ |
| 17518 | if (is_sync_callback_calling_insn(insn)) { |
| 17519 | mark_calls_callback(env, t); |
| 17520 | mark_force_checkpoint(env, t); |
| 17521 | mark_prune_point(env, t); |
| 17522 | mark_jmp_point(env, t); |
| 17523 | } |
| 17524 | if (bpf_helper_call(insn)) { |
| 17525 | const struct bpf_func_proto *fp; |
| 17526 | |
| 17527 | ret = get_helper_proto(env, insn->imm, &fp); |
/* If called in a non-sleepable context the program will be
| 17529 | * rejected anyway, so we should end up with precise |
| 17530 | * sleepable marks on subprogs, except for dead code |
| 17531 | * elimination. |
| 17532 | */ |
| 17533 | if (ret == 0 && fp->might_sleep) |
| 17534 | mark_subprog_might_sleep(env, t); |
| 17535 | if (bpf_helper_changes_pkt_data(insn->imm)) |
| 17536 | mark_subprog_changes_pkt_data(env, t); |
| 17537 | } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { |
| 17538 | struct bpf_kfunc_call_arg_meta meta; |
| 17539 | |
| 17540 | ret = fetch_kfunc_meta(env, insn, &meta, NULL); |
| 17541 | if (ret == 0 && is_iter_next_kfunc(&meta)) { |
| 17542 | mark_prune_point(env, t); |
| 17543 | /* Checking and saving state checkpoints at iter_next() call |
| 17544 | * is crucial for fast convergence of open-coded iterator loop |
| 17545 | * logic, so we need to force it. If we don't do that, |
| 17546 | * is_state_visited() might skip saving a checkpoint, causing |
| 17547 | * unnecessarily long sequence of not checkpointed |
| 17548 | * instructions and jumps, leading to exhaustion of jump |
| 17549 | * history buffer, and potentially other undesired outcomes. |
| 17550 | * It is expected that with correct open-coded iterators |
| 17551 | * convergence will happen quickly, so we don't run a risk of |
| 17552 | * exhausting memory. |
| 17553 | */ |
| 17554 | mark_force_checkpoint(env, t); |
| 17555 | } |
| 17556 | /* Same as helpers, if called in a non-sleepable context |
* the program will be rejected anyway, so we should end up
| 17558 | * with precise sleepable marks on subprogs, except for |
| 17559 | * dead code elimination. |
| 17560 | */ |
| 17561 | if (ret == 0 && is_kfunc_sleepable(&meta)) |
| 17562 | mark_subprog_might_sleep(env, t); |
| 17563 | } |
| 17564 | return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL); |
| 17565 | |
| 17566 | case BPF_JA: |
| 17567 | if (BPF_SRC(insn->code) != BPF_K) |
| 17568 | return -EINVAL; |
| 17569 | |
| 17570 | if (BPF_CLASS(insn->code) == BPF_JMP) |
| 17571 | off = insn->off; |
| 17572 | else |
| 17573 | off = insn->imm; |
| 17574 | |
| 17575 | /* unconditional jump with single edge */ |
| 17576 | ret = push_insn(t, t + off + 1, FALLTHROUGH, env); |
| 17577 | if (ret) |
| 17578 | return ret; |
| 17579 | |
| 17580 | mark_prune_point(env, t + off + 1); |
| 17581 | mark_jmp_point(env, t + off + 1); |
| 17582 | |
| 17583 | return ret; |
| 17584 | |
| 17585 | default: |
| 17586 | /* conditional jump with two edges */ |
| 17587 | mark_prune_point(env, t); |
| 17588 | if (is_may_goto_insn(insn)) |
| 17589 | mark_force_checkpoint(env, t); |
| 17590 | |
| 17591 | ret = push_insn(t, t + 1, FALLTHROUGH, env); |
| 17592 | if (ret) |
| 17593 | return ret; |
| 17594 | |
| 17595 | return push_insn(t, t + insn->off + 1, BRANCH, env); |
| 17596 | } |
| 17597 | } |
| 17598 | |
| 17599 | /* non-recursive depth-first-search to detect loops in BPF program |
| 17600 | * loop == back-edge in directed graph |
| 17601 | */ |
| 17602 | static int check_cfg(struct bpf_verifier_env *env) |
| 17603 | { |
| 17604 | int insn_cnt = env->prog->len; |
| 17605 | int *insn_stack, *insn_state, *insn_postorder; |
| 17606 | int ex_insn_beg, i, ret = 0; |
| 17607 | |
| 17608 | insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); |
| 17609 | if (!insn_state) |
| 17610 | return -ENOMEM; |
| 17611 | |
| 17612 | insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); |
| 17613 | if (!insn_stack) { |
| 17614 | kvfree(insn_state); |
| 17615 | return -ENOMEM; |
| 17616 | } |
| 17617 | |
| 17618 | insn_postorder = env->cfg.insn_postorder = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); |
| 17619 | if (!insn_postorder) { |
| 17620 | kvfree(insn_state); |
| 17621 | kvfree(insn_stack); |
| 17622 | return -ENOMEM; |
| 17623 | } |
| 17624 | |
| 17625 | ex_insn_beg = env->exception_callback_subprog |
| 17626 | ? env->subprog_info[env->exception_callback_subprog].start |
| 17627 | : 0; |
| 17628 | |
| 17629 | insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ |
| 17630 | insn_stack[0] = 0; /* 0 is the first instruction */ |
| 17631 | env->cfg.cur_stack = 1; |
| 17632 | |
| 17633 | walk_cfg: |
| 17634 | while (env->cfg.cur_stack > 0) { |
| 17635 | int t = insn_stack[env->cfg.cur_stack - 1]; |
| 17636 | |
| 17637 | ret = visit_insn(t, env); |
| 17638 | switch (ret) { |
| 17639 | case DONE_EXPLORING: |
| 17640 | insn_state[t] = EXPLORED; |
| 17641 | env->cfg.cur_stack--; |
| 17642 | insn_postorder[env->cfg.cur_postorder++] = t; |
| 17643 | break; |
| 17644 | case KEEP_EXPLORING: |
| 17645 | break; |
| 17646 | default: |
| 17647 | if (ret > 0) { |
| 17648 | verbose(env, "visit_insn internal bug\n"); |
| 17649 | ret = -EFAULT; |
| 17650 | } |
| 17651 | goto err_free; |
| 17652 | } |
| 17653 | } |
| 17654 | |
| 17655 | if (env->cfg.cur_stack < 0) { |
| 17656 | verbose(env, "pop stack internal bug\n"); |
| 17657 | ret = -EFAULT; |
| 17658 | goto err_free; |
| 17659 | } |
| 17660 | |
| 17661 | if (ex_insn_beg && insn_state[ex_insn_beg] != EXPLORED) { |
| 17662 | insn_state[ex_insn_beg] = DISCOVERED; |
| 17663 | insn_stack[0] = ex_insn_beg; |
| 17664 | env->cfg.cur_stack = 1; |
| 17665 | goto walk_cfg; |
| 17666 | } |
| 17667 | |
| 17668 | for (i = 0; i < insn_cnt; i++) { |
| 17669 | struct bpf_insn *insn = &env->prog->insnsi[i]; |
| 17670 | |
| 17671 | if (insn_state[i] != EXPLORED) { |
| 17672 | verbose(env, "unreachable insn %d\n", i); |
| 17673 | ret = -EINVAL; |
| 17674 | goto err_free; |
| 17675 | } |
| 17676 | if (bpf_is_ldimm64(insn)) { |
| 17677 | if (insn_state[i + 1] != 0) { |
| 17678 | verbose(env, "jump into the middle of ldimm64 insn %d\n", i); |
| 17679 | ret = -EINVAL; |
| 17680 | goto err_free; |
| 17681 | } |
| 17682 | i++; /* skip second half of ldimm64 */ |
| 17683 | } |
| 17684 | } |
| 17685 | ret = 0; /* cfg looks good */ |
| 17686 | env->prog->aux->changes_pkt_data = env->subprog_info[0].changes_pkt_data; |
| 17687 | env->prog->aux->might_sleep = env->subprog_info[0].might_sleep; |
| 17688 | |
| 17689 | err_free: |
| 17690 | kvfree(insn_state); |
| 17691 | kvfree(insn_stack); |
| 17692 | env->cfg.insn_state = env->cfg.insn_stack = NULL; |
| 17693 | return ret; |
| 17694 | } |
| 17695 | |
| 17696 | static int check_abnormal_return(struct bpf_verifier_env *env) |
| 17697 | { |
| 17698 | int i; |
| 17699 | |
| 17700 | for (i = 1; i < env->subprog_cnt; i++) { |
| 17701 | if (env->subprog_info[i].has_ld_abs) { |
| 17702 | verbose(env, "LD_ABS is not allowed in subprogs without BTF\n"); |
| 17703 | return -EINVAL; |
| 17704 | } |
| 17705 | if (env->subprog_info[i].has_tail_call) { |
| 17706 | verbose(env, "tail_call is not allowed in subprogs without BTF\n"); |
| 17707 | return -EINVAL; |
| 17708 | } |
| 17709 | } |
| 17710 | return 0; |
| 17711 | } |
| 17712 | |
| 17713 | /* The minimum supported BTF func info size */ |
| 17714 | #define MIN_BPF_FUNCINFO_SIZE 8 |
| 17715 | #define MAX_FUNCINFO_REC_SIZE 252 |
| 17716 | |
| 17717 | static int check_btf_func_early(struct bpf_verifier_env *env, |
| 17718 | const union bpf_attr *attr, |
| 17719 | bpfptr_t uattr) |
| 17720 | { |
| 17721 | u32 krec_size = sizeof(struct bpf_func_info); |
| 17722 | const struct btf_type *type, *func_proto; |
| 17723 | u32 i, nfuncs, urec_size, min_size; |
| 17724 | struct bpf_func_info *krecord; |
| 17725 | struct bpf_prog *prog; |
| 17726 | const struct btf *btf; |
| 17727 | u32 prev_offset = 0; |
| 17728 | bpfptr_t urecord; |
| 17729 | int ret = -ENOMEM; |
| 17730 | |
| 17731 | nfuncs = attr->func_info_cnt; |
| 17732 | if (!nfuncs) { |
| 17733 | if (check_abnormal_return(env)) |
| 17734 | return -EINVAL; |
| 17735 | return 0; |
| 17736 | } |
| 17737 | |
| 17738 | urec_size = attr->func_info_rec_size; |
| 17739 | if (urec_size < MIN_BPF_FUNCINFO_SIZE || |
| 17740 | urec_size > MAX_FUNCINFO_REC_SIZE || |
| 17741 | urec_size % sizeof(u32)) { |
| 17742 | verbose(env, "invalid func info rec size %u\n", urec_size); |
| 17743 | return -EINVAL; |
| 17744 | } |
| 17745 | |
| 17746 | prog = env->prog; |
| 17747 | btf = prog->aux->btf; |
| 17748 | |
| 17749 | urecord = make_bpfptr(attr->func_info, uattr.is_kernel); |
| 17750 | min_size = min_t(u32, krec_size, urec_size); |
| 17751 | |
| 17752 | krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); |
| 17753 | if (!krecord) |
| 17754 | return -ENOMEM; |
| 17755 | |
| 17756 | for (i = 0; i < nfuncs; i++) { |
| 17757 | ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); |
| 17758 | if (ret) { |
| 17759 | if (ret == -E2BIG) { |
| 17760 | verbose(env, "nonzero tailing record in func info"); |
| 17761 | /* set the size kernel expects so loader can zero |
| 17762 | * out the rest of the record. |
| 17763 | */ |
| 17764 | if (copy_to_bpfptr_offset(uattr, |
| 17765 | offsetof(union bpf_attr, func_info_rec_size), |
| 17766 | &min_size, sizeof(min_size))) |
| 17767 | ret = -EFAULT; |
| 17768 | } |
| 17769 | goto err_free; |
| 17770 | } |
| 17771 | |
| 17772 | if (copy_from_bpfptr(&krecord[i], urecord, min_size)) { |
| 17773 | ret = -EFAULT; |
| 17774 | goto err_free; |
| 17775 | } |
| 17776 | |
| 17777 | /* check insn_off */ |
| 17778 | ret = -EINVAL; |
| 17779 | if (i == 0) { |
| 17780 | if (krecord[i].insn_off) { |
| 17781 | verbose(env, |
| 17782 | "nonzero insn_off %u for the first func info record", |
| 17783 | krecord[i].insn_off); |
| 17784 | goto err_free; |
| 17785 | } |
| 17786 | } else if (krecord[i].insn_off <= prev_offset) { |
| 17787 | verbose(env, |
| 17788 | "same or smaller insn offset (%u) than previous func info record (%u)", |
| 17789 | krecord[i].insn_off, prev_offset); |
| 17790 | goto err_free; |
| 17791 | } |
| 17792 | |
| 17793 | /* check type_id */ |
| 17794 | type = btf_type_by_id(btf, krecord[i].type_id); |
| 17795 | if (!type || !btf_type_is_func(type)) { |
| 17796 | verbose(env, "invalid type id %d in func info", |
| 17797 | krecord[i].type_id); |
| 17798 | goto err_free; |
| 17799 | } |
| 17800 | |
| 17801 | func_proto = btf_type_by_id(btf, type->type); |
| 17802 | if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto))) |
| 17803 | /* btf_func_check() already verified it during BTF load */ |
| 17804 | goto err_free; |
| 17805 | |
| 17806 | prev_offset = krecord[i].insn_off; |
| 17807 | bpfptr_add(&urecord, urec_size); |
| 17808 | } |
| 17809 | |
| 17810 | prog->aux->func_info = krecord; |
| 17811 | prog->aux->func_info_cnt = nfuncs; |
| 17812 | return 0; |
| 17813 | |
| 17814 | err_free: |
| 17815 | kvfree(krecord); |
| 17816 | return ret; |
| 17817 | } |
| 17818 | |
| 17819 | static int check_btf_func(struct bpf_verifier_env *env, |
| 17820 | const union bpf_attr *attr, |
| 17821 | bpfptr_t uattr) |
| 17822 | { |
| 17823 | const struct btf_type *type, *func_proto, *ret_type; |
| 17824 | u32 i, nfuncs, urec_size; |
| 17825 | struct bpf_func_info *krecord; |
| 17826 | struct bpf_func_info_aux *info_aux = NULL; |
| 17827 | struct bpf_prog *prog; |
| 17828 | const struct btf *btf; |
| 17829 | bpfptr_t urecord; |
| 17830 | bool scalar_return; |
| 17831 | int ret = -ENOMEM; |
| 17832 | |
| 17833 | nfuncs = attr->func_info_cnt; |
| 17834 | if (!nfuncs) { |
| 17835 | if (check_abnormal_return(env)) |
| 17836 | return -EINVAL; |
| 17837 | return 0; |
| 17838 | } |
| 17839 | if (nfuncs != env->subprog_cnt) { |
| 17840 | verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); |
| 17841 | return -EINVAL; |
| 17842 | } |
| 17843 | |
| 17844 | urec_size = attr->func_info_rec_size; |
| 17845 | |
| 17846 | prog = env->prog; |
| 17847 | btf = prog->aux->btf; |
| 17848 | |
| 17849 | urecord = make_bpfptr(attr->func_info, uattr.is_kernel); |
| 17850 | |
| 17851 | krecord = prog->aux->func_info; |
| 17852 | info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); |
| 17853 | if (!info_aux) |
| 17854 | return -ENOMEM; |
| 17855 | |
| 17856 | for (i = 0; i < nfuncs; i++) { |
| 17857 | /* check insn_off */ |
| 17858 | ret = -EINVAL; |
| 17859 | |
| 17860 | if (env->subprog_info[i].start != krecord[i].insn_off) { |
| 17861 | verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); |
| 17862 | goto err_free; |
| 17863 | } |
| 17864 | |
| 17865 | /* Already checked type_id */ |
| 17866 | type = btf_type_by_id(btf, krecord[i].type_id); |
| 17867 | info_aux[i].linkage = BTF_INFO_VLEN(type->info); |
| 17868 | /* Already checked func_proto */ |
| 17869 | func_proto = btf_type_by_id(btf, type->type); |
| 17870 | |
| 17871 | ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL); |
| 17872 | scalar_return = |
| 17873 | btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type); |
| 17874 | if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { |
| 17875 | verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n"); |
| 17876 | goto err_free; |
| 17877 | } |
| 17878 | if (i && !scalar_return && env->subprog_info[i].has_tail_call) { |
| 17879 | verbose(env, "tail_call is only allowed in functions that return 'int'.\n"); |
| 17880 | goto err_free; |
| 17881 | } |
| 17882 | |
| 17883 | bpfptr_add(&urecord, urec_size); |
| 17884 | } |
| 17885 | |
| 17886 | prog->aux->func_info_aux = info_aux; |
| 17887 | return 0; |
| 17888 | |
| 17889 | err_free: |
| 17890 | kfree(info_aux); |
| 17891 | return ret; |
| 17892 | } |
| 17893 | |
| 17894 | static void adjust_btf_func(struct bpf_verifier_env *env) |
| 17895 | { |
| 17896 | struct bpf_prog_aux *aux = env->prog->aux; |
| 17897 | int i; |
| 17898 | |
| 17899 | if (!aux->func_info) |
| 17900 | return; |
| 17901 | |
| 17902 | /* func_info is not available for hidden subprogs */ |
| 17903 | for (i = 0; i < env->subprog_cnt - env->hidden_subprog_cnt; i++) |
| 17904 | aux->func_info[i].insn_off = env->subprog_info[i].start; |
| 17905 | } |
| 17906 | |
| 17907 | #define MIN_BPF_LINEINFO_SIZE offsetofend(struct bpf_line_info, line_col) |
| 17908 | #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE |
| 17909 | |
| 17910 | static int check_btf_line(struct bpf_verifier_env *env, |
| 17911 | const union bpf_attr *attr, |
| 17912 | bpfptr_t uattr) |
| 17913 | { |
| 17914 | u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; |
| 17915 | struct bpf_subprog_info *sub; |
| 17916 | struct bpf_line_info *linfo; |
| 17917 | struct bpf_prog *prog; |
| 17918 | const struct btf *btf; |
| 17919 | bpfptr_t ulinfo; |
| 17920 | int err; |
| 17921 | |
| 17922 | nr_linfo = attr->line_info_cnt; |
| 17923 | if (!nr_linfo) |
| 17924 | return 0; |
| 17925 | if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info)) |
| 17926 | return -EINVAL; |
| 17927 | |
| 17928 | rec_size = attr->line_info_rec_size; |
| 17929 | if (rec_size < MIN_BPF_LINEINFO_SIZE || |
| 17930 | rec_size > MAX_LINEINFO_REC_SIZE || |
| 17931 | rec_size & (sizeof(u32) - 1)) |
| 17932 | return -EINVAL; |
| 17933 | |
/* Need to zero it in case userspace passes in a smaller
* bpf_line_info object.
| 17936 | */ |
| 17937 | linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), |
| 17938 | GFP_KERNEL | __GFP_NOWARN); |
| 17939 | if (!linfo) |
| 17940 | return -ENOMEM; |
| 17941 | |
| 17942 | prog = env->prog; |
| 17943 | btf = prog->aux->btf; |
| 17944 | |
| 17945 | s = 0; |
| 17946 | sub = env->subprog_info; |
| 17947 | ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel); |
| 17948 | expected_size = sizeof(struct bpf_line_info); |
| 17949 | ncopy = min_t(u32, expected_size, rec_size); |
| 17950 | for (i = 0; i < nr_linfo; i++) { |
| 17951 | err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); |
| 17952 | if (err) { |
| 17953 | if (err == -E2BIG) { |
| 17954 | verbose(env, "nonzero tailing record in line_info"); |
| 17955 | if (copy_to_bpfptr_offset(uattr, |
| 17956 | offsetof(union bpf_attr, line_info_rec_size), |
| 17957 | &expected_size, sizeof(expected_size))) |
| 17958 | err = -EFAULT; |
| 17959 | } |
| 17960 | goto err_free; |
| 17961 | } |
| 17962 | |
| 17963 | if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) { |
| 17964 | err = -EFAULT; |
| 17965 | goto err_free; |
| 17966 | } |
| 17967 | |
| 17968 | /* |
| 17969 | * Check insn_off to ensure |
| 17970 | * 1) strictly increasing AND |
| 17971 | * 2) bounded by prog->len |
| 17972 | * |
| 17973 | * The linfo[0].insn_off == 0 check logically falls into |
| 17974 | * the later "missing bpf_line_info for func..." case |
* because linfo[0].insn_off must match the start of the
* first subprog, and the first subprog must have
* subprog_info[0].start == 0.
| 17978 | */ |
| 17979 | if ((i && linfo[i].insn_off <= prev_offset) || |
| 17980 | linfo[i].insn_off >= prog->len) { |
| 17981 | verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", |
| 17982 | i, linfo[i].insn_off, prev_offset, |
| 17983 | prog->len); |
| 17984 | err = -EINVAL; |
| 17985 | goto err_free; |
| 17986 | } |
| 17987 | |
| 17988 | if (!prog->insnsi[linfo[i].insn_off].code) { |
| 17989 | verbose(env, |
| 17990 | "Invalid insn code at line_info[%u].insn_off\n", |
| 17991 | i); |
| 17992 | err = -EINVAL; |
| 17993 | goto err_free; |
| 17994 | } |
| 17995 | |
| 17996 | if (!btf_name_by_offset(btf, linfo[i].line_off) || |
| 17997 | !btf_name_by_offset(btf, linfo[i].file_name_off)) { |
| 17998 | verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); |
| 17999 | err = -EINVAL; |
| 18000 | goto err_free; |
| 18001 | } |
| 18002 | |
| 18003 | if (s != env->subprog_cnt) { |
| 18004 | if (linfo[i].insn_off == sub[s].start) { |
| 18005 | sub[s].linfo_idx = i; |
| 18006 | s++; |
| 18007 | } else if (sub[s].start < linfo[i].insn_off) { |
| 18008 | verbose(env, "missing bpf_line_info for func#%u\n", s); |
| 18009 | err = -EINVAL; |
| 18010 | goto err_free; |
| 18011 | } |
| 18012 | } |
| 18013 | |
| 18014 | prev_offset = linfo[i].insn_off; |
| 18015 | bpfptr_add(&ulinfo, rec_size); |
| 18016 | } |
| 18017 | |
| 18018 | if (s != env->subprog_cnt) { |
| 18019 | verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", |
| 18020 | env->subprog_cnt - s, s); |
| 18021 | err = -EINVAL; |
| 18022 | goto err_free; |
| 18023 | } |
| 18024 | |
| 18025 | prog->aux->linfo = linfo; |
| 18026 | prog->aux->nr_linfo = nr_linfo; |
| 18027 | |
| 18028 | return 0; |
| 18029 | |
| 18030 | err_free: |
| 18031 | kvfree(linfo); |
| 18032 | return err; |
| 18033 | } |
| 18034 | |
| 18035 | #define MIN_CORE_RELO_SIZE sizeof(struct bpf_core_relo) |
| 18036 | #define MAX_CORE_RELO_SIZE MAX_FUNCINFO_REC_SIZE |
| 18037 | |
| 18038 | static int check_core_relo(struct bpf_verifier_env *env, |
| 18039 | const union bpf_attr *attr, |
| 18040 | bpfptr_t uattr) |
| 18041 | { |
| 18042 | u32 i, nr_core_relo, ncopy, expected_size, rec_size; |
| 18043 | struct bpf_core_relo core_relo = {}; |
| 18044 | struct bpf_prog *prog = env->prog; |
| 18045 | const struct btf *btf = prog->aux->btf; |
| 18046 | struct bpf_core_ctx ctx = { |
| 18047 | .log = &env->log, |
| 18048 | .btf = btf, |
| 18049 | }; |
| 18050 | bpfptr_t u_core_relo; |
| 18051 | int err; |
| 18052 | |
| 18053 | nr_core_relo = attr->core_relo_cnt; |
| 18054 | if (!nr_core_relo) |
| 18055 | return 0; |
| 18056 | if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo)) |
| 18057 | return -EINVAL; |
| 18058 | |
| 18059 | rec_size = attr->core_relo_rec_size; |
| 18060 | if (rec_size < MIN_CORE_RELO_SIZE || |
| 18061 | rec_size > MAX_CORE_RELO_SIZE || |
| 18062 | rec_size % sizeof(u32)) |
| 18063 | return -EINVAL; |
| 18064 | |
| 18065 | u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel); |
| 18066 | expected_size = sizeof(struct bpf_core_relo); |
| 18067 | ncopy = min_t(u32, expected_size, rec_size); |
| 18068 | |
| 18069 | /* Unlike func_info and line_info, copy and apply each CO-RE |
| 18070 | * relocation record one at a time. |
| 18071 | */ |
| 18072 | for (i = 0; i < nr_core_relo; i++) { |
| 18073 | /* future proofing when sizeof(bpf_core_relo) changes */ |
| 18074 | err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size); |
| 18075 | if (err) { |
| 18076 | if (err == -E2BIG) { |
| 18077 | verbose(env, "nonzero tailing record in core_relo"); |
| 18078 | if (copy_to_bpfptr_offset(uattr, |
| 18079 | offsetof(union bpf_attr, core_relo_rec_size), |
| 18080 | &expected_size, sizeof(expected_size))) |
| 18081 | err = -EFAULT; |
| 18082 | } |
| 18083 | break; |
| 18084 | } |
| 18085 | |
| 18086 | if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) { |
| 18087 | err = -EFAULT; |
| 18088 | break; |
| 18089 | } |
| 18090 | |
| 18091 | if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) { |
| 18092 | verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n", |
| 18093 | i, core_relo.insn_off, prog->len); |
| 18094 | err = -EINVAL; |
| 18095 | break; |
| 18096 | } |
| 18097 | |
| 18098 | err = bpf_core_apply(&ctx, &core_relo, i, |
| 18099 | &prog->insnsi[core_relo.insn_off / 8]); |
| 18100 | if (err) |
| 18101 | break; |
| 18102 | bpfptr_add(&u_core_relo, rec_size); |
| 18103 | } |
| 18104 | return err; |
| 18105 | } |
| 18106 | |
| 18107 | static int check_btf_info_early(struct bpf_verifier_env *env, |
| 18108 | const union bpf_attr *attr, |
| 18109 | bpfptr_t uattr) |
| 18110 | { |
| 18111 | struct btf *btf; |
| 18112 | int err; |
| 18113 | |
| 18114 | if (!attr->func_info_cnt && !attr->line_info_cnt) { |
| 18115 | if (check_abnormal_return(env)) |
| 18116 | return -EINVAL; |
| 18117 | return 0; |
| 18118 | } |
| 18119 | |
| 18120 | btf = btf_get_by_fd(attr->prog_btf_fd); |
| 18121 | if (IS_ERR(btf)) |
| 18122 | return PTR_ERR(btf); |
| 18123 | if (btf_is_kernel(btf)) { |
| 18124 | btf_put(btf); |
| 18125 | return -EACCES; |
| 18126 | } |
| 18127 | env->prog->aux->btf = btf; |
| 18128 | |
| 18129 | err = check_btf_func_early(env, attr, uattr); |
| 18130 | if (err) |
| 18131 | return err; |
| 18132 | return 0; |
| 18133 | } |
| 18134 | |
| 18135 | static int check_btf_info(struct bpf_verifier_env *env, |
| 18136 | const union bpf_attr *attr, |
| 18137 | bpfptr_t uattr) |
| 18138 | { |
| 18139 | int err; |
| 18140 | |
| 18141 | if (!attr->func_info_cnt && !attr->line_info_cnt) { |
| 18142 | if (check_abnormal_return(env)) |
| 18143 | return -EINVAL; |
| 18144 | return 0; |
| 18145 | } |
| 18146 | |
| 18147 | err = check_btf_func(env, attr, uattr); |
| 18148 | if (err) |
| 18149 | return err; |
| 18150 | |
| 18151 | err = check_btf_line(env, attr, uattr); |
| 18152 | if (err) |
| 18153 | return err; |
| 18154 | |
| 18155 | err = check_core_relo(env, attr, uattr); |
| 18156 | if (err) |
| 18157 | return err; |
| 18158 | |
| 18159 | return 0; |
| 18160 | } |
| 18161 | |
| 18162 | /* check %cur's range satisfies %old's */ |
| 18163 | static bool range_within(const struct bpf_reg_state *old, |
| 18164 | const struct bpf_reg_state *cur) |
| 18165 | { |
| 18166 | return old->umin_value <= cur->umin_value && |
| 18167 | old->umax_value >= cur->umax_value && |
| 18168 | old->smin_value <= cur->smin_value && |
| 18169 | old->smax_value >= cur->smax_value && |
| 18170 | old->u32_min_value <= cur->u32_min_value && |
| 18171 | old->u32_max_value >= cur->u32_max_value && |
| 18172 | old->s32_min_value <= cur->s32_min_value && |
| 18173 | old->s32_max_value >= cur->s32_max_value; |
| 18174 | } |
| 18175 | |
| 18176 | /* If in the old state two registers had the same id, then they need to have |
| 18177 | * the same id in the new state as well. But that id could be different from |
| 18178 | * the old state, so we need to track the mapping from old to new ids. |
| 18179 | * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent |
| 18180 | * regs with old id 5 must also have new id 9 for the new state to be safe. But |
| 18181 | * regs with a different old id could still have new id 9, we don't care about |
| 18182 | * that. |
| 18183 | * So we look through our idmap to see if this old id has been seen before. If |
| 18184 | * so, we require the new id to match; otherwise, we add the id pair to the map. |
| 18185 | */ |
| 18186 | static bool check_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap) |
| 18187 | { |
| 18188 | struct bpf_id_pair *map = idmap->map; |
| 18189 | unsigned int i; |
| 18190 | |
| 18191 | /* either both IDs should be set or both should be zero */ |
| 18192 | if (!!old_id != !!cur_id) |
| 18193 | return false; |
| 18194 | |
| 18195 | if (old_id == 0) /* cur_id == 0 as well */ |
| 18196 | return true; |
| 18197 | |
| 18198 | for (i = 0; i < BPF_ID_MAP_SIZE; i++) { |
| 18199 | if (!map[i].old) { |
| 18200 | /* Reached an empty slot; haven't seen this id before */ |
| 18201 | map[i].old = old_id; |
| 18202 | map[i].cur = cur_id; |
| 18203 | return true; |
| 18204 | } |
| 18205 | if (map[i].old == old_id) |
| 18206 | return map[i].cur == cur_id; |
| 18207 | if (map[i].cur == cur_id) |
| 18208 | return false; |
| 18209 | } |
| 18210 | /* We ran out of idmap slots, which should be impossible */ |
| 18211 | WARN_ON_ONCE(1); |
| 18212 | return false; |
| 18213 | } |
| 18214 | |
| 18215 | /* Similar to check_ids(), but allocate a unique temporary ID |
| 18216 | * for 'old_id' or 'cur_id' of zero. |
| 18217 | * This makes pairs like '0 vs unique ID', 'unique ID vs 0' valid. |
| 18218 | */ |
| 18219 | static bool check_scalar_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap) |
| 18220 | { |
| 18221 | old_id = old_id ? old_id : ++idmap->tmp_id_gen; |
| 18222 | cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen; |
| 18223 | |
| 18224 | return check_ids(old_id, cur_id, idmap); |
| 18225 | } |
| 18226 | |
| 18227 | static void clean_func_state(struct bpf_verifier_env *env, |
| 18228 | struct bpf_func_state *st) |
| 18229 | { |
| 18230 | enum bpf_reg_liveness live; |
| 18231 | int i, j; |
| 18232 | |
| 18233 | for (i = 0; i < BPF_REG_FP; i++) { |
| 18234 | live = st->regs[i].live; |
| 18235 | /* liveness must not touch this register anymore */ |
| 18236 | st->regs[i].live |= REG_LIVE_DONE; |
| 18237 | if (!(live & REG_LIVE_READ)) |
| 18238 | /* since the register is unused, clear its state |
| 18239 | * to make further comparison simpler |
| 18240 | */ |
| 18241 | __mark_reg_not_init(env, &st->regs[i]); |
| 18242 | } |
| 18243 | |
| 18244 | for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { |
| 18245 | live = st->stack[i].spilled_ptr.live; |
| 18246 | /* liveness must not touch this stack slot anymore */ |
| 18247 | st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; |
| 18248 | if (!(live & REG_LIVE_READ)) { |
| 18249 | __mark_reg_not_init(env, &st->stack[i].spilled_ptr); |
| 18250 | for (j = 0; j < BPF_REG_SIZE; j++) |
| 18251 | st->stack[i].slot_type[j] = STACK_INVALID; |
| 18252 | } |
| 18253 | } |
| 18254 | } |
| 18255 | |
| 18256 | static void clean_verifier_state(struct bpf_verifier_env *env, |
| 18257 | struct bpf_verifier_state *st) |
| 18258 | { |
| 18259 | int i; |
| 18260 | |
| 18261 | if (st->frame[0]->regs[0].live & REG_LIVE_DONE) |
| 18262 | /* all regs in this state in all frames were already marked */ |
| 18263 | return; |
| 18264 | |
| 18265 | for (i = 0; i <= st->curframe; i++) |
| 18266 | clean_func_state(env, st->frame[i]); |
| 18267 | } |
| 18268 | |
| 18269 | /* the parentage chains form a tree. |
| 18270 | * the verifier states are added to state lists at given insn and |
| 18271 | * pushed into state stack for future exploration. |
* when the verifier reaches bpf_exit insn some of the verifier states
| 18273 | * stored in the state lists have their final liveness state already, |
| 18274 | * but a lot of states will get revised from liveness point of view when |
| 18275 | * the verifier explores other branches. |
| 18276 | * Example: |
| 18277 | * 1: r0 = 1 |
| 18278 | * 2: if r1 == 100 goto pc+1 |
| 18279 | * 3: r0 = 2 |
| 18280 | * 4: exit |
| 18281 | * when the verifier reaches exit insn the register r0 in the state list of |
| 18282 | * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch |
| 18283 | * of insn 2 and goes exploring further. At the insn 4 it will walk the |
| 18284 | * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ. |
| 18285 | * |
* Since the verifier pushes the branch states as it sees them while exploring
* the program, walking a branch instruction for the second time means that
* all states below this branch were already explored and their final liveness
* marks are already propagated.
* Hence when the verifier completes the search of the state list in
* is_state_visited() we can call this clean_live_states() function to mark all
* liveness states as REG_LIVE_DONE to indicate that 'parent' pointers of
* 'struct bpf_reg_state' will not be used.
* This function also clears the registers and stack slots that were not read
* (!REG_LIVE_READ) to simplify state merging.
| 18296 | * |
* An important note here is that walking the same branch instruction in the
* callee doesn't mean that the states are DONE. The verifier has to compare
* the callsites.
| 18300 | */ |
| 18301 | static void clean_live_states(struct bpf_verifier_env *env, int insn, |
| 18302 | struct bpf_verifier_state *cur) |
| 18303 | { |
| 18304 | struct bpf_verifier_state *loop_entry; |
| 18305 | struct bpf_verifier_state_list *sl; |
| 18306 | struct list_head *pos, *head; |
| 18307 | |
| 18308 | head = explored_state(env, insn); |
| 18309 | list_for_each(pos, head) { |
| 18310 | sl = container_of(pos, struct bpf_verifier_state_list, node); |
| 18311 | if (sl->state.branches) |
| 18312 | continue; |
| 18313 | loop_entry = get_loop_entry(env, &sl->state); |
| 18314 | if (!IS_ERR_OR_NULL(loop_entry) && loop_entry->branches) |
| 18315 | continue; |
| 18316 | if (sl->state.insn_idx != insn || |
| 18317 | !same_callsites(&sl->state, cur)) |
| 18318 | continue; |
| 18319 | clean_verifier_state(env, &sl->state); |
| 18320 | } |
| 18321 | } |
| 18322 | |
| 18323 | static bool regs_exact(const struct bpf_reg_state *rold, |
| 18324 | const struct bpf_reg_state *rcur, |
| 18325 | struct bpf_idmap *idmap) |
| 18326 | { |
| 18327 | return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && |
| 18328 | check_ids(rold->id, rcur->id, idmap) && |
| 18329 | check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); |
| 18330 | } |
| 18331 | |
| 18332 | enum exact_level { |
| 18333 | NOT_EXACT, |
| 18334 | EXACT, |
| 18335 | RANGE_WITHIN |
| 18336 | }; |
| 18337 | |
| 18338 | /* Returns true if (rold safe implies rcur safe) */ |
| 18339 | static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, |
| 18340 | struct bpf_reg_state *rcur, struct bpf_idmap *idmap, |
| 18341 | enum exact_level exact) |
| 18342 | { |
| 18343 | if (exact == EXACT) |
| 18344 | return regs_exact(rold, rcur, idmap); |
| 18345 | |
| 18346 | if (!(rold->live & REG_LIVE_READ) && exact == NOT_EXACT) |
| 18347 | /* explored state didn't use this */ |
| 18348 | return true; |
| 18349 | if (rold->type == NOT_INIT) { |
| 18350 | if (exact == NOT_EXACT || rcur->type == NOT_INIT) |
| 18351 | /* explored state can't have used this */ |
| 18352 | return true; |
| 18353 | } |
| 18354 | |
| 18355 | /* Enforce that register types have to match exactly, including their |
| 18356 | * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general |
| 18357 | * rule. |
| 18358 | * |
| 18359 | * One can make a point that using a pointer register as unbounded |
| 18360 | * SCALAR would be technically acceptable, but this could lead to |
| 18361 | * pointer leaks because scalars are allowed to leak while pointers |
| 18362 | * are not. We could make this safe in special cases if root is |
| 18363 | * calling us, but it's probably not worth the hassle. |
| 18364 | * |
| 18365 | * Also, register types that are *not* MAYBE_NULL could technically be |
| 18366 | * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE |
| 18367 | * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point |
| 18368 | * to the same map). |
| 18369 | * However, if the old MAYBE_NULL register then got NULL checked, |
| 18370 | * doing so could have affected others with the same id, and we can't |
| 18371 | * check for that because we lost the id when we converted to |
| 18372 | * a non-MAYBE_NULL variant. |
| 18373 | * So, as a general rule we don't allow mixing MAYBE_NULL and |
| 18374 | * non-MAYBE_NULL registers as well. |
| 18375 | */ |
| 18376 | if (rold->type != rcur->type) |
| 18377 | return false; |
| 18378 | |
| 18379 | switch (base_type(rold->type)) { |
| 18380 | case SCALAR_VALUE: |
| 18381 | if (env->explore_alu_limits) { |
| 18382 | /* explore_alu_limits disables tnum_in() and range_within() |
| 18383 | * logic and requires everything to be strict |
| 18384 | */ |
| 18385 | return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && |
| 18386 | check_scalar_ids(rold->id, rcur->id, idmap); |
| 18387 | } |
| 18388 | if (!rold->precise && exact == NOT_EXACT) |
| 18389 | return true; |
| 18390 | if ((rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST)) |
| 18391 | return false; |
| 18392 | if ((rold->id & BPF_ADD_CONST) && (rold->off != rcur->off)) |
| 18393 | return false; |
| 18394 | /* Why check_ids() for scalar registers? |
| 18395 | * |
| 18396 | * Consider the following BPF code: |
| 18397 | * 1: r6 = ... unbound scalar, ID=a ... |
| 18398 | * 2: r7 = ... unbound scalar, ID=b ... |
| 18399 | * 3: if (r6 > r7) goto +1 |
| 18400 | * 4: r6 = r7 |
| 18401 | * 5: if (r6 > X) goto ... |
| 18402 | * 6: ... memory operation using r7 ... |
| 18403 | * |
| 18404 | * First verification path is [1-6]: |
| 18405 | * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7; |
| 18406 | * - at (5) r6 would be marked <= X, sync_linked_regs() would also mark |
| 18407 | * r7 <= X, because r6 and r7 share same id. |
| 18408 | * Next verification path is [1-4, 6]. |
| 18409 | * |
| 18410 | * Instruction (6) would be reached in two states: |
| 18411 | * I. r6{.id=b}, r7{.id=b} via path 1-6; |
| 18412 | * II. r6{.id=a}, r7{.id=b} via path 1-4, 6. |
| 18413 | * |
| 18414 | * Use check_ids() to distinguish these states. |
| 18415 | * --- |
| 18416 | * Also verify that new value satisfies old value range knowledge. |
| 18417 | */ |
| 18418 | return range_within(rold, rcur) && |
| 18419 | tnum_in(rold->var_off, rcur->var_off) && |
| 18420 | check_scalar_ids(rold->id, rcur->id, idmap); |
| 18421 | case PTR_TO_MAP_KEY: |
| 18422 | case PTR_TO_MAP_VALUE: |
| 18423 | case PTR_TO_MEM: |
| 18424 | case PTR_TO_BUF: |
| 18425 | case PTR_TO_TP_BUFFER: |
| 18426 | /* If the new min/max/var_off satisfy the old ones and |
| 18427 | * everything else matches, we are OK. |
| 18428 | */ |
| 18429 | return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 && |
| 18430 | range_within(rold, rcur) && |
| 18431 | tnum_in(rold->var_off, rcur->var_off) && |
| 18432 | check_ids(rold->id, rcur->id, idmap) && |
| 18433 | check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); |
| 18434 | case PTR_TO_PACKET_META: |
| 18435 | case PTR_TO_PACKET: |
| 18436 | /* We must have at least as much range as the old ptr |
| 18437 | * did, so that any accesses which were safe before are |
| 18438 | * still safe. This is true even if old range < old off, |
| 18439 | * since someone could have accessed through (ptr - k), or |
| 18440 | * even done ptr -= k in a register, to get a safe access. |
| 18441 | */ |
| 18442 | if (rold->range > rcur->range) |
| 18443 | return false; |
| 18444 | /* If the offsets don't match, we can't trust our alignment; |
| 18445 | * nor can we be sure that we won't fall out of range. |
| 18446 | */ |
| 18447 | if (rold->off != rcur->off) |
| 18448 | return false; |
| 18449 | /* id relations must be preserved */ |
| 18450 | if (!check_ids(rold->id, rcur->id, idmap)) |
| 18451 | return false; |
| 18452 | /* new val must satisfy old val knowledge */ |
| 18453 | return range_within(rold, rcur) && |
| 18454 | tnum_in(rold->var_off, rcur->var_off); |
| 18455 | case PTR_TO_STACK: |
| 18456 | /* two stack pointers are equal only if they're pointing to |
| 18457 | * the same stack frame, since fp-8 in foo != fp-8 in bar |
| 18458 | */ |
| 18459 | return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno; |
| 18460 | case PTR_TO_ARENA: |
| 18461 | return true; |
| 18462 | default: |
| 18463 | return regs_exact(rold, rcur, idmap); |
| 18464 | } |
| 18465 | } |
| 18466 | |
| 18467 | static struct bpf_reg_state unbound_reg; |
| 18468 | |
| 18469 | static __init int unbound_reg_init(void) |
| 18470 | { |
| 18471 | __mark_reg_unknown_imprecise(&unbound_reg); |
| 18472 | unbound_reg.live |= REG_LIVE_READ; |
| 18473 | return 0; |
| 18474 | } |
| 18475 | late_initcall(unbound_reg_init); |
| 18476 | |
| 18477 | static bool is_stack_all_misc(struct bpf_verifier_env *env, |
| 18478 | struct bpf_stack_state *stack) |
| 18479 | { |
| 18480 | u32 i; |
| 18481 | |
| 18482 | for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i) { |
| 18483 | if ((stack->slot_type[i] == STACK_MISC) || |
| 18484 | (stack->slot_type[i] == STACK_INVALID && env->allow_uninit_stack)) |
| 18485 | continue; |
| 18486 | return false; |
| 18487 | } |
| 18488 | |
| 18489 | return true; |
| 18490 | } |
| 18491 | |
| 18492 | static struct bpf_reg_state *scalar_reg_for_stack(struct bpf_verifier_env *env, |
| 18493 | struct bpf_stack_state *stack) |
| 18494 | { |
| 18495 | if (is_spilled_scalar_reg64(stack)) |
| 18496 | return &stack->spilled_ptr; |
| 18497 | |
| 18498 | if (is_stack_all_misc(env, stack)) |
| 18499 | return &unbound_reg; |
| 18500 | |
| 18501 | return NULL; |
| 18502 | } |
| 18503 | |
| 18504 | static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, |
| 18505 | struct bpf_func_state *cur, struct bpf_idmap *idmap, |
| 18506 | enum exact_level exact) |
| 18507 | { |
| 18508 | int i, spi; |
| 18509 | |
| 18510 | /* walk slots of the explored stack and ignore any additional |
| 18511 | * slots in the current stack, since explored(safe) state |
| 18512 | * didn't use them |
| 18513 | */ |
| 18514 | for (i = 0; i < old->allocated_stack; i++) { |
| 18515 | struct bpf_reg_state *old_reg, *cur_reg; |
| 18516 | |
| 18517 | spi = i / BPF_REG_SIZE; |
| 18518 | |
| 18519 | if (exact != NOT_EXACT && |
| 18520 | (i >= cur->allocated_stack || |
| 18521 | old->stack[spi].slot_type[i % BPF_REG_SIZE] != |
| 18522 | cur->stack[spi].slot_type[i % BPF_REG_SIZE])) |
| 18523 | return false; |
| 18524 | |
| 18525 | if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ) |
| 18526 | && exact == NOT_EXACT) { |
| 18527 | i += BPF_REG_SIZE - 1; |
| 18528 | /* explored state didn't use this */ |
| 18529 | continue; |
| 18530 | } |
| 18531 | |
| 18532 | if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) |
| 18533 | continue; |
| 18534 | |
| 18535 | if (env->allow_uninit_stack && |
| 18536 | old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC) |
| 18537 | continue; |
| 18538 | |
| 18539 | /* explored stack has more populated slots than current stack |
| 18540 | * and these slots were used |
| 18541 | */ |
| 18542 | if (i >= cur->allocated_stack) |
| 18543 | return false; |
| 18544 | |
		/* 64-bit scalar spill vs. all slots MISC, and vice versa.
		 * A load from an all-MISC region produces an unbound scalar.
		 * Construct a fake register for such a stack slot and call
		 * regsafe() so that scalar ids are still compared.
		 */
| 18550 | old_reg = scalar_reg_for_stack(env, &old->stack[spi]); |
| 18551 | cur_reg = scalar_reg_for_stack(env, &cur->stack[spi]); |
| 18552 | if (old_reg && cur_reg) { |
| 18553 | if (!regsafe(env, old_reg, cur_reg, idmap, exact)) |
| 18554 | return false; |
| 18555 | i += BPF_REG_SIZE - 1; |
| 18556 | continue; |
| 18557 | } |
| 18558 | |
| 18559 | /* if old state was safe with misc data in the stack |
| 18560 | * it will be safe with zero-initialized stack. |
| 18561 | * The opposite is not true |
| 18562 | */ |
| 18563 | if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && |
| 18564 | cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) |
| 18565 | continue; |
| 18566 | if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != |
| 18567 | cur->stack[spi].slot_type[i % BPF_REG_SIZE]) |
| 18568 | /* Ex: old explored (safe) state has STACK_SPILL in |
| 18569 | * this stack slot, but current has STACK_MISC -> |
			 * these verifier states are not equivalent,
| 18571 | * return false to continue verification of this path |
| 18572 | */ |
| 18573 | return false; |
| 18574 | if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) |
| 18575 | continue; |
		/* Both old and cur have the same slot_type */
| 18577 | switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { |
| 18578 | case STACK_SPILL: |
| 18579 | /* when explored and current stack slot are both storing |
			 * spilled registers, check that the stored pointer types
| 18581 | * are the same as well. |
| 18582 | * Ex: explored safe path could have stored |
| 18583 | * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} |
| 18584 | * but current path has stored: |
| 18585 | * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} |
| 18586 | * such verifier states are not equivalent. |
| 18587 | * return false to continue verification of this path |
| 18588 | */ |
| 18589 | if (!regsafe(env, &old->stack[spi].spilled_ptr, |
| 18590 | &cur->stack[spi].spilled_ptr, idmap, exact)) |
| 18591 | return false; |
| 18592 | break; |
| 18593 | case STACK_DYNPTR: |
| 18594 | old_reg = &old->stack[spi].spilled_ptr; |
| 18595 | cur_reg = &cur->stack[spi].spilled_ptr; |
| 18596 | if (old_reg->dynptr.type != cur_reg->dynptr.type || |
| 18597 | old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot || |
| 18598 | !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) |
| 18599 | return false; |
| 18600 | break; |
| 18601 | case STACK_ITER: |
| 18602 | old_reg = &old->stack[spi].spilled_ptr; |
| 18603 | cur_reg = &cur->stack[spi].spilled_ptr; |
			/* iter.depth is not compared between states as it
			 * doesn't matter for correctness and would otherwise
			 * prevent convergence; we maintain it only to keep the
			 * infinite loop check from triggering, see
			 * iter_active_depths_differ()
| 18609 | */ |
| 18610 | if (old_reg->iter.btf != cur_reg->iter.btf || |
| 18611 | old_reg->iter.btf_id != cur_reg->iter.btf_id || |
| 18612 | old_reg->iter.state != cur_reg->iter.state || |
| 18613 | /* ignore {old_reg,cur_reg}->iter.depth, see above */ |
| 18614 | !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) |
| 18615 | return false; |
| 18616 | break; |
| 18617 | case STACK_IRQ_FLAG: |
| 18618 | old_reg = &old->stack[spi].spilled_ptr; |
| 18619 | cur_reg = &cur->stack[spi].spilled_ptr; |
| 18620 | if (!check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap) || |
| 18621 | old_reg->irq.kfunc_class != cur_reg->irq.kfunc_class) |
| 18622 | return false; |
| 18623 | break; |
| 18624 | case STACK_MISC: |
| 18625 | case STACK_ZERO: |
| 18626 | case STACK_INVALID: |
| 18627 | continue; |
| 18628 | /* Ensure that new unhandled slot types return false by default */ |
| 18629 | default: |
| 18630 | return false; |
| 18631 | } |
| 18632 | } |
| 18633 | return true; |
| 18634 | } |
| 18635 | |
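/* Check that the reference/lock state of 'old' and 'cur' match: the same
 * number and types of acquired references and the same lock/RCU/IRQ/preemption
 * bookkeeping, with ids compared through the idmap. (A summary; the precise
 * rules follow below.)
 */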
| 18636 | static bool refsafe(struct bpf_verifier_state *old, struct bpf_verifier_state *cur, |
| 18637 | struct bpf_idmap *idmap) |
| 18638 | { |
| 18639 | int i; |
| 18640 | |
| 18641 | if (old->acquired_refs != cur->acquired_refs) |
| 18642 | return false; |
| 18643 | |
| 18644 | if (old->active_locks != cur->active_locks) |
| 18645 | return false; |
| 18646 | |
| 18647 | if (old->active_preempt_locks != cur->active_preempt_locks) |
| 18648 | return false; |
| 18649 | |
| 18650 | if (old->active_rcu_lock != cur->active_rcu_lock) |
| 18651 | return false; |
| 18652 | |
| 18653 | if (!check_ids(old->active_irq_id, cur->active_irq_id, idmap)) |
| 18654 | return false; |
| 18655 | |
| 18656 | if (!check_ids(old->active_lock_id, cur->active_lock_id, idmap) || |
| 18657 | old->active_lock_ptr != cur->active_lock_ptr) |
| 18658 | return false; |
| 18659 | |
| 18660 | for (i = 0; i < old->acquired_refs; i++) { |
| 18661 | if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) || |
| 18662 | old->refs[i].type != cur->refs[i].type) |
| 18663 | return false; |
| 18664 | switch (old->refs[i].type) { |
| 18665 | case REF_TYPE_PTR: |
| 18666 | case REF_TYPE_IRQ: |
| 18667 | break; |
| 18668 | case REF_TYPE_LOCK: |
| 18669 | case REF_TYPE_RES_LOCK: |
| 18670 | case REF_TYPE_RES_LOCK_IRQ: |
| 18671 | if (old->refs[i].ptr != cur->refs[i].ptr) |
| 18672 | return false; |
| 18673 | break; |
| 18674 | default: |
| 18675 | WARN_ONCE(1, "Unhandled enum type for reference state: %d\n", old->refs[i].type); |
| 18676 | return false; |
| 18677 | } |
| 18678 | } |
| 18679 | |
| 18680 | return true; |
| 18681 | } |
| 18682 | |
| 18683 | /* compare two verifier states |
| 18684 | * |
| 18685 | * all states stored in state_list are known to be valid, since |
 * the verifier reached the 'bpf_exit' instruction through them
 *
 * this function is called while the verifier explores different branches of
 * execution popped from the state stack. If it sees an old state that has
 * a more strict register state and a more strict stack state, then this
 * execution branch doesn't need to be explored further, since the verifier
 * already concluded that the more strict state leads to a valid finish.
| 18693 | * |
 * Therefore two states are equivalent if the explored register state and
 * stack state are more conservative than the current ones.
| 18696 | * Example: |
| 18697 | * explored current |
| 18698 | * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) |
| 18699 | * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) |
| 18700 | * |
| 18701 | * In other words if current stack state (one being explored) has more |
| 18702 | * valid slots than old one that already passed validation, it means |
| 18703 | * the verifier can stop exploring and conclude that current state is valid too |
| 18704 | * |
 * Similarly with registers. If the explored state has a register type as invalid
 * whereas the register type in the current state is meaningful, it means that
 * the current state will reach the 'bpf_exit' instruction safely
| 18708 | */ |
| 18709 | static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, |
| 18710 | struct bpf_func_state *cur, u32 insn_idx, enum exact_level exact) |
| 18711 | { |
| 18712 | u16 live_regs = env->insn_aux_data[insn_idx].live_regs_before; |
| 18713 | u16 i; |
| 18714 | |
| 18715 | if (old->callback_depth > cur->callback_depth) |
| 18716 | return false; |
| 18717 | |
| 18718 | for (i = 0; i < MAX_BPF_REG; i++) |
| 18719 | if (((1 << i) & live_regs) && |
| 18720 | !regsafe(env, &old->regs[i], &cur->regs[i], |
| 18721 | &env->idmap_scratch, exact)) |
| 18722 | return false; |
| 18723 | |
| 18724 | if (!stacksafe(env, old, cur, &env->idmap_scratch, exact)) |
| 18725 | return false; |
| 18726 | |
| 18727 | return true; |
| 18728 | } |
| 18729 | |
| 18730 | static void reset_idmap_scratch(struct bpf_verifier_env *env) |
| 18731 | { |
| 18732 | env->idmap_scratch.tmp_id_gen = env->id_gen; |
| 18733 | memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map)); |
| 18734 | } |
| 18735 | |
| 18736 | static bool states_equal(struct bpf_verifier_env *env, |
| 18737 | struct bpf_verifier_state *old, |
| 18738 | struct bpf_verifier_state *cur, |
| 18739 | enum exact_level exact) |
| 18740 | { |
| 18741 | u32 insn_idx; |
| 18742 | int i; |
| 18743 | |
| 18744 | if (old->curframe != cur->curframe) |
| 18745 | return false; |
| 18746 | |
| 18747 | reset_idmap_scratch(env); |
| 18748 | |
| 18749 | /* Verification state from speculative execution simulation |
| 18750 | * must never prune a non-speculative execution one. |
| 18751 | */ |
| 18752 | if (old->speculative && !cur->speculative) |
| 18753 | return false; |
| 18754 | |
| 18755 | if (old->in_sleepable != cur->in_sleepable) |
| 18756 | return false; |
| 18757 | |
| 18758 | if (!refsafe(old, cur, &env->idmap_scratch)) |
| 18759 | return false; |
| 18760 | |
| 18761 | /* for states to be equal callsites have to be the same |
| 18762 | * and all frame states need to be equivalent |
| 18763 | */ |
| 18764 | for (i = 0; i <= old->curframe; i++) { |
| 18765 | insn_idx = i == old->curframe |
| 18766 | ? env->insn_idx |
| 18767 | : old->frame[i + 1]->callsite; |
| 18768 | if (old->frame[i]->callsite != cur->frame[i]->callsite) |
| 18769 | return false; |
| 18770 | if (!func_states_equal(env, old->frame[i], cur->frame[i], insn_idx, exact)) |
| 18771 | return false; |
| 18772 | } |
| 18773 | return true; |
| 18774 | } |
| 18775 | |
| 18776 | /* Return 0 if no propagation happened. Return negative error code if error |
| 18777 | * happened. Otherwise, return the propagated bit. |
| 18778 | */ |
| 18779 | static int propagate_liveness_reg(struct bpf_verifier_env *env, |
| 18780 | struct bpf_reg_state *reg, |
| 18781 | struct bpf_reg_state *parent_reg) |
| 18782 | { |
| 18783 | u8 parent_flag = parent_reg->live & REG_LIVE_READ; |
| 18784 | u8 flag = reg->live & REG_LIVE_READ; |
| 18785 | int err; |
| 18786 | |
	/* When we get here, the read flags of PARENT_REG or REG could be any of
	 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need
	 * for propagation if PARENT_REG already has the strongest flag, REG_LIVE_READ64.
| 18790 | */ |
| 18791 | if (parent_flag == REG_LIVE_READ64 || |
| 18792 | /* Or if there is no read flag from REG. */ |
| 18793 | !flag || |
| 18794 | /* Or if the read flag from REG is the same as PARENT_REG. */ |
| 18795 | parent_flag == flag) |
| 18796 | return 0; |
| 18797 | |
| 18798 | err = mark_reg_read(env, reg, parent_reg, flag); |
| 18799 | if (err) |
| 18800 | return err; |
| 18801 | |
| 18802 | return flag; |
| 18803 | } |
| 18804 | |
| 18805 | /* A write screens off any subsequent reads; but write marks come from the |
| 18806 | * straight-line code between a state and its parent. When we arrive at an |
| 18807 | * equivalent state (jump target or such) we didn't arrive by the straight-line |
| 18808 | * code, so read marks in the state must propagate to the parent regardless |
| 18809 | * of the state's write marks. That's what 'parent == state->parent' comparison |
| 18810 | * in mark_reg_read() is for. |
| 18811 | */ |
| 18812 | static int propagate_liveness(struct bpf_verifier_env *env, |
| 18813 | const struct bpf_verifier_state *vstate, |
| 18814 | struct bpf_verifier_state *vparent) |
| 18815 | { |
| 18816 | struct bpf_reg_state *state_reg, *parent_reg; |
| 18817 | struct bpf_func_state *state, *parent; |
| 18818 | int i, frame, err = 0; |
| 18819 | |
| 18820 | if (vparent->curframe != vstate->curframe) { |
| 18821 | WARN(1, "propagate_live: parent frame %d current frame %d\n", |
| 18822 | vparent->curframe, vstate->curframe); |
| 18823 | return -EFAULT; |
| 18824 | } |
| 18825 | /* Propagate read liveness of registers... */ |
| 18826 | BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); |
| 18827 | for (frame = 0; frame <= vstate->curframe; frame++) { |
| 18828 | parent = vparent->frame[frame]; |
| 18829 | state = vstate->frame[frame]; |
| 18830 | parent_reg = parent->regs; |
| 18831 | state_reg = state->regs; |
| 18832 | /* We don't need to worry about FP liveness, it's read-only */ |
| 18833 | for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { |
| 18834 | err = propagate_liveness_reg(env, &state_reg[i], |
| 18835 | &parent_reg[i]); |
| 18836 | if (err < 0) |
| 18837 | return err; |
| 18838 | if (err == REG_LIVE_READ64) |
| 18839 | mark_insn_zext(env, &parent_reg[i]); |
| 18840 | } |
| 18841 | |
| 18842 | /* Propagate stack slots. */ |
| 18843 | for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && |
| 18844 | i < parent->allocated_stack / BPF_REG_SIZE; i++) { |
| 18845 | parent_reg = &parent->stack[i].spilled_ptr; |
| 18846 | state_reg = &state->stack[i].spilled_ptr; |
| 18847 | err = propagate_liveness_reg(env, state_reg, |
| 18848 | parent_reg); |
| 18849 | if (err < 0) |
| 18850 | return err; |
| 18851 | } |
| 18852 | } |
| 18853 | return 0; |
| 18854 | } |
| 18855 | |
| 18856 | /* find precise scalars in the previous equivalent state and |
| 18857 | * propagate them into the current state |
| 18858 | */ |
| 18859 | static int propagate_precision(struct bpf_verifier_env *env, |
| 18860 | const struct bpf_verifier_state *old) |
| 18861 | { |
| 18862 | struct bpf_reg_state *state_reg; |
| 18863 | struct bpf_func_state *state; |
| 18864 | int i, err = 0, fr; |
| 18865 | bool first; |
| 18866 | |
| 18867 | for (fr = old->curframe; fr >= 0; fr--) { |
| 18868 | state = old->frame[fr]; |
| 18869 | state_reg = state->regs; |
| 18870 | first = true; |
| 18871 | for (i = 0; i < BPF_REG_FP; i++, state_reg++) { |
| 18872 | if (state_reg->type != SCALAR_VALUE || |
| 18873 | !state_reg->precise || |
| 18874 | !(state_reg->live & REG_LIVE_READ)) |
| 18875 | continue; |
| 18876 | if (env->log.level & BPF_LOG_LEVEL2) { |
| 18877 | if (first) |
| 18878 | verbose(env, "frame %d: propagating r%d", fr, i); |
| 18879 | else |
| 18880 | verbose(env, ",r%d", i); |
| 18881 | } |
| 18882 | bt_set_frame_reg(&env->bt, fr, i); |
| 18883 | first = false; |
| 18884 | } |
| 18885 | |
| 18886 | for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { |
| 18887 | if (!is_spilled_reg(&state->stack[i])) |
| 18888 | continue; |
| 18889 | state_reg = &state->stack[i].spilled_ptr; |
| 18890 | if (state_reg->type != SCALAR_VALUE || |
| 18891 | !state_reg->precise || |
| 18892 | !(state_reg->live & REG_LIVE_READ)) |
| 18893 | continue; |
| 18894 | if (env->log.level & BPF_LOG_LEVEL2) { |
| 18895 | if (first) |
| 18896 | verbose(env, "frame %d: propagating fp%d", |
| 18897 | fr, (-i - 1) * BPF_REG_SIZE); |
| 18898 | else |
| 18899 | verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE); |
| 18900 | } |
| 18901 | bt_set_frame_slot(&env->bt, fr, i); |
| 18902 | first = false; |
| 18903 | } |
| 18904 | if (!first) |
| 18905 | verbose(env, "\n"); |
| 18906 | } |
| 18907 | |
| 18908 | err = mark_chain_precision_batch(env); |
| 18909 | if (err < 0) |
| 18910 | return err; |
| 18911 | |
| 18912 | return 0; |
| 18913 | } |
| 18914 | |
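/* A cheap looping heuristic (illustrative summary): states might be looping
 * if, in the current frame, every register compares equal byte-for-byte up to
 * the 'parent' field, i.e. ignoring 'parent' and the bookkeeping fields that
 * follow it. This over-approximates, so the caller additionally requires
 * states_equal(..., EXACT) to hold before declaring an infinite loop.
 */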
| 18915 | static bool states_maybe_looping(struct bpf_verifier_state *old, |
| 18916 | struct bpf_verifier_state *cur) |
| 18917 | { |
| 18918 | struct bpf_func_state *fold, *fcur; |
| 18919 | int i, fr = cur->curframe; |
| 18920 | |
| 18921 | if (old->curframe != fr) |
| 18922 | return false; |
| 18923 | |
| 18924 | fold = old->frame[fr]; |
| 18925 | fcur = cur->frame[fr]; |
| 18926 | for (i = 0; i < MAX_BPF_REG; i++) |
| 18927 | if (memcmp(&fold->regs[i], &fcur->regs[i], |
| 18928 | offsetof(struct bpf_reg_state, parent))) |
| 18929 | return false; |
| 18930 | return true; |
| 18931 | } |
| 18932 | |
| 18933 | static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx) |
| 18934 | { |
| 18935 | return env->insn_aux_data[insn_idx].is_iter_next; |
| 18936 | } |
| 18937 | |
| 18938 | /* is_state_visited() handles iter_next() (see process_iter_next_call() for |
| 18939 | * terminology) calls specially: as opposed to bounded BPF loops, it *expects* |
| 18940 | * states to match, which otherwise would look like an infinite loop. So while |
| 18941 | * iter_next() calls are taken care of, we still need to be careful and |
 * prevent an erroneous and overly eager declaration of "infinite loop" when
| 18943 | * iterators are involved. |
| 18944 | * |
| 18945 | * Here's a situation in pseudo-BPF assembly form: |
| 18946 | * |
| 18947 | * 0: again: ; set up iter_next() call args |
| 18948 | * 1: r1 = &it ; <CHECKPOINT HERE> |
| 18949 | * 2: call bpf_iter_num_next ; this is iter_next() call |
| 18950 | * 3: if r0 == 0 goto done |
| 18951 | * 4: ... something useful here ... |
| 18952 | * 5: goto again ; another iteration |
| 18953 | * 6: done: |
| 18954 | * 7: r1 = &it |
| 18955 | * 8: call bpf_iter_num_destroy ; clean up iter state |
| 18956 | * 9: exit |
| 18957 | * |
| 18958 | * This is a typical loop. Let's assume that we have a prune point at 1:, |
| 18959 | * before we get to `call bpf_iter_num_next` (e.g., because of that `goto |
 * again`, assuming other heuristics don't get in the way).
| 18961 | * |
 * The first time we come to 1:, let's say we have some state X. We proceed
| 18963 | * to 2:, fork states, enqueue ACTIVE, validate NULL case successfully, exit. |
| 18964 | * Now we come back to validate that forked ACTIVE state. We proceed through |
| 18965 | * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we |
| 18966 | * are converging. But the problem is that we don't know that yet, as this |
| 18967 | * convergence has to happen at iter_next() call site only. So if nothing is |
| 18968 | * done, at 1: verifier will use bounded loop logic and declare infinite |
| 18969 | * looping (and would be *technically* correct, if not for iterator's |
| 18970 | * "eventual sticky NULL" contract, see process_iter_next_call()). But we |
 * don't want that. So in process_iter_next_call(), when we go on another
 * ACTIVE iteration, we bump slot->iter.depth to mark that it's a different
 * iteration. So when we suspect an infinite loop, we additionally
| 18974 | * check if any of the *ACTIVE* iterator states depths differ. If yes, we |
| 18975 | * pretend we are not looping and wait for next iter_next() call. |
| 18976 | * |
| 18977 | * This only applies to ACTIVE state. In DRAINED state we don't expect to |
| 18978 | * loop, because that would actually mean infinite loop, as DRAINED state is |
| 18979 | * "sticky", and so we'll keep returning into the same instruction with the |
| 18980 | * same state (at least in one of possible code paths). |
| 18981 | * |
 * This approach allows us to keep the infinite loop heuristic even in the face
 * of an active iterator. E.g., the C snippet below is and will be detected as
 * infinitely looping:
| 18985 | * |
| 18986 | * struct bpf_iter_num it; |
| 18987 | * int *p, x; |
| 18988 | * |
| 18989 | * bpf_iter_num_new(&it, 0, 10); |
 *	while ((p = bpf_iter_num_next(&it))) {
 *		x = *p;
| 18992 | * while (x--) {} // <<-- infinite loop here |
| 18993 | * } |
| 18994 | * |
| 18995 | */ |
| 18996 | static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur) |
| 18997 | { |
| 18998 | struct bpf_reg_state *slot, *cur_slot; |
| 18999 | struct bpf_func_state *state; |
| 19000 | int i, fr; |
| 19001 | |
| 19002 | for (fr = old->curframe; fr >= 0; fr--) { |
| 19003 | state = old->frame[fr]; |
| 19004 | for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { |
| 19005 | if (state->stack[i].slot_type[0] != STACK_ITER) |
| 19006 | continue; |
| 19007 | |
| 19008 | slot = &state->stack[i].spilled_ptr; |
| 19009 | if (slot->iter.state != BPF_ITER_STATE_ACTIVE) |
| 19010 | continue; |
| 19011 | |
| 19012 | cur_slot = &cur->frame[fr]->stack[i].spilled_ptr; |
| 19013 | if (cur_slot->iter.depth != slot->iter.depth) |
| 19014 | return true; |
| 19015 | } |
| 19016 | } |
| 19017 | return false; |
| 19018 | } |
| 19019 | |
| 19020 | static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) |
| 19021 | { |
| 19022 | struct bpf_verifier_state_list *new_sl; |
| 19023 | struct bpf_verifier_state_list *sl; |
| 19024 | struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry; |
| 19025 | int i, j, n, err, states_cnt = 0; |
| 19026 | bool force_new_state, add_new_state, force_exact; |
| 19027 | struct list_head *pos, *tmp, *head; |
| 19028 | |
| 19029 | force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx) || |
| 19030 | /* Avoid accumulating infinitely long jmp history */ |
| 19031 | cur->insn_hist_end - cur->insn_hist_start > 40; |
| 19032 | |
	/* bpf progs typically have a pruning point every 4 instructions
	 * http://vger.kernel.org/bpfconf2019.html#session-1
	 * Do not add a new state for future pruning if the verifier hasn't seen
	 * at least 2 jumps and at least 8 instructions.
	 * This heuristic helps decrease the 'total_states' and 'peak_states' metrics.
	 * In tests that amounts to up to a 50% reduction in total verifier
	 * memory consumption and a 20% verifier time speedup.
| 19040 | */ |
| 19041 | add_new_state = force_new_state; |
| 19042 | if (env->jmps_processed - env->prev_jmps_processed >= 2 && |
| 19043 | env->insn_processed - env->prev_insn_processed >= 8) |
| 19044 | add_new_state = true; |
| 19045 | |
| 19046 | clean_live_states(env, insn_idx, cur); |
| 19047 | |
| 19048 | head = explored_state(env, insn_idx); |
| 19049 | list_for_each_safe(pos, tmp, head) { |
| 19050 | sl = container_of(pos, struct bpf_verifier_state_list, node); |
| 19051 | states_cnt++; |
| 19052 | if (sl->state.insn_idx != insn_idx) |
| 19053 | continue; |
| 19054 | |
| 19055 | if (sl->state.branches) { |
| 19056 | struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; |
| 19057 | |
| 19058 | if (frame->in_async_callback_fn && |
| 19059 | frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) { |
| 19060 | /* Different async_entry_cnt means that the verifier is |
| 19061 | * processing another entry into async callback. |
| 19062 | * Seeing the same state is not an indication of infinite |
| 19063 | * loop or infinite recursion. |
| 19064 | * But finding the same state doesn't mean that it's safe |
| 19065 | * to stop processing the current state. The previous state |
| 19066 | * hasn't yet reached bpf_exit, since state.branches > 0. |
				 * Checking in_async_callback_fn alone is not enough either,
				 * since the verifier still needs to catch infinite loops
				 * inside async callbacks.
| 19070 | */ |
| 19071 | goto skip_inf_loop_check; |
| 19072 | } |
| 19073 | /* BPF open-coded iterators loop detection is special. |
| 19074 | * states_maybe_looping() logic is too simplistic in detecting |
| 19075 | * states that *might* be equivalent, because it doesn't know |
| 19076 | * about ID remapping, so don't even perform it. |
| 19077 | * See process_iter_next_call() and iter_active_depths_differ() |
			 * for an overview of the logic. When the current state and one of
			 * the parent states are detected as equivalent, it's a good thing:
			 * we prove convergence and can stop simulating further iterations.
			 * It's safe to assume that the iterator loop will finish, taking
			 * into account the iter_next() contract of eventually returning
			 * a sticky NULL result.
| 19084 | * |
			 * Note that states have to be compared exactly in this case because
| 19086 | * read and precision marks might not be finalized inside the loop. |
| 19087 | * E.g. as in the program below: |
| 19088 | * |
| 19089 | * 1. r7 = -16 |
| 19090 | * 2. r6 = bpf_get_prandom_u32() |
| 19091 | * 3. while (bpf_iter_num_next(&fp[-8])) { |
| 19092 | * 4. if (r6 != 42) { |
| 19093 | * 5. r7 = -32 |
| 19094 | * 6. r6 = bpf_get_prandom_u32() |
| 19095 | * 7. continue |
| 19096 | * 8. } |
| 19097 | * 9. r0 = r10 |
| 19098 | * 10. r0 += r7 |
| 19099 | * 11. r8 = *(u64 *)(r0 + 0) |
| 19100 | * 12. r6 = bpf_get_prandom_u32() |
| 19101 | * 13. } |
| 19102 | * |
			 * Here the verifier would first visit path 1-3, create a checkpoint at 3
			 * with r7=-16, and continue to 4-7,3. The existing checkpoint at 3 does
			 * not have a read or precision mark for r7 yet, thus an inexact states
			 * comparison would discard the current state with r7=-32
			 * => the unsafe memory access at 11 would not be caught.
| 19108 | */ |
| 19109 | if (is_iter_next_insn(env, insn_idx)) { |
| 19110 | if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) { |
| 19111 | struct bpf_func_state *cur_frame; |
| 19112 | struct bpf_reg_state *iter_state, *iter_reg; |
| 19113 | int spi; |
| 19114 | |
| 19115 | cur_frame = cur->frame[cur->curframe]; |
| 19116 | /* btf_check_iter_kfuncs() enforces that |
| 19117 | * iter state pointer is always the first arg |
| 19118 | */ |
| 19119 | iter_reg = &cur_frame->regs[BPF_REG_1]; |
| 19120 | /* current state is valid due to states_equal(), |
| 19121 | * so we can assume valid iter and reg state, |
| 19122 | * no need for extra (re-)validations |
| 19123 | */ |
| 19124 | spi = __get_spi(iter_reg->off + iter_reg->var_off.value); |
| 19125 | iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr; |
| 19126 | if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) { |
| 19127 | update_loop_entry(env, cur, &sl->state); |
| 19128 | goto hit; |
| 19129 | } |
| 19130 | } |
| 19131 | goto skip_inf_loop_check; |
| 19132 | } |
| 19133 | if (is_may_goto_insn_at(env, insn_idx)) { |
| 19134 | if (sl->state.may_goto_depth != cur->may_goto_depth && |
| 19135 | states_equal(env, &sl->state, cur, RANGE_WITHIN)) { |
| 19136 | update_loop_entry(env, cur, &sl->state); |
| 19137 | goto hit; |
| 19138 | } |
| 19139 | } |
| 19140 | if (calls_callback(env, insn_idx)) { |
| 19141 | if (states_equal(env, &sl->state, cur, RANGE_WITHIN)) |
| 19142 | goto hit; |
| 19143 | goto skip_inf_loop_check; |
| 19144 | } |
| 19145 | /* attempt to detect infinite loop to avoid unnecessary doomed work */ |
| 19146 | if (states_maybe_looping(&sl->state, cur) && |
| 19147 | states_equal(env, &sl->state, cur, EXACT) && |
| 19148 | !iter_active_depths_differ(&sl->state, cur) && |
| 19149 | sl->state.may_goto_depth == cur->may_goto_depth && |
| 19150 | sl->state.callback_unroll_depth == cur->callback_unroll_depth) { |
| 19151 | verbose_linfo(env, insn_idx, "; "); |
| 19152 | verbose(env, "infinite loop detected at insn %d\n", insn_idx); |
| 19153 | verbose(env, "cur state:"); |
| 19154 | print_verifier_state(env, cur, cur->curframe, true); |
| 19155 | verbose(env, "old state:"); |
| 19156 | print_verifier_state(env, &sl->state, cur->curframe, true); |
| 19157 | return -EINVAL; |
| 19158 | } |
			/* if the verifier is processing a loop, avoid adding a new state
			 * too often, since different loop iterations have distinct
			 * states and may not help future pruning.
			 * This threshold shouldn't be too low, so that a loop with a
			 * large bound is still rejected quickly.
| 19164 | * The most abusive loop will be: |
| 19165 | * r1 += 1 |
| 19166 | * if r1 < 1000000 goto pc-2 |
			 * 1M insn_processed limit / 100 == 10k peak states.
| 19168 | * This threshold shouldn't be too high either, since states |
| 19169 | * at the end of the loop are likely to be useful in pruning. |
| 19170 | */ |
| 19171 | skip_inf_loop_check: |
| 19172 | if (!force_new_state && |
| 19173 | env->jmps_processed - env->prev_jmps_processed < 20 && |
| 19174 | env->insn_processed - env->prev_insn_processed < 100) |
| 19175 | add_new_state = false; |
| 19176 | goto miss; |
| 19177 | } |
| 19178 | /* If sl->state is a part of a loop and this loop's entry is a part of |
| 19179 | * current verification path then states have to be compared exactly. |
| 19180 | * 'force_exact' is needed to catch the following case: |
| 19181 | * |
| 19182 | * initial Here state 'succ' was processed first, |
| 19183 | * | it was eventually tracked to produce a |
| 19184 | * V state identical to 'hdr'. |
| 19185 | * .---------> hdr All branches from 'succ' had been explored |
| 19186 | * | | and thus 'succ' has its .branches == 0. |
| 19187 | * | V |
| 19188 | * | .------... Suppose states 'cur' and 'succ' correspond |
| 19189 | * | | | to the same instruction + callsites. |
| 19190 | * | V V In such case it is necessary to check |
| 19191 | * | ... ... if 'succ' and 'cur' are states_equal(). |
| 19192 | * | | | If 'succ' and 'cur' are a part of the |
| 19193 | * | V V same loop exact flag has to be set. |
| 19194 | * | succ <- cur To check if that is the case, verify |
| 19195 | * | | if loop entry of 'succ' is in current |
| 19196 | * | V DFS path. |
| 19197 | * | ... |
| 19198 | * | | |
| 19199 | * '----' |
| 19200 | * |
| 19201 | * Additional details are in the comment before get_loop_entry(). |
| 19202 | */ |
| 19203 | loop_entry = get_loop_entry(env, &sl->state); |
| 19204 | if (IS_ERR(loop_entry)) |
| 19205 | return PTR_ERR(loop_entry); |
| 19206 | force_exact = loop_entry && loop_entry->branches > 0; |
| 19207 | if (states_equal(env, &sl->state, cur, force_exact ? RANGE_WITHIN : NOT_EXACT)) { |
| 19208 | if (force_exact) |
| 19209 | update_loop_entry(env, cur, loop_entry); |
| 19210 | hit: |
| 19211 | sl->hit_cnt++; |
| 19212 | /* reached equivalent register/stack state, |
| 19213 | * prune the search. |
| 19214 | * Registers read by the continuation are read by us. |
| 19215 | * If we have any write marks in env->cur_state, they |
| 19216 | * will prevent corresponding reads in the continuation |
| 19217 | * from reaching our parent (an explored_state). Our |
| 19218 | * own state will get the read marks recorded, but |
| 19219 | * they'll be immediately forgotten as we're pruning |
| 19220 | * this state and will pop a new one. |
| 19221 | */ |
| 19222 | err = propagate_liveness(env, &sl->state, cur); |
| 19223 | |
			/* if the previous state reached the exit with precision and
			 * the current state is equivalent to it (except for precision
			 * marks), the precision needs to be propagated back into
			 * the current state.
			 */
| 19229 | if (is_jmp_point(env, env->insn_idx)) |
| 19230 | err = err ? : push_insn_history(env, cur, 0, 0); |
| 19231 | err = err ? : propagate_precision(env, &sl->state); |
| 19232 | if (err) |
| 19233 | return err; |
| 19234 | return 1; |
| 19235 | } |
| 19236 | miss: |
		/* when a new state is not going to be added, do not increase the miss
		 * count. Otherwise several loop iterations will remove the state
| 19239 | * recorded earlier. The goal of these heuristics is to have |
| 19240 | * states from some iterations of the loop (some in the beginning |
| 19241 | * and some at the end) to help pruning. |
| 19242 | */ |
| 19243 | if (add_new_state) |
| 19244 | sl->miss_cnt++; |
| 19245 | /* heuristic to determine whether this state is beneficial |
| 19246 | * to keep checking from state equivalence point of view. |
| 19247 | * Higher numbers increase max_states_per_insn and verification time, |
| 19248 | * but do not meaningfully decrease insn_processed. |
| 19249 | * 'n' controls how many times state could miss before eviction. |
| 19250 | * Use bigger 'n' for checkpoints because evicting checkpoint states |
| 19251 | * too early would hinder iterator convergence. |
| 19252 | */ |
| 19253 | n = is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3; |
| 19254 | if (sl->miss_cnt > sl->hit_cnt * n + n) { |
| 19255 | /* the state is unlikely to be useful. Remove it to |
| 19256 | * speed up verification |
| 19257 | */ |
| 19258 | sl->in_free_list = true; |
| 19259 | list_del(&sl->node); |
| 19260 | list_add(&sl->node, &env->free_list); |
| 19261 | env->free_list_size++; |
| 19262 | env->explored_states_size--; |
| 19263 | maybe_free_verifier_state(env, sl); |
| 19264 | } |
| 19265 | } |
| 19266 | |
| 19267 | if (env->max_states_per_insn < states_cnt) |
| 19268 | env->max_states_per_insn = states_cnt; |
| 19269 | |
| 19270 | if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) |
| 19271 | return 0; |
| 19272 | |
| 19273 | if (!add_new_state) |
| 19274 | return 0; |
| 19275 | |
| 19276 | /* There were no equivalent states, remember the current one. |
| 19277 | * Technically the current state is not proven to be safe yet, |
	 * but it will either reach the outermost bpf_exit (which means it's safe)
| 19279 | * or it will be rejected. When there are no loops the verifier won't be |
| 19280 | * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) |
| 19281 | * again on the way to bpf_exit. |
| 19282 | * When looping the sl->state.branches will be > 0 and this state |
| 19283 | * will not be considered for equivalence until branches == 0. |
| 19284 | */ |
| 19285 | new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); |
| 19286 | if (!new_sl) |
| 19287 | return -ENOMEM; |
| 19288 | env->total_states++; |
| 19289 | env->explored_states_size++; |
| 19290 | update_peak_states(env); |
| 19291 | env->prev_jmps_processed = env->jmps_processed; |
| 19292 | env->prev_insn_processed = env->insn_processed; |
| 19293 | |
| 19294 | /* forget precise markings we inherited, see __mark_chain_precision */ |
| 19295 | if (env->bpf_capable) |
| 19296 | mark_all_scalars_imprecise(env, cur); |
| 19297 | |
| 19298 | /* add new state to the head of linked list */ |
| 19299 | new = &new_sl->state; |
| 19300 | err = copy_verifier_state(new, cur); |
| 19301 | if (err) { |
| 19302 | free_verifier_state(new, false); |
| 19303 | kfree(new_sl); |
| 19304 | return err; |
| 19305 | } |
| 19306 | new->insn_idx = insn_idx; |
| 19307 | WARN_ONCE(new->branches != 1, |
| 19308 | "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); |
| 19309 | |
| 19310 | cur->parent = new; |
| 19311 | cur->first_insn_idx = insn_idx; |
| 19312 | cur->insn_hist_start = cur->insn_hist_end; |
| 19313 | cur->dfs_depth = new->dfs_depth + 1; |
| 19314 | list_add(&new_sl->node, head); |
| 19315 | |
| 19316 | /* connect new state to parentage chain. Current frame needs all |
| 19317 | * registers connected. Only r6 - r9 of the callers are alive (pushed |
| 19318 | * to the stack implicitly by JITs) so in callers' frames connect just |
| 19319 | * r6 - r9 as an optimization. Callers will have r1 - r5 connected to |
| 19320 | * the state of the call instruction (with WRITTEN set), and r0 comes |
| 19321 | * from callee with its full parentage chain, anyway. |
| 19322 | */ |
| 19323 | /* clear write marks in current state: the writes we did are not writes |
| 19324 | * our child did, so they don't screen off its reads from us. |
| 19325 | * (There are no read marks in current state, because reads always mark |
| 19326 | * their parent and current state never has children yet. Only |
| 19327 | * explored_states can get read marks.) |
| 19328 | */ |
| 19329 | for (j = 0; j <= cur->curframe; j++) { |
| 19330 | for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) |
| 19331 | cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; |
| 19332 | for (i = 0; i < BPF_REG_FP; i++) |
| 19333 | cur->frame[j]->regs[i].live = REG_LIVE_NONE; |
| 19334 | } |
| 19335 | |
| 19336 | /* all stack frames are accessible from callee, clear them all */ |
| 19337 | for (j = 0; j <= cur->curframe; j++) { |
| 19338 | struct bpf_func_state *frame = cur->frame[j]; |
| 19339 | struct bpf_func_state *newframe = new->frame[j]; |
| 19340 | |
| 19341 | for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { |
| 19342 | frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; |
| 19343 | frame->stack[i].spilled_ptr.parent = |
| 19344 | &newframe->stack[i].spilled_ptr; |
| 19345 | } |
| 19346 | } |
| 19347 | return 0; |
| 19348 | } |
| 19349 | |
| 19350 | /* Return true if it's OK to have the same insn return a different type. */ |
| 19351 | static bool reg_type_mismatch_ok(enum bpf_reg_type type) |
| 19352 | { |
| 19353 | switch (base_type(type)) { |
| 19354 | case PTR_TO_CTX: |
| 19355 | case PTR_TO_SOCKET: |
| 19356 | case PTR_TO_SOCK_COMMON: |
| 19357 | case PTR_TO_TCP_SOCK: |
| 19358 | case PTR_TO_XDP_SOCK: |
| 19359 | case PTR_TO_BTF_ID: |
| 19360 | case PTR_TO_ARENA: |
| 19361 | return false; |
| 19362 | default: |
| 19363 | return true; |
| 19364 | } |
| 19365 | } |
| 19366 | |
| 19367 | /* If an instruction was previously used with particular pointer types, then we |
 * need to be careful to avoid cases such as the one below, where it may be ok
 * for one branch to access the pointer, but not ok for the other branch:
| 19370 | * |
| 19371 | * R1 = sock_ptr |
| 19372 | * goto X; |
| 19373 | * ... |
| 19374 | * R1 = some_other_valid_ptr; |
| 19375 | * goto X; |
| 19376 | * ... |
| 19377 | * R2 = *(u32 *)(R1 + 0); |
| 19378 | */ |
| 19379 | static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) |
| 19380 | { |
| 19381 | return src != prev && (!reg_type_mismatch_ok(src) || |
| 19382 | !reg_type_mismatch_ok(prev)); |
| 19383 | } |
| 19384 | |
| 19385 | static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type, |
| 19386 | bool allow_trust_mismatch) |
| 19387 | { |
| 19388 | enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type; |
| 19389 | |
| 19390 | if (*prev_type == NOT_INIT) { |
| 19391 | /* Saw a valid insn |
| 19392 | * dst_reg = *(u32 *)(src_reg + off) |
| 19393 | * save type to validate intersecting paths |
| 19394 | */ |
| 19395 | *prev_type = type; |
| 19396 | } else if (reg_type_mismatch(type, *prev_type)) { |
| 19397 | /* Abuser program is trying to use the same insn |
| 19398 | * dst_reg = *(u32*) (src_reg + off) |
| 19399 | * with different pointer types: |
| 19400 | * src_reg == ctx in one branch and |
| 19401 | * src_reg == stack|map in some other branch. |
| 19402 | * Reject it. |
| 19403 | */ |
| 19404 | if (allow_trust_mismatch && |
| 19405 | base_type(type) == PTR_TO_BTF_ID && |
| 19406 | base_type(*prev_type) == PTR_TO_BTF_ID) { |
| 19407 | /* |
			 * Have to support a use case where one path through
			 * the program yields a TRUSTED pointer while another
			 * yields an UNTRUSTED one. Fall back to UNTRUSTED to
			 * generate BPF_PROBE_MEM/BPF_PROBE_MEMSX.
| 19412 | */ |
| 19413 | *prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED; |
| 19414 | } else { |
| 19415 | verbose(env, "same insn cannot be used with different pointers\n"); |
| 19416 | return -EINVAL; |
| 19417 | } |
| 19418 | } |
| 19419 | |
| 19420 | return 0; |
| 19421 | } |
| 19422 | |
| 19423 | static int do_check(struct bpf_verifier_env *env) |
| 19424 | { |
| 19425 | bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); |
| 19426 | struct bpf_verifier_state *state = env->cur_state; |
| 19427 | struct bpf_insn *insns = env->prog->insnsi; |
| 19428 | struct bpf_reg_state *regs; |
| 19429 | int insn_cnt = env->prog->len; |
| 19430 | bool do_print_state = false; |
| 19431 | int prev_insn_idx = -1; |
| 19432 | |
| 19433 | for (;;) { |
| 19434 | bool exception_exit = false; |
| 19435 | struct bpf_insn *insn; |
| 19436 | u8 class; |
| 19437 | int err; |
| 19438 | |
| 19439 | /* reset current history entry on each new instruction */ |
| 19440 | env->cur_hist_ent = NULL; |
| 19441 | |
| 19442 | env->prev_insn_idx = prev_insn_idx; |
| 19443 | if (env->insn_idx >= insn_cnt) { |
| 19444 | verbose(env, "invalid insn idx %d insn_cnt %d\n", |
| 19445 | env->insn_idx, insn_cnt); |
| 19446 | return -EFAULT; |
| 19447 | } |
| 19448 | |
| 19449 | insn = &insns[env->insn_idx]; |
| 19450 | class = BPF_CLASS(insn->code); |
| 19451 | |
| 19452 | if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { |
| 19453 | verbose(env, |
| 19454 | "BPF program is too large. Processed %d insn\n", |
| 19455 | env->insn_processed); |
| 19456 | return -E2BIG; |
| 19457 | } |
| 19458 | |
| 19459 | state->last_insn_idx = env->prev_insn_idx; |
| 19460 | |
| 19461 | if (is_prune_point(env, env->insn_idx)) { |
| 19462 | err = is_state_visited(env, env->insn_idx); |
| 19463 | if (err < 0) |
| 19464 | return err; |
| 19465 | if (err == 1) { |
| 19466 | /* found equivalent state, can prune the search */ |
| 19467 | if (env->log.level & BPF_LOG_LEVEL) { |
| 19468 | if (do_print_state) |
| 19469 | verbose(env, "\nfrom %d to %d%s: safe\n", |
| 19470 | env->prev_insn_idx, env->insn_idx, |
| 19471 | env->cur_state->speculative ? |
| 19472 | " (speculative execution)" : ""); |
| 19473 | else |
| 19474 | verbose(env, "%d: safe\n", env->insn_idx); |
| 19475 | } |
| 19476 | goto process_bpf_exit; |
| 19477 | } |
| 19478 | } |
| 19479 | |
| 19480 | if (is_jmp_point(env, env->insn_idx)) { |
| 19481 | err = push_insn_history(env, state, 0, 0); |
| 19482 | if (err) |
| 19483 | return err; |
| 19484 | } |
| 19485 | |
| 19486 | if (signal_pending(current)) |
| 19487 | return -EAGAIN; |
| 19488 | |
| 19489 | if (need_resched()) |
| 19490 | cond_resched(); |
| 19491 | |
| 19492 | if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) { |
| 19493 | verbose(env, "\nfrom %d to %d%s:", |
| 19494 | env->prev_insn_idx, env->insn_idx, |
| 19495 | env->cur_state->speculative ? |
| 19496 | " (speculative execution)" : ""); |
| 19497 | print_verifier_state(env, state, state->curframe, true); |
| 19498 | do_print_state = false; |
| 19499 | } |
| 19500 | |
| 19501 | if (env->log.level & BPF_LOG_LEVEL) { |
| 19502 | if (verifier_state_scratched(env)) |
| 19503 | print_insn_state(env, state, state->curframe); |
| 19504 | |
| 19505 | verbose_linfo(env, env->insn_idx, "; "); |
| 19506 | env->prev_log_pos = env->log.end_pos; |
| 19507 | verbose(env, "%d: ", env->insn_idx); |
| 19508 | verbose_insn(env, insn); |
| 19509 | env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos; |
| 19510 | env->prev_log_pos = env->log.end_pos; |
| 19511 | } |
| 19512 | |
| 19513 | if (bpf_prog_is_offloaded(env->prog->aux)) { |
| 19514 | err = bpf_prog_offload_verify_insn(env, env->insn_idx, |
| 19515 | env->prev_insn_idx); |
| 19516 | if (err) |
| 19517 | return err; |
| 19518 | } |
| 19519 | |
| 19520 | regs = cur_regs(env); |
| 19521 | sanitize_mark_insn_seen(env); |
| 19522 | prev_insn_idx = env->insn_idx; |
| 19523 | |
| 19524 | if (class == BPF_ALU || class == BPF_ALU64) { |
| 19525 | err = check_alu_op(env, insn); |
| 19526 | if (err) |
| 19527 | return err; |
| 19528 | |
| 19529 | } else if (class == BPF_LDX) { |
| 19530 | bool is_ldsx = BPF_MODE(insn->code) == BPF_MEMSX; |
| 19531 | |
| 19532 | /* Check for reserved fields is already done in |
| 19533 | * resolve_pseudo_ldimm64(). |
| 19534 | */ |
| 19535 | err = check_load_mem(env, insn, false, is_ldsx, true, |
| 19536 | "ldx"); |
| 19537 | if (err) |
| 19538 | return err; |
| 19539 | } else if (class == BPF_STX) { |
| 19540 | if (BPF_MODE(insn->code) == BPF_ATOMIC) { |
| 19541 | err = check_atomic(env, insn); |
| 19542 | if (err) |
| 19543 | return err; |
| 19544 | env->insn_idx++; |
| 19545 | continue; |
| 19546 | } |
| 19547 | |
| 19548 | if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) { |
| 19549 | verbose(env, "BPF_STX uses reserved fields\n"); |
| 19550 | return -EINVAL; |
| 19551 | } |
| 19552 | |
| 19553 | err = check_store_reg(env, insn, false); |
| 19554 | if (err) |
| 19555 | return err; |
| 19556 | } else if (class == BPF_ST) { |
| 19557 | enum bpf_reg_type dst_reg_type; |
| 19558 | |
| 19559 | if (BPF_MODE(insn->code) != BPF_MEM || |
| 19560 | insn->src_reg != BPF_REG_0) { |
| 19561 | verbose(env, "BPF_ST uses reserved fields\n"); |
| 19562 | return -EINVAL; |
| 19563 | } |
| 19564 | /* check src operand */ |
| 19565 | err = check_reg_arg(env, insn->dst_reg, SRC_OP); |
| 19566 | if (err) |
| 19567 | return err; |
| 19568 | |
| 19569 | dst_reg_type = regs[insn->dst_reg].type; |
| 19570 | |
| 19571 | /* check that memory (dst_reg + off) is writeable */ |
| 19572 | err = check_mem_access(env, env->insn_idx, insn->dst_reg, |
| 19573 | insn->off, BPF_SIZE(insn->code), |
| 19574 | BPF_WRITE, -1, false, false); |
| 19575 | if (err) |
| 19576 | return err; |
| 19577 | |
| 19578 | err = save_aux_ptr_type(env, dst_reg_type, false); |
| 19579 | if (err) |
| 19580 | return err; |
| 19581 | } else if (class == BPF_JMP || class == BPF_JMP32) { |
| 19582 | u8 opcode = BPF_OP(insn->code); |
| 19583 | |
| 19584 | env->jmps_processed++; |
| 19585 | if (opcode == BPF_CALL) { |
| 19586 | if (BPF_SRC(insn->code) != BPF_K || |
| 19587 | (insn->src_reg != BPF_PSEUDO_KFUNC_CALL |
| 19588 | && insn->off != 0) || |
| 19589 | (insn->src_reg != BPF_REG_0 && |
| 19590 | insn->src_reg != BPF_PSEUDO_CALL && |
| 19591 | insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || |
| 19592 | insn->dst_reg != BPF_REG_0 || |
| 19593 | class == BPF_JMP32) { |
| 19594 | verbose(env, "BPF_CALL uses reserved fields\n"); |
| 19595 | return -EINVAL; |
| 19596 | } |
| 19597 | |
| 19598 | if (env->cur_state->active_locks) { |
| 19599 | if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) || |
| 19600 | (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && |
| 19601 | (insn->off != 0 || !kfunc_spin_allowed(insn->imm)))) { |
| 19602 | verbose(env, "function calls are not allowed while holding a lock\n"); |
| 19603 | return -EINVAL; |
| 19604 | } |
| 19605 | } |
| 19606 | if (insn->src_reg == BPF_PSEUDO_CALL) { |
| 19607 | err = check_func_call(env, insn, &env->insn_idx); |
| 19608 | } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { |
| 19609 | err = check_kfunc_call(env, insn, &env->insn_idx); |
| 19610 | if (!err && is_bpf_throw_kfunc(insn)) { |
| 19611 | exception_exit = true; |
| 19612 | goto process_bpf_exit_full; |
| 19613 | } |
| 19614 | } else { |
| 19615 | err = check_helper_call(env, insn, &env->insn_idx); |
| 19616 | } |
| 19617 | if (err) |
| 19618 | return err; |
| 19619 | |
| 19620 | mark_reg_scratched(env, BPF_REG_0); |
| 19621 | } else if (opcode == BPF_JA) { |
| 19622 | if (BPF_SRC(insn->code) != BPF_K || |
| 19623 | insn->src_reg != BPF_REG_0 || |
| 19624 | insn->dst_reg != BPF_REG_0 || |
| 19625 | (class == BPF_JMP && insn->imm != 0) || |
| 19626 | (class == BPF_JMP32 && insn->off != 0)) { |
| 19627 | verbose(env, "BPF_JA uses reserved fields\n"); |
| 19628 | return -EINVAL; |
| 19629 | } |
| 19630 | |
| 19631 | if (class == BPF_JMP) |
| 19632 | env->insn_idx += insn->off + 1; |
| 19633 | else |
| 19634 | env->insn_idx += insn->imm + 1; |
| 19635 | continue; |
| 19636 | |
| 19637 | } else if (opcode == BPF_EXIT) { |
| 19638 | if (BPF_SRC(insn->code) != BPF_K || |
| 19639 | insn->imm != 0 || |
| 19640 | insn->src_reg != BPF_REG_0 || |
| 19641 | insn->dst_reg != BPF_REG_0 || |
| 19642 | class == BPF_JMP32) { |
| 19643 | verbose(env, "BPF_EXIT uses reserved fields\n"); |
| 19644 | return -EINVAL; |
| 19645 | } |
| 19646 | process_bpf_exit_full: |
				/* We must do check_resource_leak() here before
				 * prepare_func_exit() to handle the case when
				 * state->curframe > 0: it may be a callback
				 * function, for which reference_state must
				 * match the caller's reference state when it exits.
				 */
| 19653 | err = check_resource_leak(env, exception_exit, !env->cur_state->curframe, |
| 19654 | "BPF_EXIT instruction in main prog"); |
| 19655 | if (err) |
| 19656 | return err; |
| 19657 | |
				/* The prepare_func_exit() call that is being
				 * skipped here has the side effect of freeing
				 * bpf_func_state. Typically, process_bpf_exit
				 * will only be hit with the outermost exit.
				 * copy_verifier_state() in pop_stack() will
				 * handle freeing of any extra bpf_func_state
				 * left over from not processing all nested
				 * function exits. We also skip return code
				 * checks as they are not needed for exceptional
				 * exits.
| 19667 | */ |
| 19668 | if (exception_exit) |
| 19669 | goto process_bpf_exit; |
| 19670 | |
| 19671 | if (state->curframe) { |
| 19672 | /* exit from nested function */ |
| 19673 | err = prepare_func_exit(env, &env->insn_idx); |
| 19674 | if (err) |
| 19675 | return err; |
| 19676 | do_print_state = true; |
| 19677 | continue; |
| 19678 | } |
| 19679 | |
| 19680 | err = check_return_code(env, BPF_REG_0, "R0"); |
| 19681 | if (err) |
| 19682 | return err; |
| 19683 | process_bpf_exit: |
| 19684 | mark_verifier_state_scratched(env); |
| 19685 | update_branch_counts(env, env->cur_state); |
| 19686 | err = pop_stack(env, &prev_insn_idx, |
| 19687 | &env->insn_idx, pop_log); |
| 19688 | if (err < 0) { |
| 19689 | if (err != -ENOENT) |
| 19690 | return err; |
| 19691 | break; |
| 19692 | } else { |
| 19693 | if (verifier_bug_if(env->cur_state->loop_entry, env, |
| 19694 | "broken loop detection")) |
| 19695 | return -EFAULT; |
| 19696 | do_print_state = true; |
| 19697 | continue; |
| 19698 | } |
| 19699 | } else { |
| 19700 | err = check_cond_jmp_op(env, insn, &env->insn_idx); |
| 19701 | if (err) |
| 19702 | return err; |
| 19703 | } |
| 19704 | } else if (class == BPF_LD) { |
| 19705 | u8 mode = BPF_MODE(insn->code); |
| 19706 | |
| 19707 | if (mode == BPF_ABS || mode == BPF_IND) { |
| 19708 | err = check_ld_abs(env, insn); |
| 19709 | if (err) |
| 19710 | return err; |
| 19711 | |
| 19712 | } else if (mode == BPF_IMM) { |
| 19713 | err = check_ld_imm(env, insn); |
| 19714 | if (err) |
| 19715 | return err; |
| 19716 | |
| 19717 | env->insn_idx++; |
| 19718 | sanitize_mark_insn_seen(env); |
| 19719 | } else { |
| 19720 | verbose(env, "invalid BPF_LD mode\n"); |
| 19721 | return -EINVAL; |
| 19722 | } |
| 19723 | } else { |
| 19724 | verbose(env, "unknown insn class %d\n", class); |
| 19725 | return -EINVAL; |
| 19726 | } |
| 19727 | |
| 19728 | env->insn_idx++; |
| 19729 | } |
| 19730 | |
| 19731 | return 0; |
| 19732 | } |
| 19733 | |
| 19734 | static int find_btf_percpu_datasec(struct btf *btf) |
| 19735 | { |
| 19736 | const struct btf_type *t; |
| 19737 | const char *tname; |
| 19738 | int i, n; |
| 19739 | |
| 19740 | /* |
	 * Both vmlinux and each module have their own ".data..percpu"
	 * DATASEC in BTF. So in the module case, we need to skip vmlinux BTF
	 * types and look only at the module's own BTF types.
| 19744 | */ |
| 19745 | n = btf_nr_types(btf); |
| 19746 | if (btf_is_module(btf)) |
| 19747 | i = btf_nr_types(btf_vmlinux); |
| 19748 | else |
| 19749 | i = 1; |
| 19750 | |
	for (; i < n; i++) {
| 19752 | t = btf_type_by_id(btf, i); |
| 19753 | if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC) |
| 19754 | continue; |
| 19755 | |
| 19756 | tname = btf_name_by_offset(btf, t->name_off); |
| 19757 | if (!strcmp(tname, ".data..percpu")) |
| 19758 | return i; |
| 19759 | } |
| 19760 | |
| 19761 | return -ENOENT; |
| 19762 | } |
| 19763 | |
| 19764 | /* |
| 19765 | * Add btf to the used_btfs array and return the index. (If the btf was |
| 19766 | * already added, then just return the index.) Upon successful insertion |
| 19767 | * increase btf refcnt, and, if present, also refcount the corresponding |
| 19768 | * kernel module. |
| 19769 | */ |
| 19770 | static int __add_used_btf(struct bpf_verifier_env *env, struct btf *btf) |
| 19771 | { |
| 19772 | struct btf_mod_pair *btf_mod; |
| 19773 | int i; |
| 19774 | |
| 19775 | /* check whether we recorded this BTF (and maybe module) already */ |
| 19776 | for (i = 0; i < env->used_btf_cnt; i++) |
| 19777 | if (env->used_btfs[i].btf == btf) |
| 19778 | return i; |
| 19779 | |
| 19780 | if (env->used_btf_cnt >= MAX_USED_BTFS) |
| 19781 | return -E2BIG; |
| 19782 | |
| 19783 | btf_get(btf); |
| 19784 | |
| 19785 | btf_mod = &env->used_btfs[env->used_btf_cnt]; |
| 19786 | btf_mod->btf = btf; |
| 19787 | btf_mod->module = NULL; |
| 19788 | |
| 19789 | /* if we reference variables from kernel module, bump its refcount */ |
| 19790 | if (btf_is_module(btf)) { |
| 19791 | btf_mod->module = btf_try_get_module(btf); |
| 19792 | if (!btf_mod->module) { |
| 19793 | btf_put(btf); |
| 19794 | return -ENXIO; |
| 19795 | } |
| 19796 | } |
| 19797 | |
| 19798 | return env->used_btf_cnt++; |
| 19799 | } |
| 19800 | |
| 19801 | /* replace pseudo btf_id with kernel symbol address */ |
| 19802 | static int __check_pseudo_btf_id(struct bpf_verifier_env *env, |
| 19803 | struct bpf_insn *insn, |
| 19804 | struct bpf_insn_aux_data *aux, |
| 19805 | struct btf *btf) |
| 19806 | { |
| 19807 | const struct btf_var_secinfo *vsi; |
| 19808 | const struct btf_type *datasec; |
| 19809 | const struct btf_type *t; |
| 19810 | const char *sym_name; |
| 19811 | bool percpu = false; |
| 19812 | u32 type, id = insn->imm; |
| 19813 | s32 datasec_id; |
| 19814 | u64 addr; |
| 19815 | int i; |
| 19816 | |
| 19817 | t = btf_type_by_id(btf, id); |
| 19818 | if (!t) { |
| 19819 | verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id); |
| 19820 | return -ENOENT; |
| 19821 | } |
| 19822 | |
| 19823 | if (!btf_type_is_var(t) && !btf_type_is_func(t)) { |
| 19824 | verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id); |
| 19825 | return -EINVAL; |
| 19826 | } |
| 19827 | |
| 19828 | sym_name = btf_name_by_offset(btf, t->name_off); |
| 19829 | addr = kallsyms_lookup_name(sym_name); |
| 19830 | if (!addr) { |
| 19831 | verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n", |
| 19832 | sym_name); |
| 19833 | return -ENOENT; |
| 19834 | } |
| 19835 | insn[0].imm = (u32)addr; |
| 19836 | insn[1].imm = addr >> 32; |
| 19837 | |
| 19838 | if (btf_type_is_func(t)) { |
| 19839 | aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; |
| 19840 | aux->btf_var.mem_size = 0; |
| 19841 | return 0; |
| 19842 | } |
| 19843 | |
| 19844 | datasec_id = find_btf_percpu_datasec(btf); |
| 19845 | if (datasec_id > 0) { |
| 19846 | datasec = btf_type_by_id(btf, datasec_id); |
| 19847 | for_each_vsi(i, datasec, vsi) { |
| 19848 | if (vsi->type == id) { |
| 19849 | percpu = true; |
| 19850 | break; |
| 19851 | } |
| 19852 | } |
| 19853 | } |
| 19854 | |
| 19855 | type = t->type; |
| 19856 | t = btf_type_skip_modifiers(btf, type, NULL); |
| 19857 | if (percpu) { |
| 19858 | aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU; |
| 19859 | aux->btf_var.btf = btf; |
| 19860 | aux->btf_var.btf_id = type; |
| 19861 | } else if (!btf_type_is_struct(t)) { |
| 19862 | const struct btf_type *ret; |
| 19863 | const char *tname; |
| 19864 | u32 tsize; |
| 19865 | |
| 19866 | /* resolve the type size of ksym. */ |
| 19867 | ret = btf_resolve_size(btf, t, &tsize); |
| 19868 | if (IS_ERR(ret)) { |
| 19869 | tname = btf_name_by_offset(btf, t->name_off); |
| 19870 | verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n", |
| 19871 | tname, PTR_ERR(ret)); |
| 19872 | return -EINVAL; |
| 19873 | } |
| 19874 | aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; |
| 19875 | aux->btf_var.mem_size = tsize; |
| 19876 | } else { |
| 19877 | aux->btf_var.reg_type = PTR_TO_BTF_ID; |
| 19878 | aux->btf_var.btf = btf; |
| 19879 | aux->btf_var.btf_id = type; |
| 19880 | } |
| 19881 | |
| 19882 | return 0; |
| 19883 | } |
| 19884 | |
| 19885 | static int check_pseudo_btf_id(struct bpf_verifier_env *env, |
| 19886 | struct bpf_insn *insn, |
| 19887 | struct bpf_insn_aux_data *aux) |
| 19888 | { |
| 19889 | struct btf *btf; |
| 19890 | int btf_fd; |
| 19891 | int err; |
| 19892 | |
| 19893 | btf_fd = insn[1].imm; |
| 19894 | if (btf_fd) { |
| 19895 | CLASS(fd, f)(btf_fd); |
| 19896 | |
| 19897 | btf = __btf_get_by_fd(f); |
| 19898 | if (IS_ERR(btf)) { |
| 19899 | verbose(env, "invalid module BTF object FD specified.\n"); |
| 19900 | return -EINVAL; |
| 19901 | } |
| 19902 | } else { |
| 19903 | if (!btf_vmlinux) { |
| 19904 | verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n"); |
| 19905 | return -EINVAL; |
| 19906 | } |
| 19907 | btf = btf_vmlinux; |
| 19908 | } |
| 19909 | |
| 19910 | err = __check_pseudo_btf_id(env, insn, aux, btf); |
| 19911 | if (err) |
| 19912 | return err; |
| 19913 | |
| 19914 | err = __add_used_btf(env, btf); |
| 19915 | if (err < 0) |
| 19916 | return err; |
| 19917 | return 0; |
| 19918 | } |
| 19919 | |
| 19920 | static bool is_tracing_prog_type(enum bpf_prog_type type) |
| 19921 | { |
| 19922 | switch (type) { |
| 19923 | case BPF_PROG_TYPE_KPROBE: |
| 19924 | case BPF_PROG_TYPE_TRACEPOINT: |
| 19925 | case BPF_PROG_TYPE_PERF_EVENT: |
| 19926 | case BPF_PROG_TYPE_RAW_TRACEPOINT: |
| 19927 | case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: |
| 19928 | return true; |
| 19929 | default: |
| 19930 | return false; |
| 19931 | } |
| 19932 | } |
| 19933 | |
| 19934 | static bool bpf_map_is_cgroup_storage(struct bpf_map *map) |
| 19935 | { |
| 19936 | return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || |
| 19937 | map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); |
| 19938 | } |
| 19939 | |
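| | /* Check that 'map' is compatible with 'prog': reject map features (spin_lock, |
| | * timer, wq, list_head/rb_root, ...) that the program type cannot use, enforce |
| | * offload matching, restrict sleepable programs to supported map types and |
| | * validate arena and cgroup storage constraints. |
| | */ |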
| 19940 | static int check_map_prog_compatibility(struct bpf_verifier_env *env, |
| 19941 | struct bpf_map *map, |
| 19942 | struct bpf_prog *prog) |
| 19943 | |
| 19944 | { |
| 19945 | enum bpf_prog_type prog_type = resolve_prog_type(prog); |
| 19946 | |
| 19947 | if (btf_record_has_field(map->record, BPF_LIST_HEAD) || |
| 19948 | btf_record_has_field(map->record, BPF_RB_ROOT)) { |
| 19949 | if (is_tracing_prog_type(prog_type)) { |
| 19950 | verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n"); |
| 19951 | return -EINVAL; |
| 19952 | } |
| 19953 | } |
| 19954 | |
| 19955 | if (btf_record_has_field(map->record, BPF_SPIN_LOCK | BPF_RES_SPIN_LOCK)) { |
| 19956 | if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) { |
| 19957 | verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n"); |
| 19958 | return -EINVAL; |
| 19959 | } |
| 19960 | |
| 19961 | if (is_tracing_prog_type(prog_type)) { |
| 19962 | verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); |
| 19963 | return -EINVAL; |
| 19964 | } |
| 19965 | } |
| 19966 | |
| 19967 | if (btf_record_has_field(map->record, BPF_TIMER)) { |
| 19968 | if (is_tracing_prog_type(prog_type)) { |
| 19969 | verbose(env, "tracing progs cannot use bpf_timer yet\n"); |
| 19970 | return -EINVAL; |
| 19971 | } |
| 19972 | } |
| 19973 | |
| 19974 | if (btf_record_has_field(map->record, BPF_WORKQUEUE)) { |
| 19975 | if (is_tracing_prog_type(prog_type)) { |
| 19976 | verbose(env, "tracing progs cannot use bpf_wq yet\n"); |
| 19977 | return -EINVAL; |
| 19978 | } |
| 19979 | } |
| 19980 | |
| 19981 | if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) && |
| 19982 | !bpf_offload_prog_map_match(prog, map)) { |
| 19983 | verbose(env, "offload device mismatch between prog and map\n"); |
| 19984 | return -EINVAL; |
| 19985 | } |
| 19986 | |
| 19987 | if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { |
| 19988 | verbose(env, "bpf_struct_ops map cannot be used in prog\n"); |
| 19989 | return -EINVAL; |
| 19990 | } |
| 19991 | |
| 19992 | if (prog->sleepable) |
| 19993 | switch (map->map_type) { |
| 19994 | case BPF_MAP_TYPE_HASH: |
| 19995 | case BPF_MAP_TYPE_LRU_HASH: |
| 19996 | case BPF_MAP_TYPE_ARRAY: |
| 19997 | case BPF_MAP_TYPE_PERCPU_HASH: |
| 19998 | case BPF_MAP_TYPE_PERCPU_ARRAY: |
| 19999 | case BPF_MAP_TYPE_LRU_PERCPU_HASH: |
| 20000 | case BPF_MAP_TYPE_ARRAY_OF_MAPS: |
| 20001 | case BPF_MAP_TYPE_HASH_OF_MAPS: |
| 20002 | case BPF_MAP_TYPE_RINGBUF: |
| 20003 | case BPF_MAP_TYPE_USER_RINGBUF: |
| 20004 | case BPF_MAP_TYPE_INODE_STORAGE: |
| 20005 | case BPF_MAP_TYPE_SK_STORAGE: |
| 20006 | case BPF_MAP_TYPE_TASK_STORAGE: |
| 20007 | case BPF_MAP_TYPE_CGRP_STORAGE: |
| 20008 | case BPF_MAP_TYPE_QUEUE: |
| 20009 | case BPF_MAP_TYPE_STACK: |
| 20010 | case BPF_MAP_TYPE_ARENA: |
| 20011 | break; |
| 20012 | default: |
| 20013 | verbose(env, |
| 20014 | "Sleepable programs can only use array, hash, ringbuf and local storage maps\n"); |
| 20015 | return -EINVAL; |
| 20016 | } |
| 20017 | |
| 20018 | if (bpf_map_is_cgroup_storage(map) && |
| 20019 | bpf_cgroup_storage_assign(env->prog->aux, map)) { |
| 20020 | verbose(env, "only one cgroup storage of each type is allowed\n"); |
| 20021 | return -EBUSY; |
| 20022 | } |
| 20023 | |
| 20024 | if (map->map_type == BPF_MAP_TYPE_ARENA) { |
| 20025 | if (env->prog->aux->arena) { |
| 20026 | verbose(env, "Only one arena per program\n"); |
| 20027 | return -EBUSY; |
| 20028 | } |
| 20029 | if (!env->allow_ptr_leaks || !env->bpf_capable) { |
| 20030 | verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n"); |
| 20031 | return -EPERM; |
| 20032 | } |
| 20033 | if (!env->prog->jit_requested) { |
| 20034 | verbose(env, "JIT is required to use arena\n"); |
| 20035 | return -EOPNOTSUPP; |
| 20036 | } |
| 20037 | if (!bpf_jit_supports_arena()) { |
| 20038 | verbose(env, "JIT doesn't support arena\n"); |
| 20039 | return -EOPNOTSUPP; |
| 20040 | } |
| 20041 | env->prog->aux->arena = (void *)map; |
| 20042 | if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) { |
| 20043 | verbose(env, "arena's user address must be set via map_extra or mmap()\n"); |
| 20044 | return -EINVAL; |
| 20045 | } |
| 20046 | } |
| 20047 | |
| 20048 | return 0; |
| 20049 | } |
| 20050 | |
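| | /* Record 'map' in env->used_maps[], taking a reference on it, unless it was |
| | * already recorded. Returns the index into used_maps[] or a negative error. |
| | */ |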
| 20051 | static int __add_used_map(struct bpf_verifier_env *env, struct bpf_map *map) |
| 20052 | { |
| 20053 | int i, err; |
| 20054 | |
| 20055 | /* check whether we recorded this map already */ |
| 20056 | for (i = 0; i < env->used_map_cnt; i++) |
| 20057 | if (env->used_maps[i] == map) |
| 20058 | return i; |
| 20059 | |
| 20060 | if (env->used_map_cnt >= MAX_USED_MAPS) { |
| 20061 | verbose(env, "The total number of maps per program has reached the limit of %u\n", |
| 20062 | MAX_USED_MAPS); |
| 20063 | return -E2BIG; |
| 20064 | } |
| 20065 | |
| 20066 | err = check_map_prog_compatibility(env, map, env->prog); |
| 20067 | if (err) |
| 20068 | return err; |
| 20069 | |
| 20070 | if (env->prog->sleepable) |
| 20071 | atomic64_inc(&map->sleepable_refcnt); |
| 20072 | |
| 20073 | /* hold the map. If the program is rejected by the verifier, |
| 20074 | * the map will be released by release_maps(); otherwise it |
| 20075 | * will be used by the valid program until it's unloaded |
| 20076 | * and all maps are released in bpf_free_used_maps() |
| 20077 | */ |
| 20078 | bpf_map_inc(map); |
| 20079 | |
| 20080 | env->used_maps[env->used_map_cnt++] = map; |
| 20081 | |
| 20082 | return env->used_map_cnt - 1; |
| 20083 | } |
| 20084 | |
| 20085 | /* Add the map behind fd to the used maps list, if it's not already there, |
| 20086 | * and return its index. |
| 20087 | * Returns a negative error code on failure, or the index (>= 0) on success. |
| 20088 | */ |
| 20089 | static int add_used_map(struct bpf_verifier_env *env, int fd) |
| 20090 | { |
| 20091 | struct bpf_map *map; |
| 20092 | CLASS(fd, f)(fd); |
| 20093 | |
| 20094 | map = __bpf_map_get(f); |
| 20095 | if (IS_ERR(map)) { |
| 20096 | verbose(env, "fd %d is not pointing to valid bpf_map\n", fd); |
| 20097 | return PTR_ERR(map); |
| 20098 | } |
| 20099 | |
| 20100 | return __add_used_map(env, map); |
| 20101 | } |
| 20102 | |
| 20103 | /* find and rewrite pseudo imm in ld_imm64 instructions: |
| 20104 | * |
| 20105 | * 1. if it accesses a map FD, replace it with the actual map pointer. |
| 20106 | * 2. if it accesses the btf_id of a VAR or FUNC, replace it with a pointer to it. |
| 20107 | * |
| 20108 | * NOTE: btf_vmlinux is required for converting pseudo btf_id. |
| 20109 | */ |
| 20110 | static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) |
| 20111 | { |
| 20112 | struct bpf_insn *insn = env->prog->insnsi; |
| 20113 | int insn_cnt = env->prog->len; |
| 20114 | int i, err; |
| 20115 | |
| 20116 | err = bpf_prog_calc_tag(env->prog); |
| 20117 | if (err) |
| 20118 | return err; |
| 20119 | |
| 20120 | for (i = 0; i < insn_cnt; i++, insn++) { |
| 20121 | if (BPF_CLASS(insn->code) == BPF_LDX && |
| 20122 | ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) || |
| 20123 | insn->imm != 0)) { |
| 20124 | verbose(env, "BPF_LDX uses reserved fields\n"); |
| 20125 | return -EINVAL; |
| 20126 | } |
| 20127 | |
| 20128 | if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { |
| 20129 | struct bpf_insn_aux_data *aux; |
| 20130 | struct bpf_map *map; |
| 20131 | int map_idx; |
| 20132 | u64 addr; |
| 20133 | u32 fd; |
| 20134 | |
| 20135 | if (i == insn_cnt - 1 || insn[1].code != 0 || |
| 20136 | insn[1].dst_reg != 0 || insn[1].src_reg != 0 || |
| 20137 | insn[1].off != 0) { |
| 20138 | verbose(env, "invalid bpf_ld_imm64 insn\n"); |
| 20139 | return -EINVAL; |
| 20140 | } |
| 20141 | |
| 20142 | if (insn[0].src_reg == 0) |
| 20143 | /* valid generic load 64-bit imm */ |
| 20144 | goto next_insn; |
| 20145 | |
| 20146 | if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) { |
| 20147 | aux = &env->insn_aux_data[i]; |
| 20148 | err = check_pseudo_btf_id(env, insn, aux); |
| 20149 | if (err) |
| 20150 | return err; |
| 20151 | goto next_insn; |
| 20152 | } |
| 20153 | |
| 20154 | if (insn[0].src_reg == BPF_PSEUDO_FUNC) { |
| 20155 | aux = &env->insn_aux_data[i]; |
| 20156 | aux->ptr_type = PTR_TO_FUNC; |
| 20157 | goto next_insn; |
| 20158 | } |
| 20159 | |
| 20160 | /* In the final convert_pseudo_ld_imm64() step, this is |
| 20161 | * converted into a regular 64-bit imm load insn. |
| 20162 | */ |
| 20163 | switch (insn[0].src_reg) { |
| 20164 | case BPF_PSEUDO_MAP_VALUE: |
| 20165 | case BPF_PSEUDO_MAP_IDX_VALUE: |
| 20166 | break; |
| 20167 | case BPF_PSEUDO_MAP_FD: |
| 20168 | case BPF_PSEUDO_MAP_IDX: |
| 20169 | if (insn[1].imm == 0) |
| 20170 | break; |
| 20171 | fallthrough; |
| 20172 | default: |
| 20173 | verbose(env, "unrecognized bpf_ld_imm64 insn\n"); |
| 20174 | return -EINVAL; |
| 20175 | } |
| 20176 | |
| 20177 | switch (insn[0].src_reg) { |
| 20178 | case BPF_PSEUDO_MAP_IDX_VALUE: |
| 20179 | case BPF_PSEUDO_MAP_IDX: |
| 20180 | if (bpfptr_is_null(env->fd_array)) { |
| 20181 | verbose(env, "fd_idx without fd_array is invalid\n"); |
| 20182 | return -EPROTO; |
| 20183 | } |
| 20184 | if (copy_from_bpfptr_offset(&fd, env->fd_array, |
| 20185 | insn[0].imm * sizeof(fd), |
| 20186 | sizeof(fd))) |
| 20187 | return -EFAULT; |
| 20188 | break; |
| 20189 | default: |
| 20190 | fd = insn[0].imm; |
| 20191 | break; |
| 20192 | } |
| 20193 | |
| 20194 | map_idx = add_used_map(env, fd); |
| 20195 | if (map_idx < 0) |
| 20196 | return map_idx; |
| 20197 | map = env->used_maps[map_idx]; |
| 20198 | |
| 20199 | aux = &env->insn_aux_data[i]; |
| 20200 | aux->map_index = map_idx; |
| 20201 | |
| 20202 | if (insn[0].src_reg == BPF_PSEUDO_MAP_FD || |
| 20203 | insn[0].src_reg == BPF_PSEUDO_MAP_IDX) { |
| 20204 | addr = (unsigned long)map; |
| 20205 | } else { |
| 20206 | u32 off = insn[1].imm; |
| 20207 | |
| 20208 | if (off >= BPF_MAX_VAR_OFF) { |
| 20209 | verbose(env, "direct value offset of %u is not allowed\n", off); |
| 20210 | return -EINVAL; |
| 20211 | } |
| 20212 | |
| 20213 | if (!map->ops->map_direct_value_addr) { |
| 20214 | verbose(env, "no direct value access support for this map type\n"); |
| 20215 | return -EINVAL; |
| 20216 | } |
| 20217 | |
| 20218 | err = map->ops->map_direct_value_addr(map, &addr, off); |
| 20219 | if (err) { |
| 20220 | verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", |
| 20221 | map->value_size, off); |
| 20222 | return err; |
| 20223 | } |
| 20224 | |
| 20225 | aux->map_off = off; |
| 20226 | addr += off; |
| 20227 | } |
| 20228 | |
| 20229 | insn[0].imm = (u32)addr; |
| 20230 | insn[1].imm = addr >> 32; |
| 20231 | |
| 20232 | next_insn: |
| 20233 | insn++; |
| 20234 | i++; |
| 20235 | continue; |
| 20236 | } |
| 20237 | |
| 20238 | /* Basic sanity check before we invest more work here. */ |
| 20239 | if (!bpf_opcode_in_insntable(insn->code)) { |
| 20240 | verbose(env, "unknown opcode %02x\n", insn->code); |
| 20241 | return -EINVAL; |
| 20242 | } |
| 20243 | } |
| 20244 | |
| 20245 | /* now all pseudo BPF_LD_IMM64 instructions load valid |
| 20246 | * 'struct bpf_map *' into a register instead of user map_fd. |
| 20247 | * These pointers will be used later by the verifier to validate map access. |
| 20248 | */ |
| 20249 | return 0; |
| 20250 | } |
| 20251 | |
| 20252 | /* drop refcnt of maps used by the rejected program */ |
| 20253 | static void release_maps(struct bpf_verifier_env *env) |
| 20254 | { |
| 20255 | __bpf_free_used_maps(env->prog->aux, env->used_maps, |
| 20256 | env->used_map_cnt); |
| 20257 | } |
| 20258 | |
| 20259 | /* drop refcnt of BTF objects used by the rejected program */ |
| 20260 | static void release_btfs(struct bpf_verifier_env *env) |
| 20261 | { |
| 20262 | __bpf_free_used_btfs(env->used_btfs, env->used_btf_cnt); |
| 20263 | } |
| 20264 | |
| 20265 | /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ |
| 20266 | static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) |
| 20267 | { |
| 20268 | struct bpf_insn *insn = env->prog->insnsi; |
| 20269 | int insn_cnt = env->prog->len; |
| 20270 | int i; |
| 20271 | |
| 20272 | for (i = 0; i < insn_cnt; i++, insn++) { |
| 20273 | if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) |
| 20274 | continue; |
| 20275 | if (insn->src_reg == BPF_PSEUDO_FUNC) |
| 20276 | continue; |
| 20277 | insn->src_reg = 0; |
| 20278 | } |
| 20279 | } |
| 20280 | |
| 20281 | /* single env->prog->insnsi[off] instruction was replaced with the range |
| 20282 | * insnsi[off, off + cnt). Adjust the corresponding insn_aux_data by copying |
| 20283 | * [0, off) and [off, end) to new locations, so the patched range stays zero |
| 20284 | */ |
| 20285 | static void adjust_insn_aux_data(struct bpf_verifier_env *env, |
| 20286 | struct bpf_insn_aux_data *new_data, |
| 20287 | struct bpf_prog *new_prog, u32 off, u32 cnt) |
| 20288 | { |
| 20289 | struct bpf_insn_aux_data *old_data = env->insn_aux_data; |
| 20290 | struct bpf_insn *insn = new_prog->insnsi; |
| 20291 | u32 old_seen = old_data[off].seen; |
| 20292 | u32 prog_len; |
| 20293 | int i; |
| 20294 | |
| 20295 | /* aux info at OFF always needs adjustment, no matter whether the fast |
| 20296 | * path (cnt == 1) is taken or not. There is no guarantee that the INSN |
| 20297 | * at OFF is the original insn from the old prog. |
| 20298 | */ |
| 20299 | old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); |
| 20300 | |
| 20301 | if (cnt == 1) |
| 20302 | return; |
| 20303 | prog_len = new_prog->len; |
| 20304 | |
| 20305 | memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); |
| 20306 | memcpy(new_data + off + cnt - 1, old_data + off, |
| 20307 | sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); |
| 20308 | for (i = off; i < off + cnt - 1; i++) { |
| 20309 | /* Expand insnsi[off]'s seen count to the patched range. */ |
| 20310 | new_data[i].seen = old_seen; |
| 20311 | new_data[i].zext_dst = insn_has_def32(env, insn + i); |
| 20312 | } |
| 20313 | env->insn_aux_data = new_data; |
| 20314 | vfree(old_data); |
| 20315 | } |
| 20316 | |
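| | /* After the insn at 'off' was expanded into 'len' insns, shift the start of |
| | * every subprog (including the fake 'exit' one) that begins after 'off'. |
| | */ |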
| 20317 | static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) |
| 20318 | { |
| 20319 | int i; |
| 20320 | |
| 20321 | if (len == 1) |
| 20322 | return; |
| 20323 | /* NOTE: fake 'exit' subprog should be updated as well. */ |
| 20324 | for (i = 0; i <= env->subprog_cnt; i++) { |
| 20325 | if (env->subprog_info[i].start <= off) |
| 20326 | continue; |
| 20327 | env->subprog_info[i].start += len - 1; |
| 20328 | } |
| 20329 | } |
| 20330 | |
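| | /* Same adjustment for the JIT poke descriptors: shift insn_idx of every |
| | * descriptor that points past the patched insn at 'off'. |
| | */ |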
| 20331 | static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len) |
| 20332 | { |
| 20333 | struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; |
| 20334 | int i, sz = prog->aux->size_poke_tab; |
| 20335 | struct bpf_jit_poke_descriptor *desc; |
| 20336 | |
| 20337 | for (i = 0; i < sz; i++) { |
| 20338 | desc = &tab[i]; |
| 20339 | if (desc->insn_idx <= off) |
| 20340 | continue; |
| 20341 | desc->insn_idx += len - 1; |
| 20342 | } |
| 20343 | } |
| 20344 | |
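| | /* Replace the single insn at 'off' with the 'len' insns in 'patch' and keep |
| | * the verifier bookkeeping (insn_aux_data, subprog starts, poke descriptors) |
| | * in sync. Returns the new program or NULL on failure. |
| | */ |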
| 20345 | static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, |
| 20346 | const struct bpf_insn *patch, u32 len) |
| 20347 | { |
| 20348 | struct bpf_prog *new_prog; |
| 20349 | struct bpf_insn_aux_data *new_data = NULL; |
| 20350 | |
| 20351 | if (len > 1) { |
| 20352 | new_data = vzalloc(array_size(env->prog->len + len - 1, |
| 20353 | sizeof(struct bpf_insn_aux_data))); |
| 20354 | if (!new_data) |
| 20355 | return NULL; |
| 20356 | } |
| 20357 | |
| 20358 | new_prog = bpf_patch_insn_single(env->prog, off, patch, len); |
| 20359 | if (IS_ERR(new_prog)) { |
| 20360 | if (PTR_ERR(new_prog) == -ERANGE) |
| 20361 | verbose(env, |
| 20362 | "insn %d cannot be patched due to 16-bit range\n", |
| 20363 | env->insn_aux_data[off].orig_idx); |
| 20364 | vfree(new_data); |
| 20365 | return NULL; |
| 20366 | } |
| 20367 | adjust_insn_aux_data(env, new_data, new_prog, off, len); |
| 20368 | adjust_subprog_starts(env, off, len); |
| 20369 | adjust_poke_descs(new_prog, off, len); |
| 20370 | return new_prog; |
| 20371 | } |
| 20372 | |
| 20373 | /* |
| 20374 | * For all jmp insns in a given 'prog' that point to the insn at 'tgt_idx', |
| 20375 | * adjust the jump offset by 'delta'. |
| 20376 | */ |
| 20377 | static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta) |
| 20378 | { |
| 20379 | struct bpf_insn *insn = prog->insnsi; |
| 20380 | u32 insn_cnt = prog->len, i; |
| 20381 | s32 imm; |
| 20382 | s16 off; |
| 20383 | |
| 20384 | for (i = 0; i < insn_cnt; i++, insn++) { |
| 20385 | u8 code = insn->code; |
| 20386 | |
| 20387 | if (tgt_idx <= i && i < tgt_idx + delta) |
| 20388 | continue; |
| 20389 | |
| 20390 | if ((BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) || |
| 20391 | BPF_OP(code) == BPF_CALL || BPF_OP(code) == BPF_EXIT) |
| 20392 | continue; |
| 20393 | |
| 20394 | if (insn->code == (BPF_JMP32 | BPF_JA)) { |
| 20395 | if (i + 1 + insn->imm != tgt_idx) |
| 20396 | continue; |
| 20397 | if (check_add_overflow(insn->imm, delta, &imm)) |
| 20398 | return -ERANGE; |
| 20399 | insn->imm = imm; |
| 20400 | } else { |
| 20401 | if (i + 1 + insn->off != tgt_idx) |
| 20402 | continue; |
| 20403 | if (check_add_overflow(insn->off, delta, &off)) |
| 20404 | return -ERANGE; |
| 20405 | insn->off = off; |
| 20406 | } |
| 20407 | } |
| 20408 | return 0; |
| 20409 | } |
| 20410 | |
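| | /* Update subprog boundaries after insns [off, off + cnt) were removed: drop |
| | * subprogs fully contained in the removed range and shift the remaining |
| | * start offsets (and func_info) accordingly. |
| | */ |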
| 20411 | static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, |
| 20412 | u32 off, u32 cnt) |
| 20413 | { |
| 20414 | int i, j; |
| 20415 | |
| 20416 | /* find first prog starting at or after off (first to remove) */ |
| 20417 | for (i = 0; i < env->subprog_cnt; i++) |
| 20418 | if (env->subprog_info[i].start >= off) |
| 20419 | break; |
| 20420 | /* find first prog starting at or after off + cnt (first to stay) */ |
| 20421 | for (j = i; j < env->subprog_cnt; j++) |
| 20422 | if (env->subprog_info[j].start >= off + cnt) |
| 20423 | break; |
| 20424 | /* if j doesn't start exactly at off + cnt, we are just removing |
| 20425 | * the front of previous prog |
| 20426 | */ |
| 20427 | if (env->subprog_info[j].start != off + cnt) |
| 20428 | j--; |
| 20429 | |
| 20430 | if (j > i) { |
| 20431 | struct bpf_prog_aux *aux = env->prog->aux; |
| 20432 | int move; |
| 20433 | |
| 20434 | /* move fake 'exit' subprog as well */ |
| 20435 | move = env->subprog_cnt + 1 - j; |
| 20436 | |
| 20437 | memmove(env->subprog_info + i, |
| 20438 | env->subprog_info + j, |
| 20439 | sizeof(*env->subprog_info) * move); |
| 20440 | env->subprog_cnt -= j - i; |
| 20441 | |
| 20442 | /* remove func_info */ |
| 20443 | if (aux->func_info) { |
| 20444 | move = aux->func_info_cnt - j; |
| 20445 | |
| 20446 | memmove(aux->func_info + i, |
| 20447 | aux->func_info + j, |
| 20448 | sizeof(*aux->func_info) * move); |
| 20449 | aux->func_info_cnt -= j - i; |
| 20450 | /* func_info->insn_off is set after all code rewrites, |
| 20451 | * in adjust_btf_func() - no need to adjust |
| 20452 | */ |
| 20453 | } |
| 20454 | } else { |
| 20455 | /* convert i from "first prog to remove" to "first to adjust" */ |
| 20456 | if (env->subprog_info[i].start == off) |
| 20457 | i++; |
| 20458 | } |
| 20459 | |
| 20460 | /* update fake 'exit' subprog as well */ |
| 20461 | for (; i <= env->subprog_cnt; i++) |
| 20462 | env->subprog_info[i].start -= cnt; |
| 20463 | |
| 20464 | return 0; |
| 20465 | } |
| 20466 | |
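| | /* Keep BPF line info consistent after insns [off, off + cnt) were removed: |
| | * drop linfo entries that refer to removed insns and pull the remaining |
| | * insn_off values (and subprog linfo_idx) in by 'cnt'. |
| | */ |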
| 20467 | static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, |
| 20468 | u32 cnt) |
| 20469 | { |
| 20470 | struct bpf_prog *prog = env->prog; |
| 20471 | u32 i, l_off, l_cnt, nr_linfo; |
| 20472 | struct bpf_line_info *linfo; |
| 20473 | |
| 20474 | nr_linfo = prog->aux->nr_linfo; |
| 20475 | if (!nr_linfo) |
| 20476 | return 0; |
| 20477 | |
| 20478 | linfo = prog->aux->linfo; |
| 20479 | |
| 20480 | /* find first line info to remove, count lines to be removed */ |
| 20481 | for (i = 0; i < nr_linfo; i++) |
| 20482 | if (linfo[i].insn_off >= off) |
| 20483 | break; |
| 20484 | |
| 20485 | l_off = i; |
| 20486 | l_cnt = 0; |
| 20487 | for (; i < nr_linfo; i++) |
| 20488 | if (linfo[i].insn_off < off + cnt) |
| 20489 | l_cnt++; |
| 20490 | else |
| 20491 | break; |
| 20492 | |
| 20493 | /* If the first live insn doesn't match the first live linfo, it needs to |
| 20494 | * "inherit" the last removed linfo. prog is already modified, so prog->len == off |
| 20495 | * means there are no live instructions after it (the tail of the program was removed). |
| 20496 | */ |
| 20497 | if (prog->len != off && l_cnt && |
| 20498 | (i == nr_linfo || linfo[i].insn_off != off + cnt)) { |
| 20499 | l_cnt--; |
| 20500 | linfo[--i].insn_off = off + cnt; |
| 20501 | } |
| 20502 | |
| 20503 | /* remove the line info entries which refer to the removed instructions */ |
| 20504 | if (l_cnt) { |
| 20505 | memmove(linfo + l_off, linfo + i, |
| 20506 | sizeof(*linfo) * (nr_linfo - i)); |
| 20507 | |
| 20508 | prog->aux->nr_linfo -= l_cnt; |
| 20509 | nr_linfo = prog->aux->nr_linfo; |
| 20510 | } |
| 20511 | |
| 20512 | /* pull all linfo[i].insn_off >= off + cnt in by cnt */ |
| 20513 | for (i = l_off; i < nr_linfo; i++) |
| 20514 | linfo[i].insn_off -= cnt; |
| 20515 | |
| 20516 | /* fix up all subprogs (incl. 'exit') which start >= off */ |
| 20517 | for (i = 0; i <= env->subprog_cnt; i++) |
| 20518 | if (env->subprog_info[i].linfo_idx > l_off) { |
| 20519 | /* program may have started in the removed region but |
| 20520 | * may not be fully removed |
| 20521 | */ |
| 20522 | if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) |
| 20523 | env->subprog_info[i].linfo_idx -= l_cnt; |
| 20524 | else |
| 20525 | env->subprog_info[i].linfo_idx = l_off; |
| 20526 | } |
| 20527 | |
| 20528 | return 0; |
| 20529 | } |
| 20530 | |
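| | /* Remove insns [off, off + cnt) from the program and update all dependent |
| | * metadata: offloaded insns, subprog info, line info and insn_aux_data. |
| | */ |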
| 20531 | static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) |
| 20532 | { |
| 20533 | struct bpf_insn_aux_data *aux_data = env->insn_aux_data; |
| 20534 | unsigned int orig_prog_len = env->prog->len; |
| 20535 | int err; |
| 20536 | |
| 20537 | if (bpf_prog_is_offloaded(env->prog->aux)) |
| 20538 | bpf_prog_offload_remove_insns(env, off, cnt); |
| 20539 | |
| 20540 | err = bpf_remove_insns(env->prog, off, cnt); |
| 20541 | if (err) |
| 20542 | return err; |
| 20543 | |
| 20544 | err = adjust_subprog_starts_after_remove(env, off, cnt); |
| 20545 | if (err) |
| 20546 | return err; |
| 20547 | |
| 20548 | err = bpf_adj_linfo_after_remove(env, off, cnt); |
| 20549 | if (err) |
| 20550 | return err; |
| 20551 | |
| 20552 | memmove(aux_data + off, aux_data + off + cnt, |
| 20553 | sizeof(*aux_data) * (orig_prog_len - off - cnt)); |
| 20554 | |
| 20555 | return 0; |
| 20556 | } |
| 20557 | |
| 20558 | /* The verifier does more data flow analysis than llvm and will not |
| 20559 | * explore branches that are dead at run time. Malicious programs can |
| 20560 | * have dead code too. Therefore replace all dead at-run-time code |
| 20561 | * with 'ja -1'. |
| 20562 | * |
| 20563 | * Just nops would not be optimal: e.g. if they sat at the end of the |
| 20564 | * program and, through another bug, we managed to jump there, we would |
| 20565 | * execute beyond program memory. Returning an exception |
| 20566 | * code also wouldn't work, since the dead code could be located in |
| 20567 | * subprogs. |
| 20568 | */ |
| 20569 | static void sanitize_dead_code(struct bpf_verifier_env *env) |
| 20570 | { |
| 20571 | struct bpf_insn_aux_data *aux_data = env->insn_aux_data; |
| 20572 | struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); |
| 20573 | struct bpf_insn *insn = env->prog->insnsi; |
| 20574 | const int insn_cnt = env->prog->len; |
| 20575 | int i; |
| 20576 | |
| 20577 | for (i = 0; i < insn_cnt; i++) { |
| 20578 | if (aux_data[i].seen) |
| 20579 | continue; |
| 20580 | memcpy(insn + i, &trap, sizeof(trap)); |
| 20581 | aux_data[i].zext_dst = false; |
| 20582 | } |
| 20583 | } |
| 20584 | |
| 20585 | static bool insn_is_cond_jump(u8 code) |
| 20586 | { |
| 20587 | u8 op; |
| 20588 | |
| 20589 | op = BPF_OP(code); |
| 20590 | if (BPF_CLASS(code) == BPF_JMP32) |
| 20591 | return op != BPF_JA; |
| 20592 | |
| 20593 | if (BPF_CLASS(code) != BPF_JMP) |
| 20594 | return false; |
| 20595 | |
| 20596 | return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; |
| 20597 | } |
| 20598 | |
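| | /* Turn conditional jumps with a dead fall-through or a dead target into |
| | * unconditional jumps to the only arm the verifier has seen, so the dead |
| | * arm can be removed later. |
| | */ |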
| 20599 | static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) |
| 20600 | { |
| 20601 | struct bpf_insn_aux_data *aux_data = env->insn_aux_data; |
| 20602 | struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); |
| 20603 | struct bpf_insn *insn = env->prog->insnsi; |
| 20604 | const int insn_cnt = env->prog->len; |
| 20605 | int i; |
| 20606 | |
| 20607 | for (i = 0; i < insn_cnt; i++, insn++) { |
| 20608 | if (!insn_is_cond_jump(insn->code)) |
| 20609 | continue; |
| 20610 | |
| 20611 | if (!aux_data[i + 1].seen) |
| 20612 | ja.off = insn->off; |
| 20613 | else if (!aux_data[i + 1 + insn->off].seen) |
| 20614 | ja.off = 0; |
| 20615 | else |
| 20616 | continue; |
| 20617 | |
| 20618 | if (bpf_prog_is_offloaded(env->prog->aux)) |
| 20619 | bpf_prog_offload_replace_insn(env, i, &ja); |
| 20620 | |
| 20621 | memcpy(insn, &ja, sizeof(ja)); |
| 20622 | } |
| 20623 | } |
| 20624 | |
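| | /* Remove every run of insns that the verifier never visited (!seen). */ |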
| 20625 | static int opt_remove_dead_code(struct bpf_verifier_env *env) |
| 20626 | { |
| 20627 | struct bpf_insn_aux_data *aux_data = env->insn_aux_data; |
| 20628 | int insn_cnt = env->prog->len; |
| 20629 | int i, err; |
| 20630 | |
| 20631 | for (i = 0; i < insn_cnt; i++) { |
| 20632 | int j; |
| 20633 | |
| 20634 | j = 0; |
| 20635 | while (i + j < insn_cnt && !aux_data[i + j].seen) |
| 20636 | j++; |
| 20637 | if (!j) |
| 20638 | continue; |
| 20639 | |
| 20640 | err = verifier_remove_insns(env, i, j); |
| 20641 | if (err) |
| 20642 | return err; |
| 20643 | insn_cnt = env->prog->len; |
| 20644 | } |
| 20645 | |
| 20646 | return 0; |
| 20647 | } |
| 20648 | |
| 20649 | static const struct bpf_insn NOP = BPF_JMP_IMM(BPF_JA, 0, 0, 0); |
| 20650 | static const struct bpf_insn MAY_GOTO_0 = BPF_RAW_INSN(BPF_JMP | BPF_JCOND, 0, 0, 0, 0); |
| 20651 | |
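| | /* Remove 'ja +0' and 'may_goto +0' insns, which are effectively no-ops. */ |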
| 20652 | static int opt_remove_nops(struct bpf_verifier_env *env) |
| 20653 | { |
| 20654 | struct bpf_insn *insn = env->prog->insnsi; |
| 20655 | int insn_cnt = env->prog->len; |
| 20656 | bool is_may_goto_0, is_ja; |
| 20657 | int i, err; |
| 20658 | |
| 20659 | for (i = 0; i < insn_cnt; i++) { |
| 20660 | is_may_goto_0 = !memcmp(&insn[i], &MAY_GOTO_0, sizeof(MAY_GOTO_0)); |
| 20661 | is_ja = !memcmp(&insn[i], &NOP, sizeof(NOP)); |
| 20662 | |
| 20663 | if (!is_may_goto_0 && !is_ja) |
| 20664 | continue; |
| 20665 | |
| 20666 | err = verifier_remove_insns(env, i, 1); |
| 20667 | if (err) |
| 20668 | return err; |
| 20669 | insn_cnt--; |
| 20670 | /* Go back one insn to catch may_goto +1; may_goto +0 sequence */ |
| 20671 | i -= (is_may_goto_0 && i > 0) ? 2 : 1; |
| 20672 | } |
| 20673 | |
| 20674 | return 0; |
| 20675 | } |
| 20676 | |
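| | /* Insert an explicit zero-extension after insns that define a 32-bit |
| | * subregister when the JIT asks for it (or for CMPXCHG). With the |
| | * BPF_F_TEST_RND_HI32 flag, poison the upper 32 bits of other 32-bit |
| | * results with a random value to flush out missing zext assumptions. |
| | */ |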
| 20677 | static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, |
| 20678 | const union bpf_attr *attr) |
| 20679 | { |
| 20680 | struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; |
| 20681 | struct bpf_insn_aux_data *aux = env->insn_aux_data; |
| 20682 | int i, patch_len, delta = 0, len = env->prog->len; |
| 20683 | struct bpf_insn *insns = env->prog->insnsi; |
| 20684 | struct bpf_prog *new_prog; |
| 20685 | bool rnd_hi32; |
| 20686 | |
| 20687 | rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; |
| 20688 | zext_patch[1] = BPF_ZEXT_REG(0); |
| 20689 | rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); |
| 20690 | rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); |
| 20691 | rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); |
| 20692 | for (i = 0; i < len; i++) { |
| 20693 | int adj_idx = i + delta; |
| 20694 | struct bpf_insn insn; |
| 20695 | int load_reg; |
| 20696 | |
| 20697 | insn = insns[adj_idx]; |
| 20698 | load_reg = insn_def_regno(&insn); |
| 20699 | if (!aux[adj_idx].zext_dst) { |
| 20700 | u8 code, class; |
| 20701 | u32 imm_rnd; |
| 20702 | |
| 20703 | if (!rnd_hi32) |
| 20704 | continue; |
| 20705 | |
| 20706 | code = insn.code; |
| 20707 | class = BPF_CLASS(code); |
| 20708 | if (load_reg == -1) |
| 20709 | continue; |
| 20710 | |
| 20711 | /* NOTE: arg "reg" (the fourth one) is only used for |
| 20712 | * BPF_STX + SRC_OP, so it is safe to pass NULL |
| 20713 | * here. |
| 20714 | */ |
| 20715 | if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) { |
| 20716 | if (class == BPF_LD && |
| 20717 | BPF_MODE(code) == BPF_IMM) |
| 20718 | i++; |
| 20719 | continue; |
| 20720 | } |
| 20721 | |
| 20722 | /* ctx load could be transformed into wider load. */ |
| 20723 | if (class == BPF_LDX && |
| 20724 | aux[adj_idx].ptr_type == PTR_TO_CTX) |
| 20725 | continue; |
| 20726 | |
| 20727 | imm_rnd = get_random_u32(); |
| 20728 | rnd_hi32_patch[0] = insn; |
| 20729 | rnd_hi32_patch[1].imm = imm_rnd; |
| 20730 | rnd_hi32_patch[3].dst_reg = load_reg; |
| 20731 | patch = rnd_hi32_patch; |
| 20732 | patch_len = 4; |
| 20733 | goto apply_patch_buffer; |
| 20734 | } |
| 20735 | |
| 20736 | /* Add in a zero-extend instruction if a) the JIT has requested |
| 20737 | * it or b) it's a CMPXCHG. |
| 20738 | * |
| 20739 | * The latter is because: BPF_CMPXCHG always loads a value into |
| 20740 | * R0, therefore always zero-extends. However some archs' |
| 20741 | * equivalent instruction only does this load when the |
| 20742 | * comparison is successful. This detail of CMPXCHG is |
| 20743 | * orthogonal to the general zero-extension behaviour of the |
| 20744 | * CPU, so it's treated independently of bpf_jit_needs_zext. |
| 20745 | */ |
| 20746 | if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn)) |
| 20747 | continue; |
| 20748 | |
| 20749 | /* Zero-extension is done by the caller. */ |
| 20750 | if (bpf_pseudo_kfunc_call(&insn)) |
| 20751 | continue; |
| 20752 | |
| 20753 | if (verifier_bug_if(load_reg == -1, env, |
| 20754 | "zext_dst is set, but no reg is defined")) |
| 20755 | return -EFAULT; |
| 20756 | |
| 20757 | zext_patch[0] = insn; |
| 20758 | zext_patch[1].dst_reg = load_reg; |
| 20759 | zext_patch[1].src_reg = load_reg; |
| 20760 | patch = zext_patch; |
| 20761 | patch_len = 2; |
| 20762 | apply_patch_buffer: |
| 20763 | new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); |
| 20764 | if (!new_prog) |
| 20765 | return -ENOMEM; |
| 20766 | env->prog = new_prog; |
| 20767 | insns = new_prog->insnsi; |
| 20768 | aux = env->insn_aux_data; |
| 20769 | delta += patch_len - 1; |
| 20770 | } |
| 20771 | |
| 20772 | return 0; |
| 20773 | } |
| 20774 | |
| 20775 | /* convert load instructions that access fields of a context type into a |
| 20776 | * sequence of instructions that access fields of the underlying structure: |
| 20777 | * struct __sk_buff -> struct sk_buff |
| 20778 | * struct bpf_sock_ops -> struct sock |
| 20779 | */ |
| 20780 | static int convert_ctx_accesses(struct bpf_verifier_env *env) |
| 20781 | { |
| 20782 | struct bpf_subprog_info *subprogs = env->subprog_info; |
| 20783 | const struct bpf_verifier_ops *ops = env->ops; |
| 20784 | int i, cnt, size, ctx_field_size, ret, delta = 0, epilogue_cnt = 0; |
| 20785 | const int insn_cnt = env->prog->len; |
| 20786 | struct bpf_insn *epilogue_buf = env->epilogue_buf; |
| 20787 | struct bpf_insn *insn_buf = env->insn_buf; |
| 20788 | struct bpf_insn *insn; |
| 20789 | u32 target_size, size_default, off; |
| 20790 | struct bpf_prog *new_prog; |
| 20791 | enum bpf_access_type type; |
| 20792 | bool is_narrower_load; |
| 20793 | int epilogue_idx = 0; |
| 20794 | |
| 20795 | if (ops->gen_epilogue) { |
| 20796 | epilogue_cnt = ops->gen_epilogue(epilogue_buf, env->prog, |
| 20797 | -(subprogs[0].stack_depth + 8)); |
| 20798 | if (epilogue_cnt >= INSN_BUF_SIZE) { |
| 20799 | verbose(env, "bpf verifier is misconfigured\n"); |
| 20800 | return -EINVAL; |
| 20801 | } else if (epilogue_cnt) { |
| 20802 | /* Save the ARG_PTR_TO_CTX for the epilogue to use */ |
| 20803 | cnt = 0; |
| 20804 | subprogs[0].stack_depth += 8; |
| 20805 | insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_1, |
| 20806 | -subprogs[0].stack_depth); |
| 20807 | insn_buf[cnt++] = env->prog->insnsi[0]; |
| 20808 | new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); |
| 20809 | if (!new_prog) |
| 20810 | return -ENOMEM; |
| 20811 | env->prog = new_prog; |
| 20812 | delta += cnt - 1; |
| 20813 | |
| 20814 | ret = add_kfunc_in_insns(env, epilogue_buf, epilogue_cnt - 1); |
| 20815 | if (ret < 0) |
| 20816 | return ret; |
| 20817 | } |
| 20818 | } |
| 20819 | |
| 20820 | if (ops->gen_prologue || env->seen_direct_write) { |
| 20821 | if (!ops->gen_prologue) { |
| 20822 | verbose(env, "bpf verifier is misconfigured\n"); |
| 20823 | return -EINVAL; |
| 20824 | } |
| 20825 | cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, |
| 20826 | env->prog); |
| 20827 | if (cnt >= INSN_BUF_SIZE) { |
| 20828 | verbose(env, "bpf verifier is misconfigured\n"); |
| 20829 | return -EINVAL; |
| 20830 | } else if (cnt) { |
| 20831 | new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); |
| 20832 | if (!new_prog) |
| 20833 | return -ENOMEM; |
| 20834 | |
| 20835 | env->prog = new_prog; |
| 20836 | delta += cnt - 1; |
| 20837 | |
| 20838 | ret = add_kfunc_in_insns(env, insn_buf, cnt - 1); |
| 20839 | if (ret < 0) |
| 20840 | return ret; |
| 20841 | } |
| 20842 | } |
| 20843 | |
| 20844 | if (delta) |
| 20845 | WARN_ON(adjust_jmp_off(env->prog, 0, delta)); |
| 20846 | |
| 20847 | if (bpf_prog_is_offloaded(env->prog->aux)) |
| 20848 | return 0; |
| 20849 | |
| 20850 | insn = env->prog->insnsi + delta; |
| 20851 | |
| 20852 | for (i = 0; i < insn_cnt; i++, insn++) { |
| 20853 | bpf_convert_ctx_access_t convert_ctx_access; |
| 20854 | u8 mode; |
| 20855 | |
| 20856 | if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || |
| 20857 | insn->code == (BPF_LDX | BPF_MEM | BPF_H) || |
| 20858 | insn->code == (BPF_LDX | BPF_MEM | BPF_W) || |
| 20859 | insn->code == (BPF_LDX | BPF_MEM | BPF_DW) || |
| 20860 | insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) || |
| 20861 | insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) || |
| 20862 | insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) { |
| 20863 | type = BPF_READ; |
| 20864 | } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || |
| 20865 | insn->code == (BPF_STX | BPF_MEM | BPF_H) || |
| 20866 | insn->code == (BPF_STX | BPF_MEM | BPF_W) || |
| 20867 | insn->code == (BPF_STX | BPF_MEM | BPF_DW) || |
| 20868 | insn->code == (BPF_ST | BPF_MEM | BPF_B) || |
| 20869 | insn->code == (BPF_ST | BPF_MEM | BPF_H) || |
| 20870 | insn->code == (BPF_ST | BPF_MEM | BPF_W) || |
| 20871 | insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { |
| 20872 | type = BPF_WRITE; |
| 20873 | } else if ((insn->code == (BPF_STX | BPF_ATOMIC | BPF_B) || |
| 20874 | insn->code == (BPF_STX | BPF_ATOMIC | BPF_H) || |
| 20875 | insn->code == (BPF_STX | BPF_ATOMIC | BPF_W) || |
| 20876 | insn->code == (BPF_STX | BPF_ATOMIC | BPF_DW)) && |
| 20877 | env->insn_aux_data[i + delta].ptr_type == PTR_TO_ARENA) { |
| 20878 | insn->code = BPF_STX | BPF_PROBE_ATOMIC | BPF_SIZE(insn->code); |
| 20879 | env->prog->aux->num_exentries++; |
| 20880 | continue; |
| 20881 | } else if (insn->code == (BPF_JMP | BPF_EXIT) && |
| 20882 | epilogue_cnt && |
| 20883 | i + delta < subprogs[1].start) { |
| 20884 | /* Generate epilogue for the main prog */ |
| 20885 | if (epilogue_idx) { |
| 20886 | /* jump back to the earlier generated epilogue */ |
| 20887 | insn_buf[0] = BPF_JMP32_A(epilogue_idx - i - delta - 1); |
| 20888 | cnt = 1; |
| 20889 | } else { |
| 20890 | memcpy(insn_buf, epilogue_buf, |
| 20891 | epilogue_cnt * sizeof(*epilogue_buf)); |
| 20892 | cnt = epilogue_cnt; |
| 20893 | /* epilogue_idx cannot be 0. It must have at |
| 20894 | * least one ctx ptr saving insn before the |
| 20895 | * epilogue. |
| 20896 | */ |
| 20897 | epilogue_idx = i + delta; |
| 20898 | } |
| 20899 | goto patch_insn_buf; |
| 20900 | } else { |
| 20901 | continue; |
| 20902 | } |
| 20903 | |
| 20904 | if (type == BPF_WRITE && |
| 20905 | env->insn_aux_data[i + delta].sanitize_stack_spill) { |
| 20906 | struct bpf_insn patch[] = { |
| 20907 | *insn, |
| 20908 | BPF_ST_NOSPEC(), |
| 20909 | }; |
| 20910 | |
| 20911 | cnt = ARRAY_SIZE(patch); |
| 20912 | new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); |
| 20913 | if (!new_prog) |
| 20914 | return -ENOMEM; |
| 20915 | |
| 20916 | delta += cnt - 1; |
| 20917 | env->prog = new_prog; |
| 20918 | insn = new_prog->insnsi + i + delta; |
| 20919 | continue; |
| 20920 | } |
| 20921 | |
| 20922 | switch ((int)env->insn_aux_data[i + delta].ptr_type) { |
| 20923 | case PTR_TO_CTX: |
| 20924 | if (!ops->convert_ctx_access) |
| 20925 | continue; |
| 20926 | convert_ctx_access = ops->convert_ctx_access; |
| 20927 | break; |
| 20928 | case PTR_TO_SOCKET: |
| 20929 | case PTR_TO_SOCK_COMMON: |
| 20930 | convert_ctx_access = bpf_sock_convert_ctx_access; |
| 20931 | break; |
| 20932 | case PTR_TO_TCP_SOCK: |
| 20933 | convert_ctx_access = bpf_tcp_sock_convert_ctx_access; |
| 20934 | break; |
| 20935 | case PTR_TO_XDP_SOCK: |
| 20936 | convert_ctx_access = bpf_xdp_sock_convert_ctx_access; |
| 20937 | break; |
| 20938 | case PTR_TO_BTF_ID: |
| 20939 | case PTR_TO_BTF_ID | PTR_UNTRUSTED: |
| 20940 | /* PTR_TO_BTF_ID | MEM_ALLOC, unlike plain PTR_TO_BTF_ID, always has a valid |
| 20941 | * lifetime and an active ref_obj_id, but the same cannot |
| 20942 | * be said once it is marked PTR_UNTRUSTED, hence we must handle |
| 20943 | * any faults for loads into such types. BPF_WRITE is disallowed |
| 20944 | * for this case. |
| 20945 | */ |
| 20946 | case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED: |
| 20947 | if (type == BPF_READ) { |
| 20948 | if (BPF_MODE(insn->code) == BPF_MEM) |
| 20949 | insn->code = BPF_LDX | BPF_PROBE_MEM | |
| 20950 | BPF_SIZE((insn)->code); |
| 20951 | else |
| 20952 | insn->code = BPF_LDX | BPF_PROBE_MEMSX | |
| 20953 | BPF_SIZE((insn)->code); |
| 20954 | env->prog->aux->num_exentries++; |
| 20955 | } |
| 20956 | continue; |
| 20957 | case PTR_TO_ARENA: |
| 20958 | if (BPF_MODE(insn->code) == BPF_MEMSX) { |
| 20959 | verbose(env, "sign extending loads from arena are not supported yet\n"); |
| 20960 | return -EOPNOTSUPP; |
| 20961 | } |
| 20962 | insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code); |
| 20963 | env->prog->aux->num_exentries++; |
| 20964 | continue; |
| 20965 | default: |
| 20966 | continue; |
| 20967 | } |
| 20968 | |
| 20969 | ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; |
| 20970 | size = BPF_LDST_BYTES(insn); |
| 20971 | mode = BPF_MODE(insn->code); |
| 20972 | |
| 20973 | /* If the read access is a narrower load of the field, |
| 20974 | * convert it to a 4/8-byte load, to minimize the program-type-specific |
| 20975 | * convert_ctx_access changes. If the conversion is successful, |
| 20976 | * we will apply a proper mask to the result. |
| 20977 | */ |
| 20978 | is_narrower_load = size < ctx_field_size; |
| 20979 | size_default = bpf_ctx_off_adjust_machine(ctx_field_size); |
| 20980 | off = insn->off; |
| 20981 | if (is_narrower_load) { |
| 20982 | u8 size_code; |
| 20983 | |
| 20984 | if (type == BPF_WRITE) { |
| 20985 | verbose(env, "bpf verifier narrow ctx access misconfigured\n"); |
| 20986 | return -EINVAL; |
| 20987 | } |
| 20988 | |
| 20989 | size_code = BPF_H; |
| 20990 | if (ctx_field_size == 4) |
| 20991 | size_code = BPF_W; |
| 20992 | else if (ctx_field_size == 8) |
| 20993 | size_code = BPF_DW; |
| 20994 | |
| 20995 | insn->off = off & ~(size_default - 1); |
| 20996 | insn->code = BPF_LDX | BPF_MEM | size_code; |
| 20997 | } |
| 20998 | |
| 20999 | target_size = 0; |
| 21000 | cnt = convert_ctx_access(type, insn, insn_buf, env->prog, |
| 21001 | &target_size); |
| 21002 | if (cnt == 0 || cnt >= INSN_BUF_SIZE || |
| 21003 | (ctx_field_size && !target_size)) { |
| 21004 | verbose(env, "bpf verifier is misconfigured\n"); |
| 21005 | return -EINVAL; |
| 21006 | } |
| 21007 | |
| 21008 | if (is_narrower_load && size < target_size) { |
| 21009 | u8 shift = bpf_ctx_narrow_access_offset( |
| 21010 | off, size, size_default) * 8; |
| 21011 | if (shift && cnt + 1 >= INSN_BUF_SIZE) { |
| 21012 | verbose(env, "bpf verifier narrow ctx load misconfigured\n"); |
| 21013 | return -EINVAL; |
| 21014 | } |
| 21015 | if (ctx_field_size <= 4) { |
| 21016 | if (shift) |
| 21017 | insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, |
| 21018 | insn->dst_reg, |
| 21019 | shift); |
| 21020 | insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, |
| 21021 | (1 << size * 8) - 1); |
| 21022 | } else { |
| 21023 | if (shift) |
| 21024 | insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, |
| 21025 | insn->dst_reg, |
| 21026 | shift); |
| 21027 | insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, |
| 21028 | (1ULL << size * 8) - 1); |
| 21029 | } |
| 21030 | } |
| 21031 | if (mode == BPF_MEMSX) |
| 21032 | insn_buf[cnt++] = BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_X, |
| 21033 | insn->dst_reg, insn->dst_reg, |
| 21034 | size * 8, 0); |
| 21035 | |
| 21036 | patch_insn_buf: |
| 21037 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 21038 | if (!new_prog) |
| 21039 | return -ENOMEM; |
| 21040 | |
| 21041 | delta += cnt - 1; |
| 21042 | |
| 21043 | /* keep walking new program and skip insns we just inserted */ |
| 21044 | env->prog = new_prog; |
| 21045 | insn = new_prog->insnsi + i + delta; |
| 21046 | } |
| 21047 | |
| 21048 | return 0; |
| 21049 | } |
| 21050 | |
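| | /* JIT each subprogram separately: split the program into one bpf_prog per |
| | * subprog, JIT them all, patch bpf-to-bpf call addresses and run a final JIT |
| | * pass. On failure the insns are restored so the prog can be interpreted. |
| | */ |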
| 21051 | static int jit_subprogs(struct bpf_verifier_env *env) |
| 21052 | { |
| 21053 | struct bpf_prog *prog = env->prog, **func, *tmp; |
| 21054 | int i, j, subprog_start, subprog_end = 0, len, subprog; |
| 21055 | struct bpf_map *map_ptr; |
| 21056 | struct bpf_insn *insn; |
| 21057 | void *old_bpf_func; |
| 21058 | int err, num_exentries; |
| 21059 | |
| 21060 | if (env->subprog_cnt <= 1) |
| 21061 | return 0; |
| 21062 | |
| 21063 | for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { |
| 21064 | if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn)) |
| 21065 | continue; |
| 21066 | |
| 21067 | /* Upon error here we cannot fall back to the interpreter but |
| 21068 | * need a hard reject of the program. Thus -EFAULT is |
| 21069 | * propagated in any case. |
| 21070 | */ |
| 21071 | subprog = find_subprog(env, i + insn->imm + 1); |
| 21072 | if (verifier_bug_if(subprog < 0, env, "No program to jit at insn %d", |
| 21073 | i + insn->imm + 1)) |
| 21074 | return -EFAULT; |
| 21075 | /* temporarily remember subprog id inside insn instead of |
| 21076 | * aux_data, since next loop will split up all insns into funcs |
| 21077 | */ |
| 21078 | insn->off = subprog; |
| 21079 | /* remember the original imm in case the JIT fails and a fallback |
| 21080 | * to the interpreter is needed |
| 21081 | */ |
| 21082 | env->insn_aux_data[i].call_imm = insn->imm; |
| 21083 | /* point imm to __bpf_call_base+1 from the JIT's point of view */ |
| 21084 | insn->imm = 1; |
| 21085 | if (bpf_pseudo_func(insn)) { |
| 21086 | #if defined(MODULES_VADDR) |
| 21087 | u64 addr = MODULES_VADDR; |
| 21088 | #else |
| 21089 | u64 addr = VMALLOC_START; |
| 21090 | #endif |
| 21091 | /* the JIT (e.g. x86_64) may emit fewer instructions |
| 21092 | * if it learns a u32 imm is the same as a u64 imm. |
| 21093 | * Set the value close enough to a possible prog address. |
| 21094 | */ |
| 21095 | insn[0].imm = (u32)addr; |
| 21096 | insn[1].imm = addr >> 32; |
| 21097 | } |
| 21098 | } |
| 21099 | |
| 21100 | err = bpf_prog_alloc_jited_linfo(prog); |
| 21101 | if (err) |
| 21102 | goto out_undo_insn; |
| 21103 | |
| 21104 | err = -ENOMEM; |
| 21105 | func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); |
| 21106 | if (!func) |
| 21107 | goto out_undo_insn; |
| 21108 | |
| 21109 | for (i = 0; i < env->subprog_cnt; i++) { |
| 21110 | subprog_start = subprog_end; |
| 21111 | subprog_end = env->subprog_info[i + 1].start; |
| 21112 | |
| 21113 | len = subprog_end - subprog_start; |
| 21114 | /* bpf_prog_run() doesn't call subprogs directly, |
| 21115 | * hence main prog stats include the runtime of subprogs. |
| 21116 | * subprogs don't have IDs and are not reachable via prog_get_next_id. |
| 21117 | * func[i]->stats will never be accessed and stays NULL. |
| 21118 | */ |
| 21119 | func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER); |
| 21120 | if (!func[i]) |
| 21121 | goto out_free; |
| 21122 | memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], |
| 21123 | len * sizeof(struct bpf_insn)); |
| 21124 | func[i]->type = prog->type; |
| 21125 | func[i]->len = len; |
| 21126 | if (bpf_prog_calc_tag(func[i])) |
| 21127 | goto out_free; |
| 21128 | func[i]->is_func = 1; |
| 21129 | func[i]->sleepable = prog->sleepable; |
| 21130 | func[i]->aux->func_idx = i; |
| 21131 | /* The members below will be freed only via prog->aux */ |
| 21132 | func[i]->aux->btf = prog->aux->btf; |
| 21133 | func[i]->aux->func_info = prog->aux->func_info; |
| 21134 | func[i]->aux->func_info_cnt = prog->aux->func_info_cnt; |
| 21135 | func[i]->aux->poke_tab = prog->aux->poke_tab; |
| 21136 | func[i]->aux->size_poke_tab = prog->aux->size_poke_tab; |
| 21137 | |
| 21138 | for (j = 0; j < prog->aux->size_poke_tab; j++) { |
| 21139 | struct bpf_jit_poke_descriptor *poke; |
| 21140 | |
| 21141 | poke = &prog->aux->poke_tab[j]; |
| 21142 | if (poke->insn_idx < subprog_end && |
| 21143 | poke->insn_idx >= subprog_start) |
| 21144 | poke->aux = func[i]->aux; |
| 21145 | } |
| 21146 | |
| 21147 | func[i]->aux->name[0] = 'F'; |
| 21148 | func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; |
| 21149 | if (env->subprog_info[i].priv_stack_mode == PRIV_STACK_ADAPTIVE) |
| 21150 | func[i]->aux->jits_use_priv_stack = true; |
| 21151 | |
| 21152 | func[i]->jit_requested = 1; |
| 21153 | func[i]->blinding_requested = prog->blinding_requested; |
| 21154 | func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; |
| 21155 | func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; |
| 21156 | func[i]->aux->linfo = prog->aux->linfo; |
| 21157 | func[i]->aux->nr_linfo = prog->aux->nr_linfo; |
| 21158 | func[i]->aux->jited_linfo = prog->aux->jited_linfo; |
| 21159 | func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; |
| 21160 | func[i]->aux->arena = prog->aux->arena; |
| 21161 | num_exentries = 0; |
| 21162 | insn = func[i]->insnsi; |
| 21163 | for (j = 0; j < func[i]->len; j++, insn++) { |
| 21164 | if (BPF_CLASS(insn->code) == BPF_LDX && |
| 21165 | (BPF_MODE(insn->code) == BPF_PROBE_MEM || |
| 21166 | BPF_MODE(insn->code) == BPF_PROBE_MEM32 || |
| 21167 | BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) |
| 21168 | num_exentries++; |
| 21169 | if ((BPF_CLASS(insn->code) == BPF_STX || |
| 21170 | BPF_CLASS(insn->code) == BPF_ST) && |
| 21171 | BPF_MODE(insn->code) == BPF_PROBE_MEM32) |
| 21172 | num_exentries++; |
| 21173 | if (BPF_CLASS(insn->code) == BPF_STX && |
| 21174 | BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) |
| 21175 | num_exentries++; |
| 21176 | } |
| 21177 | func[i]->aux->num_exentries = num_exentries; |
| 21178 | func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; |
| 21179 | func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb; |
| 21180 | func[i]->aux->changes_pkt_data = env->subprog_info[i].changes_pkt_data; |
| 21181 | func[i]->aux->might_sleep = env->subprog_info[i].might_sleep; |
| 21182 | if (!i) |
| 21183 | func[i]->aux->exception_boundary = env->seen_exception; |
| 21184 | func[i] = bpf_int_jit_compile(func[i]); |
| 21185 | if (!func[i]->jited) { |
| 21186 | err = -ENOTSUPP; |
| 21187 | goto out_free; |
| 21188 | } |
| 21189 | cond_resched(); |
| 21190 | } |
| 21191 | |
| 21192 | /* at this point all bpf functions were successfully JITed; |
| 21193 | * now populate all bpf_calls with correct addresses and |
| 21194 | * run the last pass of the JIT |
| 21195 | */ |
| 21196 | for (i = 0; i < env->subprog_cnt; i++) { |
| 21197 | insn = func[i]->insnsi; |
| 21198 | for (j = 0; j < func[i]->len; j++, insn++) { |
| 21199 | if (bpf_pseudo_func(insn)) { |
| 21200 | subprog = insn->off; |
| 21201 | insn[0].imm = (u32)(long)func[subprog]->bpf_func; |
| 21202 | insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; |
| 21203 | continue; |
| 21204 | } |
| 21205 | if (!bpf_pseudo_call(insn)) |
| 21206 | continue; |
| 21207 | subprog = insn->off; |
| 21208 | insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); |
| 21209 | } |
| 21210 | |
| 21211 | /* we use the aux data to keep a list of the start addresses |
| 21212 | * of the JITed images for each function in the program |
| 21213 | * |
| 21214 | * for some architectures, such as powerpc64, the imm field |
| 21215 | * might not be large enough to hold the offset of the start |
| 21216 | * address of the callee's JITed image from __bpf_call_base |
| 21217 | * |
| 21218 | * in such cases, we can lookup the start address of a callee |
| 21219 | * by using its subprog id, available from the off field of |
| 21220 | * the call instruction, as an index for this list |
| 21221 | */ |
| 21222 | func[i]->aux->func = func; |
| 21223 | func[i]->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; |
| 21224 | func[i]->aux->real_func_cnt = env->subprog_cnt; |
| 21225 | } |
| 21226 | for (i = 0; i < env->subprog_cnt; i++) { |
| 21227 | old_bpf_func = func[i]->bpf_func; |
| 21228 | tmp = bpf_int_jit_compile(func[i]); |
| 21229 | if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { |
| 21230 | verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); |
| 21231 | err = -ENOTSUPP; |
| 21232 | goto out_free; |
| 21233 | } |
| 21234 | cond_resched(); |
| 21235 | } |
| 21236 | |
| 21237 | /* finally lock the prog and JIT images for all functions and |
| 21238 | * populate kallsyms. Begin at the first subprogram, since |
| 21239 | * bpf_prog_load will add the kallsyms for the main program. |
| 21240 | */ |
| 21241 | for (i = 1; i < env->subprog_cnt; i++) { |
| 21242 | err = bpf_prog_lock_ro(func[i]); |
| 21243 | if (err) |
| 21244 | goto out_free; |
| 21245 | } |
| 21246 | |
| 21247 | for (i = 1; i < env->subprog_cnt; i++) |
| 21248 | bpf_prog_kallsyms_add(func[i]); |
| 21249 | |
| 21250 | /* Last step: make the now unused interpreter insns from the main |
| 21251 | * prog consistent for later dump requests, so they |
| 21252 | * look the same as if they had only been interpreted. |
| 21253 | */ |
| 21254 | for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { |
| 21255 | if (bpf_pseudo_func(insn)) { |
| 21256 | insn[0].imm = env->insn_aux_data[i].call_imm; |
| 21257 | insn[1].imm = insn->off; |
| 21258 | insn->off = 0; |
| 21259 | continue; |
| 21260 | } |
| 21261 | if (!bpf_pseudo_call(insn)) |
| 21262 | continue; |
| 21263 | insn->off = env->insn_aux_data[i].call_imm; |
| 21264 | subprog = find_subprog(env, i + insn->off + 1); |
| 21265 | insn->imm = subprog; |
| 21266 | } |
| 21267 | |
| 21268 | prog->jited = 1; |
| 21269 | prog->bpf_func = func[0]->bpf_func; |
| 21270 | prog->jited_len = func[0]->jited_len; |
| 21271 | prog->aux->extable = func[0]->aux->extable; |
| 21272 | prog->aux->num_exentries = func[0]->aux->num_exentries; |
| 21273 | prog->aux->func = func; |
| 21274 | prog->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; |
| 21275 | prog->aux->real_func_cnt = env->subprog_cnt; |
| 21276 | prog->aux->bpf_exception_cb = (void *)func[env->exception_callback_subprog]->bpf_func; |
| 21277 | prog->aux->exception_boundary = func[0]->aux->exception_boundary; |
| 21278 | bpf_prog_jit_attempt_done(prog); |
| 21279 | return 0; |
| 21280 | out_free: |
| 21281 | /* We failed JIT'ing, so at this point we need to unregister poke |
| 21282 | * descriptors from subprogs, so that the kernel does not attempt to |
| 21283 | * patch them anymore as we're freeing the subprog JIT memory. |
| 21284 | */ |
| 21285 | for (i = 0; i < prog->aux->size_poke_tab; i++) { |
| 21286 | map_ptr = prog->aux->poke_tab[i].tail_call.map; |
| 21287 | map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); |
| 21288 | } |
| 21289 | /* At this point we're guaranteed that poke descriptors are not |
| 21290 | * live anymore. We can just unlink the descriptor table as it's |
| 21291 | * released with the main prog. |
| 21292 | */ |
| 21293 | for (i = 0; i < env->subprog_cnt; i++) { |
| 21294 | if (!func[i]) |
| 21295 | continue; |
| 21296 | func[i]->aux->poke_tab = NULL; |
| 21297 | bpf_jit_free(func[i]); |
| 21298 | } |
| 21299 | kfree(func); |
| 21300 | out_undo_insn: |
| 21301 | /* cleanup main prog to be interpreted */ |
| 21302 | prog->jit_requested = 0; |
| 21303 | prog->blinding_requested = 0; |
| 21304 | for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { |
| 21305 | if (!bpf_pseudo_call(insn)) |
| 21306 | continue; |
| 21307 | insn->off = 0; |
| 21308 | insn->imm = env->insn_aux_data[i].call_imm; |
| 21309 | } |
| 21310 | bpf_prog_jit_attempt_done(prog); |
| 21311 | return err; |
| 21312 | } |
| 21313 | |
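| | /* Resolve bpf-to-bpf calls: JIT the subprogs when possible, otherwise fall |
| | * back to the interpreter by patching call args, rejecting features the |
| | * interpreter doesn't support (kfunc calls, callbacks, bpf2bpf + tail calls). |
| | */ |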
| 21314 | static int fixup_call_args(struct bpf_verifier_env *env) |
| 21315 | { |
| 21316 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON |
| 21317 | struct bpf_prog *prog = env->prog; |
| 21318 | struct bpf_insn *insn = prog->insnsi; |
| 21319 | bool has_kfunc_call = bpf_prog_has_kfunc_call(prog); |
| 21320 | int i, depth; |
| 21321 | #endif |
| 21322 | int err = 0; |
| 21323 | |
| 21324 | if (env->prog->jit_requested && |
| 21325 | !bpf_prog_is_offloaded(env->prog->aux)) { |
| 21326 | err = jit_subprogs(env); |
| 21327 | if (err == 0) |
| 21328 | return 0; |
| 21329 | if (err == -EFAULT) |
| 21330 | return err; |
| 21331 | } |
| 21332 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON |
| 21333 | if (has_kfunc_call) { |
| 21334 | verbose(env, "calling kernel functions are not allowed in non-JITed programs\n"); |
| 21335 | return -EINVAL; |
| 21336 | } |
| 21337 | if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { |
| 21338 | /* When the JIT fails, progs with bpf2bpf calls and tail_calls |
| 21339 | * have to be rejected, since the interpreter doesn't support them yet. |
| 21340 | */ |
| 21341 | verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); |
| 21342 | return -EINVAL; |
| 21343 | } |
| 21344 | for (i = 0; i < prog->len; i++, insn++) { |
| 21345 | if (bpf_pseudo_func(insn)) { |
| 21346 | /* When the JIT fails, progs with callback calls |
| 21347 | * have to be rejected, since the interpreter doesn't support them yet. |
| 21348 | */ |
| 21349 | verbose(env, "callbacks are not allowed in non-JITed programs\n"); |
| 21350 | return -EINVAL; |
| 21351 | } |
| 21352 | |
| 21353 | if (!bpf_pseudo_call(insn)) |
| 21354 | continue; |
| 21355 | depth = get_callee_stack_depth(env, insn, i); |
| 21356 | if (depth < 0) |
| 21357 | return depth; |
| 21358 | bpf_patch_call_args(insn, depth); |
| 21359 | } |
| 21360 | err = 0; |
| 21361 | #endif |
| 21362 | return err; |
| 21363 | } |
| 21364 | |
| 21365 | /* replace a generic kfunc with a specialized version if necessary */ |
| 21366 | static void specialize_kfunc(struct bpf_verifier_env *env, |
| 21367 | u32 func_id, u16 offset, unsigned long *addr) |
| 21368 | { |
| 21369 | struct bpf_prog *prog = env->prog; |
| 21370 | bool seen_direct_write; |
| 21371 | void *xdp_kfunc; |
| 21372 | bool is_rdonly; |
| 21373 | |
| 21374 | if (bpf_dev_bound_kfunc_id(func_id)) { |
| 21375 | xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id); |
| 21376 | if (xdp_kfunc) { |
| 21377 | *addr = (unsigned long)xdp_kfunc; |
| 21378 | return; |
| 21379 | } |
| 21380 | /* fall back to the default kfunc when not supported by the netdev */ |
| 21381 | } |
| 21382 | |
| 21383 | if (offset) |
| 21384 | return; |
| 21385 | |
| 21386 | if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) { |
| 21387 | seen_direct_write = env->seen_direct_write; |
| 21388 | is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE); |
| 21389 | |
| 21390 | if (is_rdonly) |
| 21391 | *addr = (unsigned long)bpf_dynptr_from_skb_rdonly; |
| 21392 | |
| 21393 | /* restore env->seen_direct_write to its original value, since |
| 21394 | * may_access_direct_pkt_data mutates it |
| 21395 | */ |
| 21396 | env->seen_direct_write = seen_direct_write; |
| 21397 | } |
| 21398 | |
| 21399 | if (func_id == special_kfunc_list[KF_bpf_set_dentry_xattr] && |
| 21400 | bpf_lsm_has_d_inode_locked(prog)) |
| 21401 | *addr = (unsigned long)bpf_set_dentry_xattr_locked; |
| 21402 | |
| 21403 | if (func_id == special_kfunc_list[KF_bpf_remove_dentry_xattr] && |
| 21404 | bpf_lsm_has_d_inode_locked(prog)) |
| 21405 | *addr = (unsigned long)bpf_remove_dentry_xattr_locked; |
| 21406 | } |
| 21407 | |
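|  | /* Materialize the hidden args for the list/rbtree insert kfuncs: load the
|  |  * btf_struct_meta pointer into struct_meta_reg and the node field offset
|  |  * into node_offset_reg right before the original call insn.
|  |  */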
| 21408 | static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux, |
| 21409 | u16 struct_meta_reg, |
| 21410 | u16 node_offset_reg, |
| 21411 | struct bpf_insn *insn, |
| 21412 | struct bpf_insn *insn_buf, |
| 21413 | int *cnt) |
| 21414 | { |
| 21415 | struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta; |
| 21416 | struct bpf_insn addr[2] = { BPF_LD_IMM64(struct_meta_reg, (long)kptr_struct_meta) }; |
| 21417 | |
| 21418 | insn_buf[0] = addr[0]; |
| 21419 | insn_buf[1] = addr[1]; |
| 21420 | insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off); |
| 21421 | insn_buf[3] = *insn; |
| 21422 | *cnt = 4; |
| 21423 | } |
| 21424 | |
| 21425 | static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, |
| 21426 | struct bpf_insn *insn_buf, int insn_idx, int *cnt) |
| 21427 | { |
| 21428 | const struct bpf_kfunc_desc *desc; |
| 21429 | |
| 21430 | if (!insn->imm) { |
| 21431 | verbose(env, "invalid kernel function call not eliminated in verifier pass\n"); |
| 21432 | return -EINVAL; |
| 21433 | } |
| 21434 | |
| 21435 | *cnt = 0; |
| 21436 | |
| 21437 | /* insn->imm has the btf func_id. Replace it with an offset relative to |
| 21438 | * __bpf_call_base, unless the JIT needs to call functions that are |
| 21439 | * further than 32 bits away (bpf_jit_supports_far_kfunc_call()). |
| 21440 | */ |
| 21441 | desc = find_kfunc_desc(env->prog, insn->imm, insn->off); |
| 21442 | if (!desc) { |
| 21443 | verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n", |
| 21444 | insn->imm); |
| 21445 | return -EFAULT; |
| 21446 | } |
| 21447 | |
| 21448 | if (!bpf_jit_supports_far_kfunc_call()) |
| 21449 | insn->imm = BPF_CALL_IMM(desc->addr); |
| 21450 | if (insn->off) |
| 21451 | return 0; |
| 21452 | if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || |
| 21453 | desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { |
| 21454 | struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; |
| 21455 | struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; |
| 21456 | u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size; |
| 21457 | |
| 21458 | if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) { |
| 21459 | verbose(env, "verifier internal error: NULL kptr_struct_meta expected at insn_idx %d\n", |
| 21460 | insn_idx); |
| 21461 | return -EFAULT; |
| 21462 | } |
| 21463 | |
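|  | /* Rewrite the call's arguments: R1 gets the object size computed by
|  |  * the verifier and R2 the btf_struct_meta pointer (expected to be
|  |  * NULL for the percpu variant).
|  |  */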
| 21464 | insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size); |
| 21465 | insn_buf[1] = addr[0]; |
| 21466 | insn_buf[2] = addr[1]; |
| 21467 | insn_buf[3] = *insn; |
| 21468 | *cnt = 4; |
| 21469 | } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || |
| 21470 | desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] || |
| 21471 | desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { |
| 21472 | struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; |
| 21473 | struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; |
| 21474 | |
| 21475 | if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) { |
| 21476 | verbose(env, "verifier internal error: NULL kptr_struct_meta expected at insn_idx %d\n", |
| 21477 | insn_idx); |
| 21478 | return -EFAULT; |
| 21479 | } |
| 21480 | |
| 21481 | if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && |
| 21482 | !kptr_struct_meta) { |
| 21483 | verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n", |
| 21484 | insn_idx); |
| 21485 | return -EFAULT; |
| 21486 | } |
| 21487 | |
| 21488 | insn_buf[0] = addr[0]; |
| 21489 | insn_buf[1] = addr[1]; |
| 21490 | insn_buf[2] = *insn; |
| 21491 | *cnt = 3; |
| 21492 | } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || |
| 21493 | desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || |
| 21494 | desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { |
| 21495 | struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; |
| 21496 | int struct_meta_reg = BPF_REG_3; |
| 21497 | int node_offset_reg = BPF_REG_4; |
| 21498 | |
| 21499 | /* rbtree_add has an extra 'less' arg, so the args to fix up live in different regs */
| 21500 | if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { |
| 21501 | struct_meta_reg = BPF_REG_4; |
| 21502 | node_offset_reg = BPF_REG_5; |
| 21503 | } |
| 21504 | |
| 21505 | if (!kptr_struct_meta) { |
| 21506 | verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n", |
| 21507 | insn_idx); |
| 21508 | return -EFAULT; |
| 21509 | } |
| 21510 | |
| 21511 | __fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg, |
| 21512 | node_offset_reg, insn, insn_buf, cnt); |
| 21513 | } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || |
| 21514 | desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { |
| 21515 | insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1); |
| 21516 | *cnt = 1; |
| 21517 | } |
| 21518 | |
| 21519 | if (env->insn_aux_data[insn_idx].arg_prog) { |
| 21520 | u32 regno = env->insn_aux_data[insn_idx].arg_prog; |
| 21521 | struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(regno, (long)env->prog->aux) }; |
| 21522 | int idx = *cnt; |
| 21523 | |
| 21524 | insn_buf[idx++] = ld_addrs[0]; |
| 21525 | insn_buf[idx++] = ld_addrs[1]; |
| 21526 | insn_buf[idx++] = *insn; |
| 21527 | *cnt = idx; |
| 21528 | } |
| 21529 | return 0; |
| 21530 | } |
| 21531 | |
| 21532 | /* The function requires that first instruction in 'patch' is insnsi[prog->len - 1] */ |
| 21533 | static int add_hidden_subprog(struct bpf_verifier_env *env, struct bpf_insn *patch, int len) |
| 21534 | { |
| 21535 | struct bpf_subprog_info *info = env->subprog_info; |
| 21536 | int cnt = env->subprog_cnt; |
| 21537 | struct bpf_prog *prog; |
| 21538 | |
| 21539 | /* We only reserve one slot for hidden subprogs in subprog_info. */ |
| 21540 | if (env->hidden_subprog_cnt) { |
| 21541 | verbose(env, "verifier internal error: only one hidden subprog supported\n"); |
| 21542 | return -EFAULT; |
| 21543 | } |
| 21544 | /* We're not patching any existing instruction, just appending the new |
| 21545 | * ones for the hidden subprog. Hence all of the adjustment operations |
| 21546 | * in bpf_patch_insn_data are no-ops. |
| 21547 | */ |
| 21548 | prog = bpf_patch_insn_data(env, env->prog->len - 1, patch, len); |
| 21549 | if (!prog) |
| 21550 | return -ENOMEM; |
| 21551 | env->prog = prog; |
| 21552 | info[cnt + 1].start = info[cnt].start; |
| 21553 | info[cnt].start = prog->len - len + 1; |
| 21554 | env->subprog_cnt++; |
| 21555 | env->hidden_subprog_cnt++; |
| 21556 | return 0; |
| 21557 | } |
| 21558 | |
| 21559 | /* Do various post-verification rewrites in a single program pass. |
| 21560 | * These rewrites simplify JIT and interpreter implementations. |
| 21561 | */ |
| 21562 | static int do_misc_fixups(struct bpf_verifier_env *env) |
| 21563 | { |
| 21564 | struct bpf_prog *prog = env->prog; |
| 21565 | enum bpf_attach_type eatype = prog->expected_attach_type; |
| 21566 | enum bpf_prog_type prog_type = resolve_prog_type(prog); |
| 21567 | struct bpf_insn *insn = prog->insnsi; |
| 21568 | const struct bpf_func_proto *fn; |
| 21569 | const int insn_cnt = prog->len; |
| 21570 | const struct bpf_map_ops *ops; |
| 21571 | struct bpf_insn_aux_data *aux; |
| 21572 | struct bpf_insn *insn_buf = env->insn_buf; |
| 21573 | struct bpf_prog *new_prog; |
| 21574 | struct bpf_map *map_ptr; |
| 21575 | int i, ret, cnt, delta = 0, cur_subprog = 0; |
| 21576 | struct bpf_subprog_info *subprogs = env->subprog_info; |
| 21577 | u16 stack_depth = subprogs[cur_subprog].stack_depth; |
| 21578 | u16 stack_depth_extra = 0; |
| 21579 | |
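|  | /* Append the default (hidden) exception callback: it simply returns R1,
|  |  * the cookie passed to bpf_throw(). The first patch insn duplicates the
|  |  * prog's last insn, as add_hidden_subprog() requires.
|  |  */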
| 21580 | if (env->seen_exception && !env->exception_callback_subprog) { |
| 21581 | struct bpf_insn patch[] = { |
| 21582 | env->prog->insnsi[insn_cnt - 1], |
| 21583 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), |
| 21584 | BPF_EXIT_INSN(), |
| 21585 | }; |
| 21586 | |
| 21587 | ret = add_hidden_subprog(env, patch, ARRAY_SIZE(patch)); |
| 21588 | if (ret < 0) |
| 21589 | return ret; |
| 21590 | prog = env->prog; |
| 21591 | insn = prog->insnsi; |
| 21592 | |
| 21593 | env->exception_callback_subprog = env->subprog_cnt - 1; |
| 21594 | /* Don't update insn_cnt, as add_hidden_subprog always appends insns */ |
| 21595 | mark_subprog_exc_cb(env, env->exception_callback_subprog); |
| 21596 | } |
| 21597 | |
| 21598 | for (i = 0; i < insn_cnt;) { |
| 21599 | if (insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->imm) { |
| 21600 | if ((insn->off == BPF_ADDR_SPACE_CAST && insn->imm == 1) || |
| 21601 | (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) { |
| 21602 | /* convert to a 32-bit mov that clears the upper 32 bits */
| 21603 | insn->code = BPF_ALU | BPF_MOV | BPF_X; |
| 21604 | /* clear off and imm, so it's a normal 'wX = wY' from JIT pov */ |
| 21605 | insn->off = 0; |
| 21606 | insn->imm = 0; |
| 21607 | } /* cast from as(0) to as(1) should be handled by JIT */ |
| 21608 | goto next_insn; |
| 21609 | } |
| 21610 | |
| 21611 | if (env->insn_aux_data[i + delta].needs_zext) |
| 21612 | /* Convert BPF_CLASS(insn->code) == BPF_ALU64 to 32-bit ALU */ |
| 21613 | insn->code = BPF_ALU | BPF_OP(insn->code) | BPF_SRC(insn->code); |
| 21614 | |
| 21615 | /* Make sdiv/smod divide-by-minus-one exceptions impossible. */ |
| 21616 | if ((insn->code == (BPF_ALU64 | BPF_MOD | BPF_K) || |
| 21617 | insn->code == (BPF_ALU64 | BPF_DIV | BPF_K) || |
| 21618 | insn->code == (BPF_ALU | BPF_MOD | BPF_K) || |
| 21619 | insn->code == (BPF_ALU | BPF_DIV | BPF_K)) && |
| 21620 | insn->off == 1 && insn->imm == -1) { |
| 21621 | bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; |
| 21622 | bool isdiv = BPF_OP(insn->code) == BPF_DIV; |
| 21623 | struct bpf_insn *patchlet; |
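|  | /* x sdiv -1 is just negation (BPF_NEG also yields the expected
|  |  * LLONG_MIN / INT_MIN result), and x smod -1 is 0, so the constant
|  |  * case needs no runtime check at all.
|  |  */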
| 21624 | struct bpf_insn chk_and_sdiv[] = { |
| 21625 | BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | |
| 21626 | BPF_NEG | BPF_K, insn->dst_reg, |
| 21627 | 0, 0, 0), |
| 21628 | }; |
| 21629 | struct bpf_insn chk_and_smod[] = { |
| 21630 | BPF_MOV32_IMM(insn->dst_reg, 0), |
| 21631 | }; |
| 21632 | |
| 21633 | patchlet = isdiv ? chk_and_sdiv : chk_and_smod; |
| 21634 | cnt = isdiv ? ARRAY_SIZE(chk_and_sdiv) : ARRAY_SIZE(chk_and_smod); |
| 21635 | |
| 21636 | new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); |
| 21637 | if (!new_prog) |
| 21638 | return -ENOMEM; |
| 21639 | |
| 21640 | delta += cnt - 1; |
| 21641 | env->prog = prog = new_prog; |
| 21642 | insn = new_prog->insnsi + i + delta; |
| 21643 | goto next_insn; |
| 21644 | } |
| 21645 | |
| 21646 | /* Make divide-by-zero and divide-by-minus-one exceptions impossible. */ |
| 21647 | if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || |
| 21648 | insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || |
| 21649 | insn->code == (BPF_ALU | BPF_MOD | BPF_X) || |
| 21650 | insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { |
| 21651 | bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; |
| 21652 | bool isdiv = BPF_OP(insn->code) == BPF_DIV; |
| 21653 | bool is_sdiv = isdiv && insn->off == 1; |
| 21654 | bool is_smod = !isdiv && insn->off == 1; |
| 21655 | struct bpf_insn *patchlet; |
| 21656 | struct bpf_insn chk_and_div[] = { |
| 21657 | /* [R,W]x div 0 -> 0 */ |
| 21658 | BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | |
| 21659 | BPF_JNE | BPF_K, insn->src_reg, |
| 21660 | 0, 2, 0), |
| 21661 | BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), |
| 21662 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
| 21663 | *insn, |
| 21664 | }; |
| 21665 | struct bpf_insn chk_and_mod[] = { |
| 21666 | /* [R,W]x mod 0 -> [R,W]x */ |
| 21667 | BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | |
| 21668 | BPF_JEQ | BPF_K, insn->src_reg, |
| 21669 | 0, 1 + (is64 ? 0 : 1), 0), |
| 21670 | *insn, |
| 21671 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
| 21672 | BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), |
| 21673 | }; |
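|  | /* For the runtime-checked sdiv/smod below, AX = src + 1 folds both
|  |  * exceptional divisors into one compare: AX > 1 (unsigned) means src is
|  |  * neither 0 nor -1 and the original insn runs; AX == 0 means src == -1;
|  |  * AX == 1 means src == 0.
|  |  */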
| 21674 | struct bpf_insn chk_and_sdiv[] = { |
| 21675 | /* [R,W]x sdiv 0 -> 0 |
| 21676 | * LLONG_MIN sdiv -1 -> LLONG_MIN |
| 21677 | * INT_MIN sdiv -1 -> INT_MIN |
| 21678 | */ |
| 21679 | BPF_MOV64_REG(BPF_REG_AX, insn->src_reg), |
| 21680 | BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | |
| 21681 | BPF_ADD | BPF_K, BPF_REG_AX, |
| 21682 | 0, 0, 1), |
| 21683 | BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | |
| 21684 | BPF_JGT | BPF_K, BPF_REG_AX, |
| 21685 | 0, 4, 1), |
| 21686 | BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | |
| 21687 | BPF_JEQ | BPF_K, BPF_REG_AX, |
| 21688 | 0, 1, 0), |
| 21689 | BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | |
| 21690 | BPF_MOV | BPF_K, insn->dst_reg, |
| 21691 | 0, 0, 0), |
| 21692 | /* BPF_NEG(LLONG_MIN) == -LLONG_MIN == LLONG_MIN */ |
| 21693 | BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | |
| 21694 | BPF_NEG | BPF_K, insn->dst_reg, |
| 21695 | 0, 0, 0), |
| 21696 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
| 21697 | *insn, |
| 21698 | }; |
| 21699 | struct bpf_insn chk_and_smod[] = { |
| 21700 | /* [R,W]x mod 0 -> [R,W]x */ |
| 21701 | /* [R,W]x mod -1 -> 0 */ |
| 21702 | BPF_MOV64_REG(BPF_REG_AX, insn->src_reg), |
| 21703 | BPF_RAW_INSN((is64 ? BPF_ALU64 : BPF_ALU) | |
| 21704 | BPF_ADD | BPF_K, BPF_REG_AX, |
| 21705 | 0, 0, 1), |
| 21706 | BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | |
| 21707 | BPF_JGT | BPF_K, BPF_REG_AX, |
| 21708 | 0, 3, 1), |
| 21709 | BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | |
| 21710 | BPF_JEQ | BPF_K, BPF_REG_AX, |
| 21711 | 0, 3 + (is64 ? 0 : 1), 1), |
| 21712 | BPF_MOV32_IMM(insn->dst_reg, 0), |
| 21713 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
| 21714 | *insn, |
| 21715 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), |
| 21716 | BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), |
| 21717 | }; |
| 21718 | |
| 21719 | if (is_sdiv) { |
| 21720 | patchlet = chk_and_sdiv; |
| 21721 | cnt = ARRAY_SIZE(chk_and_sdiv); |
| 21722 | } else if (is_smod) { |
| 21723 | patchlet = chk_and_smod; |
| 21724 | cnt = ARRAY_SIZE(chk_and_smod) - (is64 ? 2 : 0); |
| 21725 | } else { |
| 21726 | patchlet = isdiv ? chk_and_div : chk_and_mod; |
| 21727 | cnt = isdiv ? ARRAY_SIZE(chk_and_div) : |
| 21728 | ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); |
| 21729 | } |
| 21730 | |
| 21731 | new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); |
| 21732 | if (!new_prog) |
| 21733 | return -ENOMEM; |
| 21734 | |
| 21735 | delta += cnt - 1; |
| 21736 | env->prog = prog = new_prog; |
| 21737 | insn = new_prog->insnsi + i + delta; |
| 21738 | goto next_insn; |
| 21739 | } |
| 21740 | |
| 21741 | /* Make it impossible to dereference a userspace address */
| 21742 | if (BPF_CLASS(insn->code) == BPF_LDX && |
| 21743 | (BPF_MODE(insn->code) == BPF_PROBE_MEM || |
| 21744 | BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) { |
| 21745 | struct bpf_insn *patch = &insn_buf[0]; |
| 21746 | u64 uaddress_limit = bpf_arch_uaddress_limit(); |
| 21747 | |
| 21748 | if (!uaddress_limit) |
| 21749 | goto next_insn; |
| 21750 | |
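|  | /* Compare the upper 32 bits of the (possibly offset) address against
|  |  * bpf_arch_uaddress_limit() >> 32: userspace addresses skip the probe
|  |  * load and zero dst_reg, kernel addresses fall through to the original
|  |  * insn.
|  |  */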
| 21751 | *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg); |
| 21752 | if (insn->off) |
| 21753 | *patch++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, insn->off); |
| 21754 | *patch++ = BPF_ALU64_IMM(BPF_RSH, BPF_REG_AX, 32); |
| 21755 | *patch++ = BPF_JMP_IMM(BPF_JLE, BPF_REG_AX, uaddress_limit >> 32, 2); |
| 21756 | *patch++ = *insn; |
| 21757 | *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); |
| 21758 | *patch++ = BPF_MOV64_IMM(insn->dst_reg, 0); |
| 21759 | |
| 21760 | cnt = patch - insn_buf; |
| 21761 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 21762 | if (!new_prog) |
| 21763 | return -ENOMEM; |
| 21764 | |
| 21765 | delta += cnt - 1; |
| 21766 | env->prog = prog = new_prog; |
| 21767 | insn = new_prog->insnsi + i + delta; |
| 21768 | goto next_insn; |
| 21769 | } |
| 21770 | |
| 21771 | /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */ |
| 21772 | if (BPF_CLASS(insn->code) == BPF_LD && |
| 21773 | (BPF_MODE(insn->code) == BPF_ABS || |
| 21774 | BPF_MODE(insn->code) == BPF_IND)) { |
| 21775 | cnt = env->ops->gen_ld_abs(insn, insn_buf); |
| 21776 | if (cnt == 0 || cnt >= INSN_BUF_SIZE) { |
| 21777 | verbose(env, "bpf verifier is misconfigured\n"); |
| 21778 | return -EINVAL; |
| 21779 | } |
| 21780 | |
| 21781 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 21782 | if (!new_prog) |
| 21783 | return -ENOMEM; |
| 21784 | |
| 21785 | delta += cnt - 1; |
| 21786 | env->prog = prog = new_prog; |
| 21787 | insn = new_prog->insnsi + i + delta; |
| 21788 | goto next_insn; |
| 21789 | } |
| 21790 | |
| 21791 | /* Rewrite pointer arithmetic to mitigate speculation attacks. */ |
| 21792 | if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || |
| 21793 | insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { |
| 21794 | const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; |
| 21795 | const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; |
| 21796 | struct bpf_insn *patch = &insn_buf[0]; |
| 21797 | bool issrc, isneg, isimm; |
| 21798 | u32 off_reg; |
| 21799 | |
| 21800 | aux = &env->insn_aux_data[i + delta]; |
| 21801 | if (!aux->alu_state || |
| 21802 | aux->alu_state == BPF_ALU_NON_POINTER) |
| 21803 | goto next_insn; |
| 21804 | |
| 21805 | isneg = aux->alu_state & BPF_ALU_NEG_VALUE; |
| 21806 | issrc = (aux->alu_state & BPF_ALU_SANITIZE) == |
| 21807 | BPF_ALU_SANITIZE_SRC; |
| 21808 | isimm = aux->alu_state & BPF_ALU_IMMEDIATE; |
| 21809 | |
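|  | /* For the register case, the patch below builds a mask in AX that is all
|  |  * ones when off_reg lies within [0, aux->alu_limit] and zero otherwise;
|  |  * ANDing it into off_reg keeps in-range offsets and forces out-of-range
|  |  * ones to 0, even on speculative paths.
|  |  */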
| 21810 | off_reg = issrc ? insn->src_reg : insn->dst_reg; |
| 21811 | if (isimm) { |
| 21812 | *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); |
| 21813 | } else { |
| 21814 | if (isneg) |
| 21815 | *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); |
| 21816 | *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); |
| 21817 | *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); |
| 21818 | *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); |
| 21819 | *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); |
| 21820 | *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); |
| 21821 | *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); |
| 21822 | } |
| 21823 | if (!issrc) |
| 21824 | *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); |
| 21825 | insn->src_reg = BPF_REG_AX; |
| 21826 | if (isneg) |
| 21827 | insn->code = insn->code == code_add ? |
| 21828 | code_sub : code_add; |
| 21829 | *patch++ = *insn; |
| 21830 | if (issrc && isneg && !isimm) |
| 21831 | *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); |
| 21832 | cnt = patch - insn_buf; |
| 21833 | |
| 21834 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 21835 | if (!new_prog) |
| 21836 | return -ENOMEM; |
| 21837 | |
| 21838 | delta += cnt - 1; |
| 21839 | env->prog = prog = new_prog; |
| 21840 | insn = new_prog->insnsi + i + delta; |
| 21841 | goto next_insn; |
| 21842 | } |
| 21843 | |
| 21844 | if (is_may_goto_insn(insn) && bpf_jit_supports_timed_may_goto()) { |
| 21845 | int stack_off_cnt = -stack_depth - 16; |
| 21846 | |
| 21847 | /* |
| 21848 | * Two 8-byte slots: depth-16 stores the count, and
| 21849 | * depth-8 stores the start timestamp of the loop.
| 21850 | *
| 21851 | * The starting value of the count is BPF_MAX_TIMED_LOOPS
| 21852 | * (0xffff). Every iteration loads it and subtracts 1,
| 21853 | * until the value becomes 0 in AX (thus, 1 in the stack),
| 21854 | * after which we call arch_bpf_timed_may_goto(), which
| 21855 | * either sets AX back to 0xffff to keep looping, or to 0
| 21856 | * upon timeout. AX is then stored back into the stack. On
| 21857 | * the next iteration we either see 0 and break out, or
| 21858 | * keep iterating until the value reaches 0 again after
| 21859 | * subtraction; rinse and repeat.
| 21860 | */ |
| 21861 | stack_depth_extra = 16; |
| 21862 | insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_AX, BPF_REG_10, stack_off_cnt); |
| 21863 | if (insn->off >= 0) |
| 21864 | insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 5); |
| 21865 | else |
| 21866 | insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off - 1); |
| 21867 | insn_buf[2] = BPF_ALU64_IMM(BPF_SUB, BPF_REG_AX, 1); |
| 21868 | insn_buf[3] = BPF_JMP_IMM(BPF_JNE, BPF_REG_AX, 0, 2); |
| 21869 | /* |
| 21870 | * AX is used as an argument to pass in stack_off_cnt |
| 21871 | * (to add to r10/fp), and also as the return value of |
| 21872 | * the call to arch_bpf_timed_may_goto. |
| 21873 | */ |
| 21874 | insn_buf[4] = BPF_MOV64_IMM(BPF_REG_AX, stack_off_cnt); |
| 21875 | insn_buf[5] = BPF_EMIT_CALL(arch_bpf_timed_may_goto); |
| 21876 | insn_buf[6] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_AX, stack_off_cnt); |
| 21877 | cnt = 7; |
| 21878 | |
| 21879 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 21880 | if (!new_prog) |
| 21881 | return -ENOMEM; |
| 21882 | |
| 21883 | delta += cnt - 1; |
| 21884 | env->prog = prog = new_prog; |
| 21885 | insn = new_prog->insnsi + i + delta; |
| 21886 | goto next_insn; |
| 21887 | } else if (is_may_goto_insn(insn)) { |
| 21888 | int stack_off = -stack_depth - 8; |
| 21889 | |
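|  | /* A single 8-byte slot just below the current stack depth holds the
|  |  * remaining iteration budget; it is initialized to BPF_MAX_LOOPS in the
|  |  * subprog prologue (added further below), decremented here on every
|  |  * pass, and the may_goto branch is taken once it reaches zero.
|  |  */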
| 21890 | stack_depth_extra = 8; |
| 21891 | insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_AX, BPF_REG_10, stack_off); |
| 21892 | if (insn->off >= 0) |
| 21893 | insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off + 2); |
| 21894 | else |
| 21895 | insn_buf[1] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_AX, 0, insn->off - 1); |
| 21896 | insn_buf[2] = BPF_ALU64_IMM(BPF_SUB, BPF_REG_AX, 1); |
| 21897 | insn_buf[3] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_AX, stack_off); |
| 21898 | cnt = 4; |
| 21899 | |
| 21900 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 21901 | if (!new_prog) |
| 21902 | return -ENOMEM; |
| 21903 | |
| 21904 | delta += cnt - 1; |
| 21905 | env->prog = prog = new_prog; |
| 21906 | insn = new_prog->insnsi + i + delta; |
| 21907 | goto next_insn; |
| 21908 | } |
| 21909 | |
| 21910 | if (insn->code != (BPF_JMP | BPF_CALL)) |
| 21911 | goto next_insn; |
| 21912 | if (insn->src_reg == BPF_PSEUDO_CALL) |
| 21913 | goto next_insn; |
| 21914 | if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { |
| 21915 | ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt); |
| 21916 | if (ret) |
| 21917 | return ret; |
| 21918 | if (cnt == 0) |
| 21919 | goto next_insn; |
| 21920 | |
| 21921 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 21922 | if (!new_prog) |
| 21923 | return -ENOMEM; |
| 21924 | |
| 21925 | delta += cnt - 1; |
| 21926 | env->prog = prog = new_prog; |
| 21927 | insn = new_prog->insnsi + i + delta; |
| 21928 | goto next_insn; |
| 21929 | } |
| 21930 | |
| 21931 | /* Skip inlining the helper call if the JIT does it. */ |
| 21932 | if (bpf_jit_inlines_helper_call(insn->imm)) |
| 21933 | goto next_insn; |
| 21934 | |
| 21935 | if (insn->imm == BPF_FUNC_get_route_realm) |
| 21936 | prog->dst_needed = 1; |
| 21937 | if (insn->imm == BPF_FUNC_get_prandom_u32) |
| 21938 | bpf_user_rnd_init_once(); |
| 21939 | if (insn->imm == BPF_FUNC_override_return) |
| 21940 | prog->kprobe_override = 1; |
| 21941 | if (insn->imm == BPF_FUNC_tail_call) { |
| 21942 | /* If we tail call into other programs, we |
| 21943 | * cannot make any assumptions since they can |
| 21944 | * be replaced dynamically during runtime in |
| 21945 | * the program array. |
| 21946 | */ |
| 21947 | prog->cb_access = 1; |
| 21948 | if (!allow_tail_call_in_subprogs(env)) |
| 21949 | prog->aux->stack_depth = MAX_BPF_STACK; |
| 21950 | prog->aux->max_pkt_offset = MAX_PACKET_OFF; |
| 21951 | |
| 21952 | /* mark bpf_tail_call with a different opcode to avoid a
| 21953 | * conditional branch in the interpreter for every normal
| 21954 | * call and to prevent accidental JITing by a JIT compiler
| 21955 | * that doesn't support bpf_tail_call yet
| 21956 | */ |
| 21957 | insn->imm = 0; |
| 21958 | insn->code = BPF_JMP | BPF_TAIL_CALL; |
| 21959 | |
| 21960 | aux = &env->insn_aux_data[i + delta]; |
| 21961 | if (env->bpf_capable && !prog->blinding_requested && |
| 21962 | prog->jit_requested && |
| 21963 | !bpf_map_key_poisoned(aux) && |
| 21964 | !bpf_map_ptr_poisoned(aux) && |
| 21965 | !bpf_map_ptr_unpriv(aux)) { |
| 21966 | struct bpf_jit_poke_descriptor desc = { |
| 21967 | .reason = BPF_POKE_REASON_TAIL_CALL, |
| 21968 | .tail_call.map = aux->map_ptr_state.map_ptr, |
| 21969 | .tail_call.key = bpf_map_key_immediate(aux), |
| 21970 | .insn_idx = i + delta, |
| 21971 | }; |
| 21972 | |
| 21973 | ret = bpf_jit_add_poke_descriptor(prog, &desc); |
| 21974 | if (ret < 0) { |
| 21975 | verbose(env, "adding tail call poke descriptor failed\n"); |
| 21976 | return ret; |
| 21977 | } |
| 21978 | |
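|  | /* The poke descriptor index is stored off by one, so that
|  |  * imm == 0 can still mean "no poke descriptor", i.e. an
|  |  * indirect tail call, to the JIT.
|  |  */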
| 21979 | insn->imm = ret + 1; |
| 21980 | goto next_insn; |
| 21981 | } |
| 21982 | |
| 21983 | if (!bpf_map_ptr_unpriv(aux)) |
| 21984 | goto next_insn; |
| 21985 | |
| 21986 | /* Instead of changing every JIT dealing with tail_call,
| 21987 | * emit two extra insns:
| 21988 | * if (index >= max_entries) goto out;
| 21989 | * index &= array->index_mask;
| 21990 | * to avoid out-of-bounds CPU speculation
| 21991 | */ |
| 21992 | if (bpf_map_ptr_poisoned(aux)) { |
| 21993 | verbose(env, "tail_call abusing map_ptr\n"); |
| 21994 | return -EINVAL; |
| 21995 | } |
| 21996 | |
| 21997 | map_ptr = aux->map_ptr_state.map_ptr; |
| 21998 | insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, |
| 21999 | map_ptr->max_entries, 2); |
| 22000 | insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, |
| 22001 | container_of(map_ptr, |
| 22002 | struct bpf_array, |
| 22003 | map)->index_mask); |
| 22004 | insn_buf[2] = *insn; |
| 22005 | cnt = 3; |
| 22006 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 22007 | if (!new_prog) |
| 22008 | return -ENOMEM; |
| 22009 | |
| 22010 | delta += cnt - 1; |
| 22011 | env->prog = prog = new_prog; |
| 22012 | insn = new_prog->insnsi + i + delta; |
| 22013 | goto next_insn; |
| 22014 | } |
| 22015 | |
| 22016 | if (insn->imm == BPF_FUNC_timer_set_callback) { |
| 22017 | /* The verifier will process callback_fn as many times as necessary |
| 22018 | * with different maps and the register states prepared by |
| 22019 | * set_timer_callback_state will be accurate. |
| 22020 | * |
| 22021 | * The following use case is valid: |
| 22022 | * map1 is shared by prog1, prog2, prog3. |
| 22023 | * prog1 calls bpf_timer_init for some map1 elements |
| 22024 | * prog2 calls bpf_timer_set_callback for some map1 elements. |
| 22025 | * Those that were not bpf_timer_init-ed will return -EINVAL. |
| 22026 | * prog3 calls bpf_timer_start for some map1 elements. |
| 22027 | * Those that were not both bpf_timer_init-ed and |
| 22028 | * bpf_timer_set_callback-ed will return -EINVAL. |
| 22029 | */ |
| 22030 | struct bpf_insn ld_addrs[2] = { |
| 22031 | BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), |
| 22032 | }; |
| 22033 | |
| 22034 | insn_buf[0] = ld_addrs[0]; |
| 22035 | insn_buf[1] = ld_addrs[1]; |
| 22036 | insn_buf[2] = *insn; |
| 22037 | cnt = 3; |
| 22038 | |
| 22039 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 22040 | if (!new_prog) |
| 22041 | return -ENOMEM; |
| 22042 | |
| 22043 | delta += cnt - 1; |
| 22044 | env->prog = prog = new_prog; |
| 22045 | insn = new_prog->insnsi + i + delta; |
| 22046 | goto patch_call_imm; |
| 22047 | } |
| 22048 | |
| 22049 | if (is_storage_get_function(insn->imm)) { |
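|  | /* The storage get helpers take gfp flags in R5; use GFP_ATOMIC
|  |  * unless this callsite is provably sleepable.
|  |  */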
| 22050 | if (!in_sleepable(env) || |
| 22051 | env->insn_aux_data[i + delta].storage_get_func_atomic) |
| 22052 | insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC); |
| 22053 | else |
| 22054 | insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL); |
| 22055 | insn_buf[1] = *insn; |
| 22056 | cnt = 2; |
| 22057 | |
| 22058 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 22059 | if (!new_prog) |
| 22060 | return -ENOMEM; |
| 22061 | |
| 22062 | delta += cnt - 1; |
| 22063 | env->prog = prog = new_prog; |
| 22064 | insn = new_prog->insnsi + i + delta; |
| 22065 | goto patch_call_imm; |
| 22066 | } |
| 22067 | |
| 22068 | /* bpf_per_cpu_ptr() and bpf_this_cpu_ptr() */ |
| 22069 | if (env->insn_aux_data[i + delta].call_with_percpu_alloc_ptr) { |
| 22070 | /* patch with 'r1 = *(u64 *)(r1 + 0)' since for percpu data, |
| 22071 | * bpf_mem_alloc() returns a ptr to the percpu data ptr. |
| 22072 | */ |
| 22073 | insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0); |
| 22074 | insn_buf[1] = *insn; |
| 22075 | cnt = 2; |
| 22076 | |
| 22077 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 22078 | if (!new_prog) |
| 22079 | return -ENOMEM; |
| 22080 | |
| 22081 | delta += cnt - 1; |
| 22082 | env->prog = prog = new_prog; |
| 22083 | insn = new_prog->insnsi + i + delta; |
| 22084 | goto patch_call_imm; |
| 22085 | } |
| 22086 | |
| 22087 | /* The BPF_EMIT_CALL() assumptions in map_gen_lookup and the
| 22088 | * other inlining handlers below currently hold for 64-bit
| 22089 | * only.
| 22090 | */ |
| 22091 | if (prog->jit_requested && BITS_PER_LONG == 64 && |
| 22092 | (insn->imm == BPF_FUNC_map_lookup_elem || |
| 22093 | insn->imm == BPF_FUNC_map_update_elem || |
| 22094 | insn->imm == BPF_FUNC_map_delete_elem || |
| 22095 | insn->imm == BPF_FUNC_map_push_elem || |
| 22096 | insn->imm == BPF_FUNC_map_pop_elem || |
| 22097 | insn->imm == BPF_FUNC_map_peek_elem || |
| 22098 | insn->imm == BPF_FUNC_redirect_map || |
| 22099 | insn->imm == BPF_FUNC_for_each_map_elem || |
| 22100 | insn->imm == BPF_FUNC_map_lookup_percpu_elem)) { |
| 22101 | aux = &env->insn_aux_data[i + delta]; |
| 22102 | if (bpf_map_ptr_poisoned(aux)) |
| 22103 | goto patch_call_imm; |
| 22104 | |
| 22105 | map_ptr = aux->map_ptr_state.map_ptr; |
| 22106 | ops = map_ptr->ops; |
| 22107 | if (insn->imm == BPF_FUNC_map_lookup_elem && |
| 22108 | ops->map_gen_lookup) { |
| 22109 | cnt = ops->map_gen_lookup(map_ptr, insn_buf); |
| 22110 | if (cnt == -EOPNOTSUPP) |
| 22111 | goto patch_map_ops_generic; |
| 22112 | if (cnt <= 0 || cnt >= INSN_BUF_SIZE) { |
| 22113 | verbose(env, "bpf verifier is misconfigured\n"); |
| 22114 | return -EINVAL; |
| 22115 | } |
| 22116 | |
| 22117 | new_prog = bpf_patch_insn_data(env, i + delta, |
| 22118 | insn_buf, cnt); |
| 22119 | if (!new_prog) |
| 22120 | return -ENOMEM; |
| 22121 | |
| 22122 | delta += cnt - 1; |
| 22123 | env->prog = prog = new_prog; |
| 22124 | insn = new_prog->insnsi + i + delta; |
| 22125 | goto next_insn; |
| 22126 | } |
| 22127 | |
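|  | /* The switch below emits direct calls to the map ops instead of the
|  |  * generic helpers; assert at build time that each op still has the
|  |  * signature those call sites assume.
|  |  */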
| 22128 | BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, |
| 22129 | (void *(*)(struct bpf_map *map, void *key))NULL)); |
| 22130 | BUILD_BUG_ON(!__same_type(ops->map_delete_elem, |
| 22131 | (long (*)(struct bpf_map *map, void *key))NULL)); |
| 22132 | BUILD_BUG_ON(!__same_type(ops->map_update_elem, |
| 22133 | (long (*)(struct bpf_map *map, void *key, void *value, |
| 22134 | u64 flags))NULL)); |
| 22135 | BUILD_BUG_ON(!__same_type(ops->map_push_elem, |
| 22136 | (long (*)(struct bpf_map *map, void *value, |
| 22137 | u64 flags))NULL)); |
| 22138 | BUILD_BUG_ON(!__same_type(ops->map_pop_elem, |
| 22139 | (long (*)(struct bpf_map *map, void *value))NULL)); |
| 22140 | BUILD_BUG_ON(!__same_type(ops->map_peek_elem, |
| 22141 | (long (*)(struct bpf_map *map, void *value))NULL)); |
| 22142 | BUILD_BUG_ON(!__same_type(ops->map_redirect, |
| 22143 | (long (*)(struct bpf_map *map, u64 index, u64 flags))NULL)); |
| 22144 | BUILD_BUG_ON(!__same_type(ops->map_for_each_callback, |
| 22145 | (long (*)(struct bpf_map *map, |
| 22146 | bpf_callback_t callback_fn, |
| 22147 | void *callback_ctx, |
| 22148 | u64 flags))NULL)); |
| 22149 | BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem, |
| 22150 | (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL)); |
| 22151 | |
| 22152 | patch_map_ops_generic: |
| 22153 | switch (insn->imm) { |
| 22154 | case BPF_FUNC_map_lookup_elem: |
| 22155 | insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); |
| 22156 | goto next_insn; |
| 22157 | case BPF_FUNC_map_update_elem: |
| 22158 | insn->imm = BPF_CALL_IMM(ops->map_update_elem); |
| 22159 | goto next_insn; |
| 22160 | case BPF_FUNC_map_delete_elem: |
| 22161 | insn->imm = BPF_CALL_IMM(ops->map_delete_elem); |
| 22162 | goto next_insn; |
| 22163 | case BPF_FUNC_map_push_elem: |
| 22164 | insn->imm = BPF_CALL_IMM(ops->map_push_elem); |
| 22165 | goto next_insn; |
| 22166 | case BPF_FUNC_map_pop_elem: |
| 22167 | insn->imm = BPF_CALL_IMM(ops->map_pop_elem); |
| 22168 | goto next_insn; |
| 22169 | case BPF_FUNC_map_peek_elem: |
| 22170 | insn->imm = BPF_CALL_IMM(ops->map_peek_elem); |
| 22171 | goto next_insn; |
| 22172 | case BPF_FUNC_redirect_map: |
| 22173 | insn->imm = BPF_CALL_IMM(ops->map_redirect); |
| 22174 | goto next_insn; |
| 22175 | case BPF_FUNC_for_each_map_elem: |
| 22176 | insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); |
| 22177 | goto next_insn; |
| 22178 | case BPF_FUNC_map_lookup_percpu_elem: |
| 22179 | insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem); |
| 22180 | goto next_insn; |
| 22181 | } |
| 22182 | |
| 22183 | goto patch_call_imm; |
| 22184 | } |
| 22185 | |
| 22186 | /* Implement bpf_jiffies64 inline. */ |
| 22187 | if (prog->jit_requested && BITS_PER_LONG == 64 && |
| 22188 | insn->imm == BPF_FUNC_jiffies64) { |
| 22189 | struct bpf_insn ld_jiffies_addr[2] = { |
| 22190 | BPF_LD_IMM64(BPF_REG_0, |
| 22191 | (unsigned long)&jiffies), |
| 22192 | }; |
| 22193 | |
| 22194 | insn_buf[0] = ld_jiffies_addr[0]; |
| 22195 | insn_buf[1] = ld_jiffies_addr[1]; |
| 22196 | insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, |
| 22197 | BPF_REG_0, 0); |
| 22198 | cnt = 3; |
| 22199 | |
| 22200 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, |
| 22201 | cnt); |
| 22202 | if (!new_prog) |
| 22203 | return -ENOMEM; |
| 22204 | |
| 22205 | delta += cnt - 1; |
| 22206 | env->prog = prog = new_prog; |
| 22207 | insn = new_prog->insnsi + i + delta; |
| 22208 | goto next_insn; |
| 22209 | } |
| 22210 | |
| 22211 | #if defined(CONFIG_X86_64) && !defined(CONFIG_UML) |
| 22212 | /* Implement bpf_get_smp_processor_id() inline. */ |
| 22213 | if (insn->imm == BPF_FUNC_get_smp_processor_id && |
| 22214 | verifier_inlines_helper_call(env, insn->imm)) { |
| 22215 | /* BPF_FUNC_get_smp_processor_id inlining is an
| 22216 | * optimization, so if cpu_number is ever
| 22217 | * changed in some incompatible and hard-to-support
| 22218 | * way, it's fine to back out this inlining logic.
| 22219 | */
| 22220 | #ifdef CONFIG_SMP |
| 22221 | insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, (u32)(unsigned long)&cpu_number); |
| 22222 | insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0); |
| 22223 | insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0); |
| 22224 | cnt = 3; |
| 22225 | #else |
| 22226 | insn_buf[0] = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0); |
| 22227 | cnt = 1; |
| 22228 | #endif |
| 22229 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 22230 | if (!new_prog) |
| 22231 | return -ENOMEM; |
| 22232 | |
| 22233 | delta += cnt - 1; |
| 22234 | env->prog = prog = new_prog; |
| 22235 | insn = new_prog->insnsi + i + delta; |
| 22236 | goto next_insn; |
| 22237 | } |
| 22238 | #endif |
| 22239 | /* Implement bpf_get_func_arg inline. */ |
| 22240 | if (prog_type == BPF_PROG_TYPE_TRACING && |
| 22241 | insn->imm == BPF_FUNC_get_func_arg) { |
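|  | /* The BPF trampoline saves nr_args at ctx - 8 and the arguments
|  |  * themselves at ctx + 0, 8, ..., so the helper reduces to a bounds
|  |  * check plus a single load and store.
|  |  */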
| 22242 | /* Load nr_args from ctx - 8 */ |
| 22243 | insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); |
| 22244 | insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6); |
| 22245 | insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3); |
| 22246 | insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1); |
| 22247 | insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0); |
| 22248 | insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0); |
| 22249 | insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0); |
| 22250 | insn_buf[7] = BPF_JMP_A(1); |
| 22251 | insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); |
| 22252 | cnt = 9; |
| 22253 | |
| 22254 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 22255 | if (!new_prog) |
| 22256 | return -ENOMEM; |
| 22257 | |
| 22258 | delta += cnt - 1; |
| 22259 | env->prog = prog = new_prog; |
| 22260 | insn = new_prog->insnsi + i + delta; |
| 22261 | goto next_insn; |
| 22262 | } |
| 22263 | |
| 22264 | /* Implement bpf_get_func_ret inline. */ |
| 22265 | if (prog_type == BPF_PROG_TYPE_TRACING && |
| 22266 | insn->imm == BPF_FUNC_get_func_ret) { |
| 22267 | if (eatype == BPF_TRACE_FEXIT || |
| 22268 | eatype == BPF_MODIFY_RETURN) { |
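|  | /* For fexit/fmod_ret the return value is saved right after the
|  |  * args, at ctx + nr_args * 8.
|  |  */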
| 22269 | /* Load nr_args from ctx - 8 */ |
| 22270 | insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); |
| 22271 | insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3); |
| 22272 | insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1); |
| 22273 | insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0); |
| 22274 | insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0); |
| 22275 | insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0); |
| 22276 | cnt = 6; |
| 22277 | } else { |
| 22278 | insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP); |
| 22279 | cnt = 1; |
| 22280 | } |
| 22281 | |
| 22282 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 22283 | if (!new_prog) |
| 22284 | return -ENOMEM; |
| 22285 | |
| 22286 | delta += cnt - 1; |
| 22287 | env->prog = prog = new_prog; |
| 22288 | insn = new_prog->insnsi + i + delta; |
| 22289 | goto next_insn; |
| 22290 | } |
| 22291 | |
| 22292 | /* Implement get_func_arg_cnt inline. */ |
| 22293 | if (prog_type == BPF_PROG_TYPE_TRACING && |
| 22294 | insn->imm == BPF_FUNC_get_func_arg_cnt) { |
| 22295 | /* Load nr_args from ctx - 8 */ |
| 22296 | insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); |
| 22297 | |
| 22298 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); |
| 22299 | if (!new_prog) |
| 22300 | return -ENOMEM; |
| 22301 | |
| 22302 | env->prog = prog = new_prog; |
| 22303 | insn = new_prog->insnsi + i + delta; |
| 22304 | goto next_insn; |
| 22305 | } |
| 22306 | |
| 22307 | /* Implement bpf_get_func_ip inline. */ |
| 22308 | if (prog_type == BPF_PROG_TYPE_TRACING && |
| 22309 | insn->imm == BPF_FUNC_get_func_ip) { |
| 22310 | /* Load IP address from ctx - 16 */ |
| 22311 | insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16); |
| 22312 | |
| 22313 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); |
| 22314 | if (!new_prog) |
| 22315 | return -ENOMEM; |
| 22316 | |
| 22317 | env->prog = prog = new_prog; |
| 22318 | insn = new_prog->insnsi + i + delta; |
| 22319 | goto next_insn; |
| 22320 | } |
| 22321 | |
| 22322 | /* Implement bpf_get_branch_snapshot inline. */ |
| 22323 | if (IS_ENABLED(CONFIG_PERF_EVENTS) && |
| 22324 | prog->jit_requested && BITS_PER_LONG == 64 && |
| 22325 | insn->imm == BPF_FUNC_get_branch_snapshot) { |
| 22326 | /* We are dealing with the following func protos: |
| 22327 | * u64 bpf_get_branch_snapshot(void *buf, u32 size, u64 flags); |
| 22328 | * int perf_snapshot_branch_stack(struct perf_branch_entry *entries, u32 cnt); |
| 22329 | */ |
| 22330 | const u32 br_entry_size = sizeof(struct perf_branch_entry); |
| 22331 | |
| 22332 | /* struct perf_branch_entry is part of UAPI and is
| 22333 | * used as an array element, so it is extremely unlikely
| 22334 | * to ever grow or shrink.
| 22335 | */ |
| 22336 | BUILD_BUG_ON(br_entry_size != 24); |
| 22337 | |
| 22338 | /* if (unlikely(flags)) return -EINVAL */ |
| 22339 | insn_buf[0] = BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 0, 7); |
| 22340 | |
| 22341 | /* Transform size (bytes) into number of entries (cnt = size / 24). |
| 22342 | * But to avoid an expensive division instruction, we implement
| 22343 | * divide-by-3 through multiplication, followed by further |
| 22344 | * division by 8 through 3-bit right shift. |
| 22345 | * Refer to book "Hacker's Delight, 2nd ed." by Henry S. Warren, Jr., |
| 22346 | * p. 227, chapter "Unsigned Division by 3" for details and proofs. |
| 22347 | * |
| 22348 | * N / 3 <=> M * N / 2^33, where M = (2^33 + 1) / 3 = 0xaaaaaaab. |
| 22349 | */ |
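|  | /* E.g. size == 72: 72 * 0xaaaaaaab == 0x3000000018, and >> 36 gives
|  |  * 3 == 72 / 24 entries.
|  |  */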
| 22350 | insn_buf[1] = BPF_MOV32_IMM(BPF_REG_0, 0xaaaaaaab); |
| 22351 | insn_buf[2] = BPF_ALU64_REG(BPF_MUL, BPF_REG_2, BPF_REG_0); |
| 22352 | insn_buf[3] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36); |
| 22353 | |
| 22354 | /* call perf_snapshot_branch_stack implementation */ |
| 22355 | insn_buf[4] = BPF_EMIT_CALL(static_call_query(perf_snapshot_branch_stack)); |
| 22356 | /* if (entry_cnt == 0) return -ENOENT */ |
| 22357 | insn_buf[5] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4); |
| 22358 | /* return entry_cnt * sizeof(struct perf_branch_entry) */ |
| 22359 | insn_buf[6] = BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, br_entry_size); |
| 22360 | insn_buf[7] = BPF_JMP_A(3); |
| 22361 | /* return -EINVAL; */ |
| 22362 | insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); |
| 22363 | insn_buf[9] = BPF_JMP_A(1); |
| 22364 | /* return -ENOENT; */ |
| 22365 | insn_buf[10] = BPF_MOV64_IMM(BPF_REG_0, -ENOENT); |
| 22366 | cnt = 11; |
| 22367 | |
| 22368 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 22369 | if (!new_prog) |
| 22370 | return -ENOMEM; |
| 22371 | |
| 22372 | delta += cnt - 1; |
| 22373 | env->prog = prog = new_prog; |
| 22374 | insn = new_prog->insnsi + i + delta; |
| 22375 | goto next_insn; |
| 22376 | } |
| 22377 | |
| 22378 | /* Implement bpf_kptr_xchg inline */ |
| 22379 | if (prog->jit_requested && BITS_PER_LONG == 64 && |
| 22380 | insn->imm == BPF_FUNC_kptr_xchg && |
| 22381 | bpf_jit_supports_ptr_xchg()) { |
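|  | /* Emit r0 = r2; r0 = atomic_xchg((u64 *)(r1 + 0), r0); which matches
|  |  * the helper's swap-and-return-old-value semantics.
|  |  */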
| 22382 | insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2); |
| 22383 | insn_buf[1] = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0); |
| 22384 | cnt = 2; |
| 22385 | |
| 22386 | new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); |
| 22387 | if (!new_prog) |
| 22388 | return -ENOMEM; |
| 22389 | |
| 22390 | delta += cnt - 1; |
| 22391 | env->prog = prog = new_prog; |
| 22392 | insn = new_prog->insnsi + i + delta; |
| 22393 | goto next_insn; |
| 22394 | } |
| 22395 | patch_call_imm: |
| 22396 | fn = env->ops->get_func_proto(insn->imm, env->prog); |
| 22397 | /* all functions that have a prototype and that the verifier
| 22398 | * allowed programs to call must be real in-kernel functions
| 22399 | */ |
| 22400 | if (!fn->func) { |
| 22401 | verbose(env, |
| 22402 | "kernel subsystem misconfigured func %s#%d\n", |
| 22403 | func_id_name(insn->imm), insn->imm); |
| 22404 | return -EFAULT; |
| 22405 | } |
| 22406 | insn->imm = fn->func - __bpf_call_base; |
| 22407 | next_insn: |
| 22408 | if (subprogs[cur_subprog + 1].start == i + delta + 1) { |
| 22409 | subprogs[cur_subprog].stack_depth += stack_depth_extra; |
| 22410 | subprogs[cur_subprog].stack_extra = stack_depth_extra; |
| 22411 | |
| 22412 | stack_depth = subprogs[cur_subprog].stack_depth; |
| 22413 | if (stack_depth > MAX_BPF_STACK && !prog->jit_requested) { |
| 22414 | verbose(env, "stack size %d(extra %d) is too large\n", |
| 22415 | stack_depth, stack_depth_extra); |
| 22416 | return -EINVAL; |
| 22417 | } |
| 22418 | cur_subprog++; |
| 22419 | stack_depth = subprogs[cur_subprog].stack_depth; |
| 22420 | stack_depth_extra = 0; |
| 22421 | } |
| 22422 | i++; |
| 22423 | insn++; |
| 22424 | } |
| 22425 | |
| 22426 | env->prog->aux->stack_depth = subprogs[0].stack_depth; |
| 22427 | for (i = 0; i < env->subprog_cnt; i++) { |
| 22428 | int delta = bpf_jit_supports_timed_may_goto() ? 2 : 1; |
| 22429 | int subprog_start = subprogs[i].start; |
| 22430 | int stack_slots = subprogs[i].stack_extra / 8; |
| 22431 | int slots = delta, cnt = 0; |
| 22432 | |
| 22433 | if (!stack_slots) |
| 22434 | continue; |
| 22435 | /* We need two slots in case timed may_goto is supported. */ |
| 22436 | if (stack_slots > slots) { |
| 22437 | verifier_bug(env, "stack_slots supports may_goto only"); |
| 22438 | return -EFAULT; |
| 22439 | } |
| 22440 | |
| 22441 | stack_depth = subprogs[i].stack_depth; |
| 22442 | if (bpf_jit_supports_timed_may_goto()) { |
| 22443 | insn_buf[cnt++] = BPF_ST_MEM(BPF_DW, BPF_REG_FP, -stack_depth, |
| 22444 | BPF_MAX_TIMED_LOOPS); |
| 22445 | insn_buf[cnt++] = BPF_ST_MEM(BPF_DW, BPF_REG_FP, -stack_depth + 8, 0); |
| 22446 | } else { |
| 22447 | /* Add ST insn to subprog prologue to init extra stack */ |
| 22448 | insn_buf[cnt++] = BPF_ST_MEM(BPF_DW, BPF_REG_FP, -stack_depth, |
| 22449 | BPF_MAX_LOOPS); |
| 22450 | } |
| 22451 | /* Copy first actual insn to preserve it */ |
| 22452 | insn_buf[cnt++] = env->prog->insnsi[subprog_start]; |
| 22453 | |
| 22454 | new_prog = bpf_patch_insn_data(env, subprog_start, insn_buf, cnt); |
| 22455 | if (!new_prog) |
| 22456 | return -ENOMEM; |
| 22457 | env->prog = prog = new_prog; |
| 22458 | /* |
| 22459 | * If may_goto is the first insn of a prog, there could be a jmp
| 22460 | * insn that points to it, hence adjust all such jmps to point
| 22461 | * to the insn after the BPF_ST that inits the may_goto count.
| 22462 | * Adjustment will succeed because bpf_patch_insn_data() didn't fail. |
| 22463 | */ |
| 22464 | WARN_ON(adjust_jmp_off(env->prog, subprog_start, delta)); |
| 22465 | } |
| 22466 | |
| 22467 | /* Since poke tab is now finalized, publish aux to tracker. */ |
| 22468 | for (i = 0; i < prog->aux->size_poke_tab; i++) { |
| 22469 | map_ptr = prog->aux->poke_tab[i].tail_call.map; |
| 22470 | if (!map_ptr->ops->map_poke_track || |
| 22471 | !map_ptr->ops->map_poke_untrack || |
| 22472 | !map_ptr->ops->map_poke_run) { |
| 22473 | verbose(env, "bpf verifier is misconfigured\n"); |
| 22474 | return -EINVAL; |
| 22475 | } |
| 22476 | |
| 22477 | ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); |
| 22478 | if (ret < 0) { |
| 22479 | verbose(env, "tracking tail call prog failed\n"); |
| 22480 | return ret; |
| 22481 | } |
| 22482 | } |
| 22483 | |
| 22484 | sort_kfunc_descs_by_imm_off(env->prog); |
| 22485 | |
| 22486 | return 0; |
| 22487 | } |
| 22488 | |
| 22489 | static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env, |
| 22490 | int position, |
| 22491 | s32 stack_base, |
| 22492 | u32 callback_subprogno, |
| 22493 | u32 *total_cnt) |
| 22494 | { |
| 22495 | s32 r6_offset = stack_base + 0 * BPF_REG_SIZE; |
| 22496 | s32 r7_offset = stack_base + 1 * BPF_REG_SIZE; |
| 22497 | s32 r8_offset = stack_base + 2 * BPF_REG_SIZE; |
| 22498 | int reg_loop_max = BPF_REG_6; |
| 22499 | int reg_loop_cnt = BPF_REG_7; |
| 22500 | int reg_loop_ctx = BPF_REG_8; |
| 22501 | |
| 22502 | struct bpf_insn *insn_buf = env->insn_buf; |
| 22503 | struct bpf_prog *new_prog; |
| 22504 | u32 callback_start; |
| 22505 | u32 call_insn_offset; |
| 22506 | s32 callback_offset; |
| 22507 | u32 cnt = 0; |
| 22508 | |
| 22509 | /* This represents an inlined version of bpf_iter.c:bpf_loop, |
| 22510 | * keep this code and bpf_loop in sync when modifying either.
| 22511 | */ |
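|  | /* Roughly equivalent to (illustrative only):
|  |  *
|  |  *   if (nr_loops > BPF_MAX_LOOPS)
|  |  *           return -E2BIG;
|  |  *   i = 0;
|  |  *   while (i < nr_loops) {
|  |  *           ret = callback_fn(i, callback_ctx);
|  |  *           i++;
|  |  *           if (ret)
|  |  *                   break;
|  |  *   }
|  |  *   return i;
|  |  */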
| 22512 | |
| 22513 | /* Return error and jump to the end of the patch if |
| 22514 | * expected number of iterations is too big. |
| 22515 | */ |
| 22516 | insn_buf[cnt++] = BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2); |
| 22517 | insn_buf[cnt++] = BPF_MOV32_IMM(BPF_REG_0, -E2BIG); |
| 22518 | insn_buf[cnt++] = BPF_JMP_IMM(BPF_JA, 0, 0, 16); |
| 22519 | /* spill R6, R7, R8 to use these as loop vars */ |
| 22520 | insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset); |
| 22521 | insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset); |
| 22522 | insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset); |
| 22523 | /* initialize loop vars */ |
| 22524 | insn_buf[cnt++] = BPF_MOV64_REG(reg_loop_max, BPF_REG_1); |
| 22525 | insn_buf[cnt++] = BPF_MOV32_IMM(reg_loop_cnt, 0); |
| 22526 | insn_buf[cnt++] = BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3); |
| 22527 | /* loop header, |
| 22528 | * if reg_loop_cnt >= reg_loop_max skip the loop body |
| 22529 | */ |
| 22530 | insn_buf[cnt++] = BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5); |
| 22531 | /* callback call, |
| 22532 | * correct callback offset would be set after patching |
| 22533 | */ |
| 22534 | insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt); |
| 22535 | insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx); |
| 22536 | insn_buf[cnt++] = BPF_CALL_REL(0); |
| 22537 | /* increment loop counter */ |
| 22538 | insn_buf[cnt++] = BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1); |
| 22539 | /* jump to loop header if callback returned 0 */ |
| 22540 | insn_buf[cnt++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6); |
| 22541 | /* return value of bpf_loop, |
| 22542 | * set R0 to the number of iterations |
| 22543 | */ |
| 22544 | insn_buf[cnt++] = BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt); |
| 22545 | /* restore original values of R6, R7, R8 */ |
| 22546 | insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset); |
| 22547 | insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset); |
| 22548 | insn_buf[cnt++] = BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset); |
| 22549 | |
| 22550 | *total_cnt = cnt; |
| 22551 | new_prog = bpf_patch_insn_data(env, position, insn_buf, cnt); |
| 22552 | if (!new_prog) |
| 22553 | return new_prog; |
| 22554 | |
| 22555 | /* callback start is known only after patching */ |
| 22556 | callback_start = env->subprog_info[callback_subprogno].start; |
| 22557 | /* Note: insn_buf[12] is the offset of the BPF_CALL_REL instruction */
| 22558 | call_insn_offset = position + 12; |
| 22559 | callback_offset = callback_start - call_insn_offset - 1; |
| 22560 | new_prog->insnsi[call_insn_offset].imm = callback_offset; |
| 22561 | |
| 22562 | return new_prog; |
| 22563 | } |
| 22564 | |
| 22565 | static bool is_bpf_loop_call(struct bpf_insn *insn) |
| 22566 | { |
| 22567 | return insn->code == (BPF_JMP | BPF_CALL) && |
| 22568 | insn->src_reg == 0 && |
| 22569 | insn->imm == BPF_FUNC_loop; |
| 22570 | } |
| 22571 | |
| 22572 | /* For all sub-programs in the program (including main), check
| 22573 | * insn_aux_data to see if there are bpf_loop calls that require
| 22574 | * inlining. If such calls are found, they are replaced with the
| 22575 | * sequence of instructions produced by inline_bpf_loop() and the
| 22576 | * subprog's stack_depth is increased by the size of 3 registers.
| 22577 | * This stack space is used to spill R6, R7 and R8, which hold
| 22578 | * the loop bound, counter and context variables while the loop
| 22579 | * runs.
| 22580 | */ |
| 22581 | static int optimize_bpf_loop(struct bpf_verifier_env *env) |
| 22582 | { |
| 22583 | struct bpf_subprog_info *subprogs = env->subprog_info; |
| 22584 | int i, cur_subprog = 0, cnt, delta = 0; |
| 22585 | struct bpf_insn *insn = env->prog->insnsi; |
| 22586 | int insn_cnt = env->prog->len; |
| 22587 | u16 stack_depth = subprogs[cur_subprog].stack_depth; |
| 22588 | u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; |
| 22589 | u16 stack_depth_extra = 0; |
| 22590 | |
| 22591 | for (i = 0; i < insn_cnt; i++, insn++) { |
| 22592 | struct bpf_loop_inline_state *inline_state = |
| 22593 | &env->insn_aux_data[i + delta].loop_inline_state; |
| 22594 | |
| 22595 | if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) { |
| 22596 | struct bpf_prog *new_prog; |
| 22597 | |
| 22598 | stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup; |
| 22599 | new_prog = inline_bpf_loop(env, |
| 22600 | i + delta, |
| 22601 | -(stack_depth + stack_depth_extra), |
| 22602 | inline_state->callback_subprogno, |
| 22603 | &cnt); |
| 22604 | if (!new_prog) |
| 22605 | return -ENOMEM; |
| 22606 | |
| 22607 | delta += cnt - 1; |
| 22608 | env->prog = new_prog; |
| 22609 | insn = new_prog->insnsi + i + delta; |
| 22610 | } |
| 22611 | |
| 22612 | if (subprogs[cur_subprog + 1].start == i + delta + 1) { |
| 22613 | subprogs[cur_subprog].stack_depth += stack_depth_extra; |
| 22614 | cur_subprog++; |
| 22615 | stack_depth = subprogs[cur_subprog].stack_depth; |
| 22616 | stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; |
| 22617 | stack_depth_extra = 0; |
| 22618 | } |
| 22619 | } |
| 22620 | |
| 22621 | env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; |
| 22622 | |
| 22623 | return 0; |
| 22624 | } |
| 22625 | |
| 22626 | /* Remove unnecessary spill/fill pairs that are members of the fastcall
| 22627 | * pattern, and adjust subprogram stack depth when possible.
| 22628 | */ |
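|  | /* A fastcall pattern is a spill of a caller-saved register right before a
|  |  * helper/kfunc call known to preserve it, paired with the matching fill
|  |  * right after the call, e.g. (illustrative):
|  |  *
|  |  *   *(u64 *)(r10 - 8) = r1;
|  |  *   call some_preserving_helper;
|  |  *   r1 = *(u64 *)(r10 - 8);
|  |  *
|  |  * Both spill and fill are turned into NOPs here and later removed by
|  |  * opt_remove_nops().
|  |  */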
| 22629 | static int remove_fastcall_spills_fills(struct bpf_verifier_env *env) |
| 22630 | { |
| 22631 | struct bpf_subprog_info *subprog = env->subprog_info; |
| 22632 | struct bpf_insn_aux_data *aux = env->insn_aux_data; |
| 22633 | struct bpf_insn *insn = env->prog->insnsi; |
| 22634 | int insn_cnt = env->prog->len; |
| 22635 | u32 spills_num; |
| 22636 | bool modified = false; |
| 22637 | int i, j; |
| 22638 | |
| 22639 | for (i = 0; i < insn_cnt; i++, insn++) { |
| 22640 | if (aux[i].fastcall_spills_num > 0) { |
| 22641 | spills_num = aux[i].fastcall_spills_num; |
| 22642 | /* NOPs would be removed by opt_remove_nops() */ |
| 22643 | for (j = 1; j <= spills_num; ++j) { |
| 22644 | *(insn - j) = NOP; |
| 22645 | *(insn + j) = NOP; |
| 22646 | } |
| 22647 | modified = true; |
| 22648 | } |
| 22649 | if ((subprog + 1)->start == i + 1) { |
| 22650 | if (modified && !subprog->keep_fastcall_stack) |
| 22651 | subprog->stack_depth = -subprog->fastcall_stack_off; |
| 22652 | subprog++; |
| 22653 | modified = false; |
| 22654 | } |
| 22655 | } |
| 22656 | |
| 22657 | return 0; |
| 22658 | } |
| 22659 | |
| 22660 | static void free_states(struct bpf_verifier_env *env) |
| 22661 | { |
| 22662 | struct bpf_verifier_state_list *sl; |
| 22663 | struct list_head *head, *pos, *tmp; |
| 22664 | int i; |
| 22665 | |
| 22666 | list_for_each_safe(pos, tmp, &env->free_list) { |
| 22667 | sl = container_of(pos, struct bpf_verifier_state_list, node); |
| 22668 | free_verifier_state(&sl->state, false); |
| 22669 | kfree(sl); |
| 22670 | } |
| 22671 | INIT_LIST_HEAD(&env->free_list); |
| 22672 | |
| 22673 | if (!env->explored_states) |
| 22674 | return; |
| 22675 | |
| 22676 | for (i = 0; i < state_htab_size(env); i++) { |
| 22677 | head = &env->explored_states[i]; |
| 22678 | |
| 22679 | list_for_each_safe(pos, tmp, head) { |
| 22680 | sl = container_of(pos, struct bpf_verifier_state_list, node); |
| 22681 | free_verifier_state(&sl->state, false); |
| 22682 | kfree(sl); |
| 22683 | } |
| 22684 | INIT_LIST_HEAD(&env->explored_states[i]); |
| 22685 | } |
| 22686 | } |
| 22687 | |
| 22688 | static int do_check_common(struct bpf_verifier_env *env, int subprog) |
| 22689 | { |
| 22690 | bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); |
| 22691 | struct bpf_subprog_info *sub = subprog_info(env, subprog); |
| 22692 | struct bpf_prog_aux *aux = env->prog->aux; |
| 22693 | struct bpf_verifier_state *state; |
| 22694 | struct bpf_reg_state *regs; |
| 22695 | int ret, i; |
| 22696 | |
| 22697 | env->prev_linfo = NULL; |
| 22698 | env->pass_cnt++; |
| 22699 | |
| 22700 | state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); |
| 22701 | if (!state) |
| 22702 | return -ENOMEM; |
| 22703 | state->curframe = 0; |
| 22704 | state->speculative = false; |
| 22705 | state->branches = 1; |
| 22706 | state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); |
| 22707 | if (!state->frame[0]) { |
| 22708 | kfree(state); |
| 22709 | return -ENOMEM; |
| 22710 | } |
| 22711 | env->cur_state = state; |
| 22712 | init_func_state(env, state->frame[0], |
| 22713 | BPF_MAIN_FUNC /* callsite */, |
| 22714 | 0 /* frameno */, |
| 22715 | subprog); |
| 22716 | state->first_insn_idx = env->subprog_info[subprog].start; |
| 22717 | state->last_insn_idx = -1; |
| 22718 | |
| 22719 | regs = state->frame[state->curframe]->regs; |
| 22720 | if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { |
| 22721 | const char *sub_name = subprog_name(env, subprog); |
| 22722 | struct bpf_subprog_arg_info *arg; |
| 22723 | struct bpf_reg_state *reg; |
| 22724 | |
| 22725 | verbose(env, "Validating %s() func#%d...\n", sub_name, subprog); |
| 22726 | ret = btf_prepare_func_args(env, subprog); |
| 22727 | if (ret) |
| 22728 | goto out; |
| 22729 | |
| 22730 | if (subprog_is_exc_cb(env, subprog)) { |
| 22731 | state->frame[0]->in_exception_callback_fn = true; |
| 22732 | /* We have already ensured that the callback returns an integer, just |
| 22733 | * like all global subprogs. We only need to ensure that it has a single
| 22734 | * scalar argument.
| 22735 | */ |
| 22736 | if (sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_ANYTHING) { |
| 22737 | verbose(env, "exception cb only supports single integer argument\n"); |
| 22738 | ret = -EINVAL; |
| 22739 | goto out; |
| 22740 | } |
| 22741 | } |
| 22742 | for (i = BPF_REG_1; i <= sub->arg_cnt; i++) { |
| 22743 | arg = &sub->args[i - BPF_REG_1]; |
| 22744 | reg = ®s[i]; |
| 22745 | |
| 22746 | if (arg->arg_type == ARG_PTR_TO_CTX) { |
| 22747 | reg->type = PTR_TO_CTX; |
| 22748 | mark_reg_known_zero(env, regs, i); |
| 22749 | } else if (arg->arg_type == ARG_ANYTHING) { |
| 22750 | reg->type = SCALAR_VALUE; |
| 22751 | mark_reg_unknown(env, regs, i); |
| 22752 | } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { |
				/* assume an unspecialized LOCAL dynptr type */
| 22754 | __mark_dynptr_reg(reg, BPF_DYNPTR_TYPE_LOCAL, true, ++env->id_gen); |
| 22755 | } else if (base_type(arg->arg_type) == ARG_PTR_TO_MEM) { |
| 22756 | reg->type = PTR_TO_MEM; |
| 22757 | if (arg->arg_type & PTR_MAYBE_NULL) |
| 22758 | reg->type |= PTR_MAYBE_NULL; |
| 22759 | mark_reg_known_zero(env, regs, i); |
| 22760 | reg->mem_size = arg->mem_size; |
| 22761 | reg->id = ++env->id_gen; |
| 22762 | } else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) { |
| 22763 | reg->type = PTR_TO_BTF_ID; |
| 22764 | if (arg->arg_type & PTR_MAYBE_NULL) |
| 22765 | reg->type |= PTR_MAYBE_NULL; |
| 22766 | if (arg->arg_type & PTR_UNTRUSTED) |
| 22767 | reg->type |= PTR_UNTRUSTED; |
| 22768 | if (arg->arg_type & PTR_TRUSTED) |
| 22769 | reg->type |= PTR_TRUSTED; |
| 22770 | mark_reg_known_zero(env, regs, i); |
| 22771 | reg->btf = bpf_get_btf_vmlinux(); /* can't fail at this point */ |
| 22772 | reg->btf_id = arg->btf_id; |
| 22773 | reg->id = ++env->id_gen; |
| 22774 | } else if (base_type(arg->arg_type) == ARG_PTR_TO_ARENA) { |
| 22775 | /* caller can pass either PTR_TO_ARENA or SCALAR */ |
| 22776 | mark_reg_unknown(env, regs, i); |
| 22777 | } else { |
| 22778 | WARN_ONCE(1, "BUG: unhandled arg#%d type %d\n", |
| 22779 | i - BPF_REG_1, arg->arg_type); |
| 22780 | ret = -EFAULT; |
| 22781 | goto out; |
| 22782 | } |
| 22783 | } |
| 22784 | } else { |
		/* if the main BPF program has associated BTF info, validate that
		 * it matches the expected signature, otherwise mark the BTF
		 * info for the main program as unreliable
		 */
| 22789 | if (env->prog->aux->func_info_aux) { |
| 22790 | ret = btf_prepare_func_args(env, 0); |
| 22791 | if (ret || sub->arg_cnt != 1 || sub->args[0].arg_type != ARG_PTR_TO_CTX) |
| 22792 | env->prog->aux->func_info_aux[0].unreliable = true; |
| 22793 | } |
| 22794 | |
| 22795 | /* 1st arg to a function */ |
| 22796 | regs[BPF_REG_1].type = PTR_TO_CTX; |
| 22797 | mark_reg_known_zero(env, regs, BPF_REG_1); |
| 22798 | } |
| 22799 | |
| 22800 | /* Acquire references for struct_ops program arguments tagged with "__ref" */ |
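	/* Illustrative only (hypothetical parameter name): an implementation
	 * marks a refcounted struct_ops argument by adding a "__ref" suffix to
	 * the parameter name, e.g. "task__ref". For every such argument the
	 * verifier pre-acquires a reference id below, so the program sees it
	 * as a referenced kptr.
	 */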
| 22801 | if (!subprog && env->prog->type == BPF_PROG_TYPE_STRUCT_OPS) { |
| 22802 | for (i = 0; i < aux->ctx_arg_info_size; i++) |
| 22803 | aux->ctx_arg_info[i].ref_obj_id = aux->ctx_arg_info[i].refcounted ? |
| 22804 | acquire_reference(env, 0) : 0; |
| 22805 | } |
| 22806 | |
| 22807 | ret = do_check(env); |
| 22808 | out: |
| 22809 | /* check for NULL is necessary, since cur_state can be freed inside |
| 22810 | * do_check() under memory pressure. |
| 22811 | */ |
| 22812 | if (env->cur_state) { |
| 22813 | free_verifier_state(env->cur_state, true); |
| 22814 | env->cur_state = NULL; |
| 22815 | } |
| 22816 | while (!pop_stack(env, NULL, NULL, false)); |
| 22817 | if (!ret && pop_log) |
| 22818 | bpf_vlog_reset(&env->log, 0); |
| 22819 | free_states(env); |
| 22820 | return ret; |
| 22821 | } |
| 22822 | |
| 22823 | /* Lazily verify all global functions based on their BTF, if they are called |
| 22824 | * from main BPF program or any of subprograms transitively. |
| 22825 | * BPF global subprogs called from dead code are not validated. |
| 22826 | * All callable global functions must pass verification. |
| 22827 | * Otherwise the whole program is rejected. |
| 22828 | * Consider: |
| 22829 | * int bar(int); |
| 22830 | * int foo(int f) |
| 22831 | * { |
| 22832 | * return bar(f); |
| 22833 | * } |
| 22834 | * int bar(int b) |
| 22835 | * { |
| 22836 | * ... |
| 22837 | * } |
| 22838 | * foo() will be verified first for R1=any_scalar_value. During verification it |
| 22839 | * will be assumed that bar() already verified successfully and call to bar() |
| 22840 | * from foo() will be checked for type match only. Later bar() will be verified |
| 22841 | * independently to check that it's safe for R1=any_scalar_value. |
| 22842 | */ |
| 22843 | static int do_check_subprogs(struct bpf_verifier_env *env) |
| 22844 | { |
| 22845 | struct bpf_prog_aux *aux = env->prog->aux; |
| 22846 | struct bpf_func_info_aux *sub_aux; |
| 22847 | int i, ret, new_cnt; |
| 22848 | |
| 22849 | if (!aux->func_info) |
| 22850 | return 0; |
| 22851 | |
| 22852 | /* exception callback is presumed to be always called */ |
| 22853 | if (env->exception_callback_subprog) |
| 22854 | subprog_aux(env, env->exception_callback_subprog)->called = true; |
| 22855 | |
| 22856 | again: |
| 22857 | new_cnt = 0; |
| 22858 | for (i = 1; i < env->subprog_cnt; i++) { |
| 22859 | if (!subprog_is_global(env, i)) |
| 22860 | continue; |
| 22861 | |
| 22862 | sub_aux = subprog_aux(env, i); |
| 22863 | if (!sub_aux->called || sub_aux->verified) |
| 22864 | continue; |
| 22865 | |
| 22866 | env->insn_idx = env->subprog_info[i].start; |
| 22867 | WARN_ON_ONCE(env->insn_idx == 0); |
| 22868 | ret = do_check_common(env, i); |
| 22869 | if (ret) { |
| 22870 | return ret; |
| 22871 | } else if (env->log.level & BPF_LOG_LEVEL) { |
| 22872 | verbose(env, "Func#%d ('%s') is safe for any args that match its prototype\n", |
| 22873 | i, subprog_name(env, i)); |
| 22874 | } |
| 22875 | |
| 22876 | /* We verified new global subprog, it might have called some |
| 22877 | * more global subprogs that we haven't verified yet, so we |
| 22878 | * need to do another pass over subprogs to verify those. |
| 22879 | */ |
| 22880 | sub_aux->verified = true; |
| 22881 | new_cnt++; |
| 22882 | } |
| 22883 | |
| 22884 | /* We can't loop forever as we verify at least one global subprog on |
| 22885 | * each pass. |
| 22886 | */ |
| 22887 | if (new_cnt) |
| 22888 | goto again; |
| 22889 | |
| 22890 | return 0; |
| 22891 | } |
| 22892 | |
| 22893 | static int do_check_main(struct bpf_verifier_env *env) |
| 22894 | { |
| 22895 | int ret; |
| 22896 | |
| 22897 | env->insn_idx = 0; |
| 22898 | ret = do_check_common(env, 0); |
| 22899 | if (!ret) |
| 22900 | env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; |
| 22901 | return ret; |
| 22902 | } |
| 22903 | |
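/* Example of the stats output below (illustrative numbers only):
 *
 *   verification time 1234 usec
 *   stack depth 64+0+32
 *   processed 512 insns (limit 1000000) max_states_per_insn 4 total_states 32 peak_states 32 mark_read 3
 *
 * The first two lines are only emitted when BPF_LOG_STATS is set.
 */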
| 22905 | static void print_verification_stats(struct bpf_verifier_env *env) |
| 22906 | { |
| 22907 | int i; |
| 22908 | |
| 22909 | if (env->log.level & BPF_LOG_STATS) { |
| 22910 | verbose(env, "verification time %lld usec\n", |
| 22911 | div_u64(env->verification_time, 1000)); |
| 22912 | verbose(env, "stack depth "); |
| 22913 | for (i = 0; i < env->subprog_cnt; i++) { |
| 22914 | u32 depth = env->subprog_info[i].stack_depth; |
| 22915 | |
| 22916 | verbose(env, "%d", depth); |
| 22917 | if (i + 1 < env->subprog_cnt) |
| 22918 | verbose(env, "+"); |
| 22919 | } |
| 22920 | verbose(env, "\n"); |
| 22921 | } |
| 22922 | verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " |
| 22923 | "total_states %d peak_states %d mark_read %d\n", |
| 22924 | env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, |
| 22925 | env->max_states_per_insn, env->total_states, |
| 22926 | env->peak_states, env->longest_mark_read_walk); |
| 22927 | } |
| 22928 | |
| 22929 | int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog, |
| 22930 | const struct bpf_ctx_arg_aux *info, u32 cnt) |
| 22931 | { |
| 22932 | prog->aux->ctx_arg_info = kmemdup_array(info, cnt, sizeof(*info), GFP_KERNEL); |
| 22933 | prog->aux->ctx_arg_info_size = cnt; |
| 22934 | |
| 22935 | return prog->aux->ctx_arg_info ? 0 : -ENOMEM; |
| 22936 | } |
| 22937 | |
| 22938 | static int check_struct_ops_btf_id(struct bpf_verifier_env *env) |
| 22939 | { |
| 22940 | const struct btf_type *t, *func_proto; |
| 22941 | const struct bpf_struct_ops_desc *st_ops_desc; |
| 22942 | const struct bpf_struct_ops *st_ops; |
| 22943 | const struct btf_member *member; |
| 22944 | struct bpf_prog *prog = env->prog; |
| 22945 | bool has_refcounted_arg = false; |
| 22946 | u32 btf_id, member_idx, member_off; |
| 22947 | struct btf *btf; |
| 22948 | const char *mname; |
| 22949 | int i, err; |
| 22950 | |
| 22951 | if (!prog->gpl_compatible) { |
| 22952 | verbose(env, "struct ops programs must have a GPL compatible license\n"); |
| 22953 | return -EINVAL; |
| 22954 | } |
| 22955 | |
| 22956 | if (!prog->aux->attach_btf_id) |
| 22957 | return -ENOTSUPP; |
| 22958 | |
| 22959 | btf = prog->aux->attach_btf; |
| 22960 | if (btf_is_module(btf)) { |
| 22961 | /* Make sure st_ops is valid through the lifetime of env */ |
| 22962 | env->attach_btf_mod = btf_try_get_module(btf); |
| 22963 | if (!env->attach_btf_mod) { |
| 22964 | verbose(env, "struct_ops module %s is not found\n", |
| 22965 | btf_get_name(btf)); |
| 22966 | return -ENOTSUPP; |
| 22967 | } |
| 22968 | } |
| 22969 | |
| 22970 | btf_id = prog->aux->attach_btf_id; |
| 22971 | st_ops_desc = bpf_struct_ops_find(btf, btf_id); |
| 22972 | if (!st_ops_desc) { |
| 22973 | verbose(env, "attach_btf_id %u is not a supported struct\n", |
| 22974 | btf_id); |
| 22975 | return -ENOTSUPP; |
| 22976 | } |
| 22977 | st_ops = st_ops_desc->st_ops; |
| 22978 | |
| 22979 | t = st_ops_desc->type; |
| 22980 | member_idx = prog->expected_attach_type; |
| 22981 | if (member_idx >= btf_type_vlen(t)) { |
| 22982 | verbose(env, "attach to invalid member idx %u of struct %s\n", |
| 22983 | member_idx, st_ops->name); |
| 22984 | return -EINVAL; |
| 22985 | } |
| 22986 | |
| 22987 | member = &btf_type_member(t)[member_idx]; |
| 22988 | mname = btf_name_by_offset(btf, member->name_off); |
| 22989 | func_proto = btf_type_resolve_func_ptr(btf, member->type, |
| 22990 | NULL); |
| 22991 | if (!func_proto) { |
| 22992 | verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", |
| 22993 | mname, member_idx, st_ops->name); |
| 22994 | return -EINVAL; |
| 22995 | } |
| 22996 | |
| 22997 | member_off = __btf_member_bit_offset(t, member) / 8; |
| 22998 | err = bpf_struct_ops_supported(st_ops, member_off); |
| 22999 | if (err) { |
| 23000 | verbose(env, "attach to unsupported member %s of struct %s\n", |
| 23001 | mname, st_ops->name); |
| 23002 | return err; |
| 23003 | } |
| 23004 | |
| 23005 | if (st_ops->check_member) { |
| 23006 | err = st_ops->check_member(t, member, prog); |
| 23007 | |
| 23008 | if (err) { |
| 23009 | verbose(env, "attach to unsupported member %s of struct %s\n", |
| 23010 | mname, st_ops->name); |
| 23011 | return err; |
| 23012 | } |
| 23013 | } |
| 23014 | |
| 23015 | if (prog->aux->priv_stack_requested && !bpf_jit_supports_private_stack()) { |
| 23016 | verbose(env, "Private stack not supported by jit\n"); |
| 23017 | return -EACCES; |
| 23018 | } |
| 23019 | |
| 23020 | for (i = 0; i < st_ops_desc->arg_info[member_idx].cnt; i++) { |
		if (st_ops_desc->arg_info[member_idx].info[i].refcounted) {
| 23022 | has_refcounted_arg = true; |
| 23023 | break; |
| 23024 | } |
| 23025 | } |
| 23026 | |
| 23027 | /* Tail call is not allowed for programs with refcounted arguments since we |
| 23028 | * cannot guarantee that valid refcounted kptrs will be passed to the callee. |
| 23029 | */ |
| 23030 | for (i = 0; i < env->subprog_cnt; i++) { |
| 23031 | if (has_refcounted_arg && env->subprog_info[i].has_tail_call) { |
| 23032 | verbose(env, "program with __ref argument cannot tail call\n"); |
| 23033 | return -EINVAL; |
| 23034 | } |
| 23035 | } |
| 23036 | |
| 23037 | prog->aux->st_ops = st_ops; |
| 23038 | prog->aux->attach_st_ops_member_off = member_off; |
| 23039 | |
| 23040 | prog->aux->attach_func_proto = func_proto; |
| 23041 | prog->aux->attach_func_name = mname; |
| 23042 | env->ops = st_ops->verifier_ops; |
| 23043 | |
| 23044 | return bpf_prog_ctx_arg_info_init(prog, st_ops_desc->arg_info[member_idx].info, |
| 23045 | st_ops_desc->arg_info[member_idx].cnt); |
| 23046 | } |

#define SECURITY_PREFIX "security_"
| 23048 | |
| 23049 | static int check_attach_modify_return(unsigned long addr, const char *func_name) |
| 23050 | { |
| 23051 | if (within_error_injection_list(addr) || |
| 23052 | !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) |
| 23053 | return 0; |
| 23054 | |
| 23055 | return -EINVAL; |
| 23056 | } |
| 23057 | |
| 23058 | /* list of non-sleepable functions that are otherwise on |
| 23059 | * ALLOW_ERROR_INJECTION list |
| 23060 | */ |
| 23061 | BTF_SET_START(btf_non_sleepable_error_inject) |
/* Three functions below can be called from sleepable and non-sleepable context.
 * Assume non-sleepable from the BPF safety point of view.
 */
| 23065 | BTF_ID(func, __filemap_add_folio) |
| 23066 | #ifdef CONFIG_FAIL_PAGE_ALLOC |
| 23067 | BTF_ID(func, should_fail_alloc_page) |
| 23068 | #endif |
| 23069 | #ifdef CONFIG_FAILSLAB |
| 23070 | BTF_ID(func, should_failslab) |
| 23071 | #endif |
| 23072 | BTF_SET_END(btf_non_sleepable_error_inject) |
| 23073 | |
| 23074 | static int check_non_sleepable_error_inject(u32 btf_id) |
| 23075 | { |
| 23076 | return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id); |
| 23077 | } |
| 23078 | |
| 23079 | int bpf_check_attach_target(struct bpf_verifier_log *log, |
| 23080 | const struct bpf_prog *prog, |
| 23081 | const struct bpf_prog *tgt_prog, |
| 23082 | u32 btf_id, |
| 23083 | struct bpf_attach_target_info *tgt_info) |
| 23084 | { |
| 23085 | bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; |
| 23086 | bool prog_tracing = prog->type == BPF_PROG_TYPE_TRACING; |
| 23087 | char trace_symbol[KSYM_SYMBOL_LEN]; |
| 23088 | const char prefix[] = "btf_trace_"; |
| 23089 | struct bpf_raw_event_map *btp; |
| 23090 | int ret = 0, subprog = -1, i; |
| 23091 | const struct btf_type *t; |
| 23092 | bool conservative = true; |
| 23093 | const char *tname, *fname; |
| 23094 | struct btf *btf; |
| 23095 | long addr = 0; |
| 23096 | struct module *mod = NULL; |
| 23097 | |
| 23098 | if (!btf_id) { |
| 23099 | bpf_log(log, "Tracing programs must provide btf_id\n"); |
| 23100 | return -EINVAL; |
| 23101 | } |
| 23102 | btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; |
| 23103 | if (!btf) { |
| 23104 | bpf_log(log, |
| 23105 | "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); |
| 23106 | return -EINVAL; |
| 23107 | } |
| 23108 | t = btf_type_by_id(btf, btf_id); |
| 23109 | if (!t) { |
| 23110 | bpf_log(log, "attach_btf_id %u is invalid\n", btf_id); |
| 23111 | return -EINVAL; |
| 23112 | } |
| 23113 | tname = btf_name_by_offset(btf, t->name_off); |
| 23114 | if (!tname) { |
| 23115 | bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id); |
| 23116 | return -EINVAL; |
| 23117 | } |
| 23118 | if (tgt_prog) { |
| 23119 | struct bpf_prog_aux *aux = tgt_prog->aux; |
| 23120 | bool tgt_changes_pkt_data; |
| 23121 | bool tgt_might_sleep; |
| 23122 | |
| 23123 | if (bpf_prog_is_dev_bound(prog->aux) && |
| 23124 | !bpf_prog_dev_bound_match(prog, tgt_prog)) { |
| 23125 | bpf_log(log, "Target program bound device mismatch"); |
| 23126 | return -EINVAL; |
| 23127 | } |
| 23128 | |
| 23129 | for (i = 0; i < aux->func_info_cnt; i++) |
| 23130 | if (aux->func_info[i].type_id == btf_id) { |
| 23131 | subprog = i; |
| 23132 | break; |
| 23133 | } |
| 23134 | if (subprog == -1) { |
| 23135 | bpf_log(log, "Subprog %s doesn't exist\n", tname); |
| 23136 | return -EINVAL; |
| 23137 | } |
| 23138 | if (aux->func && aux->func[subprog]->aux->exception_cb) { |
| 23139 | bpf_log(log, |
| 23140 | "%s programs cannot attach to exception callback\n", |
| 23141 | prog_extension ? "Extension" : "FENTRY/FEXIT"); |
| 23142 | return -EINVAL; |
| 23143 | } |
| 23144 | conservative = aux->func_info_aux[subprog].unreliable; |
| 23145 | if (prog_extension) { |
| 23146 | if (conservative) { |
| 23147 | bpf_log(log, |
| 23148 | "Cannot replace static functions\n"); |
| 23149 | return -EINVAL; |
| 23150 | } |
| 23151 | if (!prog->jit_requested) { |
| 23152 | bpf_log(log, |
| 23153 | "Extension programs should be JITed\n"); |
| 23154 | return -EINVAL; |
| 23155 | } |
| 23156 | tgt_changes_pkt_data = aux->func |
| 23157 | ? aux->func[subprog]->aux->changes_pkt_data |
| 23158 | : aux->changes_pkt_data; |
| 23159 | if (prog->aux->changes_pkt_data && !tgt_changes_pkt_data) { |
| 23160 | bpf_log(log, |
| 23161 | "Extension program changes packet data, while original does not\n"); |
| 23162 | return -EINVAL; |
| 23163 | } |
| 23164 | |
| 23165 | tgt_might_sleep = aux->func |
| 23166 | ? aux->func[subprog]->aux->might_sleep |
| 23167 | : aux->might_sleep; |
| 23168 | if (prog->aux->might_sleep && !tgt_might_sleep) { |
| 23169 | bpf_log(log, |
| 23170 | "Extension program may sleep, while original does not\n"); |
| 23171 | return -EINVAL; |
| 23172 | } |
| 23173 | } |
| 23174 | if (!tgt_prog->jited) { |
| 23175 | bpf_log(log, "Can attach to only JITed progs\n"); |
| 23176 | return -EINVAL; |
| 23177 | } |
| 23178 | if (prog_tracing) { |
| 23179 | if (aux->attach_tracing_prog) { |
| 23180 | /* |
| 23181 | * Target program is an fentry/fexit which is already attached |
| 23182 | * to another tracing program. More levels of nesting |
| 23183 | * attachment are not allowed. |
| 23184 | */ |
| 23185 | bpf_log(log, "Cannot nest tracing program attach more than once\n"); |
| 23186 | return -EINVAL; |
| 23187 | } |
| 23188 | } else if (tgt_prog->type == prog->type) { |
| 23189 | /* |
| 23190 | * To avoid potential call chain cycles, prevent attaching of a |
| 23191 | * program extension to another extension. It's ok to attach |
| 23192 | * fentry/fexit to extension program. |
| 23193 | */ |
| 23194 | bpf_log(log, "Cannot recursively attach\n"); |
| 23195 | return -EINVAL; |
| 23196 | } |
| 23197 | if (tgt_prog->type == BPF_PROG_TYPE_TRACING && |
| 23198 | prog_extension && |
| 23199 | (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || |
| 23200 | tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { |
			/* Program extensions can extend all program types
			 * except fentry/fexit. The reason is the following.
			 * The fentry/fexit programs are used for performance
			 * analysis, stats and can be attached to any program
			 * type. When an extension program is replacing an XDP
			 * function it is necessary to allow performance
			 * analysis of all functions: both the original XDP
			 * program and its program extension. Hence attaching
			 * fentry/fexit to BPF_PROG_TYPE_EXT is allowed. If
			 * extending fentry/fexit were allowed, it would be
			 * possible to create a long call chain
			 * fentry->extension->fentry->extension beyond a
			 * reasonable stack size. Hence extending fentry/fexit
			 * is not allowed.
			 */
| 23215 | bpf_log(log, "Cannot extend fentry/fexit\n"); |
| 23216 | return -EINVAL; |
| 23217 | } |
| 23218 | } else { |
| 23219 | if (prog_extension) { |
| 23220 | bpf_log(log, "Cannot replace kernel functions\n"); |
| 23221 | return -EINVAL; |
| 23222 | } |
| 23223 | } |
| 23224 | |
| 23225 | switch (prog->expected_attach_type) { |
| 23226 | case BPF_TRACE_RAW_TP: |
| 23227 | if (tgt_prog) { |
| 23228 | bpf_log(log, |
| 23229 | "Only FENTRY/FEXIT progs are attachable to another BPF prog\n"); |
| 23230 | return -EINVAL; |
| 23231 | } |
| 23232 | if (!btf_type_is_typedef(t)) { |
| 23233 | bpf_log(log, "attach_btf_id %u is not a typedef\n", |
| 23234 | btf_id); |
| 23235 | return -EINVAL; |
| 23236 | } |
| 23237 | if (strncmp(prefix, tname, sizeof(prefix) - 1)) { |
| 23238 | bpf_log(log, "attach_btf_id %u points to wrong type name %s\n", |
| 23239 | btf_id, tname); |
| 23240 | return -EINVAL; |
| 23241 | } |
| 23242 | tname += sizeof(prefix) - 1; |
| 23243 | |
		/* The func_proto of "btf_trace_##tname" is generated from a typedef
		 * without argument names, so use bpf_raw_event_map to recover the
		 * argument names.
		 */
| 23247 | btp = bpf_get_raw_tracepoint(tname); |
| 23248 | if (!btp) |
| 23249 | return -EINVAL; |
| 23250 | fname = kallsyms_lookup((unsigned long)btp->bpf_func, NULL, NULL, NULL, |
| 23251 | trace_symbol); |
| 23252 | bpf_put_raw_tracepoint(btp); |
| 23253 | |
| 23254 | if (fname) |
| 23255 | ret = btf_find_by_name_kind(btf, fname, BTF_KIND_FUNC); |
| 23256 | |
| 23257 | if (!fname || ret < 0) { |
| 23258 | bpf_log(log, "Cannot find btf of tracepoint template, fall back to %s%s.\n", |
| 23259 | prefix, tname); |
| 23260 | t = btf_type_by_id(btf, t->type); |
| 23261 | if (!btf_type_is_ptr(t)) |
| 23262 | /* should never happen in valid vmlinux build */ |
| 23263 | return -EINVAL; |
| 23264 | } else { |
| 23265 | t = btf_type_by_id(btf, ret); |
| 23266 | if (!btf_type_is_func(t)) |
| 23267 | /* should never happen in valid vmlinux build */ |
| 23268 | return -EINVAL; |
| 23269 | } |
| 23270 | |
| 23271 | t = btf_type_by_id(btf, t->type); |
| 23272 | if (!btf_type_is_func_proto(t)) |
| 23273 | /* should never happen in valid vmlinux build */ |
| 23274 | return -EINVAL; |
| 23275 | |
| 23276 | break; |
| 23277 | case BPF_TRACE_ITER: |
| 23278 | if (!btf_type_is_func(t)) { |
| 23279 | bpf_log(log, "attach_btf_id %u is not a function\n", |
| 23280 | btf_id); |
| 23281 | return -EINVAL; |
| 23282 | } |
| 23283 | t = btf_type_by_id(btf, t->type); |
| 23284 | if (!btf_type_is_func_proto(t)) |
| 23285 | return -EINVAL; |
| 23286 | ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); |
| 23287 | if (ret) |
| 23288 | return ret; |
| 23289 | break; |
| 23290 | default: |
| 23291 | if (!prog_extension) |
| 23292 | return -EINVAL; |
| 23293 | fallthrough; |
| 23294 | case BPF_MODIFY_RETURN: |
| 23295 | case BPF_LSM_MAC: |
| 23296 | case BPF_LSM_CGROUP: |
| 23297 | case BPF_TRACE_FENTRY: |
| 23298 | case BPF_TRACE_FEXIT: |
| 23299 | if (!btf_type_is_func(t)) { |
| 23300 | bpf_log(log, "attach_btf_id %u is not a function\n", |
| 23301 | btf_id); |
| 23302 | return -EINVAL; |
| 23303 | } |
| 23304 | if (prog_extension && |
| 23305 | btf_check_type_match(log, prog, btf, t)) |
| 23306 | return -EINVAL; |
| 23307 | t = btf_type_by_id(btf, t->type); |
| 23308 | if (!btf_type_is_func_proto(t)) |
| 23309 | return -EINVAL; |
| 23310 | |
| 23311 | if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && |
| 23312 | (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || |
| 23313 | prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) |
| 23314 | return -EINVAL; |
| 23315 | |
| 23316 | if (tgt_prog && conservative) |
| 23317 | t = NULL; |
| 23318 | |
| 23319 | ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); |
| 23320 | if (ret < 0) |
| 23321 | return ret; |
| 23322 | |
| 23323 | if (tgt_prog) { |
| 23324 | if (subprog == 0) |
| 23325 | addr = (long) tgt_prog->bpf_func; |
| 23326 | else |
| 23327 | addr = (long) tgt_prog->aux->func[subprog]->bpf_func; |
| 23328 | } else { |
| 23329 | if (btf_is_module(btf)) { |
| 23330 | mod = btf_try_get_module(btf); |
| 23331 | if (mod) |
| 23332 | addr = find_kallsyms_symbol_value(mod, tname); |
| 23333 | else |
| 23334 | addr = 0; |
| 23335 | } else { |
| 23336 | addr = kallsyms_lookup_name(tname); |
| 23337 | } |
| 23338 | if (!addr) { |
| 23339 | module_put(mod); |
| 23340 | bpf_log(log, |
| 23341 | "The address of function %s cannot be found\n", |
| 23342 | tname); |
| 23343 | return -ENOENT; |
| 23344 | } |
| 23345 | } |
| 23346 | |
| 23347 | if (prog->sleepable) { |
| 23348 | ret = -EINVAL; |
| 23349 | switch (prog->type) { |
| 23350 | case BPF_PROG_TYPE_TRACING: |
| 23351 | |
| 23352 | /* fentry/fexit/fmod_ret progs can be sleepable if they are |
| 23353 | * attached to ALLOW_ERROR_INJECTION and are not in denylist. |
| 23354 | */ |
| 23355 | if (!check_non_sleepable_error_inject(btf_id) && |
| 23356 | within_error_injection_list(addr)) |
| 23357 | ret = 0; |
| 23358 | /* fentry/fexit/fmod_ret progs can also be sleepable if they are |
| 23359 | * in the fmodret id set with the KF_SLEEPABLE flag. |
| 23360 | */ |
| 23361 | else { |
| 23362 | u32 *flags = btf_kfunc_is_modify_return(btf, btf_id, |
| 23363 | prog); |
| 23364 | |
| 23365 | if (flags && (*flags & KF_SLEEPABLE)) |
| 23366 | ret = 0; |
| 23367 | } |
| 23368 | break; |
| 23369 | case BPF_PROG_TYPE_LSM: |
| 23370 | /* LSM progs check that they are attached to bpf_lsm_*() funcs. |
| 23371 | * Only some of them are sleepable. |
| 23372 | */ |
| 23373 | if (bpf_lsm_is_sleepable_hook(btf_id)) |
| 23374 | ret = 0; |
| 23375 | break; |
| 23376 | default: |
| 23377 | break; |
| 23378 | } |
| 23379 | if (ret) { |
| 23380 | module_put(mod); |
| 23381 | bpf_log(log, "%s is not sleepable\n", tname); |
| 23382 | return ret; |
| 23383 | } |
| 23384 | } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { |
| 23385 | if (tgt_prog) { |
| 23386 | module_put(mod); |
| 23387 | bpf_log(log, "can't modify return codes of BPF programs\n"); |
| 23388 | return -EINVAL; |
| 23389 | } |
| 23390 | ret = -EINVAL; |
| 23391 | if (btf_kfunc_is_modify_return(btf, btf_id, prog) || |
| 23392 | !check_attach_modify_return(addr, tname)) |
| 23393 | ret = 0; |
| 23394 | if (ret) { |
| 23395 | module_put(mod); |
| 23396 | bpf_log(log, "%s() is not modifiable\n", tname); |
| 23397 | return ret; |
| 23398 | } |
| 23399 | } |
| 23400 | |
| 23401 | break; |
| 23402 | } |
| 23403 | tgt_info->tgt_addr = addr; |
| 23404 | tgt_info->tgt_name = tname; |
| 23405 | tgt_info->tgt_type = t; |
| 23406 | tgt_info->tgt_mod = mod; |
| 23407 | return 0; |
| 23408 | } |
| 23409 | |
| 23410 | BTF_SET_START(btf_id_deny) |
| 23411 | BTF_ID_UNUSED |
| 23412 | #ifdef CONFIG_SMP |
| 23413 | BTF_ID(func, migrate_disable) |
| 23414 | BTF_ID(func, migrate_enable) |
| 23415 | #endif |
| 23416 | #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU |
| 23417 | BTF_ID(func, rcu_read_unlock_strict) |
| 23418 | #endif |
| 23419 | #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE) |
| 23420 | BTF_ID(func, preempt_count_add) |
| 23421 | BTF_ID(func, preempt_count_sub) |
| 23422 | #endif |
| 23423 | #ifdef CONFIG_PREEMPT_RCU |
| 23424 | BTF_ID(func, __rcu_read_lock) |
| 23425 | BTF_ID(func, __rcu_read_unlock) |
| 23426 | #endif |
| 23427 | BTF_SET_END(btf_id_deny) |
| 23428 | |
| 23429 | /* fexit and fmod_ret can't be used to attach to __noreturn functions. |
| 23430 | * Currently, we must manually list all __noreturn functions here. Once a more |
| 23431 | * robust solution is implemented, this workaround can be removed. |
| 23432 | */ |
| 23433 | BTF_SET_START(noreturn_deny) |
| 23434 | #ifdef CONFIG_IA32_EMULATION |
| 23435 | BTF_ID(func, __ia32_sys_exit) |
| 23436 | BTF_ID(func, __ia32_sys_exit_group) |
| 23437 | #endif |
| 23438 | #ifdef CONFIG_KUNIT |
| 23439 | BTF_ID(func, __kunit_abort) |
| 23440 | BTF_ID(func, kunit_try_catch_throw) |
| 23441 | #endif |
| 23442 | #ifdef CONFIG_MODULES |
| 23443 | BTF_ID(func, __module_put_and_kthread_exit) |
| 23444 | #endif |
| 23445 | #ifdef CONFIG_X86_64 |
| 23446 | BTF_ID(func, __x64_sys_exit) |
| 23447 | BTF_ID(func, __x64_sys_exit_group) |
| 23448 | #endif |
| 23449 | BTF_ID(func, do_exit) |
| 23450 | BTF_ID(func, do_group_exit) |
| 23451 | BTF_ID(func, kthread_complete_and_exit) |
| 23452 | BTF_ID(func, kthread_exit) |
| 23453 | BTF_ID(func, make_task_dead) |
| 23454 | BTF_SET_END(noreturn_deny) |
| 23455 | |
| 23456 | static bool can_be_sleepable(struct bpf_prog *prog) |
| 23457 | { |
| 23458 | if (prog->type == BPF_PROG_TYPE_TRACING) { |
| 23459 | switch (prog->expected_attach_type) { |
| 23460 | case BPF_TRACE_FENTRY: |
| 23461 | case BPF_TRACE_FEXIT: |
| 23462 | case BPF_MODIFY_RETURN: |
| 23463 | case BPF_TRACE_ITER: |
| 23464 | return true; |
| 23465 | default: |
| 23466 | return false; |
| 23467 | } |
| 23468 | } |
| 23469 | return prog->type == BPF_PROG_TYPE_LSM || |
| 23470 | prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ || |
| 23471 | prog->type == BPF_PROG_TYPE_STRUCT_OPS; |
| 23472 | } |
| 23473 | |
| 23474 | static int check_attach_btf_id(struct bpf_verifier_env *env) |
| 23475 | { |
| 23476 | struct bpf_prog *prog = env->prog; |
| 23477 | struct bpf_prog *tgt_prog = prog->aux->dst_prog; |
| 23478 | struct bpf_attach_target_info tgt_info = {}; |
| 23479 | u32 btf_id = prog->aux->attach_btf_id; |
| 23480 | struct bpf_trampoline *tr; |
| 23481 | int ret; |
| 23482 | u64 key; |
| 23483 | |
| 23484 | if (prog->type == BPF_PROG_TYPE_SYSCALL) { |
| 23485 | if (prog->sleepable) |
| 23486 | /* attach_btf_id checked to be zero already */ |
| 23487 | return 0; |
| 23488 | verbose(env, "Syscall programs can only be sleepable\n"); |
| 23489 | return -EINVAL; |
| 23490 | } |
| 23491 | |
| 23492 | if (prog->sleepable && !can_be_sleepable(prog)) { |
| 23493 | verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n"); |
| 23494 | return -EINVAL; |
| 23495 | } |
| 23496 | |
| 23497 | if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) |
| 23498 | return check_struct_ops_btf_id(env); |
| 23499 | |
| 23500 | if (prog->type != BPF_PROG_TYPE_TRACING && |
| 23501 | prog->type != BPF_PROG_TYPE_LSM && |
| 23502 | prog->type != BPF_PROG_TYPE_EXT) |
| 23503 | return 0; |
| 23504 | |
| 23505 | ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); |
| 23506 | if (ret) |
| 23507 | return ret; |
| 23508 | |
| 23509 | if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { |
| 23510 | /* to make freplace equivalent to their targets, they need to |
| 23511 | * inherit env->ops and expected_attach_type for the rest of the |
| 23512 | * verification |
| 23513 | */ |
| 23514 | env->ops = bpf_verifier_ops[tgt_prog->type]; |
| 23515 | prog->expected_attach_type = tgt_prog->expected_attach_type; |
| 23516 | } |
| 23517 | |
| 23518 | /* store info about the attachment target that will be used later */ |
| 23519 | prog->aux->attach_func_proto = tgt_info.tgt_type; |
| 23520 | prog->aux->attach_func_name = tgt_info.tgt_name; |
| 23521 | prog->aux->mod = tgt_info.tgt_mod; |
| 23522 | |
| 23523 | if (tgt_prog) { |
| 23524 | prog->aux->saved_dst_prog_type = tgt_prog->type; |
| 23525 | prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; |
| 23526 | } |
| 23527 | |
| 23528 | if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { |
| 23529 | prog->aux->attach_btf_trace = true; |
| 23530 | return 0; |
| 23531 | } else if (prog->expected_attach_type == BPF_TRACE_ITER) { |
| 23532 | return bpf_iter_prog_supported(prog); |
| 23533 | } |
| 23534 | |
| 23535 | if (prog->type == BPF_PROG_TYPE_LSM) { |
| 23536 | ret = bpf_lsm_verify_prog(&env->log, prog); |
| 23537 | if (ret < 0) |
| 23538 | return ret; |
| 23539 | } else if (prog->type == BPF_PROG_TYPE_TRACING && |
| 23540 | btf_id_set_contains(&btf_id_deny, btf_id)) { |
| 23541 | return -EINVAL; |
| 23542 | } else if ((prog->expected_attach_type == BPF_TRACE_FEXIT || |
| 23543 | prog->expected_attach_type == BPF_MODIFY_RETURN) && |
| 23544 | btf_id_set_contains(&noreturn_deny, btf_id)) { |
| 23545 | verbose(env, "Attaching fexit/fmod_ret to __noreturn functions is rejected.\n"); |
| 23546 | return -EINVAL; |
| 23547 | } |
| 23548 | |
| 23549 | key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); |
| 23550 | tr = bpf_trampoline_get(key, &tgt_info); |
| 23551 | if (!tr) |
| 23552 | return -ENOMEM; |
| 23553 | |
| 23554 | if (tgt_prog && tgt_prog->aux->tail_call_reachable) |
| 23555 | tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX; |
| 23556 | |
| 23557 | prog->aux->dst_trampoline = tr; |
| 23558 | return 0; |
| 23559 | } |
| 23560 | |
| 23561 | struct btf *bpf_get_btf_vmlinux(void) |
| 23562 | { |
| 23563 | if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { |
| 23564 | mutex_lock(&bpf_verifier_lock); |
| 23565 | if (!btf_vmlinux) |
| 23566 | btf_vmlinux = btf_parse_vmlinux(); |
| 23567 | mutex_unlock(&bpf_verifier_lock); |
| 23568 | } |
| 23569 | return btf_vmlinux; |
| 23570 | } |
| 23571 | |
| 23572 | /* |
| 23573 | * The add_fd_from_fd_array() is executed only if fd_array_cnt is non-zero. In |
| 23574 | * this case expect that every file descriptor in the array is either a map or |
| 23575 | * a BTF. Everything else is considered to be trash. |
| 23576 | */ |
| 23577 | static int add_fd_from_fd_array(struct bpf_verifier_env *env, int fd) |
| 23578 | { |
| 23579 | struct bpf_map *map; |
| 23580 | struct btf *btf; |
| 23581 | CLASS(fd, f)(fd); |
| 23582 | int err; |
| 23583 | |
| 23584 | map = __bpf_map_get(f); |
| 23585 | if (!IS_ERR(map)) { |
| 23586 | err = __add_used_map(env, map); |
| 23587 | if (err < 0) |
| 23588 | return err; |
| 23589 | return 0; |
| 23590 | } |
| 23591 | |
| 23592 | btf = __btf_get_by_fd(f); |
| 23593 | if (!IS_ERR(btf)) { |
| 23594 | err = __add_used_btf(env, btf); |
| 23595 | if (err < 0) |
| 23596 | return err; |
| 23597 | return 0; |
| 23598 | } |
| 23599 | |
| 23600 | verbose(env, "fd %d is not pointing to valid bpf_map or btf\n", fd); |
| 23601 | return PTR_ERR(map); |
| 23602 | } |
| 23603 | |
| 23604 | static int process_fd_array(struct bpf_verifier_env *env, union bpf_attr *attr, bpfptr_t uattr) |
| 23605 | { |
| 23606 | size_t size = sizeof(int); |
| 23607 | int ret; |
| 23608 | int fd; |
| 23609 | u32 i; |
| 23610 | |
| 23611 | env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); |
| 23612 | |
| 23613 | /* |
| 23614 | * The only difference between old (no fd_array_cnt is given) and new |
| 23615 | * APIs is that in the latter case the fd_array is expected to be |
| 23616 | * continuous and is scanned for map fds right away |
| 23617 | */ |
| 23618 | if (!attr->fd_array_cnt) |
| 23619 | return 0; |
| 23620 | |
| 23621 | /* Check for integer overflow */ |
| 23622 | if (attr->fd_array_cnt >= (U32_MAX / size)) { |
| 23623 | verbose(env, "fd_array_cnt is too big (%u)\n", attr->fd_array_cnt); |
| 23624 | return -EINVAL; |
| 23625 | } |
| 23626 | |
| 23627 | for (i = 0; i < attr->fd_array_cnt; i++) { |
| 23628 | if (copy_from_bpfptr_offset(&fd, env->fd_array, i * size, size)) |
| 23629 | return -EFAULT; |
| 23630 | |
| 23631 | ret = add_fd_from_fd_array(env, fd); |
| 23632 | if (ret) |
| 23633 | return ret; |
| 23634 | } |
| 23635 | |
| 23636 | return 0; |
| 23637 | } |
| 23638 | |
| 23639 | static bool can_fallthrough(struct bpf_insn *insn) |
| 23640 | { |
| 23641 | u8 class = BPF_CLASS(insn->code); |
| 23642 | u8 opcode = BPF_OP(insn->code); |
| 23643 | |
| 23644 | if (class != BPF_JMP && class != BPF_JMP32) |
| 23645 | return true; |
| 23646 | |
| 23647 | if (opcode == BPF_EXIT || opcode == BPF_JA) |
| 23648 | return false; |
| 23649 | |
| 23650 | return true; |
| 23651 | } |
| 23652 | |
| 23653 | static bool can_jump(struct bpf_insn *insn) |
| 23654 | { |
| 23655 | u8 class = BPF_CLASS(insn->code); |
| 23656 | u8 opcode = BPF_OP(insn->code); |
| 23657 | |
| 23658 | if (class != BPF_JMP && class != BPF_JMP32) |
| 23659 | return false; |
| 23660 | |
| 23661 | switch (opcode) { |
| 23662 | case BPF_JA: |
| 23663 | case BPF_JEQ: |
| 23664 | case BPF_JNE: |
| 23665 | case BPF_JLT: |
| 23666 | case BPF_JLE: |
| 23667 | case BPF_JGT: |
| 23668 | case BPF_JGE: |
| 23669 | case BPF_JSGT: |
| 23670 | case BPF_JSGE: |
| 23671 | case BPF_JSLT: |
| 23672 | case BPF_JSLE: |
| 23673 | case BPF_JCOND: |
| 23674 | return true; |
| 23675 | } |
| 23676 | |
| 23677 | return false; |
| 23678 | } |
| 23679 | |
| 23680 | static int insn_successors(struct bpf_prog *prog, u32 idx, u32 succ[2]) |
| 23681 | { |
| 23682 | struct bpf_insn *insn = &prog->insnsi[idx]; |
| 23683 | int i = 0, insn_sz; |
| 23684 | u32 dst; |
| 23685 | |
| 23686 | insn_sz = bpf_is_ldimm64(insn) ? 2 : 1; |
| 23687 | if (can_fallthrough(insn) && idx + 1 < prog->len) |
| 23688 | succ[i++] = idx + insn_sz; |
| 23689 | |
| 23690 | if (can_jump(insn)) { |
| 23691 | dst = idx + jmp_offset(insn) + 1; |
| 23692 | if (i == 0 || succ[0] != dst) |
| 23693 | succ[i++] = dst; |
| 23694 | } |
| 23695 | |
| 23696 | return i; |
| 23697 | } |
| 23698 | |
| 23699 | /* Each field is a register bitmask */ |
| 23700 | struct insn_live_regs { |
| 23701 | u16 use; /* registers read by instruction */ |
| 23702 | u16 def; /* registers written by instruction */ |
| 23703 | u16 in; /* registers that may be alive before instruction */ |
| 23704 | u16 out; /* registers that may be alive after instruction */ |
| 23705 | }; |
| 23706 | |
| 23707 | /* Bitmask with 1s for all caller saved registers */ |
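/* With CALLER_SAVED_REGS == 6 (r0-r5 are caller saved) this evaluates to 0x3f,
 * i.e. the register set { r0, r1, r2, r3, r4, r5 }.
 */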
| 23708 | #define ALL_CALLER_SAVED_REGS ((1u << CALLER_SAVED_REGS) - 1) |
| 23709 | |
| 23710 | /* Compute info->{use,def} fields for the instruction */ |
| 23711 | static void compute_insn_live_regs(struct bpf_verifier_env *env, |
| 23712 | struct bpf_insn *insn, |
| 23713 | struct insn_live_regs *info) |
| 23714 | { |
| 23715 | struct call_summary cs; |
| 23716 | u8 class = BPF_CLASS(insn->code); |
| 23717 | u8 code = BPF_OP(insn->code); |
| 23718 | u8 mode = BPF_MODE(insn->code); |
| 23719 | u16 src = BIT(insn->src_reg); |
| 23720 | u16 dst = BIT(insn->dst_reg); |
| 23721 | u16 r0 = BIT(0); |
| 23722 | u16 def = 0; |
| 23723 | u16 use = 0xffff; |
| 23724 | |
| 23725 | switch (class) { |
| 23726 | case BPF_LD: |
| 23727 | switch (mode) { |
| 23728 | case BPF_IMM: |
| 23729 | if (BPF_SIZE(insn->code) == BPF_DW) { |
| 23730 | def = dst; |
| 23731 | use = 0; |
| 23732 | } |
| 23733 | break; |
| 23734 | case BPF_LD | BPF_ABS: |
| 23735 | case BPF_LD | BPF_IND: |
| 23736 | /* stick with defaults */ |
| 23737 | break; |
| 23738 | } |
| 23739 | break; |
| 23740 | case BPF_LDX: |
| 23741 | switch (mode) { |
| 23742 | case BPF_MEM: |
| 23743 | case BPF_MEMSX: |
| 23744 | def = dst; |
| 23745 | use = src; |
| 23746 | break; |
| 23747 | } |
| 23748 | break; |
| 23749 | case BPF_ST: |
| 23750 | switch (mode) { |
| 23751 | case BPF_MEM: |
| 23752 | def = 0; |
| 23753 | use = dst; |
| 23754 | break; |
| 23755 | } |
| 23756 | break; |
| 23757 | case BPF_STX: |
| 23758 | switch (mode) { |
| 23759 | case BPF_MEM: |
| 23760 | def = 0; |
| 23761 | use = dst | src; |
| 23762 | break; |
| 23763 | case BPF_ATOMIC: |
| 23764 | switch (insn->imm) { |
| 23765 | case BPF_CMPXCHG: |
| 23766 | use = r0 | dst | src; |
| 23767 | def = r0; |
| 23768 | break; |
| 23769 | case BPF_LOAD_ACQ: |
| 23770 | def = dst; |
| 23771 | use = src; |
| 23772 | break; |
| 23773 | case BPF_STORE_REL: |
| 23774 | def = 0; |
| 23775 | use = dst | src; |
| 23776 | break; |
| 23777 | default: |
| 23778 | use = dst | src; |
| 23779 | if (insn->imm & BPF_FETCH) |
| 23780 | def = src; |
| 23781 | else |
| 23782 | def = 0; |
| 23783 | } |
| 23784 | break; |
| 23785 | } |
| 23786 | break; |
| 23787 | case BPF_ALU: |
| 23788 | case BPF_ALU64: |
| 23789 | switch (code) { |
| 23790 | case BPF_END: |
| 23791 | use = dst; |
| 23792 | def = dst; |
| 23793 | break; |
| 23794 | case BPF_MOV: |
| 23795 | def = dst; |
| 23796 | if (BPF_SRC(insn->code) == BPF_K) |
| 23797 | use = 0; |
| 23798 | else |
| 23799 | use = src; |
| 23800 | break; |
| 23801 | default: |
| 23802 | def = dst; |
| 23803 | if (BPF_SRC(insn->code) == BPF_K) |
| 23804 | use = dst; |
| 23805 | else |
| 23806 | use = dst | src; |
| 23807 | } |
| 23808 | break; |
| 23809 | case BPF_JMP: |
| 23810 | case BPF_JMP32: |
| 23811 | switch (code) { |
| 23812 | case BPF_JA: |
| 23813 | case BPF_JCOND: |
| 23814 | def = 0; |
| 23815 | use = 0; |
| 23816 | break; |
| 23817 | case BPF_EXIT: |
| 23818 | def = 0; |
| 23819 | use = r0; |
| 23820 | break; |
| 23821 | case BPF_CALL: |
| 23822 | def = ALL_CALLER_SAVED_REGS; |
| 23823 | use = def & ~BIT(BPF_REG_0); |
| 23824 | if (get_call_summary(env, insn, &cs)) |
| 23825 | use = GENMASK(cs.num_params, 1); |
| 23826 | break; |
| 23827 | default: |
| 23828 | def = 0; |
| 23829 | if (BPF_SRC(insn->code) == BPF_K) |
| 23830 | use = dst; |
| 23831 | else |
| 23832 | use = dst | src; |
| 23833 | } |
| 23834 | break; |
| 23835 | } |
| 23836 | |
| 23837 | info->def = def; |
| 23838 | info->use = use; |
| 23839 | } |
| 23840 | |
/* Compute may-live registers for each instruction in the program.
 * A register is live after instruction I if it is read by some
 * instruction S following I during program execution and is not
 * overwritten between I and S. A register is live before I if it is
 * read by I itself or is live after I and not written by I.
 *
 * Store the "live before" sets in env->insn_aux_data[i].live_regs_before.
 */
| 23848 | static int compute_live_registers(struct bpf_verifier_env *env) |
| 23849 | { |
| 23850 | struct bpf_insn_aux_data *insn_aux = env->insn_aux_data; |
| 23851 | struct bpf_insn *insns = env->prog->insnsi; |
| 23852 | struct insn_live_regs *state; |
| 23853 | int insn_cnt = env->prog->len; |
| 23854 | int err = 0, i, j; |
| 23855 | bool changed; |
| 23856 | |
| 23857 | /* Use the following algorithm: |
| 23858 | * - define the following: |
| 23859 | * - I.use : a set of all registers read by instruction I; |
| 23860 | * - I.def : a set of all registers written by instruction I; |
| 23861 | * - I.in : a set of all registers that may be alive before I execution; |
| 23862 | * - I.out : a set of all registers that may be alive after I execution; |
| 23863 | * - insn_successors(I): a set of instructions S that might immediately |
| 23864 | * follow I for some program execution; |
| 23865 | * - associate separate empty sets 'I.in' and 'I.out' with each instruction; |
| 23866 | * - visit each instruction in a postorder and update |
| 23867 | * state[i].in, state[i].out as follows: |
| 23868 | * |
| 23869 | * state[i].out = U [state[s].in for S in insn_successors(i)] |
| 23870 | * state[i].in = (state[i].out / state[i].def) U state[i].use |
| 23871 | * |
| 23872 | * (where U stands for set union, / stands for set difference) |
| 23873 | * - repeat the computation while {in,out} fields changes for |
| 23874 | * any instruction. |
| 23875 | */ |
| 23876 | state = kvcalloc(insn_cnt, sizeof(*state), GFP_KERNEL); |
| 23877 | if (!state) { |
| 23878 | err = -ENOMEM; |
| 23879 | goto out; |
| 23880 | } |
| 23881 | |
| 23882 | for (i = 0; i < insn_cnt; ++i) |
| 23883 | compute_insn_live_regs(env, &insns[i], &state[i]); |
| 23884 | |
| 23885 | changed = true; |
| 23886 | while (changed) { |
| 23887 | changed = false; |
| 23888 | for (i = 0; i < env->cfg.cur_postorder; ++i) { |
| 23889 | int insn_idx = env->cfg.insn_postorder[i]; |
| 23890 | struct insn_live_regs *live = &state[insn_idx]; |
| 23891 | int succ_num; |
| 23892 | u32 succ[2]; |
| 23893 | u16 new_out = 0; |
| 23894 | u16 new_in = 0; |
| 23895 | |
| 23896 | succ_num = insn_successors(env->prog, insn_idx, succ); |
| 23897 | for (int s = 0; s < succ_num; ++s) |
| 23898 | new_out |= state[succ[s]].in; |
| 23899 | new_in = (new_out & ~live->def) | live->use; |
| 23900 | if (new_out != live->out || new_in != live->in) { |
| 23901 | live->in = new_in; |
| 23902 | live->out = new_out; |
| 23903 | changed = true; |
| 23904 | } |
| 23905 | } |
| 23906 | } |
| 23907 | |
| 23908 | for (i = 0; i < insn_cnt; ++i) |
| 23909 | insn_aux[i].live_regs_before = state[i].in; |
| 23910 | |
| 23911 | if (env->log.level & BPF_LOG_LEVEL2) { |
| 23912 | verbose(env, "Live regs before insn:\n"); |
| 23913 | for (i = 0; i < insn_cnt; ++i) { |
| 23914 | verbose(env, "%3d: ", i); |
| 23915 | for (j = BPF_REG_0; j < BPF_REG_10; ++j) |
| 23916 | if (insn_aux[i].live_regs_before & BIT(j)) |
| 23917 | verbose(env, "%d", j); |
| 23918 | else |
| 23919 | verbose(env, "."); |
| 23920 | verbose(env, " "); |
| 23921 | verbose_insn(env, &insns[i]); |
| 23922 | if (bpf_is_ldimm64(&insns[i])) |
| 23923 | i++; |
| 23924 | } |
| 23925 | } |
| 23926 | |
| 23927 | out: |
| 23928 | kvfree(state); |
| 23929 | kvfree(env->cfg.insn_postorder); |
| 23930 | env->cfg.insn_postorder = NULL; |
| 23931 | env->cfg.cur_postorder = 0; |
| 23932 | return err; |
| 23933 | } |
| 23934 | |
| 23935 | int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) |
| 23936 | { |
| 23937 | u64 start_time = ktime_get_ns(); |
| 23938 | struct bpf_verifier_env *env; |
| 23939 | int i, len, ret = -EINVAL, err; |
| 23940 | u32 log_true_size; |
| 23941 | bool is_priv; |
| 23942 | |
| 23943 | /* no program is valid */ |
| 23944 | if (ARRAY_SIZE(bpf_verifier_ops) == 0) |
| 23945 | return -EINVAL; |
| 23946 | |
| 23947 | /* 'struct bpf_verifier_env' can be global, but since it's not small, |
| 23948 | * allocate/free it every time bpf_check() is called |
| 23949 | */ |
| 23950 | env = kvzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); |
| 23951 | if (!env) |
| 23952 | return -ENOMEM; |
| 23953 | |
| 23954 | env->bt.env = env; |
| 23955 | |
| 23956 | len = (*prog)->len; |
| 23957 | env->insn_aux_data = |
| 23958 | vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len)); |
| 23959 | ret = -ENOMEM; |
| 23960 | if (!env->insn_aux_data) |
| 23961 | goto err_free_env; |
| 23962 | for (i = 0; i < len; i++) |
| 23963 | env->insn_aux_data[i].orig_idx = i; |
| 23964 | env->prog = *prog; |
| 23965 | env->ops = bpf_verifier_ops[env->prog->type]; |
| 23966 | |
| 23967 | env->allow_ptr_leaks = bpf_allow_ptr_leaks(env->prog->aux->token); |
| 23968 | env->allow_uninit_stack = bpf_allow_uninit_stack(env->prog->aux->token); |
| 23969 | env->bypass_spec_v1 = bpf_bypass_spec_v1(env->prog->aux->token); |
| 23970 | env->bypass_spec_v4 = bpf_bypass_spec_v4(env->prog->aux->token); |
| 23971 | env->bpf_capable = is_priv = bpf_token_capable(env->prog->aux->token, CAP_BPF); |
| 23972 | |
| 23973 | bpf_get_btf_vmlinux(); |
| 23974 | |
	/* grab the mutex to protect a few globals used by the verifier */
| 23976 | if (!is_priv) |
| 23977 | mutex_lock(&bpf_verifier_lock); |
| 23978 | |
| 23979 | /* user could have requested verbose verifier output |
| 23980 | * and supplied buffer to store the verification trace |
| 23981 | */ |
| 23982 | ret = bpf_vlog_init(&env->log, attr->log_level, |
| 23983 | (char __user *) (unsigned long) attr->log_buf, |
| 23984 | attr->log_size); |
| 23985 | if (ret) |
| 23986 | goto err_unlock; |
| 23987 | |
| 23988 | ret = process_fd_array(env, attr, uattr); |
| 23989 | if (ret) |
| 23990 | goto skip_full_check; |
| 23991 | |
| 23992 | mark_verifier_state_clean(env); |
| 23993 | |
| 23994 | if (IS_ERR(btf_vmlinux)) { |
		/* Either gcc or pahole or the kernel is broken. */
| 23996 | verbose(env, "in-kernel BTF is malformed\n"); |
| 23997 | ret = PTR_ERR(btf_vmlinux); |
| 23998 | goto skip_full_check; |
| 23999 | } |
| 24000 | |
| 24001 | env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); |
| 24002 | if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) |
| 24003 | env->strict_alignment = true; |
| 24004 | if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) |
| 24005 | env->strict_alignment = false; |
| 24006 | |
| 24007 | if (is_priv) |
| 24008 | env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; |
| 24009 | env->test_reg_invariants = attr->prog_flags & BPF_F_TEST_REG_INVARIANTS; |
| 24010 | |
| 24011 | env->explored_states = kvcalloc(state_htab_size(env), |
| 24012 | sizeof(struct list_head), |
| 24013 | GFP_USER); |
| 24014 | ret = -ENOMEM; |
| 24015 | if (!env->explored_states) |
| 24016 | goto skip_full_check; |
| 24017 | |
| 24018 | for (i = 0; i < state_htab_size(env); i++) |
| 24019 | INIT_LIST_HEAD(&env->explored_states[i]); |
| 24020 | INIT_LIST_HEAD(&env->free_list); |
| 24021 | |
| 24022 | ret = check_btf_info_early(env, attr, uattr); |
| 24023 | if (ret < 0) |
| 24024 | goto skip_full_check; |
| 24025 | |
| 24026 | ret = add_subprog_and_kfunc(env); |
| 24027 | if (ret < 0) |
| 24028 | goto skip_full_check; |
| 24029 | |
| 24030 | ret = check_subprogs(env); |
| 24031 | if (ret < 0) |
| 24032 | goto skip_full_check; |
| 24033 | |
| 24034 | ret = check_btf_info(env, attr, uattr); |
| 24035 | if (ret < 0) |
| 24036 | goto skip_full_check; |
| 24037 | |
| 24038 | ret = resolve_pseudo_ldimm64(env); |
| 24039 | if (ret < 0) |
| 24040 | goto skip_full_check; |
| 24041 | |
| 24042 | if (bpf_prog_is_offloaded(env->prog->aux)) { |
| 24043 | ret = bpf_prog_offload_verifier_prep(env->prog); |
| 24044 | if (ret) |
| 24045 | goto skip_full_check; |
| 24046 | } |
| 24047 | |
| 24048 | ret = check_cfg(env); |
| 24049 | if (ret < 0) |
| 24050 | goto skip_full_check; |
| 24051 | |
| 24052 | ret = check_attach_btf_id(env); |
| 24053 | if (ret) |
| 24054 | goto skip_full_check; |
| 24055 | |
| 24056 | ret = compute_live_registers(env); |
| 24057 | if (ret < 0) |
| 24058 | goto skip_full_check; |
| 24059 | |
| 24060 | ret = mark_fastcall_patterns(env); |
| 24061 | if (ret < 0) |
| 24062 | goto skip_full_check; |
| 24063 | |
| 24064 | ret = do_check_main(env); |
| 24065 | ret = ret ?: do_check_subprogs(env); |
| 24066 | |
| 24067 | if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux)) |
| 24068 | ret = bpf_prog_offload_finalize(env); |
| 24069 | |
| 24070 | skip_full_check: |
| 24071 | kvfree(env->explored_states); |
| 24072 | |
| 24073 | /* might decrease stack depth, keep it before passes that |
| 24074 | * allocate additional slots. |
| 24075 | */ |
| 24076 | if (ret == 0) |
| 24077 | ret = remove_fastcall_spills_fills(env); |
| 24078 | |
| 24079 | if (ret == 0) |
| 24080 | ret = check_max_stack_depth(env); |
| 24081 | |
| 24082 | /* instruction rewrites happen after this point */ |
| 24083 | if (ret == 0) |
| 24084 | ret = optimize_bpf_loop(env); |
| 24085 | |
| 24086 | if (is_priv) { |
| 24087 | if (ret == 0) |
| 24088 | opt_hard_wire_dead_code_branches(env); |
| 24089 | if (ret == 0) |
| 24090 | ret = opt_remove_dead_code(env); |
| 24091 | if (ret == 0) |
| 24092 | ret = opt_remove_nops(env); |
| 24093 | } else { |
| 24094 | if (ret == 0) |
| 24095 | sanitize_dead_code(env); |
| 24096 | } |
| 24097 | |
| 24098 | if (ret == 0) |
| 24099 | /* program is valid, convert *(u32*)(ctx + off) accesses */ |
| 24100 | ret = convert_ctx_accesses(env); |
| 24101 | |
| 24102 | if (ret == 0) |
| 24103 | ret = do_misc_fixups(env); |
| 24104 | |
	/* do 32-bit optimization after insn patching is done so that the
	 * patched insns can be handled correctly.
	 */
| 24108 | if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) { |
| 24109 | ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); |
| 24110 | env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret |
| 24111 | : false; |
| 24112 | } |
| 24113 | |
| 24114 | if (ret == 0) |
| 24115 | ret = fixup_call_args(env); |
| 24116 | |
| 24117 | env->verification_time = ktime_get_ns() - start_time; |
| 24118 | print_verification_stats(env); |
| 24119 | env->prog->aux->verified_insns = env->insn_processed; |
| 24120 | |
| 24121 | /* preserve original error even if log finalization is successful */ |
| 24122 | err = bpf_vlog_finalize(&env->log, &log_true_size); |
| 24123 | if (err) |
| 24124 | ret = err; |
| 24125 | |
| 24126 | if (uattr_size >= offsetofend(union bpf_attr, log_true_size) && |
| 24127 | copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, log_true_size), |
| 24128 | &log_true_size, sizeof(log_true_size))) { |
| 24129 | ret = -EFAULT; |
| 24130 | goto err_release_maps; |
| 24131 | } |
| 24132 | |
| 24133 | if (ret) |
| 24134 | goto err_release_maps; |
| 24135 | |
| 24136 | if (env->used_map_cnt) { |
| 24137 | /* if program passed verifier, update used_maps in bpf_prog_info */ |
| 24138 | env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, |
| 24139 | sizeof(env->used_maps[0]), |
| 24140 | GFP_KERNEL); |
| 24141 | |
| 24142 | if (!env->prog->aux->used_maps) { |
| 24143 | ret = -ENOMEM; |
| 24144 | goto err_release_maps; |
| 24145 | } |
| 24146 | |
| 24147 | memcpy(env->prog->aux->used_maps, env->used_maps, |
| 24148 | sizeof(env->used_maps[0]) * env->used_map_cnt); |
| 24149 | env->prog->aux->used_map_cnt = env->used_map_cnt; |
| 24150 | } |
| 24151 | if (env->used_btf_cnt) { |
| 24152 | /* if program passed verifier, update used_btfs in bpf_prog_aux */ |
| 24153 | env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, |
| 24154 | sizeof(env->used_btfs[0]), |
| 24155 | GFP_KERNEL); |
| 24156 | if (!env->prog->aux->used_btfs) { |
| 24157 | ret = -ENOMEM; |
| 24158 | goto err_release_maps; |
| 24159 | } |
| 24160 | |
| 24161 | memcpy(env->prog->aux->used_btfs, env->used_btfs, |
| 24162 | sizeof(env->used_btfs[0]) * env->used_btf_cnt); |
| 24163 | env->prog->aux->used_btf_cnt = env->used_btf_cnt; |
| 24164 | } |
| 24165 | if (env->used_map_cnt || env->used_btf_cnt) { |
| 24166 | /* program is valid. Convert pseudo bpf_ld_imm64 into generic |
| 24167 | * bpf_ld_imm64 instructions |
| 24168 | */ |
| 24169 | convert_pseudo_ld_imm64(env); |
| 24170 | } |
| 24171 | |
| 24172 | adjust_btf_func(env); |
| 24173 | |
| 24174 | err_release_maps: |
| 24175 | if (!env->prog->aux->used_maps) |
| 24176 | /* if we didn't copy map pointers into bpf_prog_info, release |
| 24177 | * them now. Otherwise free_used_maps() will release them. |
| 24178 | */ |
| 24179 | release_maps(env); |
| 24180 | if (!env->prog->aux->used_btfs) |
| 24181 | release_btfs(env); |
| 24182 | |
	/* extension progs temporarily inherit the attach_type of their targets
	 * for verification purposes, so set it back to zero before returning
	 */
| 24186 | if (env->prog->type == BPF_PROG_TYPE_EXT) |
| 24187 | env->prog->expected_attach_type = 0; |
| 24188 | |
| 24189 | *prog = env->prog; |
| 24190 | |
| 24191 | module_put(env->attach_btf_mod); |
| 24192 | err_unlock: |
| 24193 | if (!is_priv) |
| 24194 | mutex_unlock(&bpf_verifier_lock); |
| 24195 | vfree(env->insn_aux_data); |
| 24196 | kvfree(env->insn_hist); |
| 24197 | err_free_env: |
| 24198 | kvfree(env->cfg.insn_postorder); |
| 24199 | kvfree(env); |
| 24200 | return ret; |
| 24201 | } |