// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>
#include <linux/poison.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <net/xdp.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are
 * four pointer types recognized by the check_mem_access() function.
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * Registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
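 *
 * For readers who mostly write BPF programs in restricted C rather than in raw
 * eBPF instructions, the sequence above corresponds roughly to the following
 * libbpf-style program. This is an illustrative addition to this comment, not
 * code the verifier contains; the map name and program section are arbitrary:
 *
 *    #include <linux/bpf.h>
 *    #include <bpf/bpf_helpers.h>
 *
 *    struct {
 *        __uint(type, BPF_MAP_TYPE_ARRAY);
 *        __uint(max_entries, 1);
 *        __type(key, __u32);
 *        __type(value, __u64);
 *    } my_map SEC(".maps");
 *
 *    SEC("xdp")
 *    int prog(struct xdp_md *ctx)
 *    {
 *        __u32 key = 0;    // key lives on the program stack (PTR_TO_STACK)
 *        __u64 *value;
 *
 *        value = bpf_map_lookup_elem(&my_map, &key); // R0 is PTR_TO_MAP_VALUE_OR_NULL
 *        if (!value)                                 // NULL check splits the state
 *            return XDP_PASS;
 *        (*value)++;       // here the pointer is PTR_TO_MAP_VALUE, access is allowed
 *        return XDP_PASS;
 *    }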
 *
 * The following reference types represent a potential reference to a kernel
 * resource which, after first being allocated, must be checked and freed by
 * the BPF program:
 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
 *
 * When the verifier sees a helper call return a reference type, it allocates a
 * pointer id for the reference and stores it in the current function state.
 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
 * passes through a NULL-check conditional. For the branch wherein the state is
 * changed to CONST_IMM, the verifier releases the reference.
 *
 * For each helper function that allocates a reference, such as
 * bpf_sk_lookup_tcp(), there is a corresponding release function, such as
 * bpf_sk_release(). When a reference type passes into the release function,
 * the verifier also releases the reference. If any unchecked or unreleased
 * reference remains at the end of the program, the verifier rejects it.
 */
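
/* As an illustration of the acquire/check/release pattern described above
 * (an example only, not part of the verifier; it assumes a tc (sched_cls)
 * program whose context is 'ctx' and an already-filled IPv4
 * 'struct bpf_sock_tuple tuple'):
 *
 *    struct bpf_sock *sk;
 *
 *    sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
 *                           BPF_F_CURRENT_NETNS, 0);
 *    if (!sk)               // in this branch sk is known to be NULL, so the
 *        return TC_ACT_OK;  // verifier releases the reference automatically
 *    // ... here sk has type PTR_TO_SOCKET and the reference is still held ...
 *    bpf_sk_release(sk);    // every remaining path must release the reference
 *    return TC_ACT_OK;
 *
 * Omitting the bpf_sk_release() call, or exiting on a path that still holds
 * the reference, makes the verifier reject the program with an "Unreleased
 * reference" error.
 */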

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
	/* length of verifier log at the time this state was pushed on stack */
	u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ	8192
#define BPF_COMPLEXITY_LIMIT_STATES	64

#define BPF_MAP_KEY_POISON	(1ULL << 63)
#define BPF_MAP_KEY_SEEN	(1ULL << 62)

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
static void invalidate_non_owning_refs(struct bpf_verifier_env *env);
static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env);
static int ref_set_non_owning(struct bpf_verifier_env *env,
			      struct bpf_reg_state *reg);
static void specialize_kfunc(struct bpf_verifier_env *env,
			     u32 func_id, u16 offset, unsigned long *addr);
static bool is_trusted_reg(const struct bpf_reg_state *reg);

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_ptr_state = (unsigned long)map |
			     (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & BPF_MAP_KEY_POISON;
}

static bool bpf_map_key_unseen(const struct bpf_insn_aux_data *aux)
{
	return !(aux->map_key_state & BPF_MAP_KEY_SEEN);
}

static u64 bpf_map_key_immediate(const struct bpf_insn_aux_data *aux)
{
	return aux->map_key_state & ~(BPF_MAP_KEY_SEEN | BPF_MAP_KEY_POISON);
}

static void bpf_map_key_store(struct bpf_insn_aux_data *aux, u64 state)
{
	bool poisoned = bpf_map_key_poisoned(aux);

	aux->map_key_state = state | BPF_MAP_KEY_SEEN |
			     (poisoned ? BPF_MAP_KEY_POISON : 0ULL);
}

static bool bpf_helper_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == 0;
}

static bool bpf_pseudo_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_CALL;
}

static bool bpf_pseudo_kfunc_call(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL) &&
	       insn->src_reg == BPF_PSEUDO_KFUNC_CALL;
}

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	u8 release_regno;
	int regno;
	int access_size;
	int mem_size;
	u64 msize_max_value;
	int ref_obj_id;
	int dynptr_id;
	int map_uid;
	int func_id;
	struct btf *btf;
	u32 btf_id;
	struct btf *ret_btf;
	u32 ret_btf_id;
	u32 subprogno;
	struct btf_field *kptr_field;
};

struct bpf_kfunc_call_arg_meta {
	/* In parameters */
	struct btf *btf;
	u32 func_id;
	u32 kfunc_flags;
	const struct btf_type *func_proto;
	const char *func_name;
	/* Out parameters */
	u32 ref_obj_id;
	u8 release_regno;
	bool r0_rdonly;
	u32 ret_btf_id;
	u64 r0_size;
	u32 subprogno;
	struct {
		u64 value;
		bool found;
	} arg_constant;

	/* arg_{btf,btf_id,owning_ref} are used by kfunc-specific handling,
	 * generally to pass info about user-defined local kptr types to later
	 * verification logic
	 *   bpf_obj_drop/bpf_percpu_obj_drop
	 *     Record the local kptr type to be drop'd
	 *   bpf_refcount_acquire (via KF_ARG_PTR_TO_REFCOUNTED_KPTR arg type)
	 *     Record the local kptr type to be refcount_incr'd and use
	 *     arg_owning_ref to determine whether refcount_acquire should be
	 *     fallible
	 */
	struct btf *arg_btf;
	u32 arg_btf_id;
	bool arg_owning_ref;

	struct {
		struct btf_field *field;
	} arg_list_head;
	struct {
		struct btf_field *field;
	} arg_rbtree_root;
	struct {
		enum bpf_dynptr_type type;
		u32 id;
		u32 ref_obj_id;
	} initialized_dynptr;
	struct {
		u8 spi;
		u8 frameno;
	} iter;
	u64 mem_size;
};

struct btf *btf_vmlinux;

static DEFINE_MUTEX(bpf_verifier_lock);

static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 i, nr_linfo;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;

	return &linfo[i - 1];
}

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
362 { 363 struct bpf_verifier_env *env = private_data; 364 va_list args; 365 366 if (!bpf_verifier_log_needed(&env->log)) 367 return; 368 369 va_start(args, fmt); 370 bpf_verifier_vlog(&env->log, fmt, args); 371 va_end(args); 372 } 373 374 static const char *ltrim(const char *s) 375 { 376 while (isspace(*s)) 377 s++; 378 379 return s; 380 } 381 382 __printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env, 383 u32 insn_off, 384 const char *prefix_fmt, ...) 385 { 386 const struct bpf_line_info *linfo; 387 388 if (!bpf_verifier_log_needed(&env->log)) 389 return; 390 391 linfo = find_linfo(env, insn_off); 392 if (!linfo || linfo == env->prev_linfo) 393 return; 394 395 if (prefix_fmt) { 396 va_list args; 397 398 va_start(args, prefix_fmt); 399 bpf_verifier_vlog(&env->log, prefix_fmt, args); 400 va_end(args); 401 } 402 403 verbose(env, "%s\n", 404 ltrim(btf_name_by_offset(env->prog->aux->btf, 405 linfo->line_off))); 406 407 env->prev_linfo = linfo; 408 } 409 410 static void verbose_invalid_scalar(struct bpf_verifier_env *env, 411 struct bpf_reg_state *reg, 412 struct tnum *range, const char *ctx, 413 const char *reg_name) 414 { 415 char tn_buf[48]; 416 417 verbose(env, "At %s the register %s ", ctx, reg_name); 418 if (!tnum_is_unknown(reg->var_off)) { 419 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 420 verbose(env, "has value %s", tn_buf); 421 } else { 422 verbose(env, "has unknown scalar value"); 423 } 424 tnum_strn(tn_buf, sizeof(tn_buf), *range); 425 verbose(env, " should have been in %s\n", tn_buf); 426 } 427 428 static bool type_is_pkt_pointer(enum bpf_reg_type type) 429 { 430 type = base_type(type); 431 return type == PTR_TO_PACKET || 432 type == PTR_TO_PACKET_META; 433 } 434 435 static bool type_is_sk_pointer(enum bpf_reg_type type) 436 { 437 return type == PTR_TO_SOCKET || 438 type == PTR_TO_SOCK_COMMON || 439 type == PTR_TO_TCP_SOCK || 440 type == PTR_TO_XDP_SOCK; 441 } 442 443 static bool type_may_be_null(u32 type) 444 { 445 return type & PTR_MAYBE_NULL; 446 } 447 448 static bool reg_not_null(const struct bpf_reg_state *reg) 449 { 450 enum bpf_reg_type type; 451 452 type = reg->type; 453 if (type_may_be_null(type)) 454 return false; 455 456 type = base_type(type); 457 return type == PTR_TO_SOCKET || 458 type == PTR_TO_TCP_SOCK || 459 type == PTR_TO_MAP_VALUE || 460 type == PTR_TO_MAP_KEY || 461 type == PTR_TO_SOCK_COMMON || 462 (type == PTR_TO_BTF_ID && is_trusted_reg(reg)) || 463 type == PTR_TO_MEM; 464 } 465 466 static bool type_is_ptr_alloc_obj(u32 type) 467 { 468 return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC; 469 } 470 471 static bool type_is_non_owning_ref(u32 type) 472 { 473 return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF; 474 } 475 476 static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg) 477 { 478 struct btf_record *rec = NULL; 479 struct btf_struct_meta *meta; 480 481 if (reg->type == PTR_TO_MAP_VALUE) { 482 rec = reg->map_ptr->record; 483 } else if (type_is_ptr_alloc_obj(reg->type)) { 484 meta = btf_find_struct_meta(reg->btf, reg->btf_id); 485 if (meta) 486 rec = meta->record; 487 } 488 return rec; 489 } 490 491 static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog) 492 { 493 struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux; 494 495 return aux && aux[subprog].linkage == BTF_FUNC_GLOBAL; 496 } 497 498 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) 499 { 500 return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK); 501 } 502 503 
static bool type_is_rdonly_mem(u32 type) 504 { 505 return type & MEM_RDONLY; 506 } 507 508 static bool is_acquire_function(enum bpf_func_id func_id, 509 const struct bpf_map *map) 510 { 511 enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC; 512 513 if (func_id == BPF_FUNC_sk_lookup_tcp || 514 func_id == BPF_FUNC_sk_lookup_udp || 515 func_id == BPF_FUNC_skc_lookup_tcp || 516 func_id == BPF_FUNC_ringbuf_reserve || 517 func_id == BPF_FUNC_kptr_xchg) 518 return true; 519 520 if (func_id == BPF_FUNC_map_lookup_elem && 521 (map_type == BPF_MAP_TYPE_SOCKMAP || 522 map_type == BPF_MAP_TYPE_SOCKHASH)) 523 return true; 524 525 return false; 526 } 527 528 static bool is_ptr_cast_function(enum bpf_func_id func_id) 529 { 530 return func_id == BPF_FUNC_tcp_sock || 531 func_id == BPF_FUNC_sk_fullsock || 532 func_id == BPF_FUNC_skc_to_tcp_sock || 533 func_id == BPF_FUNC_skc_to_tcp6_sock || 534 func_id == BPF_FUNC_skc_to_udp6_sock || 535 func_id == BPF_FUNC_skc_to_mptcp_sock || 536 func_id == BPF_FUNC_skc_to_tcp_timewait_sock || 537 func_id == BPF_FUNC_skc_to_tcp_request_sock; 538 } 539 540 static bool is_dynptr_ref_function(enum bpf_func_id func_id) 541 { 542 return func_id == BPF_FUNC_dynptr_data; 543 } 544 545 static bool is_callback_calling_kfunc(u32 btf_id); 546 static bool is_bpf_throw_kfunc(struct bpf_insn *insn); 547 548 static bool is_callback_calling_function(enum bpf_func_id func_id) 549 { 550 return func_id == BPF_FUNC_for_each_map_elem || 551 func_id == BPF_FUNC_timer_set_callback || 552 func_id == BPF_FUNC_find_vma || 553 func_id == BPF_FUNC_loop || 554 func_id == BPF_FUNC_user_ringbuf_drain; 555 } 556 557 static bool is_async_callback_calling_function(enum bpf_func_id func_id) 558 { 559 return func_id == BPF_FUNC_timer_set_callback; 560 } 561 562 static bool is_storage_get_function(enum bpf_func_id func_id) 563 { 564 return func_id == BPF_FUNC_sk_storage_get || 565 func_id == BPF_FUNC_inode_storage_get || 566 func_id == BPF_FUNC_task_storage_get || 567 func_id == BPF_FUNC_cgrp_storage_get; 568 } 569 570 static bool helper_multiple_ref_obj_use(enum bpf_func_id func_id, 571 const struct bpf_map *map) 572 { 573 int ref_obj_uses = 0; 574 575 if (is_ptr_cast_function(func_id)) 576 ref_obj_uses++; 577 if (is_acquire_function(func_id, map)) 578 ref_obj_uses++; 579 if (is_dynptr_ref_function(func_id)) 580 ref_obj_uses++; 581 582 return ref_obj_uses > 1; 583 } 584 585 static bool is_cmpxchg_insn(const struct bpf_insn *insn) 586 { 587 return BPF_CLASS(insn->code) == BPF_STX && 588 BPF_MODE(insn->code) == BPF_ATOMIC && 589 insn->imm == BPF_CMPXCHG; 590 } 591 592 /* string representation of 'enum bpf_reg_type' 593 * 594 * Note that reg_type_str() can not appear more than once in a single verbose() 595 * statement. 
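 * The reason is that reg_type_str() formats its result into the single
 * per-env buffer env->tmp_str_buf and returns a pointer to it, so e.g.
 *   verbose(env, "%s vs %s", reg_type_str(env, t1), reg_type_str(env, t2))
 * would print the same string twice. The returned string is composed from
 * the base type plus modifier-flag prefixes/postfixes, for example:
 *   PTR_TO_SOCKET | PTR_MAYBE_NULL  -> "sock_or_null"
 *   PTR_TO_BUF | MEM_RDONLY         -> "rdonly_buf"
 *   PTR_TO_BTF_ID | PTR_TRUSTED     -> "trusted_ptr_" (callers append the
 *                                      struct name)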
596 */ 597 static const char *reg_type_str(struct bpf_verifier_env *env, 598 enum bpf_reg_type type) 599 { 600 char postfix[16] = {0}, prefix[64] = {0}; 601 static const char * const str[] = { 602 [NOT_INIT] = "?", 603 [SCALAR_VALUE] = "scalar", 604 [PTR_TO_CTX] = "ctx", 605 [CONST_PTR_TO_MAP] = "map_ptr", 606 [PTR_TO_MAP_VALUE] = "map_value", 607 [PTR_TO_STACK] = "fp", 608 [PTR_TO_PACKET] = "pkt", 609 [PTR_TO_PACKET_META] = "pkt_meta", 610 [PTR_TO_PACKET_END] = "pkt_end", 611 [PTR_TO_FLOW_KEYS] = "flow_keys", 612 [PTR_TO_SOCKET] = "sock", 613 [PTR_TO_SOCK_COMMON] = "sock_common", 614 [PTR_TO_TCP_SOCK] = "tcp_sock", 615 [PTR_TO_TP_BUFFER] = "tp_buffer", 616 [PTR_TO_XDP_SOCK] = "xdp_sock", 617 [PTR_TO_BTF_ID] = "ptr_", 618 [PTR_TO_MEM] = "mem", 619 [PTR_TO_BUF] = "buf", 620 [PTR_TO_FUNC] = "func", 621 [PTR_TO_MAP_KEY] = "map_key", 622 [CONST_PTR_TO_DYNPTR] = "dynptr_ptr", 623 }; 624 625 if (type & PTR_MAYBE_NULL) { 626 if (base_type(type) == PTR_TO_BTF_ID) 627 strncpy(postfix, "or_null_", 16); 628 else 629 strncpy(postfix, "_or_null", 16); 630 } 631 632 snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s", 633 type & MEM_RDONLY ? "rdonly_" : "", 634 type & MEM_RINGBUF ? "ringbuf_" : "", 635 type & MEM_USER ? "user_" : "", 636 type & MEM_PERCPU ? "percpu_" : "", 637 type & MEM_RCU ? "rcu_" : "", 638 type & PTR_UNTRUSTED ? "untrusted_" : "", 639 type & PTR_TRUSTED ? "trusted_" : "" 640 ); 641 642 snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s", 643 prefix, str[base_type(type)], postfix); 644 return env->tmp_str_buf; 645 } 646 647 static char slot_type_char[] = { 648 [STACK_INVALID] = '?', 649 [STACK_SPILL] = 'r', 650 [STACK_MISC] = 'm', 651 [STACK_ZERO] = '0', 652 [STACK_DYNPTR] = 'd', 653 [STACK_ITER] = 'i', 654 }; 655 656 static void print_liveness(struct bpf_verifier_env *env, 657 enum bpf_reg_liveness live) 658 { 659 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE)) 660 verbose(env, "_"); 661 if (live & REG_LIVE_READ) 662 verbose(env, "r"); 663 if (live & REG_LIVE_WRITTEN) 664 verbose(env, "w"); 665 if (live & REG_LIVE_DONE) 666 verbose(env, "D"); 667 } 668 669 static int __get_spi(s32 off) 670 { 671 return (-off - 1) / BPF_REG_SIZE; 672 } 673 674 static struct bpf_func_state *func(struct bpf_verifier_env *env, 675 const struct bpf_reg_state *reg) 676 { 677 struct bpf_verifier_state *cur = env->cur_state; 678 679 return cur->frame[reg->frameno]; 680 } 681 682 static bool is_spi_bounds_valid(struct bpf_func_state *state, int spi, int nr_slots) 683 { 684 int allocated_slots = state->allocated_stack / BPF_REG_SIZE; 685 686 /* We need to check that slots between [spi - nr_slots + 1, spi] are 687 * within [0, allocated_stack). 688 * 689 * Please note that the spi grows downwards. For example, a dynptr 690 * takes the size of two stack slots; the first slot will be at 691 * spi and the second slot will be at spi - 1. 
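 *
 * With __get_spi() above, an offset of -8 maps to spi 0 and an offset of
 * -16 maps to spi 1, so a dynptr stored at fp-16 occupies spi 1 (its first
 * slot) and spi 0 (its second slot).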
692 */ 693 return spi - nr_slots + 1 >= 0 && spi < allocated_slots; 694 } 695 696 static int stack_slot_obj_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 697 const char *obj_kind, int nr_slots) 698 { 699 int off, spi; 700 701 if (!tnum_is_const(reg->var_off)) { 702 verbose(env, "%s has to be at a constant offset\n", obj_kind); 703 return -EINVAL; 704 } 705 706 off = reg->off + reg->var_off.value; 707 if (off % BPF_REG_SIZE) { 708 verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off); 709 return -EINVAL; 710 } 711 712 spi = __get_spi(off); 713 if (spi + 1 < nr_slots) { 714 verbose(env, "cannot pass in %s at an offset=%d\n", obj_kind, off); 715 return -EINVAL; 716 } 717 718 if (!is_spi_bounds_valid(func(env, reg), spi, nr_slots)) 719 return -ERANGE; 720 return spi; 721 } 722 723 static int dynptr_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 724 { 725 return stack_slot_obj_get_spi(env, reg, "dynptr", BPF_DYNPTR_NR_SLOTS); 726 } 727 728 static int iter_get_spi(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int nr_slots) 729 { 730 return stack_slot_obj_get_spi(env, reg, "iter", nr_slots); 731 } 732 733 static const char *btf_type_name(const struct btf *btf, u32 id) 734 { 735 return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off); 736 } 737 738 static const char *dynptr_type_str(enum bpf_dynptr_type type) 739 { 740 switch (type) { 741 case BPF_DYNPTR_TYPE_LOCAL: 742 return "local"; 743 case BPF_DYNPTR_TYPE_RINGBUF: 744 return "ringbuf"; 745 case BPF_DYNPTR_TYPE_SKB: 746 return "skb"; 747 case BPF_DYNPTR_TYPE_XDP: 748 return "xdp"; 749 case BPF_DYNPTR_TYPE_INVALID: 750 return "<invalid>"; 751 default: 752 WARN_ONCE(1, "unknown dynptr type %d\n", type); 753 return "<unknown>"; 754 } 755 } 756 757 static const char *iter_type_str(const struct btf *btf, u32 btf_id) 758 { 759 if (!btf || btf_id == 0) 760 return "<invalid>"; 761 762 /* we already validated that type is valid and has conforming name */ 763 return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1; 764 } 765 766 static const char *iter_state_str(enum bpf_iter_state state) 767 { 768 switch (state) { 769 case BPF_ITER_STATE_ACTIVE: 770 return "active"; 771 case BPF_ITER_STATE_DRAINED: 772 return "drained"; 773 case BPF_ITER_STATE_INVALID: 774 return "<invalid>"; 775 default: 776 WARN_ONCE(1, "unknown iter state %d\n", state); 777 return "<unknown>"; 778 } 779 } 780 781 static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno) 782 { 783 env->scratched_regs |= 1U << regno; 784 } 785 786 static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi) 787 { 788 env->scratched_stack_slots |= 1ULL << spi; 789 } 790 791 static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno) 792 { 793 return (env->scratched_regs >> regno) & 1; 794 } 795 796 static bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno) 797 { 798 return (env->scratched_stack_slots >> regno) & 1; 799 } 800 801 static bool verifier_state_scratched(const struct bpf_verifier_env *env) 802 { 803 return env->scratched_regs || env->scratched_stack_slots; 804 } 805 806 static void mark_verifier_state_clean(struct bpf_verifier_env *env) 807 { 808 env->scratched_regs = 0U; 809 env->scratched_stack_slots = 0ULL; 810 } 811 812 /* Used for printing the entire verifier state. 
*/ 813 static void mark_verifier_state_scratched(struct bpf_verifier_env *env) 814 { 815 env->scratched_regs = ~0U; 816 env->scratched_stack_slots = ~0ULL; 817 } 818 819 static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type) 820 { 821 switch (arg_type & DYNPTR_TYPE_FLAG_MASK) { 822 case DYNPTR_TYPE_LOCAL: 823 return BPF_DYNPTR_TYPE_LOCAL; 824 case DYNPTR_TYPE_RINGBUF: 825 return BPF_DYNPTR_TYPE_RINGBUF; 826 case DYNPTR_TYPE_SKB: 827 return BPF_DYNPTR_TYPE_SKB; 828 case DYNPTR_TYPE_XDP: 829 return BPF_DYNPTR_TYPE_XDP; 830 default: 831 return BPF_DYNPTR_TYPE_INVALID; 832 } 833 } 834 835 static enum bpf_type_flag get_dynptr_type_flag(enum bpf_dynptr_type type) 836 { 837 switch (type) { 838 case BPF_DYNPTR_TYPE_LOCAL: 839 return DYNPTR_TYPE_LOCAL; 840 case BPF_DYNPTR_TYPE_RINGBUF: 841 return DYNPTR_TYPE_RINGBUF; 842 case BPF_DYNPTR_TYPE_SKB: 843 return DYNPTR_TYPE_SKB; 844 case BPF_DYNPTR_TYPE_XDP: 845 return DYNPTR_TYPE_XDP; 846 default: 847 return 0; 848 } 849 } 850 851 static bool dynptr_type_refcounted(enum bpf_dynptr_type type) 852 { 853 return type == BPF_DYNPTR_TYPE_RINGBUF; 854 } 855 856 static void __mark_dynptr_reg(struct bpf_reg_state *reg, 857 enum bpf_dynptr_type type, 858 bool first_slot, int dynptr_id); 859 860 static void __mark_reg_not_init(const struct bpf_verifier_env *env, 861 struct bpf_reg_state *reg); 862 863 static void mark_dynptr_stack_regs(struct bpf_verifier_env *env, 864 struct bpf_reg_state *sreg1, 865 struct bpf_reg_state *sreg2, 866 enum bpf_dynptr_type type) 867 { 868 int id = ++env->id_gen; 869 870 __mark_dynptr_reg(sreg1, type, true, id); 871 __mark_dynptr_reg(sreg2, type, false, id); 872 } 873 874 static void mark_dynptr_cb_reg(struct bpf_verifier_env *env, 875 struct bpf_reg_state *reg, 876 enum bpf_dynptr_type type) 877 { 878 __mark_dynptr_reg(reg, type, true, ++env->id_gen); 879 } 880 881 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, 882 struct bpf_func_state *state, int spi); 883 884 static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 885 enum bpf_arg_type arg_type, int insn_idx, int clone_ref_obj_id) 886 { 887 struct bpf_func_state *state = func(env, reg); 888 enum bpf_dynptr_type type; 889 int spi, i, err; 890 891 spi = dynptr_get_spi(env, reg); 892 if (spi < 0) 893 return spi; 894 895 /* We cannot assume both spi and spi - 1 belong to the same dynptr, 896 * hence we need to call destroy_if_dynptr_stack_slot twice for both, 897 * to ensure that for the following example: 898 * [d1][d1][d2][d2] 899 * spi 3 2 1 0 900 * So marking spi = 2 should lead to destruction of both d1 and d2. In 901 * case they do belong to same dynptr, second call won't see slot_type 902 * as STACK_DYNPTR and will simply skip destruction. 
903 */ 904 err = destroy_if_dynptr_stack_slot(env, state, spi); 905 if (err) 906 return err; 907 err = destroy_if_dynptr_stack_slot(env, state, spi - 1); 908 if (err) 909 return err; 910 911 for (i = 0; i < BPF_REG_SIZE; i++) { 912 state->stack[spi].slot_type[i] = STACK_DYNPTR; 913 state->stack[spi - 1].slot_type[i] = STACK_DYNPTR; 914 } 915 916 type = arg_to_dynptr_type(arg_type); 917 if (type == BPF_DYNPTR_TYPE_INVALID) 918 return -EINVAL; 919 920 mark_dynptr_stack_regs(env, &state->stack[spi].spilled_ptr, 921 &state->stack[spi - 1].spilled_ptr, type); 922 923 if (dynptr_type_refcounted(type)) { 924 /* The id is used to track proper releasing */ 925 int id; 926 927 if (clone_ref_obj_id) 928 id = clone_ref_obj_id; 929 else 930 id = acquire_reference_state(env, insn_idx); 931 932 if (id < 0) 933 return id; 934 935 state->stack[spi].spilled_ptr.ref_obj_id = id; 936 state->stack[spi - 1].spilled_ptr.ref_obj_id = id; 937 } 938 939 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 940 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; 941 942 return 0; 943 } 944 945 static void invalidate_dynptr(struct bpf_verifier_env *env, struct bpf_func_state *state, int spi) 946 { 947 int i; 948 949 for (i = 0; i < BPF_REG_SIZE; i++) { 950 state->stack[spi].slot_type[i] = STACK_INVALID; 951 state->stack[spi - 1].slot_type[i] = STACK_INVALID; 952 } 953 954 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); 955 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); 956 957 /* Why do we need to set REG_LIVE_WRITTEN for STACK_INVALID slot? 958 * 959 * While we don't allow reading STACK_INVALID, it is still possible to 960 * do <8 byte writes marking some but not all slots as STACK_MISC. Then, 961 * helpers or insns can do partial read of that part without failing, 962 * but check_stack_range_initialized, check_stack_read_var_off, and 963 * check_stack_read_fixed_off will do mark_reg_read for all 8-bytes of 964 * the slot conservatively. Hence we need to prevent those liveness 965 * marking walks. 966 * 967 * This was not a problem before because STACK_INVALID is only set by 968 * default (where the default reg state has its reg->parent as NULL), or 969 * in clean_live_states after REG_LIVE_DONE (at which point 970 * mark_reg_read won't walk reg->parent chain), but not randomly during 971 * verifier state exploration (like we did above). Hence, for our case 972 * parentage chain will still be live (i.e. reg->parent may be 973 * non-NULL), while earlier reg->parent was NULL, so we need 974 * REG_LIVE_WRITTEN to screen off read marker propagation when it is 975 * done later on reads or by mark_dynptr_read as well to unnecessary 976 * mark registers in verifier state. 977 */ 978 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 979 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; 980 } 981 982 static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 983 { 984 struct bpf_func_state *state = func(env, reg); 985 int spi, ref_obj_id, i; 986 987 spi = dynptr_get_spi(env, reg); 988 if (spi < 0) 989 return spi; 990 991 if (!dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { 992 invalidate_dynptr(env, state, spi); 993 return 0; 994 } 995 996 ref_obj_id = state->stack[spi].spilled_ptr.ref_obj_id; 997 998 /* If the dynptr has a ref_obj_id, then we need to invalidate 999 * two things: 1000 * 1001 * 1) Any dynptrs with a matching ref_obj_id (clones) 1002 * 2) Any slices derived from this dynptr. 
1003 */ 1004 1005 /* Invalidate any slices associated with this dynptr */ 1006 WARN_ON_ONCE(release_reference(env, ref_obj_id)); 1007 1008 /* Invalidate any dynptr clones */ 1009 for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) { 1010 if (state->stack[i].spilled_ptr.ref_obj_id != ref_obj_id) 1011 continue; 1012 1013 /* it should always be the case that if the ref obj id 1014 * matches then the stack slot also belongs to a 1015 * dynptr 1016 */ 1017 if (state->stack[i].slot_type[0] != STACK_DYNPTR) { 1018 verbose(env, "verifier internal error: misconfigured ref_obj_id\n"); 1019 return -EFAULT; 1020 } 1021 if (state->stack[i].spilled_ptr.dynptr.first_slot) 1022 invalidate_dynptr(env, state, i); 1023 } 1024 1025 return 0; 1026 } 1027 1028 static void __mark_reg_unknown(const struct bpf_verifier_env *env, 1029 struct bpf_reg_state *reg); 1030 1031 static void mark_reg_invalid(const struct bpf_verifier_env *env, struct bpf_reg_state *reg) 1032 { 1033 if (!env->allow_ptr_leaks) 1034 __mark_reg_not_init(env, reg); 1035 else 1036 __mark_reg_unknown(env, reg); 1037 } 1038 1039 static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env, 1040 struct bpf_func_state *state, int spi) 1041 { 1042 struct bpf_func_state *fstate; 1043 struct bpf_reg_state *dreg; 1044 int i, dynptr_id; 1045 1046 /* We always ensure that STACK_DYNPTR is never set partially, 1047 * hence just checking for slot_type[0] is enough. This is 1048 * different for STACK_SPILL, where it may be only set for 1049 * 1 byte, so code has to use is_spilled_reg. 1050 */ 1051 if (state->stack[spi].slot_type[0] != STACK_DYNPTR) 1052 return 0; 1053 1054 /* Reposition spi to first slot */ 1055 if (!state->stack[spi].spilled_ptr.dynptr.first_slot) 1056 spi = spi + 1; 1057 1058 if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) { 1059 verbose(env, "cannot overwrite referenced dynptr\n"); 1060 return -EINVAL; 1061 } 1062 1063 mark_stack_slot_scratched(env, spi); 1064 mark_stack_slot_scratched(env, spi - 1); 1065 1066 /* Writing partially to one dynptr stack slot destroys both. */ 1067 for (i = 0; i < BPF_REG_SIZE; i++) { 1068 state->stack[spi].slot_type[i] = STACK_INVALID; 1069 state->stack[spi - 1].slot_type[i] = STACK_INVALID; 1070 } 1071 1072 dynptr_id = state->stack[spi].spilled_ptr.id; 1073 /* Invalidate any slices associated with this dynptr */ 1074 bpf_for_each_reg_in_vstate(env->cur_state, fstate, dreg, ({ 1075 /* Dynptr slices are only PTR_TO_MEM_OR_NULL and PTR_TO_MEM */ 1076 if (dreg->type != (PTR_TO_MEM | PTR_MAYBE_NULL) && dreg->type != PTR_TO_MEM) 1077 continue; 1078 if (dreg->dynptr_id == dynptr_id) 1079 mark_reg_invalid(env, dreg); 1080 })); 1081 1082 /* Do not release reference state, we are destroying dynptr on stack, 1083 * not using some helper to release it. Just reset register. 1084 */ 1085 __mark_reg_not_init(env, &state->stack[spi].spilled_ptr); 1086 __mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr); 1087 1088 /* Same reason as unmark_stack_slots_dynptr above */ 1089 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 1090 state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN; 1091 1092 return 0; 1093 } 1094 1095 static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 1096 { 1097 int spi; 1098 1099 if (reg->type == CONST_PTR_TO_DYNPTR) 1100 return false; 1101 1102 spi = dynptr_get_spi(env, reg); 1103 1104 /* -ERANGE (i.e. 
spi not falling into allocated stack slots) isn't an 1105 * error because this just means the stack state hasn't been updated yet. 1106 * We will do check_mem_access to check and update stack bounds later. 1107 */ 1108 if (spi < 0 && spi != -ERANGE) 1109 return false; 1110 1111 /* We don't need to check if the stack slots are marked by previous 1112 * dynptr initializations because we allow overwriting existing unreferenced 1113 * STACK_DYNPTR slots, see mark_stack_slots_dynptr which calls 1114 * destroy_if_dynptr_stack_slot to ensure dynptr objects at the slots we are 1115 * touching are completely destructed before we reinitialize them for a new 1116 * one. For referenced ones, destroy_if_dynptr_stack_slot returns an error early 1117 * instead of delaying it until the end where the user will get "Unreleased 1118 * reference" error. 1119 */ 1120 return true; 1121 } 1122 1123 static bool is_dynptr_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 1124 { 1125 struct bpf_func_state *state = func(env, reg); 1126 int i, spi; 1127 1128 /* This already represents first slot of initialized bpf_dynptr. 1129 * 1130 * CONST_PTR_TO_DYNPTR already has fixed and var_off as 0 due to 1131 * check_func_arg_reg_off's logic, so we don't need to check its 1132 * offset and alignment. 1133 */ 1134 if (reg->type == CONST_PTR_TO_DYNPTR) 1135 return true; 1136 1137 spi = dynptr_get_spi(env, reg); 1138 if (spi < 0) 1139 return false; 1140 if (!state->stack[spi].spilled_ptr.dynptr.first_slot) 1141 return false; 1142 1143 for (i = 0; i < BPF_REG_SIZE; i++) { 1144 if (state->stack[spi].slot_type[i] != STACK_DYNPTR || 1145 state->stack[spi - 1].slot_type[i] != STACK_DYNPTR) 1146 return false; 1147 } 1148 1149 return true; 1150 } 1151 1152 static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 1153 enum bpf_arg_type arg_type) 1154 { 1155 struct bpf_func_state *state = func(env, reg); 1156 enum bpf_dynptr_type dynptr_type; 1157 int spi; 1158 1159 /* ARG_PTR_TO_DYNPTR takes any type of dynptr */ 1160 if (arg_type == ARG_PTR_TO_DYNPTR) 1161 return true; 1162 1163 dynptr_type = arg_to_dynptr_type(arg_type); 1164 if (reg->type == CONST_PTR_TO_DYNPTR) { 1165 return reg->dynptr.type == dynptr_type; 1166 } else { 1167 spi = dynptr_get_spi(env, reg); 1168 if (spi < 0) 1169 return false; 1170 return state->stack[spi].spilled_ptr.dynptr.type == dynptr_type; 1171 } 1172 } 1173 1174 static void __mark_reg_known_zero(struct bpf_reg_state *reg); 1175 1176 static bool in_rcu_cs(struct bpf_verifier_env *env); 1177 1178 static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta); 1179 1180 static int mark_stack_slots_iter(struct bpf_verifier_env *env, 1181 struct bpf_kfunc_call_arg_meta *meta, 1182 struct bpf_reg_state *reg, int insn_idx, 1183 struct btf *btf, u32 btf_id, int nr_slots) 1184 { 1185 struct bpf_func_state *state = func(env, reg); 1186 int spi, i, j, id; 1187 1188 spi = iter_get_spi(env, reg, nr_slots); 1189 if (spi < 0) 1190 return spi; 1191 1192 id = acquire_reference_state(env, insn_idx); 1193 if (id < 0) 1194 return id; 1195 1196 for (i = 0; i < nr_slots; i++) { 1197 struct bpf_stack_state *slot = &state->stack[spi - i]; 1198 struct bpf_reg_state *st = &slot->spilled_ptr; 1199 1200 __mark_reg_known_zero(st); 1201 st->type = PTR_TO_STACK; /* we don't have dedicated reg type */ 1202 if (is_kfunc_rcu_protected(meta)) { 1203 if (in_rcu_cs(env)) 1204 st->type |= MEM_RCU; 1205 else 1206 st->type |= PTR_UNTRUSTED; 1207 } 1208 st->live |= 
REG_LIVE_WRITTEN; 1209 st->ref_obj_id = i == 0 ? id : 0; 1210 st->iter.btf = btf; 1211 st->iter.btf_id = btf_id; 1212 st->iter.state = BPF_ITER_STATE_ACTIVE; 1213 st->iter.depth = 0; 1214 1215 for (j = 0; j < BPF_REG_SIZE; j++) 1216 slot->slot_type[j] = STACK_ITER; 1217 1218 mark_stack_slot_scratched(env, spi - i); 1219 } 1220 1221 return 0; 1222 } 1223 1224 static int unmark_stack_slots_iter(struct bpf_verifier_env *env, 1225 struct bpf_reg_state *reg, int nr_slots) 1226 { 1227 struct bpf_func_state *state = func(env, reg); 1228 int spi, i, j; 1229 1230 spi = iter_get_spi(env, reg, nr_slots); 1231 if (spi < 0) 1232 return spi; 1233 1234 for (i = 0; i < nr_slots; i++) { 1235 struct bpf_stack_state *slot = &state->stack[spi - i]; 1236 struct bpf_reg_state *st = &slot->spilled_ptr; 1237 1238 if (i == 0) 1239 WARN_ON_ONCE(release_reference(env, st->ref_obj_id)); 1240 1241 __mark_reg_not_init(env, st); 1242 1243 /* see unmark_stack_slots_dynptr() for why we need to set REG_LIVE_WRITTEN */ 1244 st->live |= REG_LIVE_WRITTEN; 1245 1246 for (j = 0; j < BPF_REG_SIZE; j++) 1247 slot->slot_type[j] = STACK_INVALID; 1248 1249 mark_stack_slot_scratched(env, spi - i); 1250 } 1251 1252 return 0; 1253 } 1254 1255 static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env, 1256 struct bpf_reg_state *reg, int nr_slots) 1257 { 1258 struct bpf_func_state *state = func(env, reg); 1259 int spi, i, j; 1260 1261 /* For -ERANGE (i.e. spi not falling into allocated stack slots), we 1262 * will do check_mem_access to check and update stack bounds later, so 1263 * return true for that case. 1264 */ 1265 spi = iter_get_spi(env, reg, nr_slots); 1266 if (spi == -ERANGE) 1267 return true; 1268 if (spi < 0) 1269 return false; 1270 1271 for (i = 0; i < nr_slots; i++) { 1272 struct bpf_stack_state *slot = &state->stack[spi - i]; 1273 1274 for (j = 0; j < BPF_REG_SIZE; j++) 1275 if (slot->slot_type[j] == STACK_ITER) 1276 return false; 1277 } 1278 1279 return true; 1280 } 1281 1282 static int is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 1283 struct btf *btf, u32 btf_id, int nr_slots) 1284 { 1285 struct bpf_func_state *state = func(env, reg); 1286 int spi, i, j; 1287 1288 spi = iter_get_spi(env, reg, nr_slots); 1289 if (spi < 0) 1290 return -EINVAL; 1291 1292 for (i = 0; i < nr_slots; i++) { 1293 struct bpf_stack_state *slot = &state->stack[spi - i]; 1294 struct bpf_reg_state *st = &slot->spilled_ptr; 1295 1296 if (st->type & PTR_UNTRUSTED) 1297 return -EPROTO; 1298 /* only main (first) slot has ref_obj_id set */ 1299 if (i == 0 && !st->ref_obj_id) 1300 return -EINVAL; 1301 if (i != 0 && st->ref_obj_id) 1302 return -EINVAL; 1303 if (st->iter.btf != btf || st->iter.btf_id != btf_id) 1304 return -EINVAL; 1305 1306 for (j = 0; j < BPF_REG_SIZE; j++) 1307 if (slot->slot_type[j] != STACK_ITER) 1308 return -EINVAL; 1309 } 1310 1311 return 0; 1312 } 1313 1314 /* Check if given stack slot is "special": 1315 * - spilled register state (STACK_SPILL); 1316 * - dynptr state (STACK_DYNPTR); 1317 * - iter state (STACK_ITER). 
1318 */ 1319 static bool is_stack_slot_special(const struct bpf_stack_state *stack) 1320 { 1321 enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1]; 1322 1323 switch (type) { 1324 case STACK_SPILL: 1325 case STACK_DYNPTR: 1326 case STACK_ITER: 1327 return true; 1328 case STACK_INVALID: 1329 case STACK_MISC: 1330 case STACK_ZERO: 1331 return false; 1332 default: 1333 WARN_ONCE(1, "unknown stack slot type %d\n", type); 1334 return true; 1335 } 1336 } 1337 1338 /* The reg state of a pointer or a bounded scalar was saved when 1339 * it was spilled to the stack. 1340 */ 1341 static bool is_spilled_reg(const struct bpf_stack_state *stack) 1342 { 1343 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; 1344 } 1345 1346 static bool is_spilled_scalar_reg(const struct bpf_stack_state *stack) 1347 { 1348 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL && 1349 stack->spilled_ptr.type == SCALAR_VALUE; 1350 } 1351 1352 static void scrub_spilled_slot(u8 *stype) 1353 { 1354 if (*stype != STACK_INVALID) 1355 *stype = STACK_MISC; 1356 } 1357 1358 static void print_scalar_ranges(struct bpf_verifier_env *env, 1359 const struct bpf_reg_state *reg, 1360 const char **sep) 1361 { 1362 struct { 1363 const char *name; 1364 u64 val; 1365 bool omit; 1366 } minmaxs[] = { 1367 {"smin", reg->smin_value, reg->smin_value == S64_MIN}, 1368 {"smax", reg->smax_value, reg->smax_value == S64_MAX}, 1369 {"umin", reg->umin_value, reg->umin_value == 0}, 1370 {"umax", reg->umax_value, reg->umax_value == U64_MAX}, 1371 {"smin32", (s64)reg->s32_min_value, reg->s32_min_value == S32_MIN}, 1372 {"smax32", (s64)reg->s32_max_value, reg->s32_max_value == S32_MAX}, 1373 {"umin32", reg->u32_min_value, reg->u32_min_value == 0}, 1374 {"umax32", reg->u32_max_value, reg->u32_max_value == U32_MAX}, 1375 }, *m1, *m2, *mend = &minmaxs[ARRAY_SIZE(minmaxs)]; 1376 bool neg1, neg2; 1377 1378 for (m1 = &minmaxs[0]; m1 < mend; m1++) { 1379 if (m1->omit) 1380 continue; 1381 1382 neg1 = m1->name[0] == 's' && (s64)m1->val < 0; 1383 1384 verbose(env, "%s%s=", *sep, m1->name); 1385 *sep = ","; 1386 1387 for (m2 = m1 + 2; m2 < mend; m2 += 2) { 1388 if (m2->omit || m2->val != m1->val) 1389 continue; 1390 /* don't mix negatives with positives */ 1391 neg2 = m2->name[0] == 's' && (s64)m2->val < 0; 1392 if (neg2 != neg1) 1393 continue; 1394 m2->omit = true; 1395 verbose(env, "%s=", m2->name); 1396 } 1397 1398 verbose(env, m1->name[0] == 's' ? "%lld" : "%llu", m1->val); 1399 } 1400 } 1401 1402 static void print_verifier_state(struct bpf_verifier_env *env, 1403 const struct bpf_func_state *state, 1404 bool print_all) 1405 { 1406 const struct bpf_reg_state *reg; 1407 enum bpf_reg_type t; 1408 int i; 1409 1410 if (state->frameno) 1411 verbose(env, " frame%d:", state->frameno); 1412 for (i = 0; i < MAX_BPF_REG; i++) { 1413 reg = &state->regs[i]; 1414 t = reg->type; 1415 if (t == NOT_INIT) 1416 continue; 1417 if (!print_all && !reg_scratched(env, i)) 1418 continue; 1419 verbose(env, " R%d", i); 1420 print_liveness(env, reg->live); 1421 verbose(env, "="); 1422 if (t == SCALAR_VALUE && reg->precise) 1423 verbose(env, "P"); 1424 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && 1425 tnum_is_const(reg->var_off)) { 1426 /* reg->off should be 0 for SCALAR_VALUE */ 1427 verbose(env, "%s", t == SCALAR_VALUE ? 
"" : reg_type_str(env, t)); 1428 verbose(env, "%lld", reg->var_off.value + reg->off); 1429 } else { 1430 const char *sep = ""; 1431 1432 verbose(env, "%s", reg_type_str(env, t)); 1433 if (base_type(t) == PTR_TO_BTF_ID) 1434 verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id)); 1435 verbose(env, "("); 1436 /* 1437 * _a stands for append, was shortened to avoid multiline statements below. 1438 * This macro is used to output a comma separated list of attributes. 1439 */ 1440 #define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; }) 1441 1442 if (reg->id) 1443 verbose_a("id=%d", reg->id); 1444 if (reg->ref_obj_id) 1445 verbose_a("ref_obj_id=%d", reg->ref_obj_id); 1446 if (type_is_non_owning_ref(reg->type)) 1447 verbose_a("%s", "non_own_ref"); 1448 if (t != SCALAR_VALUE) 1449 verbose_a("off=%d", reg->off); 1450 if (type_is_pkt_pointer(t)) 1451 verbose_a("r=%d", reg->range); 1452 else if (base_type(t) == CONST_PTR_TO_MAP || 1453 base_type(t) == PTR_TO_MAP_KEY || 1454 base_type(t) == PTR_TO_MAP_VALUE) 1455 verbose_a("ks=%d,vs=%d", 1456 reg->map_ptr->key_size, 1457 reg->map_ptr->value_size); 1458 if (tnum_is_const(reg->var_off)) { 1459 /* Typically an immediate SCALAR_VALUE, but 1460 * could be a pointer whose offset is too big 1461 * for reg->off 1462 */ 1463 verbose_a("imm=%llx", reg->var_off.value); 1464 } else { 1465 print_scalar_ranges(env, reg, &sep); 1466 if (!tnum_is_unknown(reg->var_off)) { 1467 char tn_buf[48]; 1468 1469 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 1470 verbose_a("var_off=%s", tn_buf); 1471 } 1472 } 1473 #undef verbose_a 1474 1475 verbose(env, ")"); 1476 } 1477 } 1478 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 1479 char types_buf[BPF_REG_SIZE + 1]; 1480 bool valid = false; 1481 int j; 1482 1483 for (j = 0; j < BPF_REG_SIZE; j++) { 1484 if (state->stack[i].slot_type[j] != STACK_INVALID) 1485 valid = true; 1486 types_buf[j] = slot_type_char[state->stack[i].slot_type[j]]; 1487 } 1488 types_buf[BPF_REG_SIZE] = 0; 1489 if (!valid) 1490 continue; 1491 if (!print_all && !stack_slot_scratched(env, i)) 1492 continue; 1493 switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) { 1494 case STACK_SPILL: 1495 reg = &state->stack[i].spilled_ptr; 1496 t = reg->type; 1497 1498 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); 1499 print_liveness(env, reg->live); 1500 verbose(env, "=%s", t == SCALAR_VALUE ? 
"" : reg_type_str(env, t)); 1501 if (t == SCALAR_VALUE && reg->precise) 1502 verbose(env, "P"); 1503 if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) 1504 verbose(env, "%lld", reg->var_off.value + reg->off); 1505 break; 1506 case STACK_DYNPTR: 1507 i += BPF_DYNPTR_NR_SLOTS - 1; 1508 reg = &state->stack[i].spilled_ptr; 1509 1510 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); 1511 print_liveness(env, reg->live); 1512 verbose(env, "=dynptr_%s", dynptr_type_str(reg->dynptr.type)); 1513 if (reg->ref_obj_id) 1514 verbose(env, "(ref_id=%d)", reg->ref_obj_id); 1515 break; 1516 case STACK_ITER: 1517 /* only main slot has ref_obj_id set; skip others */ 1518 reg = &state->stack[i].spilled_ptr; 1519 if (!reg->ref_obj_id) 1520 continue; 1521 1522 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); 1523 print_liveness(env, reg->live); 1524 verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)", 1525 iter_type_str(reg->iter.btf, reg->iter.btf_id), 1526 reg->ref_obj_id, iter_state_str(reg->iter.state), 1527 reg->iter.depth); 1528 break; 1529 case STACK_MISC: 1530 case STACK_ZERO: 1531 default: 1532 reg = &state->stack[i].spilled_ptr; 1533 1534 for (j = 0; j < BPF_REG_SIZE; j++) 1535 types_buf[j] = slot_type_char[state->stack[i].slot_type[j]]; 1536 types_buf[BPF_REG_SIZE] = 0; 1537 1538 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); 1539 print_liveness(env, reg->live); 1540 verbose(env, "=%s", types_buf); 1541 break; 1542 } 1543 } 1544 if (state->acquired_refs && state->refs[0].id) { 1545 verbose(env, " refs=%d", state->refs[0].id); 1546 for (i = 1; i < state->acquired_refs; i++) 1547 if (state->refs[i].id) 1548 verbose(env, ",%d", state->refs[i].id); 1549 } 1550 if (state->in_callback_fn) 1551 verbose(env, " cb"); 1552 if (state->in_async_callback_fn) 1553 verbose(env, " async_cb"); 1554 verbose(env, "\n"); 1555 if (!print_all) 1556 mark_verifier_state_clean(env); 1557 } 1558 1559 static inline u32 vlog_alignment(u32 pos) 1560 { 1561 return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT), 1562 BPF_LOG_MIN_ALIGNMENT) - pos - 1; 1563 } 1564 1565 static void print_insn_state(struct bpf_verifier_env *env, 1566 const struct bpf_func_state *state) 1567 { 1568 if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) { 1569 /* remove new line character */ 1570 bpf_vlog_reset(&env->log, env->prev_log_pos - 1); 1571 verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' '); 1572 } else { 1573 verbose(env, "%d:", env->insn_idx); 1574 } 1575 print_verifier_state(env, state, false); 1576 } 1577 1578 /* copy array src of length n * size bytes to dst. dst is reallocated if it's too 1579 * small to hold src. This is different from krealloc since we don't want to preserve 1580 * the contents of dst. 1581 * 1582 * Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could 1583 * not be allocated. 1584 */ 1585 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags) 1586 { 1587 size_t alloc_bytes; 1588 void *orig = dst; 1589 size_t bytes; 1590 1591 if (ZERO_OR_NULL_PTR(src)) 1592 goto out; 1593 1594 if (unlikely(check_mul_overflow(n, size, &bytes))) 1595 return NULL; 1596 1597 alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes)); 1598 dst = krealloc(orig, alloc_bytes, flags); 1599 if (!dst) { 1600 kfree(orig); 1601 return NULL; 1602 } 1603 1604 memcpy(dst, src, bytes); 1605 out: 1606 return dst ? dst : ZERO_SIZE_PTR; 1607 } 1608 1609 /* resize an array from old_n items to new_n items. 
the array is reallocated if it's too 1610 * small to hold new_n items. new items are zeroed out if the array grows. 1611 * 1612 * Contrary to krealloc_array, does not free arr if new_n is zero. 1613 */ 1614 static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size) 1615 { 1616 size_t alloc_size; 1617 void *new_arr; 1618 1619 if (!new_n || old_n == new_n) 1620 goto out; 1621 1622 alloc_size = kmalloc_size_roundup(size_mul(new_n, size)); 1623 new_arr = krealloc(arr, alloc_size, GFP_KERNEL); 1624 if (!new_arr) { 1625 kfree(arr); 1626 return NULL; 1627 } 1628 arr = new_arr; 1629 1630 if (new_n > old_n) 1631 memset(arr + old_n * size, 0, (new_n - old_n) * size); 1632 1633 out: 1634 return arr ? arr : ZERO_SIZE_PTR; 1635 } 1636 1637 static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src) 1638 { 1639 dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs, 1640 sizeof(struct bpf_reference_state), GFP_KERNEL); 1641 if (!dst->refs) 1642 return -ENOMEM; 1643 1644 dst->acquired_refs = src->acquired_refs; 1645 return 0; 1646 } 1647 1648 static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src) 1649 { 1650 size_t n = src->allocated_stack / BPF_REG_SIZE; 1651 1652 dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state), 1653 GFP_KERNEL); 1654 if (!dst->stack) 1655 return -ENOMEM; 1656 1657 dst->allocated_stack = src->allocated_stack; 1658 return 0; 1659 } 1660 1661 static int resize_reference_state(struct bpf_func_state *state, size_t n) 1662 { 1663 state->refs = realloc_array(state->refs, state->acquired_refs, n, 1664 sizeof(struct bpf_reference_state)); 1665 if (!state->refs) 1666 return -ENOMEM; 1667 1668 state->acquired_refs = n; 1669 return 0; 1670 } 1671 1672 static int grow_stack_state(struct bpf_func_state *state, int size) 1673 { 1674 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE; 1675 1676 if (old_n >= n) 1677 return 0; 1678 1679 state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state)); 1680 if (!state->stack) 1681 return -ENOMEM; 1682 1683 state->allocated_stack = size; 1684 return 0; 1685 } 1686 1687 /* Acquire a pointer id from the env and update the state->refs to include 1688 * this new pointer reference. 1689 * On success, returns a valid pointer id to associate with the register 1690 * On failure, returns a negative errno. 1691 */ 1692 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) 1693 { 1694 struct bpf_func_state *state = cur_func(env); 1695 int new_ofs = state->acquired_refs; 1696 int id, err; 1697 1698 err = resize_reference_state(state, state->acquired_refs + 1); 1699 if (err) 1700 return err; 1701 id = ++env->id_gen; 1702 state->refs[new_ofs].id = id; 1703 state->refs[new_ofs].insn_idx = insn_idx; 1704 state->refs[new_ofs].callback_ref = state->in_callback_fn ? state->frameno : 0; 1705 1706 return id; 1707 } 1708 1709 /* release function corresponding to acquire_reference_state(). Idempotent. 
*/ 1710 static int release_reference_state(struct bpf_func_state *state, int ptr_id) 1711 { 1712 int i, last_idx; 1713 1714 last_idx = state->acquired_refs - 1; 1715 for (i = 0; i < state->acquired_refs; i++) { 1716 if (state->refs[i].id == ptr_id) { 1717 /* Cannot release caller references in callbacks */ 1718 if (state->in_callback_fn && state->refs[i].callback_ref != state->frameno) 1719 return -EINVAL; 1720 if (last_idx && i != last_idx) 1721 memcpy(&state->refs[i], &state->refs[last_idx], 1722 sizeof(*state->refs)); 1723 memset(&state->refs[last_idx], 0, sizeof(*state->refs)); 1724 state->acquired_refs--; 1725 return 0; 1726 } 1727 } 1728 return -EINVAL; 1729 } 1730 1731 static void free_func_state(struct bpf_func_state *state) 1732 { 1733 if (!state) 1734 return; 1735 kfree(state->refs); 1736 kfree(state->stack); 1737 kfree(state); 1738 } 1739 1740 static void clear_jmp_history(struct bpf_verifier_state *state) 1741 { 1742 kfree(state->jmp_history); 1743 state->jmp_history = NULL; 1744 state->jmp_history_cnt = 0; 1745 } 1746 1747 static void free_verifier_state(struct bpf_verifier_state *state, 1748 bool free_self) 1749 { 1750 int i; 1751 1752 for (i = 0; i <= state->curframe; i++) { 1753 free_func_state(state->frame[i]); 1754 state->frame[i] = NULL; 1755 } 1756 clear_jmp_history(state); 1757 if (free_self) 1758 kfree(state); 1759 } 1760 1761 /* copy verifier state from src to dst growing dst stack space 1762 * when necessary to accommodate larger src stack 1763 */ 1764 static int copy_func_state(struct bpf_func_state *dst, 1765 const struct bpf_func_state *src) 1766 { 1767 int err; 1768 1769 memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs)); 1770 err = copy_reference_state(dst, src); 1771 if (err) 1772 return err; 1773 return copy_stack_state(dst, src); 1774 } 1775 1776 static int copy_verifier_state(struct bpf_verifier_state *dst_state, 1777 const struct bpf_verifier_state *src) 1778 { 1779 struct bpf_func_state *dst; 1780 int i, err; 1781 1782 dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history, 1783 src->jmp_history_cnt, sizeof(struct bpf_idx_pair), 1784 GFP_USER); 1785 if (!dst_state->jmp_history) 1786 return -ENOMEM; 1787 dst_state->jmp_history_cnt = src->jmp_history_cnt; 1788 1789 /* if dst has more stack frames then src frame, free them, this is also 1790 * necessary in case of exceptional exits using bpf_throw. 
1791 */ 1792 for (i = src->curframe + 1; i <= dst_state->curframe; i++) { 1793 free_func_state(dst_state->frame[i]); 1794 dst_state->frame[i] = NULL; 1795 } 1796 dst_state->speculative = src->speculative; 1797 dst_state->active_rcu_lock = src->active_rcu_lock; 1798 dst_state->curframe = src->curframe; 1799 dst_state->active_lock.ptr = src->active_lock.ptr; 1800 dst_state->active_lock.id = src->active_lock.id; 1801 dst_state->branches = src->branches; 1802 dst_state->parent = src->parent; 1803 dst_state->first_insn_idx = src->first_insn_idx; 1804 dst_state->last_insn_idx = src->last_insn_idx; 1805 dst_state->dfs_depth = src->dfs_depth; 1806 dst_state->used_as_loop_entry = src->used_as_loop_entry; 1807 for (i = 0; i <= src->curframe; i++) { 1808 dst = dst_state->frame[i]; 1809 if (!dst) { 1810 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 1811 if (!dst) 1812 return -ENOMEM; 1813 dst_state->frame[i] = dst; 1814 } 1815 err = copy_func_state(dst, src->frame[i]); 1816 if (err) 1817 return err; 1818 } 1819 return 0; 1820 } 1821 1822 static u32 state_htab_size(struct bpf_verifier_env *env) 1823 { 1824 return env->prog->len; 1825 } 1826 1827 static struct bpf_verifier_state_list **explored_state(struct bpf_verifier_env *env, int idx) 1828 { 1829 struct bpf_verifier_state *cur = env->cur_state; 1830 struct bpf_func_state *state = cur->frame[cur->curframe]; 1831 1832 return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; 1833 } 1834 1835 static bool same_callsites(struct bpf_verifier_state *a, struct bpf_verifier_state *b) 1836 { 1837 int fr; 1838 1839 if (a->curframe != b->curframe) 1840 return false; 1841 1842 for (fr = a->curframe; fr >= 0; fr--) 1843 if (a->frame[fr]->callsite != b->frame[fr]->callsite) 1844 return false; 1845 1846 return true; 1847 } 1848 1849 /* Open coded iterators allow back-edges in the state graph in order to 1850 * check unbounded loops that iterators. 1851 * 1852 * In is_state_visited() it is necessary to know if explored states are 1853 * part of some loops in order to decide whether non-exact states 1854 * comparison could be used: 1855 * - non-exact states comparison establishes sub-state relation and uses 1856 * read and precision marks to do so, these marks are propagated from 1857 * children states and thus are not guaranteed to be final in a loop; 1858 * - exact states comparison just checks if current and explored states 1859 * are identical (and thus form a back-edge). 1860 * 1861 * Paper "A New Algorithm for Identifying Loops in Decompilation" 1862 * by Tao Wei, Jian Mao, Wei Zou and Yu Chen [1] presents a convenient 1863 * algorithm for loop structure detection and gives an overview of 1864 * relevant terminology. It also has helpful illustrations. 1865 * 1866 * [1] https://api.semanticscholar.org/CorpusID:15784067 1867 * 1868 * We use a similar algorithm but because loop nested structure is 1869 * irrelevant for verifier ours is significantly simpler and resembles 1870 * strongly connected components algorithm from Sedgewick's textbook. 1871 * 1872 * Define topmost loop entry as a first node of the loop traversed in a 1873 * depth first search starting from initial state. The goal of the loop 1874 * tracking algorithm is to associate topmost loop entries with states 1875 * derived from these entries. 1876 * 1877 * For each step in the DFS states traversal algorithm needs to identify 1878 * the following situations: 1879 * 1880 * initial initial initial 1881 * | | | 1882 * V V V 1883 * ... ... 
.---------> hdr 1884 * | | | | 1885 * V V | V 1886 * cur .-> succ | .------... 1887 * | | | | | | 1888 * V | V | V V 1889 * succ '-- cur | ... ... 1890 * | | | 1891 * | V V 1892 * | succ <- cur 1893 * | | 1894 * | V 1895 * | ... 1896 * | | 1897 * '----' 1898 * 1899 * (A) successor state of cur (B) successor state of cur or it's entry 1900 * not yet traversed are in current DFS path, thus cur and succ 1901 * are members of the same outermost loop 1902 * 1903 * initial initial 1904 * | | 1905 * V V 1906 * ... ... 1907 * | | 1908 * V V 1909 * .------... .------... 1910 * | | | | 1911 * V V V V 1912 * .-> hdr ... ... ... 1913 * | | | | | 1914 * | V V V V 1915 * | succ <- cur succ <- cur 1916 * | | | 1917 * | V V 1918 * | ... ... 1919 * | | | 1920 * '----' exit 1921 * 1922 * (C) successor state of cur is a part of some loop but this loop 1923 * does not include cur or successor state is not in a loop at all. 1924 * 1925 * Algorithm could be described as the following python code: 1926 * 1927 * traversed = set() # Set of traversed nodes 1928 * entries = {} # Mapping from node to loop entry 1929 * depths = {} # Depth level assigned to graph node 1930 * path = set() # Current DFS path 1931 * 1932 * # Find outermost loop entry known for n 1933 * def get_loop_entry(n): 1934 * h = entries.get(n, None) 1935 * while h in entries and entries[h] != h: 1936 * h = entries[h] 1937 * return h 1938 * 1939 * # Update n's loop entry if h's outermost entry comes 1940 * # before n's outermost entry in current DFS path. 1941 * def update_loop_entry(n, h): 1942 * n1 = get_loop_entry(n) or n 1943 * h1 = get_loop_entry(h) or h 1944 * if h1 in path and depths[h1] <= depths[n1]: 1945 * entries[n] = h1 1946 * 1947 * def dfs(n, depth): 1948 * traversed.add(n) 1949 * path.add(n) 1950 * depths[n] = depth 1951 * for succ in G.successors(n): 1952 * if succ not in traversed: 1953 * # Case A: explore succ and update cur's loop entry 1954 * # only if succ's entry is in current DFS path. 1955 * dfs(succ, depth + 1) 1956 * h = get_loop_entry(succ) 1957 * update_loop_entry(n, h) 1958 * else: 1959 * # Case B or C depending on `h1 in path` check in update_loop_entry(). 1960 * update_loop_entry(n, succ) 1961 * path.remove(n) 1962 * 1963 * To adapt this algorithm for use with verifier: 1964 * - use st->branch == 0 as a signal that DFS of succ had been finished 1965 * and cur's loop entry has to be updated (case A), handle this in 1966 * update_branch_counts(); 1967 * - use st->branch > 0 as a signal that st is in the current DFS path; 1968 * - handle cases B and C in is_state_visited(); 1969 * - update topmost loop entry for intermediate states in get_loop_entry(). 1970 */ 1971 static struct bpf_verifier_state *get_loop_entry(struct bpf_verifier_state *st) 1972 { 1973 struct bpf_verifier_state *topmost = st->loop_entry, *old; 1974 1975 while (topmost && topmost->loop_entry && topmost != topmost->loop_entry) 1976 topmost = topmost->loop_entry; 1977 /* Update loop entries for intermediate states to avoid this 1978 * traversal in future get_loop_entry() calls. 
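 * This is essentially path compression: e.g. for a chain
 * st -> A -> B -> topmost, after the loop below st, A and B all point
 * at topmost directly.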
1979 */ 1980 while (st && st->loop_entry != topmost) { 1981 old = st->loop_entry; 1982 st->loop_entry = topmost; 1983 st = old; 1984 } 1985 return topmost; 1986 } 1987 1988 static void update_loop_entry(struct bpf_verifier_state *cur, struct bpf_verifier_state *hdr) 1989 { 1990 struct bpf_verifier_state *cur1, *hdr1; 1991 1992 cur1 = get_loop_entry(cur) ?: cur; 1993 hdr1 = get_loop_entry(hdr) ?: hdr; 1994 /* The head1->branches check decides between cases B and C in 1995 * comment for get_loop_entry(). If hdr1->branches == 0 then 1996 * head's topmost loop entry is not in current DFS path, 1997 * hence 'cur' and 'hdr' are not in the same loop and there is 1998 * no need to update cur->loop_entry. 1999 */ 2000 if (hdr1->branches && hdr1->dfs_depth <= cur1->dfs_depth) { 2001 cur->loop_entry = hdr; 2002 hdr->used_as_loop_entry = true; 2003 } 2004 } 2005 2006 static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) 2007 { 2008 while (st) { 2009 u32 br = --st->branches; 2010 2011 /* br == 0 signals that DFS exploration for 'st' is finished, 2012 * thus it is necessary to update parent's loop entry if it 2013 * turned out that st is a part of some loop. 2014 * This is a part of 'case A' in get_loop_entry() comment. 2015 */ 2016 if (br == 0 && st->parent && st->loop_entry) 2017 update_loop_entry(st->parent, st->loop_entry); 2018 2019 /* WARN_ON(br > 1) technically makes sense here, 2020 * but see comment in push_stack(), hence: 2021 */ 2022 WARN_ONCE((int)br < 0, 2023 "BUG update_branch_counts:branches_to_explore=%d\n", 2024 br); 2025 if (br) 2026 break; 2027 st = st->parent; 2028 } 2029 } 2030 2031 static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, 2032 int *insn_idx, bool pop_log) 2033 { 2034 struct bpf_verifier_state *cur = env->cur_state; 2035 struct bpf_verifier_stack_elem *elem, *head = env->head; 2036 int err; 2037 2038 if (env->head == NULL) 2039 return -ENOENT; 2040 2041 if (cur) { 2042 err = copy_verifier_state(cur, &head->st); 2043 if (err) 2044 return err; 2045 } 2046 if (pop_log) 2047 bpf_vlog_reset(&env->log, head->log_pos); 2048 if (insn_idx) 2049 *insn_idx = head->insn_idx; 2050 if (prev_insn_idx) 2051 *prev_insn_idx = head->prev_insn_idx; 2052 elem = head->next; 2053 free_verifier_state(&head->st, false); 2054 kfree(head); 2055 env->head = elem; 2056 env->stack_size--; 2057 return 0; 2058 } 2059 2060 static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, 2061 int insn_idx, int prev_insn_idx, 2062 bool speculative) 2063 { 2064 struct bpf_verifier_state *cur = env->cur_state; 2065 struct bpf_verifier_stack_elem *elem; 2066 int err; 2067 2068 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 2069 if (!elem) 2070 goto err; 2071 2072 elem->insn_idx = insn_idx; 2073 elem->prev_insn_idx = prev_insn_idx; 2074 elem->next = env->head; 2075 elem->log_pos = env->log.end_pos; 2076 env->head = elem; 2077 env->stack_size++; 2078 err = copy_verifier_state(&elem->st, cur); 2079 if (err) 2080 goto err; 2081 elem->st.speculative |= speculative; 2082 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { 2083 verbose(env, "The sequence of %d jumps is too complex.\n", 2084 env->stack_size); 2085 goto err; 2086 } 2087 if (elem->st.parent) { 2088 ++elem->st.parent->branches; 2089 /* WARN_ON(branches > 2) technically makes sense here, 2090 * but 2091 * 1. speculative states will bump 'branches' for non-branch 2092 * instructions 2093 * 2. 
is_state_visited() heuristics may decide not to create 2094 * a new state for a sequence of branches and all such current 2095 * and cloned states will be pointing to a single parent state 2096 * which might have large 'branches' count. 2097 */ 2098 } 2099 return &elem->st; 2100 err: 2101 free_verifier_state(env->cur_state, true); 2102 env->cur_state = NULL; 2103 /* pop all elements and return */ 2104 while (!pop_stack(env, NULL, NULL, false)); 2105 return NULL; 2106 } 2107 2108 #define CALLER_SAVED_REGS 6 2109 static const int caller_saved[CALLER_SAVED_REGS] = { 2110 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 2111 }; 2112 2113 /* This helper doesn't clear reg->id */ 2114 static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm) 2115 { 2116 reg->var_off = tnum_const(imm); 2117 reg->smin_value = (s64)imm; 2118 reg->smax_value = (s64)imm; 2119 reg->umin_value = imm; 2120 reg->umax_value = imm; 2121 2122 reg->s32_min_value = (s32)imm; 2123 reg->s32_max_value = (s32)imm; 2124 reg->u32_min_value = (u32)imm; 2125 reg->u32_max_value = (u32)imm; 2126 } 2127 2128 /* Mark the unknown part of a register (variable offset or scalar value) as 2129 * known to have the value @imm. 2130 */ 2131 static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) 2132 { 2133 /* Clear off and union(map_ptr, range) */ 2134 memset(((u8 *)reg) + sizeof(reg->type), 0, 2135 offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); 2136 reg->id = 0; 2137 reg->ref_obj_id = 0; 2138 ___mark_reg_known(reg, imm); 2139 } 2140 2141 static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm) 2142 { 2143 reg->var_off = tnum_const_subreg(reg->var_off, imm); 2144 reg->s32_min_value = (s32)imm; 2145 reg->s32_max_value = (s32)imm; 2146 reg->u32_min_value = (u32)imm; 2147 reg->u32_max_value = (u32)imm; 2148 } 2149 2150 /* Mark the 'variable offset' part of a register as zero. This should be 2151 * used only on registers holding a pointer type. 2152 */ 2153 static void __mark_reg_known_zero(struct bpf_reg_state *reg) 2154 { 2155 __mark_reg_known(reg, 0); 2156 } 2157 2158 static void __mark_reg_const_zero(struct bpf_reg_state *reg) 2159 { 2160 __mark_reg_known(reg, 0); 2161 reg->type = SCALAR_VALUE; 2162 } 2163 2164 static void mark_reg_known_zero(struct bpf_verifier_env *env, 2165 struct bpf_reg_state *regs, u32 regno) 2166 { 2167 if (WARN_ON(regno >= MAX_BPF_REG)) { 2168 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); 2169 /* Something bad happened, let's kill all regs */ 2170 for (regno = 0; regno < MAX_BPF_REG; regno++) 2171 __mark_reg_not_init(env, regs + regno); 2172 return; 2173 } 2174 __mark_reg_known_zero(regs + regno); 2175 } 2176 2177 static void __mark_dynptr_reg(struct bpf_reg_state *reg, enum bpf_dynptr_type type, 2178 bool first_slot, int dynptr_id) 2179 { 2180 /* reg->type has no meaning for STACK_DYNPTR, but when we set reg for 2181 * callback arguments, it does need to be CONST_PTR_TO_DYNPTR, so simply 2182 * set it unconditionally as it is ignored for STACK_DYNPTR anyway. 2183 */ 2184 __mark_reg_known_zero(reg); 2185 reg->type = CONST_PTR_TO_DYNPTR; 2186 /* Give each dynptr a unique id to uniquely associate slices to it. 
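 * Slices derived from the dynptr are expected to record the same id
 * (e.g. in their dynptr_id field), so that releasing the dynptr can
 * find and invalidate them.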
*/
2187 reg->id = dynptr_id;
2188 reg->dynptr.type = type;
2189 reg->dynptr.first_slot = first_slot;
2190 }
2191
2192 static void mark_ptr_not_null_reg(struct bpf_reg_state *reg)
2193 {
2194 if (base_type(reg->type) == PTR_TO_MAP_VALUE) {
2195 const struct bpf_map *map = reg->map_ptr;
2196
2197 if (map->inner_map_meta) {
2198 reg->type = CONST_PTR_TO_MAP;
2199 reg->map_ptr = map->inner_map_meta;
2200 /* transfer reg's id which is unique for every map_lookup_elem
2201 * as UID of the inner map.
2202 */
2203 if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER))
2204 reg->map_uid = reg->id;
2205 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
2206 reg->type = PTR_TO_XDP_SOCK;
2207 } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
2208 map->map_type == BPF_MAP_TYPE_SOCKHASH) {
2209 reg->type = PTR_TO_SOCKET;
2210 } else {
2211 reg->type = PTR_TO_MAP_VALUE;
2212 }
2213 return;
2214 }
2215
2216 reg->type &= ~PTR_MAYBE_NULL;
2217 }
2218
2219 static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
2220 struct btf_field_graph_root *ds_head)
2221 {
2222 __mark_reg_known_zero(&regs[regno]);
2223 regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC;
2224 regs[regno].btf = ds_head->btf;
2225 regs[regno].btf_id = ds_head->value_btf_id;
2226 regs[regno].off = ds_head->node_offset;
2227 }
2228
2229 static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg)
2230 {
2231 return type_is_pkt_pointer(reg->type);
2232 }
2233
2234 static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg)
2235 {
2236 return reg_is_pkt_pointer(reg) ||
2237 reg->type == PTR_TO_PACKET_END;
2238 }
2239
2240 static bool reg_is_dynptr_slice_pkt(const struct bpf_reg_state *reg)
2241 {
2242 return base_type(reg->type) == PTR_TO_MEM &&
2243 (reg->type & DYNPTR_TYPE_SKB || reg->type & DYNPTR_TYPE_XDP);
2244 }
2245
2246 /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */
2247 static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg,
2248 enum bpf_reg_type which)
2249 {
2250 /* The register can already have a range from prior markings.
2251 * This is fine as long as it hasn't been advanced from its
2252 * origin.
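 * "Advanced" is what the checks below catch: a non-zero id, a non-zero
 * fixed offset, or a variable offset that is no longer a known zero.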
2253 */ 2254 return reg->type == which && 2255 reg->id == 0 && 2256 reg->off == 0 && 2257 tnum_equals_const(reg->var_off, 0); 2258 } 2259 2260 /* Reset the min/max bounds of a register */ 2261 static void __mark_reg_unbounded(struct bpf_reg_state *reg) 2262 { 2263 reg->smin_value = S64_MIN; 2264 reg->smax_value = S64_MAX; 2265 reg->umin_value = 0; 2266 reg->umax_value = U64_MAX; 2267 2268 reg->s32_min_value = S32_MIN; 2269 reg->s32_max_value = S32_MAX; 2270 reg->u32_min_value = 0; 2271 reg->u32_max_value = U32_MAX; 2272 } 2273 2274 static void __mark_reg64_unbounded(struct bpf_reg_state *reg) 2275 { 2276 reg->smin_value = S64_MIN; 2277 reg->smax_value = S64_MAX; 2278 reg->umin_value = 0; 2279 reg->umax_value = U64_MAX; 2280 } 2281 2282 static void __mark_reg32_unbounded(struct bpf_reg_state *reg) 2283 { 2284 reg->s32_min_value = S32_MIN; 2285 reg->s32_max_value = S32_MAX; 2286 reg->u32_min_value = 0; 2287 reg->u32_max_value = U32_MAX; 2288 } 2289 2290 static void __update_reg32_bounds(struct bpf_reg_state *reg) 2291 { 2292 struct tnum var32_off = tnum_subreg(reg->var_off); 2293 2294 /* min signed is max(sign bit) | min(other bits) */ 2295 reg->s32_min_value = max_t(s32, reg->s32_min_value, 2296 var32_off.value | (var32_off.mask & S32_MIN)); 2297 /* max signed is min(sign bit) | max(other bits) */ 2298 reg->s32_max_value = min_t(s32, reg->s32_max_value, 2299 var32_off.value | (var32_off.mask & S32_MAX)); 2300 reg->u32_min_value = max_t(u32, reg->u32_min_value, (u32)var32_off.value); 2301 reg->u32_max_value = min(reg->u32_max_value, 2302 (u32)(var32_off.value | var32_off.mask)); 2303 } 2304 2305 static void __update_reg64_bounds(struct bpf_reg_state *reg) 2306 { 2307 /* min signed is max(sign bit) | min(other bits) */ 2308 reg->smin_value = max_t(s64, reg->smin_value, 2309 reg->var_off.value | (reg->var_off.mask & S64_MIN)); 2310 /* max signed is min(sign bit) | max(other bits) */ 2311 reg->smax_value = min_t(s64, reg->smax_value, 2312 reg->var_off.value | (reg->var_off.mask & S64_MAX)); 2313 reg->umin_value = max(reg->umin_value, reg->var_off.value); 2314 reg->umax_value = min(reg->umax_value, 2315 reg->var_off.value | reg->var_off.mask); 2316 } 2317 2318 static void __update_reg_bounds(struct bpf_reg_state *reg) 2319 { 2320 __update_reg32_bounds(reg); 2321 __update_reg64_bounds(reg); 2322 } 2323 2324 /* Uses signed min/max values to inform unsigned, and vice-versa */ 2325 static void __reg32_deduce_bounds(struct bpf_reg_state *reg) 2326 { 2327 /* Learn sign from signed bounds. 2328 * If we cannot cross the sign boundary, then signed and unsigned bounds 2329 * are the same, so combine. This works even in the negative case, e.g. 2330 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 2331 */ 2332 if (reg->s32_min_value >= 0 || reg->s32_max_value < 0) { 2333 reg->s32_min_value = reg->u32_min_value = 2334 max_t(u32, reg->s32_min_value, reg->u32_min_value); 2335 reg->s32_max_value = reg->u32_max_value = 2336 min_t(u32, reg->s32_max_value, reg->u32_max_value); 2337 return; 2338 } 2339 /* Learn sign from unsigned bounds. Signed bounds cross the sign 2340 * boundary, so we must be careful. 2341 */ 2342 if ((s32)reg->u32_max_value >= 0) { 2343 /* Positive. We can't learn anything from the smin, but smax 2344 * is positive, hence safe. 2345 */ 2346 reg->s32_min_value = reg->u32_min_value; 2347 reg->s32_max_value = reg->u32_max_value = 2348 min_t(u32, reg->s32_max_value, reg->u32_max_value); 2349 } else if ((s32)reg->u32_min_value < 0) { 2350 /* Negative. 
We can't learn anything from the smax, but smin 2351 * is negative, hence safe. 2352 */ 2353 reg->s32_min_value = reg->u32_min_value = 2354 max_t(u32, reg->s32_min_value, reg->u32_min_value); 2355 reg->s32_max_value = reg->u32_max_value; 2356 } 2357 } 2358 2359 static void __reg64_deduce_bounds(struct bpf_reg_state *reg) 2360 { 2361 /* Learn sign from signed bounds. 2362 * If we cannot cross the sign boundary, then signed and unsigned bounds 2363 * are the same, so combine. This works even in the negative case, e.g. 2364 * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 2365 */ 2366 if (reg->smin_value >= 0 || reg->smax_value < 0) { 2367 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 2368 reg->umin_value); 2369 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 2370 reg->umax_value); 2371 return; 2372 } 2373 /* Learn sign from unsigned bounds. Signed bounds cross the sign 2374 * boundary, so we must be careful. 2375 */ 2376 if ((s64)reg->umax_value >= 0) { 2377 /* Positive. We can't learn anything from the smin, but smax 2378 * is positive, hence safe. 2379 */ 2380 reg->smin_value = reg->umin_value; 2381 reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, 2382 reg->umax_value); 2383 } else if ((s64)reg->umin_value < 0) { 2384 /* Negative. We can't learn anything from the smax, but smin 2385 * is negative, hence safe. 2386 */ 2387 reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, 2388 reg->umin_value); 2389 reg->smax_value = reg->umax_value; 2390 } 2391 } 2392 2393 static void __reg_deduce_bounds(struct bpf_reg_state *reg) 2394 { 2395 __reg32_deduce_bounds(reg); 2396 __reg64_deduce_bounds(reg); 2397 } 2398 2399 /* Attempts to improve var_off based on unsigned min/max information */ 2400 static void __reg_bound_offset(struct bpf_reg_state *reg) 2401 { 2402 struct tnum var64_off = tnum_intersect(reg->var_off, 2403 tnum_range(reg->umin_value, 2404 reg->umax_value)); 2405 struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off), 2406 tnum_range(reg->u32_min_value, 2407 reg->u32_max_value)); 2408 2409 reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); 2410 } 2411 2412 static void reg_bounds_sync(struct bpf_reg_state *reg) 2413 { 2414 /* We might have learned new bounds from the var_off. */ 2415 __update_reg_bounds(reg); 2416 /* We might have learned something about the sign bit. */ 2417 __reg_deduce_bounds(reg); 2418 /* We might have learned some bits from the bounds. */ 2419 __reg_bound_offset(reg); 2420 /* Intersecting with the old var_off might have improved our bounds 2421 * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), 2422 * then new var_off is (0; 0x7f...fc) which improves our umax. 2423 */ 2424 __update_reg_bounds(reg); 2425 } 2426 2427 static bool __reg32_bound_s64(s32 a) 2428 { 2429 return a >= 0 && a <= S32_MAX; 2430 } 2431 2432 static void __reg_assign_32_into_64(struct bpf_reg_state *reg) 2433 { 2434 reg->umin_value = reg->u32_min_value; 2435 reg->umax_value = reg->u32_max_value; 2436 2437 /* Attempt to pull 32-bit signed bounds into 64-bit bounds but must 2438 * be positive otherwise set to worse case bounds and refine later 2439 * from tnum. 
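 *
 * For example, s32 bounds [0, 100] can be copied into smin/smax as-is,
 * while s32 bounds [-1, 5] cannot (the sign bit is set when viewed as
 * unsigned), so smin/smax fall back to [0, U32_MAX] and are tightened
 * later by reg_bounds_sync() using var_off.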
2440 */
2441 if (__reg32_bound_s64(reg->s32_min_value) &&
2442 __reg32_bound_s64(reg->s32_max_value)) {
2443 reg->smin_value = reg->s32_min_value;
2444 reg->smax_value = reg->s32_max_value;
2445 } else {
2446 reg->smin_value = 0;
2447 reg->smax_value = U32_MAX;
2448 }
2449 }
2450
2451 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
2452 {
2453 /* special case when the 64-bit register has its upper 32 bits
2454 * zeroed. Typically happens after a zext or <<32, >>32 sequence,
2455 * allowing us to use the 32-bit bounds directly.
2456 */
2457 if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
2458 __reg_assign_32_into_64(reg);
2459 } else {
2460 /* Otherwise the best we can do is push the lower 32-bit known and
2461 * unknown bits into the register (var_off set from jmp logic),
2462 * then learn as much as possible from the 64-bit tnum
2463 * known and unknown bits. The previous smin/smax bounds are
2464 * invalid here because of the jmp32 compare, so mark them unknown
2465 * so they do not impact tnum bounds calculation.
2466 */
2467 __mark_reg64_unbounded(reg);
2468 }
2469 reg_bounds_sync(reg);
2470 }
2471
2472 static bool __reg64_bound_s32(s64 a)
2473 {
2474 return a >= S32_MIN && a <= S32_MAX;
2475 }
2476
2477 static bool __reg64_bound_u32(u64 a)
2478 {
2479 return a >= U32_MIN && a <= U32_MAX;
2480 }
2481
2482 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
2483 {
2484 __mark_reg32_unbounded(reg);
2485 if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
2486 reg->s32_min_value = (s32)reg->smin_value;
2487 reg->s32_max_value = (s32)reg->smax_value;
2488 }
2489 if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
2490 reg->u32_min_value = (u32)reg->umin_value;
2491 reg->u32_max_value = (u32)reg->umax_value;
2492 }
2493 reg_bounds_sync(reg);
2494 }
2495
2496 /* Mark a register as having a completely unknown (scalar) value.
*/ 2497 static void __mark_reg_unknown(const struct bpf_verifier_env *env, 2498 struct bpf_reg_state *reg) 2499 { 2500 /* 2501 * Clear type, off, and union(map_ptr, range) and 2502 * padding between 'type' and union 2503 */ 2504 memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); 2505 reg->type = SCALAR_VALUE; 2506 reg->id = 0; 2507 reg->ref_obj_id = 0; 2508 reg->var_off = tnum_unknown; 2509 reg->frameno = 0; 2510 reg->precise = !env->bpf_capable; 2511 __mark_reg_unbounded(reg); 2512 } 2513 2514 static void mark_reg_unknown(struct bpf_verifier_env *env, 2515 struct bpf_reg_state *regs, u32 regno) 2516 { 2517 if (WARN_ON(regno >= MAX_BPF_REG)) { 2518 verbose(env, "mark_reg_unknown(regs, %u)\n", regno); 2519 /* Something bad happened, let's kill all regs except FP */ 2520 for (regno = 0; regno < BPF_REG_FP; regno++) 2521 __mark_reg_not_init(env, regs + regno); 2522 return; 2523 } 2524 __mark_reg_unknown(env, regs + regno); 2525 } 2526 2527 static void __mark_reg_not_init(const struct bpf_verifier_env *env, 2528 struct bpf_reg_state *reg) 2529 { 2530 __mark_reg_unknown(env, reg); 2531 reg->type = NOT_INIT; 2532 } 2533 2534 static void mark_reg_not_init(struct bpf_verifier_env *env, 2535 struct bpf_reg_state *regs, u32 regno) 2536 { 2537 if (WARN_ON(regno >= MAX_BPF_REG)) { 2538 verbose(env, "mark_reg_not_init(regs, %u)\n", regno); 2539 /* Something bad happened, let's kill all regs except FP */ 2540 for (regno = 0; regno < BPF_REG_FP; regno++) 2541 __mark_reg_not_init(env, regs + regno); 2542 return; 2543 } 2544 __mark_reg_not_init(env, regs + regno); 2545 } 2546 2547 static void mark_btf_ld_reg(struct bpf_verifier_env *env, 2548 struct bpf_reg_state *regs, u32 regno, 2549 enum bpf_reg_type reg_type, 2550 struct btf *btf, u32 btf_id, 2551 enum bpf_type_flag flag) 2552 { 2553 if (reg_type == SCALAR_VALUE) { 2554 mark_reg_unknown(env, regs, regno); 2555 return; 2556 } 2557 mark_reg_known_zero(env, regs, regno); 2558 regs[regno].type = PTR_TO_BTF_ID | flag; 2559 regs[regno].btf = btf; 2560 regs[regno].btf_id = btf_id; 2561 } 2562 2563 #define DEF_NOT_SUBREG (0) 2564 static void init_reg_state(struct bpf_verifier_env *env, 2565 struct bpf_func_state *state) 2566 { 2567 struct bpf_reg_state *regs = state->regs; 2568 int i; 2569 2570 for (i = 0; i < MAX_BPF_REG; i++) { 2571 mark_reg_not_init(env, regs, i); 2572 regs[i].live = REG_LIVE_NONE; 2573 regs[i].parent = NULL; 2574 regs[i].subreg_def = DEF_NOT_SUBREG; 2575 } 2576 2577 /* frame pointer */ 2578 regs[BPF_REG_FP].type = PTR_TO_STACK; 2579 mark_reg_known_zero(env, regs, BPF_REG_FP); 2580 regs[BPF_REG_FP].frameno = state->frameno; 2581 } 2582 2583 #define BPF_MAIN_FUNC (-1) 2584 static void init_func_state(struct bpf_verifier_env *env, 2585 struct bpf_func_state *state, 2586 int callsite, int frameno, int subprogno) 2587 { 2588 state->callsite = callsite; 2589 state->frameno = frameno; 2590 state->subprogno = subprogno; 2591 state->callback_ret_range = tnum_range(0, 0); 2592 init_reg_state(env, state); 2593 mark_verifier_state_scratched(env); 2594 } 2595 2596 /* Similar to push_stack(), but for async callbacks */ 2597 static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, 2598 int insn_idx, int prev_insn_idx, 2599 int subprog) 2600 { 2601 struct bpf_verifier_stack_elem *elem; 2602 struct bpf_func_state *frame; 2603 2604 elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); 2605 if (!elem) 2606 goto err; 2607 2608 elem->insn_idx = insn_idx; 2609 elem->prev_insn_idx = prev_insn_idx; 2610 elem->next = 
env->head; 2611 elem->log_pos = env->log.end_pos; 2612 env->head = elem; 2613 env->stack_size++; 2614 if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { 2615 verbose(env, 2616 "The sequence of %d jumps is too complex for async cb.\n", 2617 env->stack_size); 2618 goto err; 2619 } 2620 /* Unlike push_stack() do not copy_verifier_state(). 2621 * The caller state doesn't matter. 2622 * This is async callback. It starts in a fresh stack. 2623 * Initialize it similar to do_check_common(). 2624 */ 2625 elem->st.branches = 1; 2626 frame = kzalloc(sizeof(*frame), GFP_KERNEL); 2627 if (!frame) 2628 goto err; 2629 init_func_state(env, frame, 2630 BPF_MAIN_FUNC /* callsite */, 2631 0 /* frameno within this callchain */, 2632 subprog /* subprog number within this prog */); 2633 elem->st.frame[0] = frame; 2634 return &elem->st; 2635 err: 2636 free_verifier_state(env->cur_state, true); 2637 env->cur_state = NULL; 2638 /* pop all elements and return */ 2639 while (!pop_stack(env, NULL, NULL, false)); 2640 return NULL; 2641 } 2642 2643 2644 enum reg_arg_type { 2645 SRC_OP, /* register is used as source operand */ 2646 DST_OP, /* register is used as destination operand */ 2647 DST_OP_NO_MARK /* same as above, check only, don't mark */ 2648 }; 2649 2650 static int cmp_subprogs(const void *a, const void *b) 2651 { 2652 return ((struct bpf_subprog_info *)a)->start - 2653 ((struct bpf_subprog_info *)b)->start; 2654 } 2655 2656 static int find_subprog(struct bpf_verifier_env *env, int off) 2657 { 2658 struct bpf_subprog_info *p; 2659 2660 p = bsearch(&off, env->subprog_info, env->subprog_cnt, 2661 sizeof(env->subprog_info[0]), cmp_subprogs); 2662 if (!p) 2663 return -ENOENT; 2664 return p - env->subprog_info; 2665 2666 } 2667 2668 static int add_subprog(struct bpf_verifier_env *env, int off) 2669 { 2670 int insn_cnt = env->prog->len; 2671 int ret; 2672 2673 if (off >= insn_cnt || off < 0) { 2674 verbose(env, "call to invalid destination\n"); 2675 return -EINVAL; 2676 } 2677 ret = find_subprog(env, off); 2678 if (ret >= 0) 2679 return ret; 2680 if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { 2681 verbose(env, "too many subprograms\n"); 2682 return -E2BIG; 2683 } 2684 /* determine subprog starts. 
The end is one before the next starts */ 2685 env->subprog_info[env->subprog_cnt++].start = off; 2686 sort(env->subprog_info, env->subprog_cnt, 2687 sizeof(env->subprog_info[0]), cmp_subprogs, NULL); 2688 return env->subprog_cnt - 1; 2689 } 2690 2691 static int bpf_find_exception_callback_insn_off(struct bpf_verifier_env *env) 2692 { 2693 struct bpf_prog_aux *aux = env->prog->aux; 2694 struct btf *btf = aux->btf; 2695 const struct btf_type *t; 2696 u32 main_btf_id, id; 2697 const char *name; 2698 int ret, i; 2699 2700 /* Non-zero func_info_cnt implies valid btf */ 2701 if (!aux->func_info_cnt) 2702 return 0; 2703 main_btf_id = aux->func_info[0].type_id; 2704 2705 t = btf_type_by_id(btf, main_btf_id); 2706 if (!t) { 2707 verbose(env, "invalid btf id for main subprog in func_info\n"); 2708 return -EINVAL; 2709 } 2710 2711 name = btf_find_decl_tag_value(btf, t, -1, "exception_callback:"); 2712 if (IS_ERR(name)) { 2713 ret = PTR_ERR(name); 2714 /* If there is no tag present, there is no exception callback */ 2715 if (ret == -ENOENT) 2716 ret = 0; 2717 else if (ret == -EEXIST) 2718 verbose(env, "multiple exception callback tags for main subprog\n"); 2719 return ret; 2720 } 2721 2722 ret = btf_find_by_name_kind(btf, name, BTF_KIND_FUNC); 2723 if (ret < 0) { 2724 verbose(env, "exception callback '%s' could not be found in BTF\n", name); 2725 return ret; 2726 } 2727 id = ret; 2728 t = btf_type_by_id(btf, id); 2729 if (btf_func_linkage(t) != BTF_FUNC_GLOBAL) { 2730 verbose(env, "exception callback '%s' must have global linkage\n", name); 2731 return -EINVAL; 2732 } 2733 ret = 0; 2734 for (i = 0; i < aux->func_info_cnt; i++) { 2735 if (aux->func_info[i].type_id != id) 2736 continue; 2737 ret = aux->func_info[i].insn_off; 2738 /* Further func_info and subprog checks will also happen 2739 * later, so assume this is the right insn_off for now. 2740 */ 2741 if (!ret) { 2742 verbose(env, "invalid exception callback insn_off in func_info: 0\n"); 2743 ret = -EINVAL; 2744 } 2745 } 2746 if (!ret) { 2747 verbose(env, "exception callback type id not found in func_info\n"); 2748 ret = -EINVAL; 2749 } 2750 return ret; 2751 } 2752 2753 #define MAX_KFUNC_DESCS 256 2754 #define MAX_KFUNC_BTFS 256 2755 2756 struct bpf_kfunc_desc { 2757 struct btf_func_model func_model; 2758 u32 func_id; 2759 s32 imm; 2760 u16 offset; 2761 unsigned long addr; 2762 }; 2763 2764 struct bpf_kfunc_btf { 2765 struct btf *btf; 2766 struct module *module; 2767 u16 offset; 2768 }; 2769 2770 struct bpf_kfunc_desc_tab { 2771 /* Sorted by func_id (BTF ID) and offset (fd_array offset) during 2772 * verification. JITs do lookups by bpf_insn, where func_id may not be 2773 * available, therefore at the end of verification do_misc_fixups() 2774 * sorts this by imm and offset. 
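 * See kfunc_desc_cmp_by_id_off()/kfunc_desc_cmp_by_imm_off() and
 * bpf_jit_find_kfunc_model() below for the two lookup orders.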
2775 */ 2776 struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS]; 2777 u32 nr_descs; 2778 }; 2779 2780 struct bpf_kfunc_btf_tab { 2781 struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS]; 2782 u32 nr_descs; 2783 }; 2784 2785 static int kfunc_desc_cmp_by_id_off(const void *a, const void *b) 2786 { 2787 const struct bpf_kfunc_desc *d0 = a; 2788 const struct bpf_kfunc_desc *d1 = b; 2789 2790 /* func_id is not greater than BTF_MAX_TYPE */ 2791 return d0->func_id - d1->func_id ?: d0->offset - d1->offset; 2792 } 2793 2794 static int kfunc_btf_cmp_by_off(const void *a, const void *b) 2795 { 2796 const struct bpf_kfunc_btf *d0 = a; 2797 const struct bpf_kfunc_btf *d1 = b; 2798 2799 return d0->offset - d1->offset; 2800 } 2801 2802 static const struct bpf_kfunc_desc * 2803 find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset) 2804 { 2805 struct bpf_kfunc_desc desc = { 2806 .func_id = func_id, 2807 .offset = offset, 2808 }; 2809 struct bpf_kfunc_desc_tab *tab; 2810 2811 tab = prog->aux->kfunc_tab; 2812 return bsearch(&desc, tab->descs, tab->nr_descs, 2813 sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off); 2814 } 2815 2816 int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, 2817 u16 btf_fd_idx, u8 **func_addr) 2818 { 2819 const struct bpf_kfunc_desc *desc; 2820 2821 desc = find_kfunc_desc(prog, func_id, btf_fd_idx); 2822 if (!desc) 2823 return -EFAULT; 2824 2825 *func_addr = (u8 *)desc->addr; 2826 return 0; 2827 } 2828 2829 static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env, 2830 s16 offset) 2831 { 2832 struct bpf_kfunc_btf kf_btf = { .offset = offset }; 2833 struct bpf_kfunc_btf_tab *tab; 2834 struct bpf_kfunc_btf *b; 2835 struct module *mod; 2836 struct btf *btf; 2837 int btf_fd; 2838 2839 tab = env->prog->aux->kfunc_btf_tab; 2840 b = bsearch(&kf_btf, tab->descs, tab->nr_descs, 2841 sizeof(tab->descs[0]), kfunc_btf_cmp_by_off); 2842 if (!b) { 2843 if (tab->nr_descs == MAX_KFUNC_BTFS) { 2844 verbose(env, "too many different module BTFs\n"); 2845 return ERR_PTR(-E2BIG); 2846 } 2847 2848 if (bpfptr_is_null(env->fd_array)) { 2849 verbose(env, "kfunc offset > 0 without fd_array is invalid\n"); 2850 return ERR_PTR(-EPROTO); 2851 } 2852 2853 if (copy_from_bpfptr_offset(&btf_fd, env->fd_array, 2854 offset * sizeof(btf_fd), 2855 sizeof(btf_fd))) 2856 return ERR_PTR(-EFAULT); 2857 2858 btf = btf_get_by_fd(btf_fd); 2859 if (IS_ERR(btf)) { 2860 verbose(env, "invalid module BTF fd specified\n"); 2861 return btf; 2862 } 2863 2864 if (!btf_is_module(btf)) { 2865 verbose(env, "BTF fd for kfunc is not a module BTF\n"); 2866 btf_put(btf); 2867 return ERR_PTR(-EINVAL); 2868 } 2869 2870 mod = btf_try_get_module(btf); 2871 if (!mod) { 2872 btf_put(btf); 2873 return ERR_PTR(-ENXIO); 2874 } 2875 2876 b = &tab->descs[tab->nr_descs++]; 2877 b->btf = btf; 2878 b->module = mod; 2879 b->offset = offset; 2880 2881 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 2882 kfunc_btf_cmp_by_off, NULL); 2883 } 2884 return b->btf; 2885 } 2886 2887 void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab) 2888 { 2889 if (!tab) 2890 return; 2891 2892 while (tab->nr_descs--) { 2893 module_put(tab->descs[tab->nr_descs].module); 2894 btf_put(tab->descs[tab->nr_descs].btf); 2895 } 2896 kfree(tab); 2897 } 2898 2899 static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env, s16 offset) 2900 { 2901 if (offset) { 2902 if (offset < 0) { 2903 /* In the future, this can be allowed to increase limit 2904 * of fd index into fd_array, interpreted as u16. 
2905 */ 2906 verbose(env, "negative offset disallowed for kernel module function call\n"); 2907 return ERR_PTR(-EINVAL); 2908 } 2909 2910 return __find_kfunc_desc_btf(env, offset); 2911 } 2912 return btf_vmlinux ?: ERR_PTR(-ENOENT); 2913 } 2914 2915 static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset) 2916 { 2917 const struct btf_type *func, *func_proto; 2918 struct bpf_kfunc_btf_tab *btf_tab; 2919 struct bpf_kfunc_desc_tab *tab; 2920 struct bpf_prog_aux *prog_aux; 2921 struct bpf_kfunc_desc *desc; 2922 const char *func_name; 2923 struct btf *desc_btf; 2924 unsigned long call_imm; 2925 unsigned long addr; 2926 int err; 2927 2928 prog_aux = env->prog->aux; 2929 tab = prog_aux->kfunc_tab; 2930 btf_tab = prog_aux->kfunc_btf_tab; 2931 if (!tab) { 2932 if (!btf_vmlinux) { 2933 verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n"); 2934 return -ENOTSUPP; 2935 } 2936 2937 if (!env->prog->jit_requested) { 2938 verbose(env, "JIT is required for calling kernel function\n"); 2939 return -ENOTSUPP; 2940 } 2941 2942 if (!bpf_jit_supports_kfunc_call()) { 2943 verbose(env, "JIT does not support calling kernel function\n"); 2944 return -ENOTSUPP; 2945 } 2946 2947 if (!env->prog->gpl_compatible) { 2948 verbose(env, "cannot call kernel function from non-GPL compatible program\n"); 2949 return -EINVAL; 2950 } 2951 2952 tab = kzalloc(sizeof(*tab), GFP_KERNEL); 2953 if (!tab) 2954 return -ENOMEM; 2955 prog_aux->kfunc_tab = tab; 2956 } 2957 2958 /* func_id == 0 is always invalid, but instead of returning an error, be 2959 * conservative and wait until the code elimination pass before returning 2960 * error, so that invalid calls that get pruned out can be in BPF programs 2961 * loaded from userspace. It is also required that offset be untouched 2962 * for such calls. 
2963 */ 2964 if (!func_id && !offset) 2965 return 0; 2966 2967 if (!btf_tab && offset) { 2968 btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL); 2969 if (!btf_tab) 2970 return -ENOMEM; 2971 prog_aux->kfunc_btf_tab = btf_tab; 2972 } 2973 2974 desc_btf = find_kfunc_desc_btf(env, offset); 2975 if (IS_ERR(desc_btf)) { 2976 verbose(env, "failed to find BTF for kernel function\n"); 2977 return PTR_ERR(desc_btf); 2978 } 2979 2980 if (find_kfunc_desc(env->prog, func_id, offset)) 2981 return 0; 2982 2983 if (tab->nr_descs == MAX_KFUNC_DESCS) { 2984 verbose(env, "too many different kernel function calls\n"); 2985 return -E2BIG; 2986 } 2987 2988 func = btf_type_by_id(desc_btf, func_id); 2989 if (!func || !btf_type_is_func(func)) { 2990 verbose(env, "kernel btf_id %u is not a function\n", 2991 func_id); 2992 return -EINVAL; 2993 } 2994 func_proto = btf_type_by_id(desc_btf, func->type); 2995 if (!func_proto || !btf_type_is_func_proto(func_proto)) { 2996 verbose(env, "kernel function btf_id %u does not have a valid func_proto\n", 2997 func_id); 2998 return -EINVAL; 2999 } 3000 3001 func_name = btf_name_by_offset(desc_btf, func->name_off); 3002 addr = kallsyms_lookup_name(func_name); 3003 if (!addr) { 3004 verbose(env, "cannot find address for kernel function %s\n", 3005 func_name); 3006 return -EINVAL; 3007 } 3008 specialize_kfunc(env, func_id, offset, &addr); 3009 3010 if (bpf_jit_supports_far_kfunc_call()) { 3011 call_imm = func_id; 3012 } else { 3013 call_imm = BPF_CALL_IMM(addr); 3014 /* Check whether the relative offset overflows desc->imm */ 3015 if ((unsigned long)(s32)call_imm != call_imm) { 3016 verbose(env, "address of kernel function %s is out of range\n", 3017 func_name); 3018 return -EINVAL; 3019 } 3020 } 3021 3022 if (bpf_dev_bound_kfunc_id(func_id)) { 3023 err = bpf_dev_bound_kfunc_check(&env->log, prog_aux); 3024 if (err) 3025 return err; 3026 } 3027 3028 desc = &tab->descs[tab->nr_descs++]; 3029 desc->func_id = func_id; 3030 desc->imm = call_imm; 3031 desc->offset = offset; 3032 desc->addr = addr; 3033 err = btf_distill_func_proto(&env->log, desc_btf, 3034 func_proto, func_name, 3035 &desc->func_model); 3036 if (!err) 3037 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 3038 kfunc_desc_cmp_by_id_off, NULL); 3039 return err; 3040 } 3041 3042 static int kfunc_desc_cmp_by_imm_off(const void *a, const void *b) 3043 { 3044 const struct bpf_kfunc_desc *d0 = a; 3045 const struct bpf_kfunc_desc *d1 = b; 3046 3047 if (d0->imm != d1->imm) 3048 return d0->imm < d1->imm ? -1 : 1; 3049 if (d0->offset != d1->offset) 3050 return d0->offset < d1->offset ? -1 : 1; 3051 return 0; 3052 } 3053 3054 static void sort_kfunc_descs_by_imm_off(struct bpf_prog *prog) 3055 { 3056 struct bpf_kfunc_desc_tab *tab; 3057 3058 tab = prog->aux->kfunc_tab; 3059 if (!tab) 3060 return; 3061 3062 sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]), 3063 kfunc_desc_cmp_by_imm_off, NULL); 3064 } 3065 3066 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) 3067 { 3068 return !!prog->aux->kfunc_tab; 3069 } 3070 3071 const struct btf_func_model * 3072 bpf_jit_find_kfunc_model(const struct bpf_prog *prog, 3073 const struct bpf_insn *insn) 3074 { 3075 const struct bpf_kfunc_desc desc = { 3076 .imm = insn->imm, 3077 .offset = insn->off, 3078 }; 3079 const struct bpf_kfunc_desc *res; 3080 struct bpf_kfunc_desc_tab *tab; 3081 3082 tab = prog->aux->kfunc_tab; 3083 res = bsearch(&desc, tab->descs, tab->nr_descs, 3084 sizeof(tab->descs[0]), kfunc_desc_cmp_by_imm_off); 3085 3086 return res ? 
&res->func_model : NULL; 3087 } 3088 3089 static int add_subprog_and_kfunc(struct bpf_verifier_env *env) 3090 { 3091 struct bpf_subprog_info *subprog = env->subprog_info; 3092 int i, ret, insn_cnt = env->prog->len, ex_cb_insn; 3093 struct bpf_insn *insn = env->prog->insnsi; 3094 3095 /* Add entry function. */ 3096 ret = add_subprog(env, 0); 3097 if (ret) 3098 return ret; 3099 3100 for (i = 0; i < insn_cnt; i++, insn++) { 3101 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn) && 3102 !bpf_pseudo_kfunc_call(insn)) 3103 continue; 3104 3105 if (!env->bpf_capable) { 3106 verbose(env, "loading/calling other bpf or kernel functions are allowed for CAP_BPF and CAP_SYS_ADMIN\n"); 3107 return -EPERM; 3108 } 3109 3110 if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn)) 3111 ret = add_subprog(env, i + insn->imm + 1); 3112 else 3113 ret = add_kfunc_call(env, insn->imm, insn->off); 3114 3115 if (ret < 0) 3116 return ret; 3117 } 3118 3119 ret = bpf_find_exception_callback_insn_off(env); 3120 if (ret < 0) 3121 return ret; 3122 ex_cb_insn = ret; 3123 3124 /* If ex_cb_insn > 0, this means that the main program has a subprog 3125 * marked using BTF decl tag to serve as the exception callback. 3126 */ 3127 if (ex_cb_insn) { 3128 ret = add_subprog(env, ex_cb_insn); 3129 if (ret < 0) 3130 return ret; 3131 for (i = 1; i < env->subprog_cnt; i++) { 3132 if (env->subprog_info[i].start != ex_cb_insn) 3133 continue; 3134 env->exception_callback_subprog = i; 3135 break; 3136 } 3137 } 3138 3139 /* Add a fake 'exit' subprog which could simplify subprog iteration 3140 * logic. 'subprog_cnt' should not be increased. 3141 */ 3142 subprog[env->subprog_cnt].start = insn_cnt; 3143 3144 if (env->log.level & BPF_LOG_LEVEL2) 3145 for (i = 0; i < env->subprog_cnt; i++) 3146 verbose(env, "func#%d @%d\n", i, subprog[i].start); 3147 3148 return 0; 3149 } 3150 3151 static int check_subprogs(struct bpf_verifier_env *env) 3152 { 3153 int i, subprog_start, subprog_end, off, cur_subprog = 0; 3154 struct bpf_subprog_info *subprog = env->subprog_info; 3155 struct bpf_insn *insn = env->prog->insnsi; 3156 int insn_cnt = env->prog->len; 3157 3158 /* now check that all jumps are within the same subprog */ 3159 subprog_start = subprog[cur_subprog].start; 3160 subprog_end = subprog[cur_subprog + 1].start; 3161 for (i = 0; i < insn_cnt; i++) { 3162 u8 code = insn[i].code; 3163 3164 if (code == (BPF_JMP | BPF_CALL) && 3165 insn[i].src_reg == 0 && 3166 insn[i].imm == BPF_FUNC_tail_call) 3167 subprog[cur_subprog].has_tail_call = true; 3168 if (BPF_CLASS(code) == BPF_LD && 3169 (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND)) 3170 subprog[cur_subprog].has_ld_abs = true; 3171 if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) 3172 goto next; 3173 if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) 3174 goto next; 3175 if (code == (BPF_JMP32 | BPF_JA)) 3176 off = i + insn[i].imm + 1; 3177 else 3178 off = i + insn[i].off + 1; 3179 if (off < subprog_start || off >= subprog_end) { 3180 verbose(env, "jump out of range from insn %d to %d\n", i, off); 3181 return -EINVAL; 3182 } 3183 next: 3184 if (i == subprog_end - 1) { 3185 /* to avoid fall-through from one subprog into another 3186 * the last insn of the subprog should be either exit 3187 * or unconditional jump back or bpf_throw call 3188 */ 3189 if (code != (BPF_JMP | BPF_EXIT) && 3190 code != (BPF_JMP32 | BPF_JA) && 3191 code != (BPF_JMP | BPF_JA)) { 3192 verbose(env, "last insn is not an exit or jmp\n"); 3193 return -EINVAL; 3194 } 3195 subprog_start = subprog_end; 3196 
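/* advance to the next subprog; subprog[cur_subprog + 1] below is
 * always valid because add_subprog_and_kfunc() appended a fake
 * 'exit' subprog with start == insn_cnt.
 */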
cur_subprog++; 3197 if (cur_subprog < env->subprog_cnt) 3198 subprog_end = subprog[cur_subprog + 1].start; 3199 } 3200 } 3201 return 0; 3202 } 3203 3204 /* Parentage chain of this register (or stack slot) should take care of all 3205 * issues like callee-saved registers, stack slot allocation time, etc. 3206 */ 3207 static int mark_reg_read(struct bpf_verifier_env *env, 3208 const struct bpf_reg_state *state, 3209 struct bpf_reg_state *parent, u8 flag) 3210 { 3211 bool writes = parent == state->parent; /* Observe write marks */ 3212 int cnt = 0; 3213 3214 while (parent) { 3215 /* if read wasn't screened by an earlier write ... */ 3216 if (writes && state->live & REG_LIVE_WRITTEN) 3217 break; 3218 if (parent->live & REG_LIVE_DONE) { 3219 verbose(env, "verifier BUG type %s var_off %lld off %d\n", 3220 reg_type_str(env, parent->type), 3221 parent->var_off.value, parent->off); 3222 return -EFAULT; 3223 } 3224 /* The first condition is more likely to be true than the 3225 * second, checked it first. 3226 */ 3227 if ((parent->live & REG_LIVE_READ) == flag || 3228 parent->live & REG_LIVE_READ64) 3229 /* The parentage chain never changes and 3230 * this parent was already marked as LIVE_READ. 3231 * There is no need to keep walking the chain again and 3232 * keep re-marking all parents as LIVE_READ. 3233 * This case happens when the same register is read 3234 * multiple times without writes into it in-between. 3235 * Also, if parent has the stronger REG_LIVE_READ64 set, 3236 * then no need to set the weak REG_LIVE_READ32. 3237 */ 3238 break; 3239 /* ... then we depend on parent's value */ 3240 parent->live |= flag; 3241 /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */ 3242 if (flag == REG_LIVE_READ64) 3243 parent->live &= ~REG_LIVE_READ32; 3244 state = parent; 3245 parent = state->parent; 3246 writes = true; 3247 cnt++; 3248 } 3249 3250 if (env->longest_mark_read_walk < cnt) 3251 env->longest_mark_read_walk = cnt; 3252 return 0; 3253 } 3254 3255 static int mark_dynptr_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 3256 { 3257 struct bpf_func_state *state = func(env, reg); 3258 int spi, ret; 3259 3260 /* For CONST_PTR_TO_DYNPTR, it must have already been done by 3261 * check_reg_arg in check_helper_call and mark_btf_func_reg_size in 3262 * check_kfunc_call. 3263 */ 3264 if (reg->type == CONST_PTR_TO_DYNPTR) 3265 return 0; 3266 spi = dynptr_get_spi(env, reg); 3267 if (spi < 0) 3268 return spi; 3269 /* Caller ensures dynptr is valid and initialized, which means spi is in 3270 * bounds and spi is the first dynptr slot. Simply mark stack slot as 3271 * read. 3272 */ 3273 ret = mark_reg_read(env, &state->stack[spi].spilled_ptr, 3274 state->stack[spi].spilled_ptr.parent, REG_LIVE_READ64); 3275 if (ret) 3276 return ret; 3277 return mark_reg_read(env, &state->stack[spi - 1].spilled_ptr, 3278 state->stack[spi - 1].spilled_ptr.parent, REG_LIVE_READ64); 3279 } 3280 3281 static int mark_iter_read(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 3282 int spi, int nr_slots) 3283 { 3284 struct bpf_func_state *state = func(env, reg); 3285 int err, i; 3286 3287 for (i = 0; i < nr_slots; i++) { 3288 struct bpf_reg_state *st = &state->stack[spi - i].spilled_ptr; 3289 3290 err = mark_reg_read(env, st, st->parent, REG_LIVE_READ64); 3291 if (err) 3292 return err; 3293 3294 mark_stack_slot_scratched(env, spi - i); 3295 } 3296 3297 return 0; 3298 } 3299 3300 /* This function is supposed to be used by the following 32-bit optimization 3301 * code only. 
It returns TRUE if the source or destination register operates 3302 * on 64-bit, otherwise return FALSE. 3303 */ 3304 static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, 3305 u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) 3306 { 3307 u8 code, class, op; 3308 3309 code = insn->code; 3310 class = BPF_CLASS(code); 3311 op = BPF_OP(code); 3312 if (class == BPF_JMP) { 3313 /* BPF_EXIT for "main" will reach here. Return TRUE 3314 * conservatively. 3315 */ 3316 if (op == BPF_EXIT) 3317 return true; 3318 if (op == BPF_CALL) { 3319 /* BPF to BPF call will reach here because of marking 3320 * caller saved clobber with DST_OP_NO_MARK for which we 3321 * don't care the register def because they are anyway 3322 * marked as NOT_INIT already. 3323 */ 3324 if (insn->src_reg == BPF_PSEUDO_CALL) 3325 return false; 3326 /* Helper call will reach here because of arg type 3327 * check, conservatively return TRUE. 3328 */ 3329 if (t == SRC_OP) 3330 return true; 3331 3332 return false; 3333 } 3334 } 3335 3336 if (class == BPF_ALU64 && op == BPF_END && (insn->imm == 16 || insn->imm == 32)) 3337 return false; 3338 3339 if (class == BPF_ALU64 || class == BPF_JMP || 3340 (class == BPF_ALU && op == BPF_END && insn->imm == 64)) 3341 return true; 3342 3343 if (class == BPF_ALU || class == BPF_JMP32) 3344 return false; 3345 3346 if (class == BPF_LDX) { 3347 if (t != SRC_OP) 3348 return BPF_SIZE(code) == BPF_DW || BPF_MODE(code) == BPF_MEMSX; 3349 /* LDX source must be ptr. */ 3350 return true; 3351 } 3352 3353 if (class == BPF_STX) { 3354 /* BPF_STX (including atomic variants) has multiple source 3355 * operands, one of which is a ptr. Check whether the caller is 3356 * asking about it. 3357 */ 3358 if (t == SRC_OP && reg->type != SCALAR_VALUE) 3359 return true; 3360 return BPF_SIZE(code) == BPF_DW; 3361 } 3362 3363 if (class == BPF_LD) { 3364 u8 mode = BPF_MODE(code); 3365 3366 /* LD_IMM64 */ 3367 if (mode == BPF_IMM) 3368 return true; 3369 3370 /* Both LD_IND and LD_ABS return 32-bit data. */ 3371 if (t != SRC_OP) 3372 return false; 3373 3374 /* Implicit ctx ptr. */ 3375 if (regno == BPF_REG_6) 3376 return true; 3377 3378 /* Explicit source could be any width. */ 3379 return true; 3380 } 3381 3382 if (class == BPF_ST) 3383 /* The only source register for BPF_ST is a ptr. */ 3384 return true; 3385 3386 /* Conservatively return true at default. */ 3387 return true; 3388 } 3389 3390 /* Return the regno defined by the insn, or -1. */ 3391 static int insn_def_regno(const struct bpf_insn *insn) 3392 { 3393 switch (BPF_CLASS(insn->code)) { 3394 case BPF_JMP: 3395 case BPF_JMP32: 3396 case BPF_ST: 3397 return -1; 3398 case BPF_STX: 3399 if (BPF_MODE(insn->code) == BPF_ATOMIC && 3400 (insn->imm & BPF_FETCH)) { 3401 if (insn->imm == BPF_CMPXCHG) 3402 return BPF_REG_0; 3403 else 3404 return insn->src_reg; 3405 } else { 3406 return -1; 3407 } 3408 default: 3409 return insn->dst_reg; 3410 } 3411 } 3412 3413 /* Return TRUE if INSN has defined any 32-bit value explicitly. 
*/
3414 static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn)
3415 {
3416 int dst_reg = insn_def_regno(insn);
3417
3418 if (dst_reg == -1)
3419 return false;
3420
3421 return !is_reg64(env, insn, dst_reg, NULL, DST_OP);
3422 }
3423
3424 static void mark_insn_zext(struct bpf_verifier_env *env,
3425 struct bpf_reg_state *reg)
3426 {
3427 s32 def_idx = reg->subreg_def;
3428
3429 if (def_idx == DEF_NOT_SUBREG)
3430 return;
3431
3432 env->insn_aux_data[def_idx - 1].zext_dst = true;
3433 /* The dst will be zero extended, so won't be sub-register anymore. */
3434 reg->subreg_def = DEF_NOT_SUBREG;
3435 }
3436
3437 static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
3438 enum reg_arg_type t)
3439 {
3440 struct bpf_verifier_state *vstate = env->cur_state;
3441 struct bpf_func_state *state = vstate->frame[vstate->curframe];
3442 struct bpf_insn *insn = env->prog->insnsi + env->insn_idx;
3443 struct bpf_reg_state *reg, *regs = state->regs;
3444 bool rw64;
3445
3446 if (regno >= MAX_BPF_REG) {
3447 verbose(env, "R%d is invalid\n", regno);
3448 return -EINVAL;
3449 }
3450
3451 mark_reg_scratched(env, regno);
3452
3453 reg = &regs[regno];
3454 rw64 = is_reg64(env, insn, regno, reg, t);
3455 if (t == SRC_OP) {
3456 /* check whether register used as source operand can be read */
3457 if (reg->type == NOT_INIT) {
3458 verbose(env, "R%d !read_ok\n", regno);
3459 return -EACCES;
3460 }
3461 /* We don't need to worry about FP liveness because it's read-only */
3462 if (regno == BPF_REG_FP)
3463 return 0;
3464
3465 if (rw64)
3466 mark_insn_zext(env, reg);
3467
3468 return mark_reg_read(env, reg, reg->parent,
3469 rw64 ? REG_LIVE_READ64 : REG_LIVE_READ32);
3470 } else {
3471 /* check whether register used as dest operand can be written to */
3472 if (regno == BPF_REG_FP) {
3473 verbose(env, "frame pointer is read only\n");
3474 return -EACCES;
3475 }
3476 reg->live |= REG_LIVE_WRITTEN;
3477 reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1;
3478 if (t == DST_OP)
3479 mark_reg_unknown(env, regs, regno);
3480 }
3481 return 0;
3482 }
3483
3484 static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
3485 {
3486 env->insn_aux_data[idx].jmp_point = true;
3487 }
3488
3489 static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
3490 {
3491 return env->insn_aux_data[insn_idx].jmp_point;
3492 }
3493
3494 /* for any branch, call, exit record the history of jmps in the given state */
3495 static int push_jmp_history(struct bpf_verifier_env *env,
3496 struct bpf_verifier_state *cur)
3497 {
3498 u32 cnt = cur->jmp_history_cnt;
3499 struct bpf_idx_pair *p;
3500 size_t alloc_size;
3501
3502 if (!is_jmp_point(env, env->insn_idx))
3503 return 0;
3504
3505 cnt++;
3506 alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
3507 p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
3508 if (!p)
3509 return -ENOMEM;
3510 p[cnt - 1].idx = env->insn_idx;
3511 p[cnt - 1].prev_idx = env->prev_insn_idx;
3512 cur->jmp_history = p;
3513 cur->jmp_history_cnt = cnt;
3514 return 0;
3515 }
3516
3517 /* Backtrack one insn at a time. If idx is not at the top of the recorded
3518 * history then the previous instruction came from straight-line execution.
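 * For example, if the top of jmp_history is {.idx = 10, .prev_idx = 5},
 * then get_prev_insn_idx() called with i == 10 returns 5 and decrements
 * the history counter; for any other i it simply returns i - 1.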
3519 */ 3520 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, 3521 u32 *history) 3522 { 3523 u32 cnt = *history; 3524 3525 if (cnt && st->jmp_history[cnt - 1].idx == i) { 3526 i = st->jmp_history[cnt - 1].prev_idx; 3527 (*history)--; 3528 } else { 3529 i--; 3530 } 3531 return i; 3532 } 3533 3534 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn) 3535 { 3536 const struct btf_type *func; 3537 struct btf *desc_btf; 3538 3539 if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL) 3540 return NULL; 3541 3542 desc_btf = find_kfunc_desc_btf(data, insn->off); 3543 if (IS_ERR(desc_btf)) 3544 return "<error>"; 3545 3546 func = btf_type_by_id(desc_btf, insn->imm); 3547 return btf_name_by_offset(desc_btf, func->name_off); 3548 } 3549 3550 static inline void bt_init(struct backtrack_state *bt, u32 frame) 3551 { 3552 bt->frame = frame; 3553 } 3554 3555 static inline void bt_reset(struct backtrack_state *bt) 3556 { 3557 struct bpf_verifier_env *env = bt->env; 3558 3559 memset(bt, 0, sizeof(*bt)); 3560 bt->env = env; 3561 } 3562 3563 static inline u32 bt_empty(struct backtrack_state *bt) 3564 { 3565 u64 mask = 0; 3566 int i; 3567 3568 for (i = 0; i <= bt->frame; i++) 3569 mask |= bt->reg_masks[i] | bt->stack_masks[i]; 3570 3571 return mask == 0; 3572 } 3573 3574 static inline int bt_subprog_enter(struct backtrack_state *bt) 3575 { 3576 if (bt->frame == MAX_CALL_FRAMES - 1) { 3577 verbose(bt->env, "BUG subprog enter from frame %d\n", bt->frame); 3578 WARN_ONCE(1, "verifier backtracking bug"); 3579 return -EFAULT; 3580 } 3581 bt->frame++; 3582 return 0; 3583 } 3584 3585 static inline int bt_subprog_exit(struct backtrack_state *bt) 3586 { 3587 if (bt->frame == 0) { 3588 verbose(bt->env, "BUG subprog exit from frame 0\n"); 3589 WARN_ONCE(1, "verifier backtracking bug"); 3590 return -EFAULT; 3591 } 3592 bt->frame--; 3593 return 0; 3594 } 3595 3596 static inline void bt_set_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg) 3597 { 3598 bt->reg_masks[frame] |= 1 << reg; 3599 } 3600 3601 static inline void bt_clear_frame_reg(struct backtrack_state *bt, u32 frame, u32 reg) 3602 { 3603 bt->reg_masks[frame] &= ~(1 << reg); 3604 } 3605 3606 static inline void bt_set_reg(struct backtrack_state *bt, u32 reg) 3607 { 3608 bt_set_frame_reg(bt, bt->frame, reg); 3609 } 3610 3611 static inline void bt_clear_reg(struct backtrack_state *bt, u32 reg) 3612 { 3613 bt_clear_frame_reg(bt, bt->frame, reg); 3614 } 3615 3616 static inline void bt_set_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot) 3617 { 3618 bt->stack_masks[frame] |= 1ull << slot; 3619 } 3620 3621 static inline void bt_clear_frame_slot(struct backtrack_state *bt, u32 frame, u32 slot) 3622 { 3623 bt->stack_masks[frame] &= ~(1ull << slot); 3624 } 3625 3626 static inline void bt_set_slot(struct backtrack_state *bt, u32 slot) 3627 { 3628 bt_set_frame_slot(bt, bt->frame, slot); 3629 } 3630 3631 static inline void bt_clear_slot(struct backtrack_state *bt, u32 slot) 3632 { 3633 bt_clear_frame_slot(bt, bt->frame, slot); 3634 } 3635 3636 static inline u32 bt_frame_reg_mask(struct backtrack_state *bt, u32 frame) 3637 { 3638 return bt->reg_masks[frame]; 3639 } 3640 3641 static inline u32 bt_reg_mask(struct backtrack_state *bt) 3642 { 3643 return bt->reg_masks[bt->frame]; 3644 } 3645 3646 static inline u64 bt_frame_stack_mask(struct backtrack_state *bt, u32 frame) 3647 { 3648 return bt->stack_masks[frame]; 3649 } 3650 3651 static inline u64 bt_stack_mask(struct backtrack_state *bt) 3652 { 3653 return 
bt->stack_masks[bt->frame]; 3654 } 3655 3656 static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg) 3657 { 3658 return bt->reg_masks[bt->frame] & (1 << reg); 3659 } 3660 3661 static inline bool bt_is_slot_set(struct backtrack_state *bt, u32 slot) 3662 { 3663 return bt->stack_masks[bt->frame] & (1ull << slot); 3664 } 3665 3666 /* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */ 3667 static void fmt_reg_mask(char *buf, ssize_t buf_sz, u32 reg_mask) 3668 { 3669 DECLARE_BITMAP(mask, 64); 3670 bool first = true; 3671 int i, n; 3672 3673 buf[0] = '\0'; 3674 3675 bitmap_from_u64(mask, reg_mask); 3676 for_each_set_bit(i, mask, 32) { 3677 n = snprintf(buf, buf_sz, "%sr%d", first ? "" : ",", i); 3678 first = false; 3679 buf += n; 3680 buf_sz -= n; 3681 if (buf_sz < 0) 3682 break; 3683 } 3684 } 3685 /* format stack slots bitmask, e.g., "-8,-24,-40" for 0x15 mask */ 3686 static void fmt_stack_mask(char *buf, ssize_t buf_sz, u64 stack_mask) 3687 { 3688 DECLARE_BITMAP(mask, 64); 3689 bool first = true; 3690 int i, n; 3691 3692 buf[0] = '\0'; 3693 3694 bitmap_from_u64(mask, stack_mask); 3695 for_each_set_bit(i, mask, 64) { 3696 n = snprintf(buf, buf_sz, "%s%d", first ? "" : ",", -(i + 1) * 8); 3697 first = false; 3698 buf += n; 3699 buf_sz -= n; 3700 if (buf_sz < 0) 3701 break; 3702 } 3703 } 3704 3705 /* For given verifier state backtrack_insn() is called from the last insn to 3706 * the first insn. Its purpose is to compute a bitmask of registers and 3707 * stack slots that needs precision in the parent verifier state. 3708 * 3709 * @idx is an index of the instruction we are currently processing; 3710 * @subseq_idx is an index of the subsequent instruction that: 3711 * - *would be* executed next, if jump history is viewed in forward order; 3712 * - *was* processed previously during backtracking. 3713 */ 3714 static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx, 3715 struct backtrack_state *bt) 3716 { 3717 const struct bpf_insn_cbs cbs = { 3718 .cb_call = disasm_kfunc_name, 3719 .cb_print = verbose, 3720 .private_data = env, 3721 }; 3722 struct bpf_insn *insn = env->prog->insnsi + idx; 3723 u8 class = BPF_CLASS(insn->code); 3724 u8 opcode = BPF_OP(insn->code); 3725 u8 mode = BPF_MODE(insn->code); 3726 u32 dreg = insn->dst_reg; 3727 u32 sreg = insn->src_reg; 3728 u32 spi, i; 3729 3730 if (insn->code == 0) 3731 return 0; 3732 if (env->log.level & BPF_LOG_LEVEL2) { 3733 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_reg_mask(bt)); 3734 verbose(env, "mark_precise: frame%d: regs=%s ", 3735 bt->frame, env->tmp_str_buf); 3736 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, bt_stack_mask(bt)); 3737 verbose(env, "stack=%s before ", env->tmp_str_buf); 3738 verbose(env, "%d: ", idx); 3739 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 3740 } 3741 3742 if (class == BPF_ALU || class == BPF_ALU64) { 3743 if (!bt_is_reg_set(bt, dreg)) 3744 return 0; 3745 if (opcode == BPF_END || opcode == BPF_NEG) { 3746 /* sreg is reserved and unused 3747 * dreg still need precision before this insn 3748 */ 3749 return 0; 3750 } else if (opcode == BPF_MOV) { 3751 if (BPF_SRC(insn->code) == BPF_X) { 3752 /* dreg = sreg or dreg = (s8, s16, s32)sreg 3753 * dreg needs precision after this insn 3754 * sreg needs precision before this insn 3755 */ 3756 bt_clear_reg(bt, dreg); 3757 bt_set_reg(bt, sreg); 3758 } else { 3759 /* dreg = K 3760 * dreg needs precision after this insn. 3761 * Corresponding register is already marked 3762 * as precise=true in this verifier state. 
3763 * No further markings in parent are necessary 3764 */ 3765 bt_clear_reg(bt, dreg); 3766 } 3767 } else { 3768 if (BPF_SRC(insn->code) == BPF_X) { 3769 /* dreg += sreg 3770 * both dreg and sreg need precision 3771 * before this insn 3772 */ 3773 bt_set_reg(bt, sreg); 3774 } /* else dreg += K 3775 * dreg still needs precision before this insn 3776 */ 3777 } 3778 } else if (class == BPF_LDX) { 3779 if (!bt_is_reg_set(bt, dreg)) 3780 return 0; 3781 bt_clear_reg(bt, dreg); 3782 3783 /* scalars can only be spilled into stack w/o losing precision. 3784 * Load from any other memory can be zero extended. 3785 * The desire to keep that precision is already indicated 3786 * by 'precise' mark in corresponding register of this state. 3787 * No further tracking necessary. 3788 */ 3789 if (insn->src_reg != BPF_REG_FP) 3790 return 0; 3791 3792 /* dreg = *(u64 *)[fp - off] was a fill from the stack. 3793 * that [fp - off] slot contains scalar that needs to be 3794 * tracked with precision 3795 */ 3796 spi = (-insn->off - 1) / BPF_REG_SIZE; 3797 if (spi >= 64) { 3798 verbose(env, "BUG spi %d\n", spi); 3799 WARN_ONCE(1, "verifier backtracking bug"); 3800 return -EFAULT; 3801 } 3802 bt_set_slot(bt, spi); 3803 } else if (class == BPF_STX || class == BPF_ST) { 3804 if (bt_is_reg_set(bt, dreg)) 3805 /* stx & st shouldn't be using _scalar_ dst_reg 3806 * to access memory. It means backtracking 3807 * encountered a case of pointer subtraction. 3808 */ 3809 return -ENOTSUPP; 3810 /* scalars can only be spilled into stack */ 3811 if (insn->dst_reg != BPF_REG_FP) 3812 return 0; 3813 spi = (-insn->off - 1) / BPF_REG_SIZE; 3814 if (spi >= 64) { 3815 verbose(env, "BUG spi %d\n", spi); 3816 WARN_ONCE(1, "verifier backtracking bug"); 3817 return -EFAULT; 3818 } 3819 if (!bt_is_slot_set(bt, spi)) 3820 return 0; 3821 bt_clear_slot(bt, spi); 3822 if (class == BPF_STX) 3823 bt_set_reg(bt, sreg); 3824 } else if (class == BPF_JMP || class == BPF_JMP32) { 3825 if (bpf_pseudo_call(insn)) { 3826 int subprog_insn_idx, subprog; 3827 3828 subprog_insn_idx = idx + insn->imm + 1; 3829 subprog = find_subprog(env, subprog_insn_idx); 3830 if (subprog < 0) 3831 return -EFAULT; 3832 3833 if (subprog_is_global(env, subprog)) { 3834 /* check that jump history doesn't have any 3835 * extra instructions from subprog; the next 3836 * instruction after call to global subprog 3837 * should be literally next instruction in 3838 * caller program 3839 */ 3840 WARN_ONCE(idx + 1 != subseq_idx, "verifier backtracking bug"); 3841 /* r1-r5 are invalidated after subprog call, 3842 * so for global func call it shouldn't be set 3843 * anymore 3844 */ 3845 if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) { 3846 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); 3847 WARN_ONCE(1, "verifier backtracking bug"); 3848 return -EFAULT; 3849 } 3850 /* global subprog always sets R0 */ 3851 bt_clear_reg(bt, BPF_REG_0); 3852 return 0; 3853 } else { 3854 /* static subprog call instruction, which 3855 * means that we are exiting current subprog, 3856 * so only r1-r5 could be still requested as 3857 * precise, r0 and r6-r10 or any stack slot in 3858 * the current frame should be zero by now 3859 */ 3860 if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) { 3861 verbose(env, "BUG regs %x\n", bt_reg_mask(bt)); 3862 WARN_ONCE(1, "verifier backtracking bug"); 3863 return -EFAULT; 3864 } 3865 /* we don't track register spills perfectly, 3866 * so fallback to force-precise instead of failing */ 3867 if (bt_stack_mask(bt) != 0) 3868 return -ENOTSUPP; 3869 /* propagate r1-r5 to the caller */ 3870 
for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
3871 if (bt_is_reg_set(bt, i)) {
3872 bt_clear_reg(bt, i);
3873 bt_set_frame_reg(bt, bt->frame - 1, i);
3874 }
3875 }
3876 if (bt_subprog_exit(bt))
3877 return -EFAULT;
3878 return 0;
3879 }
3880 } else if ((bpf_helper_call(insn) &&
3881 is_callback_calling_function(insn->imm) &&
3882 !is_async_callback_calling_function(insn->imm)) ||
3883 (bpf_pseudo_kfunc_call(insn) && is_callback_calling_kfunc(insn->imm))) {
3884 /* callback-calling helper or kfunc call, which means
3885 * we are exiting from subprog, but unlike the subprog
3886 * call handling above, we shouldn't propagate
3887 * precision of r1-r5 (if any requested), as they are
3888 * not actually arguments passed directly to callback
3889 * subprogs
3890 */
3891 if (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) {
3892 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3893 WARN_ONCE(1, "verifier backtracking bug");
3894 return -EFAULT;
3895 }
3896 if (bt_stack_mask(bt) != 0)
3897 return -ENOTSUPP;
3898 /* clear r1-r5 in callback subprog's mask */
3899 for (i = BPF_REG_1; i <= BPF_REG_5; i++)
3900 bt_clear_reg(bt, i);
3901 if (bt_subprog_exit(bt))
3902 return -EFAULT;
3903 return 0;
3904 } else if (opcode == BPF_CALL) {
3905 /* kfunc with imm==0 is invalid and fixup_kfunc_call will
3906 * catch this error later. Make backtracking conservative
3907 * with ENOTSUPP.
3908 */
3909 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
3910 return -ENOTSUPP;
3911 /* regular helper call sets R0 */
3912 bt_clear_reg(bt, BPF_REG_0);
3913 if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
3914 /* if backtracing was looking for registers R1-R5
3915 * they should have been found already.
3916 */
3917 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3918 WARN_ONCE(1, "verifier backtracking bug");
3919 return -EFAULT;
3920 }
3921 } else if (opcode == BPF_EXIT) {
3922 bool r0_precise;
3923
3924 if (bt_reg_mask(bt) & BPF_REGMASK_ARGS) {
3925 /* if backtracing was looking for registers R1-R5
3926 * they should have been found already.
3927 */
3928 verbose(env, "BUG regs %x\n", bt_reg_mask(bt));
3929 WARN_ONCE(1, "verifier backtracking bug");
3930 return -EFAULT;
3931 }
3932
3933 /* BPF_EXIT in subprog or callback always returns
3934 * right after the call instruction, so by checking
3935 * whether the instruction at subseq_idx-1 is subprog
3936 * call or not we can distinguish actual exit from
3937 * *subprog* from exit from *callback*. In the former
3938 * case, we need to propagate r0 precision, if
3939 * necessary. In the latter case we never do that.
3940 */
3941 r0_precise = subseq_idx - 1 >= 0 &&
3942 bpf_pseudo_call(&env->prog->insnsi[subseq_idx - 1]) &&
3943 bt_is_reg_set(bt, BPF_REG_0);
3944
3945 bt_clear_reg(bt, BPF_REG_0);
3946 if (bt_subprog_enter(bt))
3947 return -EFAULT;
3948
3949 if (r0_precise)
3950 bt_set_reg(bt, BPF_REG_0);
3951 /* r6-r9 and stack slots will stay set in caller frame
3952 * bitmasks until we return back from callee(s)
3953 */
3954 return 0;
3955 } else if (BPF_SRC(insn->code) == BPF_X) {
3956 if (!bt_is_reg_set(bt, dreg) && !bt_is_reg_set(bt, sreg))
3957 return 0;
3958 /* dreg <cond> sreg
3959 * Both dreg and sreg need precision before
3960 * this insn. If only sreg was marked precise
3961 * before it would be equally necessary to
3962 * propagate it to dreg.
3963 */
3964 bt_set_reg(bt, dreg);
3965 bt_set_reg(bt, sreg);
3966 /* else dreg <cond> K
3967 * Only dreg still needs precision before
3968 * this insn, so for the K-based conditional
3969 * there is nothing new to be marked.
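* (for example, a hypothetical "if r3 > 5 goto +2" changes nothing here:
* r3 simply keeps whatever precision request it already had)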
3970 */ 3971 } 3972 } else if (class == BPF_LD) { 3973 if (!bt_is_reg_set(bt, dreg)) 3974 return 0; 3975 bt_clear_reg(bt, dreg); 3976 /* It's ld_imm64 or ld_abs or ld_ind. 3977 * For ld_imm64 no further tracking of precision 3978 * into parent is necessary 3979 */ 3980 if (mode == BPF_IND || mode == BPF_ABS) 3981 /* to be analyzed */ 3982 return -ENOTSUPP; 3983 } 3984 return 0; 3985 } 3986 3987 /* the scalar precision tracking algorithm: 3988 * . at the start all registers have precise=false. 3989 * . scalar ranges are tracked as normal through alu and jmp insns. 3990 * . once precise value of the scalar register is used in: 3991 * . ptr + scalar alu 3992 * . if (scalar cond K|scalar) 3993 * . helper_call(.., scalar, ...) where ARG_CONST is expected 3994 * backtrack through the verifier states and mark all registers and 3995 * stack slots with spilled constants that these scalar regisers 3996 * should be precise. 3997 * . during state pruning two registers (or spilled stack slots) 3998 * are equivalent if both are not precise. 3999 * 4000 * Note the verifier cannot simply walk register parentage chain, 4001 * since many different registers and stack slots could have been 4002 * used to compute single precise scalar. 4003 * 4004 * The approach of starting with precise=true for all registers and then 4005 * backtrack to mark a register as not precise when the verifier detects 4006 * that program doesn't care about specific value (e.g., when helper 4007 * takes register as ARG_ANYTHING parameter) is not safe. 4008 * 4009 * It's ok to walk single parentage chain of the verifier states. 4010 * It's possible that this backtracking will go all the way till 1st insn. 4011 * All other branches will be explored for needing precision later. 4012 * 4013 * The backtracking needs to deal with cases like: 4014 * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0) 4015 * r9 -= r8 4016 * r5 = r9 4017 * if r5 > 0x79f goto pc+7 4018 * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff)) 4019 * r5 += 1 4020 * ... 4021 * call bpf_perf_event_output#25 4022 * where .arg5_type = ARG_CONST_SIZE_OR_ZERO 4023 * 4024 * and this case: 4025 * r6 = 1 4026 * call foo // uses callee's r6 inside to compute r0 4027 * r0 += r6 4028 * if r0 == 0 goto 4029 * 4030 * to track above reg_mask/stack_mask needs to be independent for each frame. 4031 * 4032 * Also if parent's curframe > frame where backtracking started, 4033 * the verifier need to mark registers in both frames, otherwise callees 4034 * may incorrectly prune callers. This is similar to 4035 * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences") 4036 * 4037 * For now backtracking falls back into conservative marking. 4038 */ 4039 static void mark_all_scalars_precise(struct bpf_verifier_env *env, 4040 struct bpf_verifier_state *st) 4041 { 4042 struct bpf_func_state *func; 4043 struct bpf_reg_state *reg; 4044 int i, j; 4045 4046 if (env->log.level & BPF_LOG_LEVEL2) { 4047 verbose(env, "mark_precise: frame%d: falling back to forcing all scalars precise\n", 4048 st->curframe); 4049 } 4050 4051 /* big hammer: mark all scalars precise in this path. 4052 * pop_stack may still get !precise scalars. 4053 * We also skip current state and go straight to first parent state, 4054 * because precision markings in current non-checkpointed state are 4055 * not needed. See why in the comment in __mark_chain_precision below. 
4056 */ 4057 for (st = st->parent; st; st = st->parent) { 4058 for (i = 0; i <= st->curframe; i++) { 4059 func = st->frame[i]; 4060 for (j = 0; j < BPF_REG_FP; j++) { 4061 reg = &func->regs[j]; 4062 if (reg->type != SCALAR_VALUE || reg->precise) 4063 continue; 4064 reg->precise = true; 4065 if (env->log.level & BPF_LOG_LEVEL2) { 4066 verbose(env, "force_precise: frame%d: forcing r%d to be precise\n", 4067 i, j); 4068 } 4069 } 4070 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { 4071 if (!is_spilled_reg(&func->stack[j])) 4072 continue; 4073 reg = &func->stack[j].spilled_ptr; 4074 if (reg->type != SCALAR_VALUE || reg->precise) 4075 continue; 4076 reg->precise = true; 4077 if (env->log.level & BPF_LOG_LEVEL2) { 4078 verbose(env, "force_precise: frame%d: forcing fp%d to be precise\n", 4079 i, -(j + 1) * 8); 4080 } 4081 } 4082 } 4083 } 4084 } 4085 4086 static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st) 4087 { 4088 struct bpf_func_state *func; 4089 struct bpf_reg_state *reg; 4090 int i, j; 4091 4092 for (i = 0; i <= st->curframe; i++) { 4093 func = st->frame[i]; 4094 for (j = 0; j < BPF_REG_FP; j++) { 4095 reg = &func->regs[j]; 4096 if (reg->type != SCALAR_VALUE) 4097 continue; 4098 reg->precise = false; 4099 } 4100 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { 4101 if (!is_spilled_reg(&func->stack[j])) 4102 continue; 4103 reg = &func->stack[j].spilled_ptr; 4104 if (reg->type != SCALAR_VALUE) 4105 continue; 4106 reg->precise = false; 4107 } 4108 } 4109 } 4110 4111 static bool idset_contains(struct bpf_idset *s, u32 id) 4112 { 4113 u32 i; 4114 4115 for (i = 0; i < s->count; ++i) 4116 if (s->ids[i] == id) 4117 return true; 4118 4119 return false; 4120 } 4121 4122 static int idset_push(struct bpf_idset *s, u32 id) 4123 { 4124 if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids))) 4125 return -EFAULT; 4126 s->ids[s->count++] = id; 4127 return 0; 4128 } 4129 4130 static void idset_reset(struct bpf_idset *s) 4131 { 4132 s->count = 0; 4133 } 4134 4135 /* Collect a set of IDs for all registers currently marked as precise in env->bt. 4136 * Mark all registers with these IDs as precise. 
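*
* A minimal illustration (hypothetical program, for explanation only):
*
*   r1 = r2        // r1 and r2 now share the same scalar ID
*   if r1 > 10 goto exit
*   r3 = r10
*   r3 += r1       // backtracking requests precision for r1 only
*
* Since find_equal_scalars() may have propagated range knowledge between
* r1 and r2 at the conditional jump, r2 must become precise as well; this
* function adds all such same-ID registers/slots to the masks.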
4137 */ 4138 static int mark_precise_scalar_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st) 4139 { 4140 struct bpf_idset *precise_ids = &env->idset_scratch; 4141 struct backtrack_state *bt = &env->bt; 4142 struct bpf_func_state *func; 4143 struct bpf_reg_state *reg; 4144 DECLARE_BITMAP(mask, 64); 4145 int i, fr; 4146 4147 idset_reset(precise_ids); 4148 4149 for (fr = bt->frame; fr >= 0; fr--) { 4150 func = st->frame[fr]; 4151 4152 bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr)); 4153 for_each_set_bit(i, mask, 32) { 4154 reg = &func->regs[i]; 4155 if (!reg->id || reg->type != SCALAR_VALUE) 4156 continue; 4157 if (idset_push(precise_ids, reg->id)) 4158 return -EFAULT; 4159 } 4160 4161 bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr)); 4162 for_each_set_bit(i, mask, 64) { 4163 if (i >= func->allocated_stack / BPF_REG_SIZE) 4164 break; 4165 if (!is_spilled_scalar_reg(&func->stack[i])) 4166 continue; 4167 reg = &func->stack[i].spilled_ptr; 4168 if (!reg->id) 4169 continue; 4170 if (idset_push(precise_ids, reg->id)) 4171 return -EFAULT; 4172 } 4173 } 4174 4175 for (fr = 0; fr <= st->curframe; ++fr) { 4176 func = st->frame[fr]; 4177 4178 for (i = BPF_REG_0; i < BPF_REG_10; ++i) { 4179 reg = &func->regs[i]; 4180 if (!reg->id) 4181 continue; 4182 if (!idset_contains(precise_ids, reg->id)) 4183 continue; 4184 bt_set_frame_reg(bt, fr, i); 4185 } 4186 for (i = 0; i < func->allocated_stack / BPF_REG_SIZE; ++i) { 4187 if (!is_spilled_scalar_reg(&func->stack[i])) 4188 continue; 4189 reg = &func->stack[i].spilled_ptr; 4190 if (!reg->id) 4191 continue; 4192 if (!idset_contains(precise_ids, reg->id)) 4193 continue; 4194 bt_set_frame_slot(bt, fr, i); 4195 } 4196 } 4197 4198 return 0; 4199 } 4200 4201 /* 4202 * __mark_chain_precision() backtracks BPF program instruction sequence and 4203 * chain of verifier states making sure that register *regno* (if regno >= 0) 4204 * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked 4205 * SCALARS, as well as any other registers and slots that contribute to 4206 * a tracked state of given registers/stack slots, depending on specific BPF 4207 * assembly instructions (see backtrack_insns() for exact instruction handling 4208 * logic). This backtracking relies on recorded jmp_history and is able to 4209 * traverse entire chain of parent states. This process ends only when all the 4210 * necessary registers/slots and their transitive dependencies are marked as 4211 * precise. 4212 * 4213 * One important and subtle aspect is that precise marks *do not matter* in 4214 * the currently verified state (current state). It is important to understand 4215 * why this is the case. 4216 * 4217 * First, note that current state is the state that is not yet "checkpointed", 4218 * i.e., it is not yet put into env->explored_states, and it has no children 4219 * states as well. It's ephemeral, and can end up either a) being discarded if 4220 * compatible explored state is found at some point or BPF_EXIT instruction is 4221 * reached or b) checkpointed and put into env->explored_states, branching out 4222 * into one or more children states. 4223 * 4224 * In the former case, precise markings in current state are completely 4225 * ignored by state comparison code (see regsafe() for details). Only 4226 * checkpointed ("old") state precise markings are important, and if old 4227 * state's register/slot is precise, regsafe() assumes current state's 4228 * register/slot as precise and checks value ranges exactly and precisely. 
If
4229 * states turn out to be compatible, current state's necessary precise
4230 * markings and any required parent states' precise markings are enforced
4231 * after the fact with propagate_precision() logic. But it's
4232 * important to realize that in this case, even after marking current state
4233 * registers/slots as precise, we immediately discard current state. So what
4234 * actually matters is any of the precise markings propagated into current
4235 * state's parent states, which are always checkpointed (due to b) case above).
4236 * As such, for scenario a) it doesn't matter if current state has precise
4237 * markings set or not.
4238 *
4239 * Now, for the scenario b), checkpointing and forking into child(ren)
4240 * state(s). Note that before current state gets to checkpointing step, any
4241 * processed instruction always assumes precise SCALAR register/slot
4242 * knowledge: if precise value or range is useful to prune jump branch, BPF
4243 * verifier takes this opportunity enthusiastically. Similarly, when
4244 * register's value is used to calculate offset or memory address, exact
4245 * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
4246 * what we mentioned above about state comparison ignoring precise markings,
4247 * BPF verifier ignores and also assumes precise
4248 * markings *at will* during instruction verification process. But as verifier
4249 * assumes precision, it also propagates any precision dependencies across
4250 * parent states, which are not yet finalized, so can be further restricted
4251 * based on new knowledge gained from restrictions enforced by their children
4252 * states. This is so that once those parent states are finalized, i.e., when
4253 * they have no more active children state, state comparison logic in
4254 * is_state_visited() would enforce strict and precise SCALAR ranges, if
4255 * required for correctness.
4256 *
4257 * To build a bit more intuition, note also that once a state is checkpointed,
4258 * the path we took to get to that state is not important. This is a crucial
4259 * property for state pruning. When state is checkpointed and finalized at
4260 * some instruction index, it can be correctly and safely used to "short
4261 * circuit" any *compatible* state that reaches exactly the same instruction
4262 * index. I.e., if we jumped to that instruction from a completely different
4263 * code path than original finalized state was derived from, it doesn't
4264 * matter, current state can be discarded because from that instruction
4265 * forward having a compatible state will ensure we will safely reach the
4266 * exit. States describe preconditions for further exploration, but completely
4267 * forget the history of how we got here.
4268 *
4269 * This also means that even if we needed precise SCALAR range to get to
4270 * finalized state, but from that point forward *that same* SCALAR register is
4271 * never used in a precise context (i.e., its precise value is not needed for
4272 * correctness), it's correct and safe to mark such register as "imprecise"
4273 * (i.e., precise marking set to false). This is what we rely on when we do
4274 * not set precise marking in current state. If no child state requires
4275 * precision for any given SCALAR register, it's safe to dictate that it can
4276 * be imprecise.
If any child state does require this register to be precise, 4277 * we'll mark it precise later retroactively during precise markings 4278 * propagation from child state to parent states. 4279 * 4280 * Skipping precise marking setting in current state is a mild version of 4281 * relying on the above observation. But we can utilize this property even 4282 * more aggressively by proactively forgetting any precise marking in the 4283 * current state (which we inherited from the parent state), right before we 4284 * checkpoint it and branch off into new child state. This is done by 4285 * mark_all_scalars_imprecise() to hopefully get more permissive and generic 4286 * finalized states which help in short circuiting more future states. 4287 */ 4288 static int __mark_chain_precision(struct bpf_verifier_env *env, int regno) 4289 { 4290 struct backtrack_state *bt = &env->bt; 4291 struct bpf_verifier_state *st = env->cur_state; 4292 int first_idx = st->first_insn_idx; 4293 int last_idx = env->insn_idx; 4294 int subseq_idx = -1; 4295 struct bpf_func_state *func; 4296 struct bpf_reg_state *reg; 4297 bool skip_first = true; 4298 int i, fr, err; 4299 4300 if (!env->bpf_capable) 4301 return 0; 4302 4303 /* set frame number from which we are starting to backtrack */ 4304 bt_init(bt, env->cur_state->curframe); 4305 4306 /* Do sanity checks against current state of register and/or stack 4307 * slot, but don't set precise flag in current state, as precision 4308 * tracking in the current state is unnecessary. 4309 */ 4310 func = st->frame[bt->frame]; 4311 if (regno >= 0) { 4312 reg = &func->regs[regno]; 4313 if (reg->type != SCALAR_VALUE) { 4314 WARN_ONCE(1, "backtracing misuse"); 4315 return -EFAULT; 4316 } 4317 bt_set_reg(bt, regno); 4318 } 4319 4320 if (bt_empty(bt)) 4321 return 0; 4322 4323 for (;;) { 4324 DECLARE_BITMAP(mask, 64); 4325 u32 history = st->jmp_history_cnt; 4326 4327 if (env->log.level & BPF_LOG_LEVEL2) { 4328 verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n", 4329 bt->frame, last_idx, first_idx, subseq_idx); 4330 } 4331 4332 /* If some register with scalar ID is marked as precise, 4333 * make sure that all registers sharing this ID are also precise. 4334 * This is needed to estimate effect of find_equal_scalars(). 4335 * Do this at the last instruction of each state, 4336 * bpf_reg_state::id fields are valid for these instructions. 4337 * 4338 * Allows to track precision in situation like below: 4339 * 4340 * r2 = unknown value 4341 * ... 4342 * --- state #0 --- 4343 * ... 4344 * r1 = r2 // r1 and r2 now share the same ID 4345 * ... 4346 * --- state #1 {r1.id = A, r2.id = A} --- 4347 * ... 4348 * if (r2 > 10) goto exit; // find_equal_scalars() assigns range to r1 4349 * ... 
4350 * --- state #2 {r1.id = A, r2.id = A} --- 4351 * r3 = r10 4352 * r3 += r1 // need to mark both r1 and r2 4353 */ 4354 if (mark_precise_scalar_ids(env, st)) 4355 return -EFAULT; 4356 4357 if (last_idx < 0) { 4358 /* we are at the entry into subprog, which 4359 * is expected for global funcs, but only if 4360 * requested precise registers are R1-R5 4361 * (which are global func's input arguments) 4362 */ 4363 if (st->curframe == 0 && 4364 st->frame[0]->subprogno > 0 && 4365 st->frame[0]->callsite == BPF_MAIN_FUNC && 4366 bt_stack_mask(bt) == 0 && 4367 (bt_reg_mask(bt) & ~BPF_REGMASK_ARGS) == 0) { 4368 bitmap_from_u64(mask, bt_reg_mask(bt)); 4369 for_each_set_bit(i, mask, 32) { 4370 reg = &st->frame[0]->regs[i]; 4371 bt_clear_reg(bt, i); 4372 if (reg->type == SCALAR_VALUE) 4373 reg->precise = true; 4374 } 4375 return 0; 4376 } 4377 4378 verbose(env, "BUG backtracking func entry subprog %d reg_mask %x stack_mask %llx\n", 4379 st->frame[0]->subprogno, bt_reg_mask(bt), bt_stack_mask(bt)); 4380 WARN_ONCE(1, "verifier backtracking bug"); 4381 return -EFAULT; 4382 } 4383 4384 for (i = last_idx;;) { 4385 if (skip_first) { 4386 err = 0; 4387 skip_first = false; 4388 } else { 4389 err = backtrack_insn(env, i, subseq_idx, bt); 4390 } 4391 if (err == -ENOTSUPP) { 4392 mark_all_scalars_precise(env, env->cur_state); 4393 bt_reset(bt); 4394 return 0; 4395 } else if (err) { 4396 return err; 4397 } 4398 if (bt_empty(bt)) 4399 /* Found assignment(s) into tracked register in this state. 4400 * Since this state is already marked, just return. 4401 * Nothing to be tracked further in the parent state. 4402 */ 4403 return 0; 4404 if (i == first_idx) 4405 break; 4406 subseq_idx = i; 4407 i = get_prev_insn_idx(st, i, &history); 4408 if (i >= env->prog->len) { 4409 /* This can happen if backtracking reached insn 0 4410 * and there are still reg_mask or stack_mask 4411 * to backtrack. 4412 * It means the backtracking missed the spot where 4413 * particular register was initialized with a constant. 4414 */ 4415 verbose(env, "BUG backtracking idx %d\n", i); 4416 WARN_ONCE(1, "verifier backtracking bug"); 4417 return -EFAULT; 4418 } 4419 } 4420 st = st->parent; 4421 if (!st) 4422 break; 4423 4424 for (fr = bt->frame; fr >= 0; fr--) { 4425 func = st->frame[fr]; 4426 bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr)); 4427 for_each_set_bit(i, mask, 32) { 4428 reg = &func->regs[i]; 4429 if (reg->type != SCALAR_VALUE) { 4430 bt_clear_frame_reg(bt, fr, i); 4431 continue; 4432 } 4433 if (reg->precise) 4434 bt_clear_frame_reg(bt, fr, i); 4435 else 4436 reg->precise = true; 4437 } 4438 4439 bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr)); 4440 for_each_set_bit(i, mask, 64) { 4441 if (i >= func->allocated_stack / BPF_REG_SIZE) { 4442 /* the sequence of instructions: 4443 * 2: (bf) r3 = r10 4444 * 3: (7b) *(u64 *)(r3 -8) = r0 4445 * 4: (79) r4 = *(u64 *)(r10 -8) 4446 * doesn't contain jmps. It's backtracked 4447 * as a single block. 4448 * During backtracking insn 3 is not recognized as 4449 * stack access, so at the end of backtracking 4450 * stack slot fp-8 is still marked in stack_mask. 4451 * However the parent state may not have accessed 4452 * fp-8 and it's "unallocated" stack space. 4453 * In such case fallback to conservative. 
4454 */ 4455 mark_all_scalars_precise(env, env->cur_state); 4456 bt_reset(bt); 4457 return 0; 4458 } 4459 4460 if (!is_spilled_scalar_reg(&func->stack[i])) { 4461 bt_clear_frame_slot(bt, fr, i); 4462 continue; 4463 } 4464 reg = &func->stack[i].spilled_ptr; 4465 if (reg->precise) 4466 bt_clear_frame_slot(bt, fr, i); 4467 else 4468 reg->precise = true; 4469 } 4470 if (env->log.level & BPF_LOG_LEVEL2) { 4471 fmt_reg_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, 4472 bt_frame_reg_mask(bt, fr)); 4473 verbose(env, "mark_precise: frame%d: parent state regs=%s ", 4474 fr, env->tmp_str_buf); 4475 fmt_stack_mask(env->tmp_str_buf, TMP_STR_BUF_LEN, 4476 bt_frame_stack_mask(bt, fr)); 4477 verbose(env, "stack=%s: ", env->tmp_str_buf); 4478 print_verifier_state(env, func, true); 4479 } 4480 } 4481 4482 if (bt_empty(bt)) 4483 return 0; 4484 4485 subseq_idx = first_idx; 4486 last_idx = st->last_insn_idx; 4487 first_idx = st->first_insn_idx; 4488 } 4489 4490 /* if we still have requested precise regs or slots, we missed 4491 * something (e.g., stack access through non-r10 register), so 4492 * fallback to marking all precise 4493 */ 4494 if (!bt_empty(bt)) { 4495 mark_all_scalars_precise(env, env->cur_state); 4496 bt_reset(bt); 4497 } 4498 4499 return 0; 4500 } 4501 4502 int mark_chain_precision(struct bpf_verifier_env *env, int regno) 4503 { 4504 return __mark_chain_precision(env, regno); 4505 } 4506 4507 /* mark_chain_precision_batch() assumes that env->bt is set in the caller to 4508 * desired reg and stack masks across all relevant frames 4509 */ 4510 static int mark_chain_precision_batch(struct bpf_verifier_env *env) 4511 { 4512 return __mark_chain_precision(env, -1); 4513 } 4514 4515 static bool is_spillable_regtype(enum bpf_reg_type type) 4516 { 4517 switch (base_type(type)) { 4518 case PTR_TO_MAP_VALUE: 4519 case PTR_TO_STACK: 4520 case PTR_TO_CTX: 4521 case PTR_TO_PACKET: 4522 case PTR_TO_PACKET_META: 4523 case PTR_TO_PACKET_END: 4524 case PTR_TO_FLOW_KEYS: 4525 case CONST_PTR_TO_MAP: 4526 case PTR_TO_SOCKET: 4527 case PTR_TO_SOCK_COMMON: 4528 case PTR_TO_TCP_SOCK: 4529 case PTR_TO_XDP_SOCK: 4530 case PTR_TO_BTF_ID: 4531 case PTR_TO_BUF: 4532 case PTR_TO_MEM: 4533 case PTR_TO_FUNC: 4534 case PTR_TO_MAP_KEY: 4535 return true; 4536 default: 4537 return false; 4538 } 4539 } 4540 4541 /* Does this register contain a constant zero? 
*/ 4542 static bool register_is_null(struct bpf_reg_state *reg) 4543 { 4544 return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); 4545 } 4546 4547 static bool register_is_const(struct bpf_reg_state *reg) 4548 { 4549 return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); 4550 } 4551 4552 static bool __is_scalar_unbounded(struct bpf_reg_state *reg) 4553 { 4554 return tnum_is_unknown(reg->var_off) && 4555 reg->smin_value == S64_MIN && reg->smax_value == S64_MAX && 4556 reg->umin_value == 0 && reg->umax_value == U64_MAX && 4557 reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX && 4558 reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX; 4559 } 4560 4561 static bool register_is_bounded(struct bpf_reg_state *reg) 4562 { 4563 return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg); 4564 } 4565 4566 static bool __is_pointer_value(bool allow_ptr_leaks, 4567 const struct bpf_reg_state *reg) 4568 { 4569 if (allow_ptr_leaks) 4570 return false; 4571 4572 return reg->type != SCALAR_VALUE; 4573 } 4574 4575 /* Copy src state preserving dst->parent and dst->live fields */ 4576 static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src) 4577 { 4578 struct bpf_reg_state *parent = dst->parent; 4579 enum bpf_reg_liveness live = dst->live; 4580 4581 *dst = *src; 4582 dst->parent = parent; 4583 dst->live = live; 4584 } 4585 4586 static void save_register_state(struct bpf_func_state *state, 4587 int spi, struct bpf_reg_state *reg, 4588 int size) 4589 { 4590 int i; 4591 4592 copy_register_state(&state->stack[spi].spilled_ptr, reg); 4593 if (size == BPF_REG_SIZE) 4594 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 4595 4596 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) 4597 state->stack[spi].slot_type[i - 1] = STACK_SPILL; 4598 4599 /* size < 8 bytes spill */ 4600 for (; i; i--) 4601 scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]); 4602 } 4603 4604 static bool is_bpf_st_mem(struct bpf_insn *insn) 4605 { 4606 return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM; 4607 } 4608 4609 /* check_stack_{read,write}_fixed_off functions track spill/fill of registers, 4610 * stack boundary and alignment are checked in check_mem_access() 4611 */ 4612 static int check_stack_write_fixed_off(struct bpf_verifier_env *env, 4613 /* stack frame we're writing to */ 4614 struct bpf_func_state *state, 4615 int off, int size, int value_regno, 4616 int insn_idx) 4617 { 4618 struct bpf_func_state *cur; /* state of the current function */ 4619 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; 4620 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; 4621 struct bpf_reg_state *reg = NULL; 4622 u32 dst_reg = insn->dst_reg; 4623 4624 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); 4625 if (err) 4626 return err; 4627 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 4628 * so it's aligned access and [off, off + size) are within stack limits 4629 */ 4630 if (!env->allow_ptr_leaks && 4631 state->stack[spi].slot_type[0] == STACK_SPILL && 4632 size != BPF_REG_SIZE) { 4633 verbose(env, "attempt to corrupt spilled pointer on stack\n"); 4634 return -EACCES; 4635 } 4636 4637 cur = env->cur_state->frame[env->cur_state->curframe]; 4638 if (value_regno >= 0) 4639 reg = &cur->regs[value_regno]; 4640 if (!env->bypass_spec_v4) { 4641 bool sanitize = reg && is_spillable_regtype(reg->type); 4642 4643 for (i = 0; i < size; i++) { 4644 u8 type = state->stack[spi].slot_type[i]; 4645 4646 if 
(type != STACK_MISC && type != STACK_ZERO) { 4647 sanitize = true; 4648 break; 4649 } 4650 } 4651 4652 if (sanitize) 4653 env->insn_aux_data[insn_idx].sanitize_stack_spill = true; 4654 } 4655 4656 err = destroy_if_dynptr_stack_slot(env, state, spi); 4657 if (err) 4658 return err; 4659 4660 mark_stack_slot_scratched(env, spi); 4661 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && 4662 !register_is_null(reg) && env->bpf_capable) { 4663 if (dst_reg != BPF_REG_FP) { 4664 /* The backtracking logic can only recognize explicit 4665 * stack slot address like [fp - 8]. Other spill of 4666 * scalar via different register has to be conservative. 4667 * Backtrack from here and mark all registers as precise 4668 * that contributed into 'reg' being a constant. 4669 */ 4670 err = mark_chain_precision(env, value_regno); 4671 if (err) 4672 return err; 4673 } 4674 save_register_state(state, spi, reg, size); 4675 /* Break the relation on a narrowing spill. */ 4676 if (fls64(reg->umax_value) > BITS_PER_BYTE * size) 4677 state->stack[spi].spilled_ptr.id = 0; 4678 } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) && 4679 insn->imm != 0 && env->bpf_capable) { 4680 struct bpf_reg_state fake_reg = {}; 4681 4682 __mark_reg_known(&fake_reg, insn->imm); 4683 fake_reg.type = SCALAR_VALUE; 4684 save_register_state(state, spi, &fake_reg, size); 4685 } else if (reg && is_spillable_regtype(reg->type)) { 4686 /* register containing pointer is being spilled into stack */ 4687 if (size != BPF_REG_SIZE) { 4688 verbose_linfo(env, insn_idx, "; "); 4689 verbose(env, "invalid size of register spill\n"); 4690 return -EACCES; 4691 } 4692 if (state != cur && reg->type == PTR_TO_STACK) { 4693 verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); 4694 return -EINVAL; 4695 } 4696 save_register_state(state, spi, reg, size); 4697 } else { 4698 u8 type = STACK_MISC; 4699 4700 /* regular write of data into stack destroys any spilled ptr */ 4701 state->stack[spi].spilled_ptr.type = NOT_INIT; 4702 /* Mark slots as STACK_MISC if they belonged to spilled ptr/dynptr/iter. */ 4703 if (is_stack_slot_special(&state->stack[spi])) 4704 for (i = 0; i < BPF_REG_SIZE; i++) 4705 scrub_spilled_slot(&state->stack[spi].slot_type[i]); 4706 4707 /* only mark the slot as written if all 8 bytes were written 4708 * otherwise read propagation may incorrectly stop too soon 4709 * when stack slots are partially written. 4710 * This heuristic means that read propagation will be 4711 * conservative, since it will add reg_live_read marks 4712 * to stack slots all the way to first state when programs 4713 * writes+reads less than 8 bytes 4714 */ 4715 if (size == BPF_REG_SIZE) 4716 state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; 4717 4718 /* when we zero initialize stack slots mark them as such */ 4719 if ((reg && register_is_null(reg)) || 4720 (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) { 4721 /* backtracking doesn't work for STACK_ZERO yet. */ 4722 err = mark_chain_precision(env, value_regno); 4723 if (err) 4724 return err; 4725 type = STACK_ZERO; 4726 } 4727 4728 /* Mark slots affected by this stack write. */ 4729 for (i = 0; i < size; i++) 4730 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = 4731 type; 4732 } 4733 return 0; 4734 } 4735 4736 /* Write the stack: 'stack[ptr_regno + off] = value_regno'. 'ptr_regno' is 4737 * known to contain a variable offset. 
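* For illustration, a hypothetical sequence that is handled here (with r1
* a scalar known to lie in, say, [-16, -8]):
*
*   r2 = r10
*   r2 += r1                // r2 is now a stack pointer with variable offset
*   *(u64 *)(r2 + 0) = r0
*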
4738 * This function checks whether the write is permitted and conservatively 4739 * tracks the effects of the write, considering that each stack slot in the 4740 * dynamic range is potentially written to. 4741 * 4742 * 'off' includes 'regno->off'. 4743 * 'value_regno' can be -1, meaning that an unknown value is being written to 4744 * the stack. 4745 * 4746 * Spilled pointers in range are not marked as written because we don't know 4747 * what's going to be actually written. This means that read propagation for 4748 * future reads cannot be terminated by this write. 4749 * 4750 * For privileged programs, uninitialized stack slots are considered 4751 * initialized by this write (even though we don't know exactly what offsets 4752 * are going to be written to). The idea is that we don't want the verifier to 4753 * reject future reads that access slots written to through variable offsets. 4754 */ 4755 static int check_stack_write_var_off(struct bpf_verifier_env *env, 4756 /* func where register points to */ 4757 struct bpf_func_state *state, 4758 int ptr_regno, int off, int size, 4759 int value_regno, int insn_idx) 4760 { 4761 struct bpf_func_state *cur; /* state of the current function */ 4762 int min_off, max_off; 4763 int i, err; 4764 struct bpf_reg_state *ptr_reg = NULL, *value_reg = NULL; 4765 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; 4766 bool writing_zero = false; 4767 /* set if the fact that we're writing a zero is used to let any 4768 * stack slots remain STACK_ZERO 4769 */ 4770 bool zero_used = false; 4771 4772 cur = env->cur_state->frame[env->cur_state->curframe]; 4773 ptr_reg = &cur->regs[ptr_regno]; 4774 min_off = ptr_reg->smin_value + off; 4775 max_off = ptr_reg->smax_value + off + size; 4776 if (value_regno >= 0) 4777 value_reg = &cur->regs[value_regno]; 4778 if ((value_reg && register_is_null(value_reg)) || 4779 (!value_reg && is_bpf_st_mem(insn) && insn->imm == 0)) 4780 writing_zero = true; 4781 4782 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE)); 4783 if (err) 4784 return err; 4785 4786 for (i = min_off; i < max_off; i++) { 4787 int spi; 4788 4789 spi = __get_spi(i); 4790 err = destroy_if_dynptr_stack_slot(env, state, spi); 4791 if (err) 4792 return err; 4793 } 4794 4795 /* Variable offset writes destroy any spilled pointers in range. */ 4796 for (i = min_off; i < max_off; i++) { 4797 u8 new_type, *stype; 4798 int slot, spi; 4799 4800 slot = -i - 1; 4801 spi = slot / BPF_REG_SIZE; 4802 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 4803 mark_stack_slot_scratched(env, spi); 4804 4805 if (!env->allow_ptr_leaks && *stype != STACK_MISC && *stype != STACK_ZERO) { 4806 /* Reject the write if range we may write to has not 4807 * been initialized beforehand. If we didn't reject 4808 * here, the ptr status would be erased below (even 4809 * though not all slots are actually overwritten), 4810 * possibly opening the door to leaks. 4811 * 4812 * We do however catch STACK_INVALID case below, and 4813 * only allow reading possibly uninitialized memory 4814 * later for CAP_PERFMON, as the write may not happen to 4815 * that slot. 4816 */ 4817 verbose(env, "spilled ptr in range of var-offset stack write; insn %d, ptr off: %d", 4818 insn_idx, i); 4819 return -EINVAL; 4820 } 4821 4822 /* Erase all spilled pointers. */ 4823 state->stack[spi].spilled_ptr.type = NOT_INIT; 4824 4825 /* Update the slot type. 
*/ 4826 new_type = STACK_MISC; 4827 if (writing_zero && *stype == STACK_ZERO) { 4828 new_type = STACK_ZERO; 4829 zero_used = true; 4830 } 4831 /* If the slot is STACK_INVALID, we check whether it's OK to 4832 * pretend that it will be initialized by this write. The slot 4833 * might not actually be written to, and so if we mark it as 4834 * initialized future reads might leak uninitialized memory. 4835 * For privileged programs, we will accept such reads to slots 4836 * that may or may not be written because, if we're reject 4837 * them, the error would be too confusing. 4838 */ 4839 if (*stype == STACK_INVALID && !env->allow_uninit_stack) { 4840 verbose(env, "uninit stack in range of var-offset write prohibited for !root; insn %d, off: %d", 4841 insn_idx, i); 4842 return -EINVAL; 4843 } 4844 *stype = new_type; 4845 } 4846 if (zero_used) { 4847 /* backtracking doesn't work for STACK_ZERO yet. */ 4848 err = mark_chain_precision(env, value_regno); 4849 if (err) 4850 return err; 4851 } 4852 return 0; 4853 } 4854 4855 /* When register 'dst_regno' is assigned some values from stack[min_off, 4856 * max_off), we set the register's type according to the types of the 4857 * respective stack slots. If all the stack values are known to be zeros, then 4858 * so is the destination reg. Otherwise, the register is considered to be 4859 * SCALAR. This function does not deal with register filling; the caller must 4860 * ensure that all spilled registers in the stack range have been marked as 4861 * read. 4862 */ 4863 static void mark_reg_stack_read(struct bpf_verifier_env *env, 4864 /* func where src register points to */ 4865 struct bpf_func_state *ptr_state, 4866 int min_off, int max_off, int dst_regno) 4867 { 4868 struct bpf_verifier_state *vstate = env->cur_state; 4869 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 4870 int i, slot, spi; 4871 u8 *stype; 4872 int zeros = 0; 4873 4874 for (i = min_off; i < max_off; i++) { 4875 slot = -i - 1; 4876 spi = slot / BPF_REG_SIZE; 4877 mark_stack_slot_scratched(env, spi); 4878 stype = ptr_state->stack[spi].slot_type; 4879 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) 4880 break; 4881 zeros++; 4882 } 4883 if (zeros == max_off - min_off) { 4884 /* any access_size read into register is zero extended, 4885 * so the whole register == const_zero 4886 */ 4887 __mark_reg_const_zero(&state->regs[dst_regno]); 4888 /* backtracking doesn't support STACK_ZERO yet, 4889 * so mark it precise here, so that later 4890 * backtracking can stop here. 4891 * Backtracking may not need this if this register 4892 * doesn't participate in pointer adjustment. 4893 * Forward propagation of precise flag is not 4894 * necessary either. This mark is only to stop 4895 * backtracking. Any register that contributed 4896 * to const 0 was marked precise before spill. 4897 */ 4898 state->regs[dst_regno].precise = true; 4899 } else { 4900 /* have read misc data from the stack */ 4901 mark_reg_unknown(env, state->regs, dst_regno); 4902 } 4903 state->regs[dst_regno].live |= REG_LIVE_WRITTEN; 4904 } 4905 4906 /* Read the stack at 'off' and put the results into the register indicated by 4907 * 'dst_regno'. It handles reg filling if the addressed stack slot is a 4908 * spilled reg. 4909 * 4910 * 'dst_regno' can be -1, meaning that the read value is not going to a 4911 * register. 4912 * 4913 * The access is assumed to be within the current stack bounds. 
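*
* Illustrative shapes handled below (hypothetical, for explanation only):
*
*   *(u64 *)(r10 - 8) = r1   // earlier spill
*   r2 = *(u64 *)(r10 - 8)   // full fill: r2 restores r1's state
*   r3 = *(u32 *)(r10 - 8)   // narrow fill: rejected for a spilled pointer,
*                            // while a spilled scalar makes r3 an unknown scalar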
4914 */
4915 static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
4916 /* func where src register points to */
4917 struct bpf_func_state *reg_state,
4918 int off, int size, int dst_regno)
4919 {
4920 struct bpf_verifier_state *vstate = env->cur_state;
4921 struct bpf_func_state *state = vstate->frame[vstate->curframe];
4922 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
4923 struct bpf_reg_state *reg;
4924 u8 *stype, type;
4925
4926 stype = reg_state->stack[spi].slot_type;
4927 reg = &reg_state->stack[spi].spilled_ptr;
4928
4929 mark_stack_slot_scratched(env, spi);
4930
4931 if (is_spilled_reg(&reg_state->stack[spi])) {
4932 u8 spill_size = 1;
4933
4934 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--)
4935 spill_size++;
4936
4937 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) {
4938 if (reg->type != SCALAR_VALUE) {
4939 verbose_linfo(env, env->insn_idx, "; ");
4940 verbose(env, "invalid size of register fill\n");
4941 return -EACCES;
4942 }
4943
4944 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
4945 if (dst_regno < 0)
4946 return 0;
4947
4948 if (!(off % BPF_REG_SIZE) && size == spill_size) {
4949 /* The earlier check_reg_arg() has decided the
4950 * subreg_def for this insn. Save it first.
4951 */
4952 s32 subreg_def = state->regs[dst_regno].subreg_def;
4953
4954 copy_register_state(&state->regs[dst_regno], reg);
4955 state->regs[dst_regno].subreg_def = subreg_def;
4956 } else {
4957 for (i = 0; i < size; i++) {
4958 type = stype[(slot - i) % BPF_REG_SIZE];
4959 if (type == STACK_SPILL)
4960 continue;
4961 if (type == STACK_MISC)
4962 continue;
4963 if (type == STACK_INVALID && env->allow_uninit_stack)
4964 continue;
4965 verbose(env, "invalid read from stack off %d+%d size %d\n",
4966 off, i, size);
4967 return -EACCES;
4968 }
4969 mark_reg_unknown(env, state->regs, dst_regno);
4970 }
4971 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4972 return 0;
4973 }
4974
4975 if (dst_regno >= 0) {
4976 /* restore register state from stack */
4977 copy_register_state(&state->regs[dst_regno], reg);
4978 /* mark reg as written since spilled pointer state likely
4979 * has its liveness marks cleared by is_state_visited()
4980 * which resets stack/reg liveness for state transitions
4981 */
4982 state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
4983 } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
4984 /* If dst_regno==-1, the caller is asking us whether
4985 * it is acceptable to use this value as a SCALAR_VALUE
4986 * (e.g. for XADD).
4987 * We must not allow unprivileged callers to do that
4988 * with spilled pointers.
4989 */ 4990 verbose(env, "leaking pointer from stack off %d\n", 4991 off); 4992 return -EACCES; 4993 } 4994 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 4995 } else { 4996 for (i = 0; i < size; i++) { 4997 type = stype[(slot - i) % BPF_REG_SIZE]; 4998 if (type == STACK_MISC) 4999 continue; 5000 if (type == STACK_ZERO) 5001 continue; 5002 if (type == STACK_INVALID && env->allow_uninit_stack) 5003 continue; 5004 verbose(env, "invalid read from stack off %d+%d size %d\n", 5005 off, i, size); 5006 return -EACCES; 5007 } 5008 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 5009 if (dst_regno >= 0) 5010 mark_reg_stack_read(env, reg_state, off, off + size, dst_regno); 5011 } 5012 return 0; 5013 } 5014 5015 enum bpf_access_src { 5016 ACCESS_DIRECT = 1, /* the access is performed by an instruction */ 5017 ACCESS_HELPER = 2, /* the access is performed by a helper */ 5018 }; 5019 5020 static int check_stack_range_initialized(struct bpf_verifier_env *env, 5021 int regno, int off, int access_size, 5022 bool zero_size_allowed, 5023 enum bpf_access_src type, 5024 struct bpf_call_arg_meta *meta); 5025 5026 static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) 5027 { 5028 return cur_regs(env) + regno; 5029 } 5030 5031 /* Read the stack at 'ptr_regno + off' and put the result into the register 5032 * 'dst_regno'. 5033 * 'off' includes the pointer register's fixed offset(i.e. 'ptr_regno.off'), 5034 * but not its variable offset. 5035 * 'size' is assumed to be <= reg size and the access is assumed to be aligned. 5036 * 5037 * As opposed to check_stack_read_fixed_off, this function doesn't deal with 5038 * filling registers (i.e. reads of spilled register cannot be detected when 5039 * the offset is not fixed). We conservatively mark 'dst_regno' as containing 5040 * SCALAR_VALUE. That's why we assert that the 'ptr_regno' has a variable 5041 * offset; for a fixed offset check_stack_read_fixed_off should be used 5042 * instead. 5043 */ 5044 static int check_stack_read_var_off(struct bpf_verifier_env *env, 5045 int ptr_regno, int off, int size, int dst_regno) 5046 { 5047 /* The state of the source register. */ 5048 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 5049 struct bpf_func_state *ptr_state = func(env, reg); 5050 int err; 5051 int min_off, max_off; 5052 5053 /* Note that we pass a NULL meta, so raw access will not be permitted. 5054 */ 5055 err = check_stack_range_initialized(env, ptr_regno, off, size, 5056 false, ACCESS_DIRECT, NULL); 5057 if (err) 5058 return err; 5059 5060 min_off = reg->smin_value + off; 5061 max_off = reg->smax_value + off; 5062 mark_reg_stack_read(env, ptr_state, min_off, max_off + size, dst_regno); 5063 return 0; 5064 } 5065 5066 /* check_stack_read dispatches to check_stack_read_fixed_off or 5067 * check_stack_read_var_off. 5068 * 5069 * The caller must ensure that the offset falls within the allocated stack 5070 * bounds. 5071 * 5072 * 'dst_regno' is a register which will receive the value from the stack. It 5073 * can be -1, meaning that the read value is not going to a register. 5074 */ 5075 static int check_stack_read(struct bpf_verifier_env *env, 5076 int ptr_regno, int off, int size, 5077 int dst_regno) 5078 { 5079 struct bpf_reg_state *reg = reg_state(env, ptr_regno); 5080 struct bpf_func_state *state = func(env, reg); 5081 int err; 5082 /* Some accesses are only permitted with a static offset. 
*/
5083 bool var_off = !tnum_is_const(reg->var_off);
5084
5085 /* The offset is required to be static when reads don't go to a
5086 * register, in order to not leak pointers (see
5087 * check_stack_read_fixed_off).
5088 */
5089 if (dst_regno < 0 && var_off) {
5090 char tn_buf[48];
5091
5092 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
5093 verbose(env, "variable offset stack pointer cannot be passed into helper function; var_off=%s off=%d size=%d\n",
5094 tn_buf, off, size);
5095 return -EACCES;
5096 }
5097 /* Variable offset is prohibited for unprivileged mode for simplicity
5098 * since it requires corresponding support in Spectre masking for stack
5099 * ALU. See also retrieve_ptr_limit(). The check in
5100 * check_stack_access_for_ptr_arithmetic() called by
5101 * adjust_ptr_min_max_vals() prevents users from creating stack pointers
5102 * with variable offsets, therefore no check is required here. Further,
5103 * just checking it here would be insufficient as speculative stack
5104 * writes could still lead to unsafe speculative behaviour.
5105 */
5106 if (!var_off) {
5107 off += reg->var_off.value;
5108 err = check_stack_read_fixed_off(env, state, off, size,
5109 dst_regno);
5110 } else {
5111 /* Variable offset stack reads need more conservative handling
5112 * than fixed offset ones. Note that dst_regno >= 0 on this
5113 * branch.
5114 */
5115 err = check_stack_read_var_off(env, ptr_regno, off, size,
5116 dst_regno);
5117 }
5118 return err;
5119 }
5120
5121
5122 /* check_stack_write dispatches to check_stack_write_fixed_off or
5123 * check_stack_write_var_off.
5124 *
5125 * 'ptr_regno' is the register used as a pointer into the stack.
5126 * 'off' includes 'ptr_regno->off', but not its variable offset (if any).
5127 * 'value_regno' is the register whose value we're writing to the stack. It can
5128 * be -1, meaning that we're not writing from a register.
5129 *
5130 * The caller must ensure that the offset falls within the maximum stack size.
5131 */
5132 static int check_stack_write(struct bpf_verifier_env *env,
5133 int ptr_regno, int off, int size,
5134 int value_regno, int insn_idx)
5135 {
5136 struct bpf_reg_state *reg = reg_state(env, ptr_regno);
5137 struct bpf_func_state *state = func(env, reg);
5138 int err;
5139
5140 if (tnum_is_const(reg->var_off)) {
5141 off += reg->var_off.value;
5142 err = check_stack_write_fixed_off(env, state, off, size,
5143 value_regno, insn_idx);
5144 } else {
5145 /* Variable offset stack writes need more conservative handling
5146 * than fixed offset ones.
5147 */ 5148 err = check_stack_write_var_off(env, state, 5149 ptr_regno, off, size, 5150 value_regno, insn_idx); 5151 } 5152 return err; 5153 } 5154 5155 static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, 5156 int off, int size, enum bpf_access_type type) 5157 { 5158 struct bpf_reg_state *regs = cur_regs(env); 5159 struct bpf_map *map = regs[regno].map_ptr; 5160 u32 cap = bpf_map_flags_to_cap(map); 5161 5162 if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { 5163 verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", 5164 map->value_size, off, size); 5165 return -EACCES; 5166 } 5167 5168 if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { 5169 verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", 5170 map->value_size, off, size); 5171 return -EACCES; 5172 } 5173 5174 return 0; 5175 } 5176 5177 /* check read/write into memory region (e.g., map value, ringbuf sample, etc) */ 5178 static int __check_mem_access(struct bpf_verifier_env *env, int regno, 5179 int off, int size, u32 mem_size, 5180 bool zero_size_allowed) 5181 { 5182 bool size_ok = size > 0 || (size == 0 && zero_size_allowed); 5183 struct bpf_reg_state *reg; 5184 5185 if (off >= 0 && size_ok && (u64)off + size <= mem_size) 5186 return 0; 5187 5188 reg = &cur_regs(env)[regno]; 5189 switch (reg->type) { 5190 case PTR_TO_MAP_KEY: 5191 verbose(env, "invalid access to map key, key_size=%d off=%d size=%d\n", 5192 mem_size, off, size); 5193 break; 5194 case PTR_TO_MAP_VALUE: 5195 verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", 5196 mem_size, off, size); 5197 break; 5198 case PTR_TO_PACKET: 5199 case PTR_TO_PACKET_META: 5200 case PTR_TO_PACKET_END: 5201 verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", 5202 off, size, regno, reg->id, off, mem_size); 5203 break; 5204 case PTR_TO_MEM: 5205 default: 5206 verbose(env, "invalid access to memory, mem_size=%u off=%d size=%d\n", 5207 mem_size, off, size); 5208 } 5209 5210 return -EACCES; 5211 } 5212 5213 /* check read/write into a memory region with possible variable offset */ 5214 static int check_mem_region_access(struct bpf_verifier_env *env, u32 regno, 5215 int off, int size, u32 mem_size, 5216 bool zero_size_allowed) 5217 { 5218 struct bpf_verifier_state *vstate = env->cur_state; 5219 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 5220 struct bpf_reg_state *reg = &state->regs[regno]; 5221 int err; 5222 5223 /* We may have adjusted the register pointing to memory region, so we 5224 * need to try adding each of min_value and max_value to off 5225 * to make sure our theoretical access will be safe. 5226 * 5227 * The minimum value is only important with signed 5228 * comparisons where we can't assume the floor of a 5229 * value is 0. If we are using signed variables for our 5230 * index'es we need to make sure that whatever we use 5231 * will have a set floor within our range. 
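*
* A worked example with hypothetical numbers: for a 64-byte region with
* off = 16 and size = 4, an index register with smin_value = -20 is
* rejected below because 16 + (-20) lands before the region, while an
* index with smin_value = 0 and umax_value = 40 passes both the min
* check (16 + 0 >= 0) and the max check (16 + 40 + 4 <= 64).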
5232 */ 5233 if (reg->smin_value < 0 && 5234 (reg->smin_value == S64_MIN || 5235 (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || 5236 reg->smin_value + off < 0)) { 5237 verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", 5238 regno); 5239 return -EACCES; 5240 } 5241 err = __check_mem_access(env, regno, reg->smin_value + off, size, 5242 mem_size, zero_size_allowed); 5243 if (err) { 5244 verbose(env, "R%d min value is outside of the allowed memory range\n", 5245 regno); 5246 return err; 5247 } 5248 5249 /* If we haven't set a max value then we need to bail since we can't be 5250 * sure we won't do bad things. 5251 * If reg->umax_value + off could overflow, treat that as unbounded too. 5252 */ 5253 if (reg->umax_value >= BPF_MAX_VAR_OFF) { 5254 verbose(env, "R%d unbounded memory access, make sure to bounds check any such access\n", 5255 regno); 5256 return -EACCES; 5257 } 5258 err = __check_mem_access(env, regno, reg->umax_value + off, size, 5259 mem_size, zero_size_allowed); 5260 if (err) { 5261 verbose(env, "R%d max value is outside of the allowed memory range\n", 5262 regno); 5263 return err; 5264 } 5265 5266 return 0; 5267 } 5268 5269 static int __check_ptr_off_reg(struct bpf_verifier_env *env, 5270 const struct bpf_reg_state *reg, int regno, 5271 bool fixed_off_ok) 5272 { 5273 /* Access to this pointer-typed register or passing it to a helper 5274 * is only allowed in its original, unmodified form. 5275 */ 5276 5277 if (reg->off < 0) { 5278 verbose(env, "negative offset %s ptr R%d off=%d disallowed\n", 5279 reg_type_str(env, reg->type), regno, reg->off); 5280 return -EACCES; 5281 } 5282 5283 if (!fixed_off_ok && reg->off) { 5284 verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n", 5285 reg_type_str(env, reg->type), regno, reg->off); 5286 return -EACCES; 5287 } 5288 5289 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 5290 char tn_buf[48]; 5291 5292 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 5293 verbose(env, "variable %s access var_off=%s disallowed\n", 5294 reg_type_str(env, reg->type), tn_buf); 5295 return -EACCES; 5296 } 5297 5298 return 0; 5299 } 5300 5301 int check_ptr_off_reg(struct bpf_verifier_env *env, 5302 const struct bpf_reg_state *reg, int regno) 5303 { 5304 return __check_ptr_off_reg(env, reg, regno, false); 5305 } 5306 5307 static int map_kptr_match_type(struct bpf_verifier_env *env, 5308 struct btf_field *kptr_field, 5309 struct bpf_reg_state *reg, u32 regno) 5310 { 5311 const char *targ_name = btf_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id); 5312 int perm_flags; 5313 const char *reg_name = ""; 5314 5315 if (btf_is_kernel(reg->btf)) { 5316 perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU; 5317 5318 /* Only unreferenced case accepts untrusted pointers */ 5319 if (kptr_field->type == BPF_KPTR_UNREF) 5320 perm_flags |= PTR_UNTRUSTED; 5321 } else { 5322 perm_flags = PTR_MAYBE_NULL | MEM_ALLOC; 5323 if (kptr_field->type == BPF_KPTR_PERCPU) 5324 perm_flags |= MEM_PERCPU; 5325 } 5326 5327 if (base_type(reg->type) != PTR_TO_BTF_ID || (type_flag(reg->type) & ~perm_flags)) 5328 goto bad_type; 5329 5330 /* We need to verify reg->type and reg->btf, before accessing reg->btf */ 5331 reg_name = btf_type_name(reg->btf, reg->btf_id); 5332 5333 /* For ref_ptr case, release function check should ensure we get one 5334 * referenced PTR_TO_BTF_ID, and that its fixed offset is 0. For the 5335 * normal store of unreferenced kptr, we must ensure var_off is zero. 
5336 * Since ref_ptr cannot be accessed directly by BPF insns, checks for 5337 * reg->off and reg->ref_obj_id are not needed here. 5338 */ 5339 if (__check_ptr_off_reg(env, reg, regno, true)) 5340 return -EACCES; 5341 5342 /* A full type match is needed, as BTF can be vmlinux, module or prog BTF, and 5343 * we also need to take into account the reg->off. 5344 * 5345 * We want to support cases like: 5346 * 5347 * struct foo { 5348 * struct bar br; 5349 * struct baz bz; 5350 * }; 5351 * 5352 * struct foo *v; 5353 * v = func(); // PTR_TO_BTF_ID 5354 * val->foo = v; // reg->off is zero, btf and btf_id match type 5355 * val->bar = &v->br; // reg->off is still zero, but we need to retry with 5356 * // first member type of struct after comparison fails 5357 * val->baz = &v->bz; // reg->off is non-zero, so struct needs to be walked 5358 * // to match type 5359 * 5360 * In the kptr_ref case, check_func_arg_reg_off already ensures reg->off 5361 * is zero. We must also ensure that btf_struct_ids_match does not walk 5362 * the struct to match type against first member of struct, i.e. reject 5363 * second case from above. Hence, when type is BPF_KPTR_REF, we set 5364 * strict mode to true for type match. 5365 */ 5366 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, 5367 kptr_field->kptr.btf, kptr_field->kptr.btf_id, 5368 kptr_field->type != BPF_KPTR_UNREF)) 5369 goto bad_type; 5370 return 0; 5371 bad_type: 5372 verbose(env, "invalid kptr access, R%d type=%s%s ", regno, 5373 reg_type_str(env, reg->type), reg_name); 5374 verbose(env, "expected=%s%s", reg_type_str(env, PTR_TO_BTF_ID), targ_name); 5375 if (kptr_field->type == BPF_KPTR_UNREF) 5376 verbose(env, " or %s%s\n", reg_type_str(env, PTR_TO_BTF_ID | PTR_UNTRUSTED), 5377 targ_name); 5378 else 5379 verbose(env, "\n"); 5380 return -EINVAL; 5381 } 5382 5383 /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock() 5384 * can dereference RCU protected pointers and result is PTR_TRUSTED. 
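 *
 * For illustration: a sleepable program that brackets the access with the
 * bpf_rcu_read_lock()/bpf_rcu_read_unlock() kfuncs satisfies this, and so
 * does any non-sleepable program, which this function treats as always
 * running inside an RCU read-side critical section.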
5385 */ 5386 static bool in_rcu_cs(struct bpf_verifier_env *env) 5387 { 5388 return env->cur_state->active_rcu_lock || 5389 env->cur_state->active_lock.ptr || 5390 !env->prog->aux->sleepable; 5391 } 5392 5393 /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */ 5394 BTF_SET_START(rcu_protected_types) 5395 BTF_ID(struct, prog_test_ref_kfunc) 5396 #ifdef CONFIG_CGROUPS 5397 BTF_ID(struct, cgroup) 5398 #endif 5399 BTF_ID(struct, bpf_cpumask) 5400 BTF_ID(struct, task_struct) 5401 BTF_SET_END(rcu_protected_types) 5402 5403 static bool rcu_protected_object(const struct btf *btf, u32 btf_id) 5404 { 5405 if (!btf_is_kernel(btf)) 5406 return false; 5407 return btf_id_set_contains(&rcu_protected_types, btf_id); 5408 } 5409 5410 static bool rcu_safe_kptr(const struct btf_field *field) 5411 { 5412 const struct btf_field_kptr *kptr = &field->kptr; 5413 5414 return field->type == BPF_KPTR_PERCPU || 5415 (field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id)); 5416 } 5417 5418 static u32 btf_ld_kptr_type(struct bpf_verifier_env *env, struct btf_field *kptr_field) 5419 { 5420 if (rcu_safe_kptr(kptr_field) && in_rcu_cs(env)) { 5421 if (kptr_field->type != BPF_KPTR_PERCPU) 5422 return PTR_MAYBE_NULL | MEM_RCU; 5423 return PTR_MAYBE_NULL | MEM_RCU | MEM_PERCPU; 5424 } 5425 return PTR_MAYBE_NULL | PTR_UNTRUSTED; 5426 } 5427 5428 static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno, 5429 int value_regno, int insn_idx, 5430 struct btf_field *kptr_field) 5431 { 5432 struct bpf_insn *insn = &env->prog->insnsi[insn_idx]; 5433 int class = BPF_CLASS(insn->code); 5434 struct bpf_reg_state *val_reg; 5435 5436 /* Things we already checked for in check_map_access and caller: 5437 * - Reject cases where variable offset may touch kptr 5438 * - size of access (must be BPF_DW) 5439 * - tnum_is_const(reg->var_off) 5440 * - kptr_field->offset == off + reg->var_off.value 5441 */ 5442 /* Only BPF_[LDX,STX,ST] | BPF_MEM | BPF_DW is supported */ 5443 if (BPF_MODE(insn->code) != BPF_MEM) { 5444 verbose(env, "kptr in map can only be accessed using BPF_MEM instruction mode\n"); 5445 return -EACCES; 5446 } 5447 5448 /* We only allow loading referenced kptr, since it will be marked as 5449 * untrusted, similar to unreferenced kptr. 5450 */ 5451 if (class != BPF_LDX && 5452 (kptr_field->type == BPF_KPTR_REF || kptr_field->type == BPF_KPTR_PERCPU)) { 5453 verbose(env, "store to referenced kptr disallowed\n"); 5454 return -EACCES; 5455 } 5456 5457 if (class == BPF_LDX) { 5458 val_reg = reg_state(env, value_regno); 5459 /* We can simply mark the value_regno receiving the pointer 5460 * value from map as PTR_TO_BTF_ID, with the correct type. 
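		 *
		 * For example, if the map value contains a
		 * 'struct task_struct __kptr *' field, an 8-byte BPF_LDX from
		 * that offset leaves value_regno as PTR_TO_BTF_ID for
		 * struct task_struct, possibly with PTR_MAYBE_NULL and/or
		 * MEM_RCU depending on btf_ld_kptr_type() above.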
5461 */ 5462 mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf, 5463 kptr_field->kptr.btf_id, btf_ld_kptr_type(env, kptr_field)); 5464 /* For mark_ptr_or_null_reg */ 5465 val_reg->id = ++env->id_gen; 5466 } else if (class == BPF_STX) { 5467 val_reg = reg_state(env, value_regno); 5468 if (!register_is_null(val_reg) && 5469 map_kptr_match_type(env, kptr_field, val_reg, value_regno)) 5470 return -EACCES; 5471 } else if (class == BPF_ST) { 5472 if (insn->imm) { 5473 verbose(env, "BPF_ST imm must be 0 when storing to kptr at off=%u\n", 5474 kptr_field->offset); 5475 return -EACCES; 5476 } 5477 } else { 5478 verbose(env, "kptr in map can only be accessed using BPF_LDX/BPF_STX/BPF_ST\n"); 5479 return -EACCES; 5480 } 5481 return 0; 5482 } 5483 5484 /* check read/write into a map element with possible variable offset */ 5485 static int check_map_access(struct bpf_verifier_env *env, u32 regno, 5486 int off, int size, bool zero_size_allowed, 5487 enum bpf_access_src src) 5488 { 5489 struct bpf_verifier_state *vstate = env->cur_state; 5490 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 5491 struct bpf_reg_state *reg = &state->regs[regno]; 5492 struct bpf_map *map = reg->map_ptr; 5493 struct btf_record *rec; 5494 int err, i; 5495 5496 err = check_mem_region_access(env, regno, off, size, map->value_size, 5497 zero_size_allowed); 5498 if (err) 5499 return err; 5500 5501 if (IS_ERR_OR_NULL(map->record)) 5502 return 0; 5503 rec = map->record; 5504 for (i = 0; i < rec->cnt; i++) { 5505 struct btf_field *field = &rec->fields[i]; 5506 u32 p = field->offset; 5507 5508 /* If any part of a field can be touched by load/store, reject 5509 * this program. To check that [x1, x2) overlaps with [y1, y2), 5510 * it is sufficient to check x1 < y2 && y1 < x2. 5511 */ 5512 if (reg->smin_value + off < p + btf_field_type_size(field->type) && 5513 p < reg->umax_value + off + size) { 5514 switch (field->type) { 5515 case BPF_KPTR_UNREF: 5516 case BPF_KPTR_REF: 5517 case BPF_KPTR_PERCPU: 5518 if (src != ACCESS_DIRECT) { 5519 verbose(env, "kptr cannot be accessed indirectly by helper\n"); 5520 return -EACCES; 5521 } 5522 if (!tnum_is_const(reg->var_off)) { 5523 verbose(env, "kptr access cannot have variable offset\n"); 5524 return -EACCES; 5525 } 5526 if (p != off + reg->var_off.value) { 5527 verbose(env, "kptr access misaligned expected=%u off=%llu\n", 5528 p, off + reg->var_off.value); 5529 return -EACCES; 5530 } 5531 if (size != bpf_size_to_bytes(BPF_DW)) { 5532 verbose(env, "kptr access size must be BPF_DW\n"); 5533 return -EACCES; 5534 } 5535 break; 5536 default: 5537 verbose(env, "%s cannot be accessed directly by load/store\n", 5538 btf_field_type_name(field->type)); 5539 return -EACCES; 5540 } 5541 } 5542 } 5543 return 0; 5544 } 5545 5546 #define MAX_PACKET_OFF 0xffff 5547 5548 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, 5549 const struct bpf_call_arg_meta *meta, 5550 enum bpf_access_type t) 5551 { 5552 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 5553 5554 switch (prog_type) { 5555 /* Program types only with direct read access go here! */ 5556 case BPF_PROG_TYPE_LWT_IN: 5557 case BPF_PROG_TYPE_LWT_OUT: 5558 case BPF_PROG_TYPE_LWT_SEG6LOCAL: 5559 case BPF_PROG_TYPE_SK_REUSEPORT: 5560 case BPF_PROG_TYPE_FLOW_DISSECTOR: 5561 case BPF_PROG_TYPE_CGROUP_SKB: 5562 if (t == BPF_WRITE) 5563 return false; 5564 fallthrough; 5565 5566 /* Program types with direct read + write access go here! 
 */
5567	case BPF_PROG_TYPE_SCHED_CLS:
5568	case BPF_PROG_TYPE_SCHED_ACT:
5569	case BPF_PROG_TYPE_XDP:
5570	case BPF_PROG_TYPE_LWT_XMIT:
5571	case BPF_PROG_TYPE_SK_SKB:
5572	case BPF_PROG_TYPE_SK_MSG:
5573		if (meta)
5574			return meta->pkt_access;
5575
5576		env->seen_direct_write = true;
5577		return true;
5578
5579	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
5580		if (t == BPF_WRITE)
5581			env->seen_direct_write = true;
5582
5583		return true;
5584
5585	default:
5586		return false;
5587	}
5588 }
5589
5590 static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
5591			       int size, bool zero_size_allowed)
5592 {
5593	struct bpf_reg_state *regs = cur_regs(env);
5594	struct bpf_reg_state *reg = &regs[regno];
5595	int err;
5596
5597	/* We may have added a variable offset to the packet pointer; but any
5598	 * reg->range we have comes after that. We are only checking the fixed
5599	 * offset.
5600	 */
5601
5602	/* We don't allow negative numbers, because we aren't tracking enough
5603	 * detail to prove they're safe.
5604	 */
5605	if (reg->smin_value < 0) {
5606		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
5607			regno);
5608		return -EACCES;
5609	}
5610
5611	err = reg->range < 0 ? -EINVAL :
5612	      __check_mem_access(env, regno, off, size, reg->range,
5613				 zero_size_allowed);
5614	if (err) {
5615		verbose(env, "R%d offset is outside of the packet\n", regno);
5616		return err;
5617	}
5618
5619	/* __check_mem_access has made sure "off + size - 1" is within u16.
5620	 * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff,
5621	 * otherwise find_good_pkt_pointers would have refused to set range info
5622	 * that __check_mem_access would have rejected this pkt access.
5623	 * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
5624	 */
5625	env->prog->aux->max_pkt_offset =
5626		max_t(u32, env->prog->aux->max_pkt_offset,
5627		      off + reg->umax_value + size - 1);
5628
5629	return err;
5630 }
5631
5632 /* check access to 'struct bpf_context' fields. Supports fixed offsets only */
5633 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
5634			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
5635			    struct btf **btf, u32 *btf_id)
5636 {
5637	struct bpf_insn_access_aux info = {
5638		.reg_type = *reg_type,
5639		.log = &env->log,
5640	};
5641
5642	if (env->ops->is_valid_access &&
5643	    env->ops->is_valid_access(off, size, t, env->prog, &info)) {
5644		/* A non zero info.ctx_field_size indicates that this field is a
5645		 * candidate for later verifier transformation to load the whole
5646		 * field and then apply a mask when accessed with a narrower
5647		 * access than actual ctx access size. A zero info.ctx_field_size
5648		 * will only allow for whole field access and rejects any other
5649		 * type of narrower access.
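		 *
		 * For example, a 1-byte read of a 4-byte context field such as
		 * __sk_buff::mark can be converted into a full 4-byte load of
		 * the underlying field followed by a shift/mask down to the
		 * requested byte, provided the program type's
		 * is_valid_access() callback reports the field size here.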
5650		 */
5651		*reg_type = info.reg_type;
5652
5653		if (base_type(*reg_type) == PTR_TO_BTF_ID) {
5654			*btf = info.btf;
5655			*btf_id = info.btf_id;
5656		} else {
5657			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
5658		}
5659		/* remember the offset of last byte accessed in ctx */
5660		if (env->prog->aux->max_ctx_offset < off + size)
5661			env->prog->aux->max_ctx_offset = off + size;
5662		return 0;
5663	}
5664
5665	verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size);
5666	return -EACCES;
5667 }
5668
5669 static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
5670				  int size)
5671 {
5672	if (size < 0 || off < 0 ||
5673	    (u64)off + size > sizeof(struct bpf_flow_keys)) {
5674		verbose(env, "invalid access to flow keys off=%d size=%d\n",
5675			off, size);
5676		return -EACCES;
5677	}
5678	return 0;
5679 }
5680
5681 static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
5682			     u32 regno, int off, int size,
5683			     enum bpf_access_type t)
5684 {
5685	struct bpf_reg_state *regs = cur_regs(env);
5686	struct bpf_reg_state *reg = &regs[regno];
5687	struct bpf_insn_access_aux info = {};
5688	bool valid;
5689
5690	if (reg->smin_value < 0) {
5691		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
5692			regno);
5693		return -EACCES;
5694	}
5695
5696	switch (reg->type) {
5697	case PTR_TO_SOCK_COMMON:
5698		valid = bpf_sock_common_is_valid_access(off, size, t, &info);
5699		break;
5700	case PTR_TO_SOCKET:
5701		valid = bpf_sock_is_valid_access(off, size, t, &info);
5702		break;
5703	case PTR_TO_TCP_SOCK:
5704		valid = bpf_tcp_sock_is_valid_access(off, size, t, &info);
5705		break;
5706	case PTR_TO_XDP_SOCK:
5707		valid = bpf_xdp_sock_is_valid_access(off, size, t, &info);
5708		break;
5709	default:
5710		valid = false;
5711	}
5712
5713
5714	if (valid) {
5715		env->insn_aux_data[insn_idx].ctx_field_size =
5716			info.ctx_field_size;
5717		return 0;
5718	}
5719
5720	verbose(env, "R%d invalid %s access off=%d size=%d\n",
5721		regno, reg_type_str(env, reg->type), off, size);
5722
5723	return -EACCES;
5724 }
5725
5726 static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
5727 {
5728	return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno));
5729 }
5730
5731 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
5732 {
5733	const struct bpf_reg_state *reg = reg_state(env, regno);
5734
5735	return reg->type == PTR_TO_CTX;
5736 }
5737
5738 static bool is_sk_reg(struct bpf_verifier_env *env, int regno)
5739 {
5740	const struct bpf_reg_state *reg = reg_state(env, regno);
5741
5742	return type_is_sk_pointer(reg->type);
5743 }
5744
5745 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
5746 {
5747	const struct bpf_reg_state *reg = reg_state(env, regno);
5748
5749	return type_is_pkt_pointer(reg->type);
5750 }
5751
5752 static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno)
5753 {
5754	const struct bpf_reg_state *reg = reg_state(env, regno);
5755
5756	/* Separate to is_ctx_reg() since we still want to allow BPF_ST here.
*/ 5757 return reg->type == PTR_TO_FLOW_KEYS; 5758 } 5759 5760 static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = { 5761 #ifdef CONFIG_NET 5762 [PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK], 5763 [PTR_TO_SOCK_COMMON] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 5764 [PTR_TO_TCP_SOCK] = &btf_sock_ids[BTF_SOCK_TYPE_TCP], 5765 #endif 5766 [CONST_PTR_TO_MAP] = btf_bpf_map_id, 5767 }; 5768 5769 static bool is_trusted_reg(const struct bpf_reg_state *reg) 5770 { 5771 /* A referenced register is always trusted. */ 5772 if (reg->ref_obj_id) 5773 return true; 5774 5775 /* Types listed in the reg2btf_ids are always trusted */ 5776 if (reg2btf_ids[base_type(reg->type)]) 5777 return true; 5778 5779 /* If a register is not referenced, it is trusted if it has the 5780 * MEM_ALLOC or PTR_TRUSTED type modifiers, and no others. Some of the 5781 * other type modifiers may be safe, but we elect to take an opt-in 5782 * approach here as some (e.g. PTR_UNTRUSTED and PTR_MAYBE_NULL) are 5783 * not. 5784 * 5785 * Eventually, we should make PTR_TRUSTED the single source of truth 5786 * for whether a register is trusted. 5787 */ 5788 return type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS && 5789 !bpf_type_has_unsafe_modifiers(reg->type); 5790 } 5791 5792 static bool is_rcu_reg(const struct bpf_reg_state *reg) 5793 { 5794 return reg->type & MEM_RCU; 5795 } 5796 5797 static void clear_trusted_flags(enum bpf_type_flag *flag) 5798 { 5799 *flag &= ~(BPF_REG_TRUSTED_MODIFIERS | MEM_RCU); 5800 } 5801 5802 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, 5803 const struct bpf_reg_state *reg, 5804 int off, int size, bool strict) 5805 { 5806 struct tnum reg_off; 5807 int ip_align; 5808 5809 /* Byte size accesses are always allowed. */ 5810 if (!strict || size == 1) 5811 return 0; 5812 5813 /* For platforms that do not have a Kconfig enabling 5814 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of 5815 * NET_IP_ALIGN is universally set to '2'. And on platforms 5816 * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get 5817 * to this code only in strict mode where we want to emulate 5818 * the NET_IP_ALIGN==2 checking. Therefore use an 5819 * unconditional IP align value of '2'. 5820 */ 5821 ip_align = 2; 5822 5823 reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); 5824 if (!tnum_is_aligned(reg_off, size)) { 5825 char tn_buf[48]; 5826 5827 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 5828 verbose(env, 5829 "misaligned packet access off %d+%s+%d+%d size %d\n", 5830 ip_align, tn_buf, reg->off, off, size); 5831 return -EACCES; 5832 } 5833 5834 return 0; 5835 } 5836 5837 static int check_generic_ptr_alignment(struct bpf_verifier_env *env, 5838 const struct bpf_reg_state *reg, 5839 const char *pointer_desc, 5840 int off, int size, bool strict) 5841 { 5842 struct tnum reg_off; 5843 5844 /* Byte size accesses are always allowed. 
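	 *
	 * For example, in strict mode a 4-byte access through a pointer whose
	 * known offset works out to 2 modulo 4 is rejected below, while the
	 * same access with size 1 passes unconditionally.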
*/ 5845 if (!strict || size == 1) 5846 return 0; 5847 5848 reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); 5849 if (!tnum_is_aligned(reg_off, size)) { 5850 char tn_buf[48]; 5851 5852 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 5853 verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", 5854 pointer_desc, tn_buf, reg->off, off, size); 5855 return -EACCES; 5856 } 5857 5858 return 0; 5859 } 5860 5861 static int check_ptr_alignment(struct bpf_verifier_env *env, 5862 const struct bpf_reg_state *reg, int off, 5863 int size, bool strict_alignment_once) 5864 { 5865 bool strict = env->strict_alignment || strict_alignment_once; 5866 const char *pointer_desc = ""; 5867 5868 switch (reg->type) { 5869 case PTR_TO_PACKET: 5870 case PTR_TO_PACKET_META: 5871 /* Special case, because of NET_IP_ALIGN. Given metadata sits 5872 * right in front, treat it the very same way. 5873 */ 5874 return check_pkt_ptr_alignment(env, reg, off, size, strict); 5875 case PTR_TO_FLOW_KEYS: 5876 pointer_desc = "flow keys "; 5877 break; 5878 case PTR_TO_MAP_KEY: 5879 pointer_desc = "key "; 5880 break; 5881 case PTR_TO_MAP_VALUE: 5882 pointer_desc = "value "; 5883 break; 5884 case PTR_TO_CTX: 5885 pointer_desc = "context "; 5886 break; 5887 case PTR_TO_STACK: 5888 pointer_desc = "stack "; 5889 /* The stack spill tracking logic in check_stack_write_fixed_off() 5890 * and check_stack_read_fixed_off() relies on stack accesses being 5891 * aligned. 5892 */ 5893 strict = true; 5894 break; 5895 case PTR_TO_SOCKET: 5896 pointer_desc = "sock "; 5897 break; 5898 case PTR_TO_SOCK_COMMON: 5899 pointer_desc = "sock_common "; 5900 break; 5901 case PTR_TO_TCP_SOCK: 5902 pointer_desc = "tcp_sock "; 5903 break; 5904 case PTR_TO_XDP_SOCK: 5905 pointer_desc = "xdp_sock "; 5906 break; 5907 default: 5908 break; 5909 } 5910 return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, 5911 strict); 5912 } 5913 5914 static int update_stack_depth(struct bpf_verifier_env *env, 5915 const struct bpf_func_state *func, 5916 int off) 5917 { 5918 u16 stack = env->subprog_info[func->subprogno].stack_depth; 5919 5920 if (stack >= -off) 5921 return 0; 5922 5923 /* update known max for given subprogram */ 5924 env->subprog_info[func->subprogno].stack_depth = -off; 5925 return 0; 5926 } 5927 5928 /* starting from main bpf function walk all instructions of the function 5929 * and recursively walk all callees that given function can call. 5930 * Ignore jump and exit insns. 5931 * Since recursion is prevented by check_cfg() this algorithm 5932 * only needs a local stack of MAX_CALL_FRAMES to remember callsites 5933 */ 5934 static int check_max_stack_depth_subprog(struct bpf_verifier_env *env, int idx) 5935 { 5936 struct bpf_subprog_info *subprog = env->subprog_info; 5937 struct bpf_insn *insn = env->prog->insnsi; 5938 int depth = 0, frame = 0, i, subprog_end; 5939 bool tail_call_reachable = false; 5940 int ret_insn[MAX_CALL_FRAMES]; 5941 int ret_prog[MAX_CALL_FRAMES]; 5942 int j; 5943 5944 i = subprog[idx].start; 5945 process_func: 5946 /* protect against potential stack overflow that might happen when 5947 * bpf2bpf calls get combined with tailcalls. Limit the caller's stack 5948 * depth for such case down to 256 so that the worst case scenario 5949 * would result in 8k stack size (32 which is tailcall limit * 256 = 5950 * 8k). 
5951 * 5952 * To get the idea what might happen, see an example: 5953 * func1 -> sub rsp, 128 5954 * subfunc1 -> sub rsp, 256 5955 * tailcall1 -> add rsp, 256 5956 * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) 5957 * subfunc2 -> sub rsp, 64 5958 * subfunc22 -> sub rsp, 128 5959 * tailcall2 -> add rsp, 128 5960 * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) 5961 * 5962 * tailcall will unwind the current stack frame but it will not get rid 5963 * of caller's stack as shown on the example above. 5964 */ 5965 if (idx && subprog[idx].has_tail_call && depth >= 256) { 5966 verbose(env, 5967 "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n", 5968 depth); 5969 return -EACCES; 5970 } 5971 /* round up to 32-bytes, since this is granularity 5972 * of interpreter stack size 5973 */ 5974 depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 5975 if (depth > MAX_BPF_STACK) { 5976 verbose(env, "combined stack size of %d calls is %d. Too large\n", 5977 frame + 1, depth); 5978 return -EACCES; 5979 } 5980 continue_func: 5981 subprog_end = subprog[idx + 1].start; 5982 for (; i < subprog_end; i++) { 5983 int next_insn, sidx; 5984 5985 if (bpf_pseudo_kfunc_call(insn + i) && !insn[i].off) { 5986 bool err = false; 5987 5988 if (!is_bpf_throw_kfunc(insn + i)) 5989 continue; 5990 if (subprog[idx].is_cb) 5991 err = true; 5992 for (int c = 0; c < frame && !err; c++) { 5993 if (subprog[ret_prog[c]].is_cb) { 5994 err = true; 5995 break; 5996 } 5997 } 5998 if (!err) 5999 continue; 6000 verbose(env, 6001 "bpf_throw kfunc (insn %d) cannot be called from callback subprog %d\n", 6002 i, idx); 6003 return -EINVAL; 6004 } 6005 6006 if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i)) 6007 continue; 6008 /* remember insn and function to return to */ 6009 ret_insn[frame] = i + 1; 6010 ret_prog[frame] = idx; 6011 6012 /* find the callee */ 6013 next_insn = i + insn[i].imm + 1; 6014 sidx = find_subprog(env, next_insn); 6015 if (sidx < 0) { 6016 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 6017 next_insn); 6018 return -EFAULT; 6019 } 6020 if (subprog[sidx].is_async_cb) { 6021 if (subprog[sidx].has_tail_call) { 6022 verbose(env, "verifier bug. 
subprog has tail_call and async cb\n"); 6023 return -EFAULT; 6024 } 6025 /* async callbacks don't increase bpf prog stack size unless called directly */ 6026 if (!bpf_pseudo_call(insn + i)) 6027 continue; 6028 if (subprog[sidx].is_exception_cb) { 6029 verbose(env, "insn %d cannot call exception cb directly\n", i); 6030 return -EINVAL; 6031 } 6032 } 6033 i = next_insn; 6034 idx = sidx; 6035 6036 if (subprog[idx].has_tail_call) 6037 tail_call_reachable = true; 6038 6039 frame++; 6040 if (frame >= MAX_CALL_FRAMES) { 6041 verbose(env, "the call stack of %d frames is too deep !\n", 6042 frame); 6043 return -E2BIG; 6044 } 6045 goto process_func; 6046 } 6047 /* if tail call got detected across bpf2bpf calls then mark each of the 6048 * currently present subprog frames as tail call reachable subprogs; 6049 * this info will be utilized by JIT so that we will be preserving the 6050 * tail call counter throughout bpf2bpf calls combined with tailcalls 6051 */ 6052 if (tail_call_reachable) 6053 for (j = 0; j < frame; j++) { 6054 if (subprog[ret_prog[j]].is_exception_cb) { 6055 verbose(env, "cannot tail call within exception cb\n"); 6056 return -EINVAL; 6057 } 6058 subprog[ret_prog[j]].tail_call_reachable = true; 6059 } 6060 if (subprog[0].tail_call_reachable) 6061 env->prog->aux->tail_call_reachable = true; 6062 6063 /* end of for() loop means the last insn of the 'subprog' 6064 * was reached. Doesn't matter whether it was JA or EXIT 6065 */ 6066 if (frame == 0) 6067 return 0; 6068 depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); 6069 frame--; 6070 i = ret_insn[frame]; 6071 idx = ret_prog[frame]; 6072 goto continue_func; 6073 } 6074 6075 static int check_max_stack_depth(struct bpf_verifier_env *env) 6076 { 6077 struct bpf_subprog_info *si = env->subprog_info; 6078 int ret; 6079 6080 for (int i = 0; i < env->subprog_cnt; i++) { 6081 if (!i || si[i].is_async_cb) { 6082 ret = check_max_stack_depth_subprog(env, i); 6083 if (ret < 0) 6084 return ret; 6085 } 6086 continue; 6087 } 6088 return 0; 6089 } 6090 6091 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 6092 static int get_callee_stack_depth(struct bpf_verifier_env *env, 6093 const struct bpf_insn *insn, int idx) 6094 { 6095 int start = idx + insn->imm + 1, subprog; 6096 6097 subprog = find_subprog(env, start); 6098 if (subprog < 0) { 6099 WARN_ONCE(1, "verifier bug. 
No program starts at insn %d\n",
6100			  start);
6101		return -EFAULT;
6102	}
6103	return env->subprog_info[subprog].stack_depth;
6104 }
6105 #endif
6106
6107 static int __check_buffer_access(struct bpf_verifier_env *env,
6108				 const char *buf_info,
6109				 const struct bpf_reg_state *reg,
6110				 int regno, int off, int size)
6111 {
6112	if (off < 0) {
6113		verbose(env,
6114			"R%d invalid %s buffer access: off=%d, size=%d\n",
6115			regno, buf_info, off, size);
6116		return -EACCES;
6117	}
6118	if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
6119		char tn_buf[48];
6120
6121		tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
6122		verbose(env,
6123			"R%d invalid variable buffer offset: off=%d, var_off=%s\n",
6124			regno, off, tn_buf);
6125		return -EACCES;
6126	}
6127
6128	return 0;
6129 }
6130
6131 static int check_tp_buffer_access(struct bpf_verifier_env *env,
6132				  const struct bpf_reg_state *reg,
6133				  int regno, int off, int size)
6134 {
6135	int err;
6136
6137	err = __check_buffer_access(env, "tracepoint", reg, regno, off, size);
6138	if (err)
6139		return err;
6140
6141	if (off + size > env->prog->aux->max_tp_access)
6142		env->prog->aux->max_tp_access = off + size;
6143
6144	return 0;
6145 }
6146
6147 static int check_buffer_access(struct bpf_verifier_env *env,
6148			       const struct bpf_reg_state *reg,
6149			       int regno, int off, int size,
6150			       bool zero_size_allowed,
6151			       u32 *max_access)
6152 {
6153	const char *buf_info = type_is_rdonly_mem(reg->type) ? "rdonly" : "rdwr";
6154	int err;
6155
6156	err = __check_buffer_access(env, buf_info, reg, regno, off, size);
6157	if (err)
6158		return err;
6159
6160	if (off + size > *max_access)
6161		*max_access = off + size;
6162
6163	return 0;
6164 }
6165
6166 /* BPF architecture zero extends alu32 ops into 64-bit registers */
6167 static void zext_32_to_64(struct bpf_reg_state *reg)
6168 {
6169	reg->var_off = tnum_subreg(reg->var_off);
6170	__reg_assign_32_into_64(reg);
6171 }
6172
6173 /* truncate register to smaller size (in bytes)
6174  * must be called with size < BPF_REG_SIZE
6175  */
6176 static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
6177 {
6178	u64 mask;
6179
6180	/* clear high bits in bit representation */
6181	reg->var_off = tnum_cast(reg->var_off, size);
6182
6183	/* fix arithmetic bounds */
6184	mask = ((u64)1 << (size * 8)) - 1;
6185	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
6186		reg->umin_value &= mask;
6187		reg->umax_value &= mask;
6188	} else {
6189		reg->umin_value = 0;
6190		reg->umax_value = mask;
6191	}
6192	reg->smin_value = reg->umin_value;
6193	reg->smax_value = reg->umax_value;
6194
6195	/* If size is smaller than 32bit register the 32bit register
6196	 * values are also truncated so we push 64-bit bounds into
6197	 * 32-bit bounds. Above were truncated < 32-bits already.
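	 *
	 * For example (illustrative values), truncating to size 2: a register
	 * bounded by [umin=0x10002, umax=0x1ffff] shares the same upper bits,
	 * so the umin/umax adjustment above yields [2, 0xffff]; with
	 * [umin=0xfffe, umax=0x10001] the upper bits differ and the bounds
	 * collapse to [0, 0xffff].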
6198 */ 6199 if (size >= 4) 6200 return; 6201 __reg_combine_64_into_32(reg); 6202 } 6203 6204 static void set_sext64_default_val(struct bpf_reg_state *reg, int size) 6205 { 6206 if (size == 1) { 6207 reg->smin_value = reg->s32_min_value = S8_MIN; 6208 reg->smax_value = reg->s32_max_value = S8_MAX; 6209 } else if (size == 2) { 6210 reg->smin_value = reg->s32_min_value = S16_MIN; 6211 reg->smax_value = reg->s32_max_value = S16_MAX; 6212 } else { 6213 /* size == 4 */ 6214 reg->smin_value = reg->s32_min_value = S32_MIN; 6215 reg->smax_value = reg->s32_max_value = S32_MAX; 6216 } 6217 reg->umin_value = reg->u32_min_value = 0; 6218 reg->umax_value = U64_MAX; 6219 reg->u32_max_value = U32_MAX; 6220 reg->var_off = tnum_unknown; 6221 } 6222 6223 static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size) 6224 { 6225 s64 init_s64_max, init_s64_min, s64_max, s64_min, u64_cval; 6226 u64 top_smax_value, top_smin_value; 6227 u64 num_bits = size * 8; 6228 6229 if (tnum_is_const(reg->var_off)) { 6230 u64_cval = reg->var_off.value; 6231 if (size == 1) 6232 reg->var_off = tnum_const((s8)u64_cval); 6233 else if (size == 2) 6234 reg->var_off = tnum_const((s16)u64_cval); 6235 else 6236 /* size == 4 */ 6237 reg->var_off = tnum_const((s32)u64_cval); 6238 6239 u64_cval = reg->var_off.value; 6240 reg->smax_value = reg->smin_value = u64_cval; 6241 reg->umax_value = reg->umin_value = u64_cval; 6242 reg->s32_max_value = reg->s32_min_value = u64_cval; 6243 reg->u32_max_value = reg->u32_min_value = u64_cval; 6244 return; 6245 } 6246 6247 top_smax_value = ((u64)reg->smax_value >> num_bits) << num_bits; 6248 top_smin_value = ((u64)reg->smin_value >> num_bits) << num_bits; 6249 6250 if (top_smax_value != top_smin_value) 6251 goto out; 6252 6253 /* find the s64_min and s64_min after sign extension */ 6254 if (size == 1) { 6255 init_s64_max = (s8)reg->smax_value; 6256 init_s64_min = (s8)reg->smin_value; 6257 } else if (size == 2) { 6258 init_s64_max = (s16)reg->smax_value; 6259 init_s64_min = (s16)reg->smin_value; 6260 } else { 6261 init_s64_max = (s32)reg->smax_value; 6262 init_s64_min = (s32)reg->smin_value; 6263 } 6264 6265 s64_max = max(init_s64_max, init_s64_min); 6266 s64_min = min(init_s64_max, init_s64_min); 6267 6268 /* both of s64_max/s64_min positive or negative */ 6269 if ((s64_max >= 0) == (s64_min >= 0)) { 6270 reg->smin_value = reg->s32_min_value = s64_min; 6271 reg->smax_value = reg->s32_max_value = s64_max; 6272 reg->umin_value = reg->u32_min_value = s64_min; 6273 reg->umax_value = reg->u32_max_value = s64_max; 6274 reg->var_off = tnum_range(s64_min, s64_max); 6275 return; 6276 } 6277 6278 out: 6279 set_sext64_default_val(reg, size); 6280 } 6281 6282 static void set_sext32_default_val(struct bpf_reg_state *reg, int size) 6283 { 6284 if (size == 1) { 6285 reg->s32_min_value = S8_MIN; 6286 reg->s32_max_value = S8_MAX; 6287 } else { 6288 /* size == 2 */ 6289 reg->s32_min_value = S16_MIN; 6290 reg->s32_max_value = S16_MAX; 6291 } 6292 reg->u32_min_value = 0; 6293 reg->u32_max_value = U32_MAX; 6294 } 6295 6296 static void coerce_subreg_to_size_sx(struct bpf_reg_state *reg, int size) 6297 { 6298 s32 init_s32_max, init_s32_min, s32_max, s32_min, u32_val; 6299 u32 top_smax_value, top_smin_value; 6300 u32 num_bits = size * 8; 6301 6302 if (tnum_is_const(reg->var_off)) { 6303 u32_val = reg->var_off.value; 6304 if (size == 1) 6305 reg->var_off = tnum_const((s8)u32_val); 6306 else 6307 reg->var_off = tnum_const((s16)u32_val); 6308 6309 u32_val = reg->var_off.value; 6310 reg->s32_min_value = 
reg->s32_max_value = u32_val; 6311 reg->u32_min_value = reg->u32_max_value = u32_val; 6312 return; 6313 } 6314 6315 top_smax_value = ((u32)reg->s32_max_value >> num_bits) << num_bits; 6316 top_smin_value = ((u32)reg->s32_min_value >> num_bits) << num_bits; 6317 6318 if (top_smax_value != top_smin_value) 6319 goto out; 6320 6321 /* find the s32_min and s32_min after sign extension */ 6322 if (size == 1) { 6323 init_s32_max = (s8)reg->s32_max_value; 6324 init_s32_min = (s8)reg->s32_min_value; 6325 } else { 6326 /* size == 2 */ 6327 init_s32_max = (s16)reg->s32_max_value; 6328 init_s32_min = (s16)reg->s32_min_value; 6329 } 6330 s32_max = max(init_s32_max, init_s32_min); 6331 s32_min = min(init_s32_max, init_s32_min); 6332 6333 if ((s32_min >= 0) == (s32_max >= 0)) { 6334 reg->s32_min_value = s32_min; 6335 reg->s32_max_value = s32_max; 6336 reg->u32_min_value = (u32)s32_min; 6337 reg->u32_max_value = (u32)s32_max; 6338 return; 6339 } 6340 6341 out: 6342 set_sext32_default_val(reg, size); 6343 } 6344 6345 static bool bpf_map_is_rdonly(const struct bpf_map *map) 6346 { 6347 /* A map is considered read-only if the following condition are true: 6348 * 6349 * 1) BPF program side cannot change any of the map content. The 6350 * BPF_F_RDONLY_PROG flag is throughout the lifetime of a map 6351 * and was set at map creation time. 6352 * 2) The map value(s) have been initialized from user space by a 6353 * loader and then "frozen", such that no new map update/delete 6354 * operations from syscall side are possible for the rest of 6355 * the map's lifetime from that point onwards. 6356 * 3) Any parallel/pending map update/delete operations from syscall 6357 * side have been completed. Only after that point, it's safe to 6358 * assume that map value(s) are immutable. 6359 */ 6360 return (map->map_flags & BPF_F_RDONLY_PROG) && 6361 READ_ONCE(map->frozen) && 6362 !bpf_map_write_active(map); 6363 } 6364 6365 static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val, 6366 bool is_ldsx) 6367 { 6368 void *ptr; 6369 u64 addr; 6370 int err; 6371 6372 err = map->ops->map_direct_value_addr(map, &addr, off); 6373 if (err) 6374 return err; 6375 ptr = (void *)(long)addr + off; 6376 6377 switch (size) { 6378 case sizeof(u8): 6379 *val = is_ldsx ? (s64)*(s8 *)ptr : (u64)*(u8 *)ptr; 6380 break; 6381 case sizeof(u16): 6382 *val = is_ldsx ? (s64)*(s16 *)ptr : (u64)*(u16 *)ptr; 6383 break; 6384 case sizeof(u32): 6385 *val = is_ldsx ? (s64)*(s32 *)ptr : (u64)*(u32 *)ptr; 6386 break; 6387 case sizeof(u64): 6388 *val = *(u64 *)ptr; 6389 break; 6390 default: 6391 return -EINVAL; 6392 } 6393 return 0; 6394 } 6395 6396 #define BTF_TYPE_SAFE_RCU(__type) __PASTE(__type, __safe_rcu) 6397 #define BTF_TYPE_SAFE_RCU_OR_NULL(__type) __PASTE(__type, __safe_rcu_or_null) 6398 #define BTF_TYPE_SAFE_TRUSTED(__type) __PASTE(__type, __safe_trusted) 6399 6400 /* 6401 * Allow list few fields as RCU trusted or full trusted. 6402 * This logic doesn't allow mix tagging and will be removed once GCC supports 6403 * btf_type_tag. 
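 *
 * For example, task->real_parent is listed in
 * BTF_TYPE_SAFE_RCU(struct task_struct) below, so reading it inside an RCU
 * critical section yields a MEM_RCU pointer, whereas mm->exe_file is listed
 * as __safe_rcu_or_null and is additionally marked PTR_MAYBE_NULL.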
6404 */ 6405 6406 /* RCU trusted: these fields are trusted in RCU CS and never NULL */ 6407 BTF_TYPE_SAFE_RCU(struct task_struct) { 6408 const cpumask_t *cpus_ptr; 6409 struct css_set __rcu *cgroups; 6410 struct task_struct __rcu *real_parent; 6411 struct task_struct *group_leader; 6412 }; 6413 6414 BTF_TYPE_SAFE_RCU(struct cgroup) { 6415 /* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */ 6416 struct kernfs_node *kn; 6417 }; 6418 6419 BTF_TYPE_SAFE_RCU(struct css_set) { 6420 struct cgroup *dfl_cgrp; 6421 }; 6422 6423 /* RCU trusted: these fields are trusted in RCU CS and can be NULL */ 6424 BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct) { 6425 struct file __rcu *exe_file; 6426 }; 6427 6428 /* skb->sk, req->sk are not RCU protected, but we mark them as such 6429 * because bpf prog accessible sockets are SOCK_RCU_FREE. 6430 */ 6431 BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff) { 6432 struct sock *sk; 6433 }; 6434 6435 BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock) { 6436 struct sock *sk; 6437 }; 6438 6439 /* full trusted: these fields are trusted even outside of RCU CS and never NULL */ 6440 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) { 6441 struct seq_file *seq; 6442 }; 6443 6444 BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) { 6445 struct bpf_iter_meta *meta; 6446 struct task_struct *task; 6447 }; 6448 6449 BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) { 6450 struct file *file; 6451 }; 6452 6453 BTF_TYPE_SAFE_TRUSTED(struct file) { 6454 struct inode *f_inode; 6455 }; 6456 6457 BTF_TYPE_SAFE_TRUSTED(struct dentry) { 6458 /* no negative dentry-s in places where bpf can see it */ 6459 struct inode *d_inode; 6460 }; 6461 6462 BTF_TYPE_SAFE_TRUSTED(struct socket) { 6463 struct sock *sk; 6464 }; 6465 6466 static bool type_is_rcu(struct bpf_verifier_env *env, 6467 struct bpf_reg_state *reg, 6468 const char *field_name, u32 btf_id) 6469 { 6470 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct task_struct)); 6471 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct cgroup)); 6472 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct css_set)); 6473 6474 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu"); 6475 } 6476 6477 static bool type_is_rcu_or_null(struct bpf_verifier_env *env, 6478 struct bpf_reg_state *reg, 6479 const char *field_name, u32 btf_id) 6480 { 6481 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct)); 6482 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff)); 6483 BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock)); 6484 6485 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null"); 6486 } 6487 6488 static bool type_is_trusted(struct bpf_verifier_env *env, 6489 struct bpf_reg_state *reg, 6490 const char *field_name, u32 btf_id) 6491 { 6492 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta)); 6493 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task)); 6494 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm)); 6495 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file)); 6496 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry)); 6497 BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct socket)); 6498 6499 return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted"); 6500 } 6501 6502 static int check_ptr_to_btf_access(struct bpf_verifier_env *env, 6503 struct bpf_reg_state *regs, 6504 int regno, int off, int size, 6505 enum bpf_access_type atype, 6506 int value_regno) 6507 { 6508 struct bpf_reg_state *reg = regs + regno; 6509 const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id); 6510 
const char *tname = btf_name_by_offset(reg->btf, t->name_off); 6511 const char *field_name = NULL; 6512 enum bpf_type_flag flag = 0; 6513 u32 btf_id = 0; 6514 int ret; 6515 6516 if (!env->allow_ptr_leaks) { 6517 verbose(env, 6518 "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", 6519 tname); 6520 return -EPERM; 6521 } 6522 if (!env->prog->gpl_compatible && btf_is_kernel(reg->btf)) { 6523 verbose(env, 6524 "Cannot access kernel 'struct %s' from non-GPL compatible program\n", 6525 tname); 6526 return -EINVAL; 6527 } 6528 if (off < 0) { 6529 verbose(env, 6530 "R%d is ptr_%s invalid negative access: off=%d\n", 6531 regno, tname, off); 6532 return -EACCES; 6533 } 6534 if (!tnum_is_const(reg->var_off) || reg->var_off.value) { 6535 char tn_buf[48]; 6536 6537 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 6538 verbose(env, 6539 "R%d is ptr_%s invalid variable offset: off=%d, var_off=%s\n", 6540 regno, tname, off, tn_buf); 6541 return -EACCES; 6542 } 6543 6544 if (reg->type & MEM_USER) { 6545 verbose(env, 6546 "R%d is ptr_%s access user memory: off=%d\n", 6547 regno, tname, off); 6548 return -EACCES; 6549 } 6550 6551 if (reg->type & MEM_PERCPU) { 6552 verbose(env, 6553 "R%d is ptr_%s access percpu memory: off=%d\n", 6554 regno, tname, off); 6555 return -EACCES; 6556 } 6557 6558 if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) { 6559 if (!btf_is_kernel(reg->btf)) { 6560 verbose(env, "verifier internal error: reg->btf must be kernel btf\n"); 6561 return -EFAULT; 6562 } 6563 ret = env->ops->btf_struct_access(&env->log, reg, off, size); 6564 } else { 6565 /* Writes are permitted with default btf_struct_access for 6566 * program allocated objects (which always have ref_obj_id > 0), 6567 * but not for untrusted PTR_TO_BTF_ID | MEM_ALLOC. 6568 */ 6569 if (atype != BPF_READ && !type_is_ptr_alloc_obj(reg->type)) { 6570 verbose(env, "only read is supported\n"); 6571 return -EACCES; 6572 } 6573 6574 if (type_is_alloc(reg->type) && !type_is_non_owning_ref(reg->type) && 6575 !(reg->type & MEM_RCU) && !reg->ref_obj_id) { 6576 verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n"); 6577 return -EFAULT; 6578 } 6579 6580 ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name); 6581 } 6582 6583 if (ret < 0) 6584 return ret; 6585 6586 if (ret != PTR_TO_BTF_ID) { 6587 /* just mark; */ 6588 6589 } else if (type_flag(reg->type) & PTR_UNTRUSTED) { 6590 /* If this is an untrusted pointer, all pointers formed by walking it 6591 * also inherit the untrusted flag. 6592 */ 6593 flag = PTR_UNTRUSTED; 6594 6595 } else if (is_trusted_reg(reg) || is_rcu_reg(reg)) { 6596 /* By default any pointer obtained from walking a trusted pointer is no 6597 * longer trusted, unless the field being accessed has explicitly been 6598 * marked as inheriting its parent's state of trust (either full or RCU). 6599 * For example: 6600 * 'cgroups' pointer is untrusted if task->cgroups dereference 6601 * happened in a sleepable program outside of bpf_rcu_read_lock() 6602 * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU). 6603 * Note bpf_rcu_read_unlock() converts MEM_RCU pointers to PTR_UNTRUSTED. 6604 * 6605 * A regular RCU-protected pointer with __rcu tag can also be deemed 6606 * trusted if we are in an RCU CS. Such pointer can be NULL. 
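		 *
		 * As another example, bpf_iter_meta::seq is listed as
		 * __safe_trusted above, so a pointer obtained by walking it
		 * keeps PTR_TRUSTED even outside of an RCU critical section.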
6607 */ 6608 if (type_is_trusted(env, reg, field_name, btf_id)) { 6609 flag |= PTR_TRUSTED; 6610 } else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) { 6611 if (type_is_rcu(env, reg, field_name, btf_id)) { 6612 /* ignore __rcu tag and mark it MEM_RCU */ 6613 flag |= MEM_RCU; 6614 } else if (flag & MEM_RCU || 6615 type_is_rcu_or_null(env, reg, field_name, btf_id)) { 6616 /* __rcu tagged pointers can be NULL */ 6617 flag |= MEM_RCU | PTR_MAYBE_NULL; 6618 6619 /* We always trust them */ 6620 if (type_is_rcu_or_null(env, reg, field_name, btf_id) && 6621 flag & PTR_UNTRUSTED) 6622 flag &= ~PTR_UNTRUSTED; 6623 } else if (flag & (MEM_PERCPU | MEM_USER)) { 6624 /* keep as-is */ 6625 } else { 6626 /* walking unknown pointers yields old deprecated PTR_TO_BTF_ID */ 6627 clear_trusted_flags(&flag); 6628 } 6629 } else { 6630 /* 6631 * If not in RCU CS or MEM_RCU pointer can be NULL then 6632 * aggressively mark as untrusted otherwise such 6633 * pointers will be plain PTR_TO_BTF_ID without flags 6634 * and will be allowed to be passed into helpers for 6635 * compat reasons. 6636 */ 6637 flag = PTR_UNTRUSTED; 6638 } 6639 } else { 6640 /* Old compat. Deprecated */ 6641 clear_trusted_flags(&flag); 6642 } 6643 6644 if (atype == BPF_READ && value_regno >= 0) 6645 mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); 6646 6647 return 0; 6648 } 6649 6650 static int check_ptr_to_map_access(struct bpf_verifier_env *env, 6651 struct bpf_reg_state *regs, 6652 int regno, int off, int size, 6653 enum bpf_access_type atype, 6654 int value_regno) 6655 { 6656 struct bpf_reg_state *reg = regs + regno; 6657 struct bpf_map *map = reg->map_ptr; 6658 struct bpf_reg_state map_reg; 6659 enum bpf_type_flag flag = 0; 6660 const struct btf_type *t; 6661 const char *tname; 6662 u32 btf_id; 6663 int ret; 6664 6665 if (!btf_vmlinux) { 6666 verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); 6667 return -ENOTSUPP; 6668 } 6669 6670 if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { 6671 verbose(env, "map_ptr access not supported for map type %d\n", 6672 map->map_type); 6673 return -ENOTSUPP; 6674 } 6675 6676 t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); 6677 tname = btf_name_by_offset(btf_vmlinux, t->name_off); 6678 6679 if (!env->allow_ptr_leaks) { 6680 verbose(env, 6681 "'struct %s' access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", 6682 tname); 6683 return -EPERM; 6684 } 6685 6686 if (off < 0) { 6687 verbose(env, "R%d is %s invalid negative access: off=%d\n", 6688 regno, tname, off); 6689 return -EACCES; 6690 } 6691 6692 if (atype != BPF_READ) { 6693 verbose(env, "only read from %s is supported\n", tname); 6694 return -EACCES; 6695 } 6696 6697 /* Simulate access to a PTR_TO_BTF_ID */ 6698 memset(&map_reg, 0, sizeof(map_reg)); 6699 mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0); 6700 ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL); 6701 if (ret < 0) 6702 return ret; 6703 6704 if (value_regno >= 0) 6705 mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag); 6706 6707 return 0; 6708 } 6709 6710 /* Check that the stack access at the given offset is within bounds. The 6711 * maximum valid offset is -1. 6712 * 6713 * The minimum valid offset is -MAX_BPF_STACK for writes, and 6714 * -state->allocated_stack for reads. 
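 *
 * For example (assuming the usual MAX_BPF_STACK of 512): a write at
 * off=-520 is rejected outright, while a read at off=-16 is only accepted
 * once at least 16 bytes of this frame's stack have actually been allocated.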
6715 */ 6716 static int check_stack_slot_within_bounds(int off, 6717 struct bpf_func_state *state, 6718 enum bpf_access_type t) 6719 { 6720 int min_valid_off; 6721 6722 if (t == BPF_WRITE) 6723 min_valid_off = -MAX_BPF_STACK; 6724 else 6725 min_valid_off = -state->allocated_stack; 6726 6727 if (off < min_valid_off || off > -1) 6728 return -EACCES; 6729 return 0; 6730 } 6731 6732 /* Check that the stack access at 'regno + off' falls within the maximum stack 6733 * bounds. 6734 * 6735 * 'off' includes `regno->offset`, but not its dynamic part (if any). 6736 */ 6737 static int check_stack_access_within_bounds( 6738 struct bpf_verifier_env *env, 6739 int regno, int off, int access_size, 6740 enum bpf_access_src src, enum bpf_access_type type) 6741 { 6742 struct bpf_reg_state *regs = cur_regs(env); 6743 struct bpf_reg_state *reg = regs + regno; 6744 struct bpf_func_state *state = func(env, reg); 6745 int min_off, max_off; 6746 int err; 6747 char *err_extra; 6748 6749 if (src == ACCESS_HELPER) 6750 /* We don't know if helpers are reading or writing (or both). */ 6751 err_extra = " indirect access to"; 6752 else if (type == BPF_READ) 6753 err_extra = " read from"; 6754 else 6755 err_extra = " write to"; 6756 6757 if (tnum_is_const(reg->var_off)) { 6758 min_off = reg->var_off.value + off; 6759 if (access_size > 0) 6760 max_off = min_off + access_size - 1; 6761 else 6762 max_off = min_off; 6763 } else { 6764 if (reg->smax_value >= BPF_MAX_VAR_OFF || 6765 reg->smin_value <= -BPF_MAX_VAR_OFF) { 6766 verbose(env, "invalid unbounded variable-offset%s stack R%d\n", 6767 err_extra, regno); 6768 return -EACCES; 6769 } 6770 min_off = reg->smin_value + off; 6771 if (access_size > 0) 6772 max_off = reg->smax_value + off + access_size - 1; 6773 else 6774 max_off = min_off; 6775 } 6776 6777 err = check_stack_slot_within_bounds(min_off, state, type); 6778 if (!err) 6779 err = check_stack_slot_within_bounds(max_off, state, type); 6780 6781 if (err) { 6782 if (tnum_is_const(reg->var_off)) { 6783 verbose(env, "invalid%s stack R%d off=%d size=%d\n", 6784 err_extra, regno, off, access_size); 6785 } else { 6786 char tn_buf[48]; 6787 6788 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 6789 verbose(env, "invalid variable-offset%s stack R%d var_off=%s size=%d\n", 6790 err_extra, regno, tn_buf, access_size); 6791 } 6792 } 6793 return err; 6794 } 6795 6796 /* check whether memory at (regno + off) is accessible for t = (read | write) 6797 * if t==write, value_regno is a register which value is stored into memory 6798 * if t==read, value_regno is a register which will receive the value from memory 6799 * if t==write && value_regno==-1, some unknown value is stored into memory 6800 * if t==read && value_regno==-1, don't care what we read from memory 6801 */ 6802 static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, 6803 int off, int bpf_size, enum bpf_access_type t, 6804 int value_regno, bool strict_alignment_once, bool is_ldsx) 6805 { 6806 struct bpf_reg_state *regs = cur_regs(env); 6807 struct bpf_reg_state *reg = regs + regno; 6808 struct bpf_func_state *state; 6809 int size, err = 0; 6810 6811 size = bpf_size_to_bytes(bpf_size); 6812 if (size < 0) 6813 return size; 6814 6815 /* alignment checks will add in reg->off themselves */ 6816 err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); 6817 if (err) 6818 return err; 6819 6820 /* for access checks, reg->off is just part of off */ 6821 off += reg->off; 6822 6823 if (reg->type == PTR_TO_MAP_KEY) { 6824 if (t == BPF_WRITE) { 
6825			verbose(env, "write to change key R%d not allowed\n", regno);
6826			return -EACCES;
6827		}
6828
6829		err = check_mem_region_access(env, regno, off, size,
6830					      reg->map_ptr->key_size, false);
6831		if (err)
6832			return err;
6833		if (value_regno >= 0)
6834			mark_reg_unknown(env, regs, value_regno);
6835	} else if (reg->type == PTR_TO_MAP_VALUE) {
6836		struct btf_field *kptr_field = NULL;
6837
6838		if (t == BPF_WRITE && value_regno >= 0 &&
6839		    is_pointer_value(env, value_regno)) {
6840			verbose(env, "R%d leaks addr into map\n", value_regno);
6841			return -EACCES;
6842		}
6843		err = check_map_access_type(env, regno, off, size, t);
6844		if (err)
6845			return err;
6846		err = check_map_access(env, regno, off, size, false, ACCESS_DIRECT);
6847		if (err)
6848			return err;
6849		if (tnum_is_const(reg->var_off))
6850			kptr_field = btf_record_find(reg->map_ptr->record,
6851						     off + reg->var_off.value, BPF_KPTR);
6852		if (kptr_field) {
6853			err = check_map_kptr_access(env, regno, value_regno, insn_idx, kptr_field);
6854		} else if (t == BPF_READ && value_regno >= 0) {
6855			struct bpf_map *map = reg->map_ptr;
6856
6857			/* if map is read-only, track its contents as scalars */
6858			if (tnum_is_const(reg->var_off) &&
6859			    bpf_map_is_rdonly(map) &&
6860			    map->ops->map_direct_value_addr) {
6861				int map_off = off + reg->var_off.value;
6862				u64 val = 0;
6863
6864				err = bpf_map_direct_read(map, map_off, size,
6865							  &val, is_ldsx);
6866				if (err)
6867					return err;
6868
6869				regs[value_regno].type = SCALAR_VALUE;
6870				__mark_reg_known(&regs[value_regno], val);
6871			} else {
6872				mark_reg_unknown(env, regs, value_regno);
6873			}
6874		}
6875	} else if (base_type(reg->type) == PTR_TO_MEM) {
6876		bool rdonly_mem = type_is_rdonly_mem(reg->type);
6877
6878		if (type_may_be_null(reg->type)) {
6879			verbose(env, "R%d invalid mem access '%s'\n", regno,
6880				reg_type_str(env, reg->type));
6881			return -EACCES;
6882		}
6883
6884		if (t == BPF_WRITE && rdonly_mem) {
6885			verbose(env, "R%d cannot write into %s\n",
6886				regno, reg_type_str(env, reg->type));
6887			return -EACCES;
6888		}
6889
6890		if (t == BPF_WRITE && value_regno >= 0 &&
6891		    is_pointer_value(env, value_regno)) {
6892			verbose(env, "R%d leaks addr into mem\n", value_regno);
6893			return -EACCES;
6894		}
6895
6896		err = check_mem_region_access(env, regno, off, size,
6897					      reg->mem_size, false);
6898		if (!err && value_regno >= 0 && (t == BPF_READ || rdonly_mem))
6899			mark_reg_unknown(env, regs, value_regno);
6900	} else if (reg->type == PTR_TO_CTX) {
6901		enum bpf_reg_type reg_type = SCALAR_VALUE;
6902		struct btf *btf = NULL;
6903		u32 btf_id = 0;
6904
6905		if (t == BPF_WRITE && value_regno >= 0 &&
6906		    is_pointer_value(env, value_regno)) {
6907			verbose(env, "R%d leaks addr into ctx\n", value_regno);
6908			return -EACCES;
6909		}
6910
6911		err = check_ptr_off_reg(env, reg, regno);
6912		if (err < 0)
6913			return err;
6914
6915		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
6916				       &btf_id);
6917		if (err)
6918			verbose_linfo(env, insn_idx, "; ");
6919		if (!err && t == BPF_READ && value_regno >= 0) {
6920			/* ctx access returns either a scalar, or a
6921			 * PTR_TO_PACKET[_META,_END]. In the latter
6922			 * case, we know the offset is zero.
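			 *
			 * For example, reading skb->data from a __sk_buff
			 * context yields PTR_TO_PACKET with offset 0, while
			 * reading skb->len yields a plain SCALAR_VALUE.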
6923 */ 6924 if (reg_type == SCALAR_VALUE) { 6925 mark_reg_unknown(env, regs, value_regno); 6926 } else { 6927 mark_reg_known_zero(env, regs, 6928 value_regno); 6929 if (type_may_be_null(reg_type)) 6930 regs[value_regno].id = ++env->id_gen; 6931 /* A load of ctx field could have different 6932 * actual load size with the one encoded in the 6933 * insn. When the dst is PTR, it is for sure not 6934 * a sub-register. 6935 */ 6936 regs[value_regno].subreg_def = DEF_NOT_SUBREG; 6937 if (base_type(reg_type) == PTR_TO_BTF_ID) { 6938 regs[value_regno].btf = btf; 6939 regs[value_regno].btf_id = btf_id; 6940 } 6941 } 6942 regs[value_regno].type = reg_type; 6943 } 6944 6945 } else if (reg->type == PTR_TO_STACK) { 6946 /* Basic bounds checks. */ 6947 err = check_stack_access_within_bounds(env, regno, off, size, ACCESS_DIRECT, t); 6948 if (err) 6949 return err; 6950 6951 state = func(env, reg); 6952 err = update_stack_depth(env, state, off); 6953 if (err) 6954 return err; 6955 6956 if (t == BPF_READ) 6957 err = check_stack_read(env, regno, off, size, 6958 value_regno); 6959 else 6960 err = check_stack_write(env, regno, off, size, 6961 value_regno, insn_idx); 6962 } else if (reg_is_pkt_pointer(reg)) { 6963 if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { 6964 verbose(env, "cannot write into packet\n"); 6965 return -EACCES; 6966 } 6967 if (t == BPF_WRITE && value_regno >= 0 && 6968 is_pointer_value(env, value_regno)) { 6969 verbose(env, "R%d leaks addr into packet\n", 6970 value_regno); 6971 return -EACCES; 6972 } 6973 err = check_packet_access(env, regno, off, size, false); 6974 if (!err && t == BPF_READ && value_regno >= 0) 6975 mark_reg_unknown(env, regs, value_regno); 6976 } else if (reg->type == PTR_TO_FLOW_KEYS) { 6977 if (t == BPF_WRITE && value_regno >= 0 && 6978 is_pointer_value(env, value_regno)) { 6979 verbose(env, "R%d leaks addr into flow keys\n", 6980 value_regno); 6981 return -EACCES; 6982 } 6983 6984 err = check_flow_keys_access(env, off, size); 6985 if (!err && t == BPF_READ && value_regno >= 0) 6986 mark_reg_unknown(env, regs, value_regno); 6987 } else if (type_is_sk_pointer(reg->type)) { 6988 if (t == BPF_WRITE) { 6989 verbose(env, "R%d cannot write into %s\n", 6990 regno, reg_type_str(env, reg->type)); 6991 return -EACCES; 6992 } 6993 err = check_sock_access(env, insn_idx, regno, off, size, t); 6994 if (!err && value_regno >= 0) 6995 mark_reg_unknown(env, regs, value_regno); 6996 } else if (reg->type == PTR_TO_TP_BUFFER) { 6997 err = check_tp_buffer_access(env, reg, regno, off, size); 6998 if (!err && t == BPF_READ && value_regno >= 0) 6999 mark_reg_unknown(env, regs, value_regno); 7000 } else if (base_type(reg->type) == PTR_TO_BTF_ID && 7001 !type_may_be_null(reg->type)) { 7002 err = check_ptr_to_btf_access(env, regs, regno, off, size, t, 7003 value_regno); 7004 } else if (reg->type == CONST_PTR_TO_MAP) { 7005 err = check_ptr_to_map_access(env, regs, regno, off, size, t, 7006 value_regno); 7007 } else if (base_type(reg->type) == PTR_TO_BUF) { 7008 bool rdonly_mem = type_is_rdonly_mem(reg->type); 7009 u32 *max_access; 7010 7011 if (rdonly_mem) { 7012 if (t == BPF_WRITE) { 7013 verbose(env, "R%d cannot write into %s\n", 7014 regno, reg_type_str(env, reg->type)); 7015 return -EACCES; 7016 } 7017 max_access = &env->prog->aux->max_rdonly_access; 7018 } else { 7019 max_access = &env->prog->aux->max_rdwr_access; 7020 } 7021 7022 err = check_buffer_access(env, reg, regno, off, size, false, 7023 max_access); 7024 7025 if (!err && value_regno >= 0 && (rdonly_mem || t == 
BPF_READ)) 7026 mark_reg_unknown(env, regs, value_regno); 7027 } else { 7028 verbose(env, "R%d invalid mem access '%s'\n", regno, 7029 reg_type_str(env, reg->type)); 7030 return -EACCES; 7031 } 7032 7033 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && 7034 regs[value_regno].type == SCALAR_VALUE) { 7035 if (!is_ldsx) 7036 /* b/h/w load zero-extends, mark upper bits as known 0 */ 7037 coerce_reg_to_size(&regs[value_regno], size); 7038 else 7039 coerce_reg_to_size_sx(&regs[value_regno], size); 7040 } 7041 return err; 7042 } 7043 7044 static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) 7045 { 7046 int load_reg; 7047 int err; 7048 7049 switch (insn->imm) { 7050 case BPF_ADD: 7051 case BPF_ADD | BPF_FETCH: 7052 case BPF_AND: 7053 case BPF_AND | BPF_FETCH: 7054 case BPF_OR: 7055 case BPF_OR | BPF_FETCH: 7056 case BPF_XOR: 7057 case BPF_XOR | BPF_FETCH: 7058 case BPF_XCHG: 7059 case BPF_CMPXCHG: 7060 break; 7061 default: 7062 verbose(env, "BPF_ATOMIC uses invalid atomic opcode %02x\n", insn->imm); 7063 return -EINVAL; 7064 } 7065 7066 if (BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) { 7067 verbose(env, "invalid atomic operand size\n"); 7068 return -EINVAL; 7069 } 7070 7071 /* check src1 operand */ 7072 err = check_reg_arg(env, insn->src_reg, SRC_OP); 7073 if (err) 7074 return err; 7075 7076 /* check src2 operand */ 7077 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 7078 if (err) 7079 return err; 7080 7081 if (insn->imm == BPF_CMPXCHG) { 7082 /* Check comparison of R0 with memory location */ 7083 const u32 aux_reg = BPF_REG_0; 7084 7085 err = check_reg_arg(env, aux_reg, SRC_OP); 7086 if (err) 7087 return err; 7088 7089 if (is_pointer_value(env, aux_reg)) { 7090 verbose(env, "R%d leaks addr into mem\n", aux_reg); 7091 return -EACCES; 7092 } 7093 } 7094 7095 if (is_pointer_value(env, insn->src_reg)) { 7096 verbose(env, "R%d leaks addr into mem\n", insn->src_reg); 7097 return -EACCES; 7098 } 7099 7100 if (is_ctx_reg(env, insn->dst_reg) || 7101 is_pkt_reg(env, insn->dst_reg) || 7102 is_flow_key_reg(env, insn->dst_reg) || 7103 is_sk_reg(env, insn->dst_reg)) { 7104 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n", 7105 insn->dst_reg, 7106 reg_type_str(env, reg_state(env, insn->dst_reg)->type)); 7107 return -EACCES; 7108 } 7109 7110 if (insn->imm & BPF_FETCH) { 7111 if (insn->imm == BPF_CMPXCHG) 7112 load_reg = BPF_REG_0; 7113 else 7114 load_reg = insn->src_reg; 7115 7116 /* check and record load of old value */ 7117 err = check_reg_arg(env, load_reg, DST_OP); 7118 if (err) 7119 return err; 7120 } else { 7121 /* This instruction accesses a memory location but doesn't 7122 * actually load it into a register. 7123 */ 7124 load_reg = -1; 7125 } 7126 7127 /* Check whether we can read the memory, with second call for fetch 7128 * case to simulate the register fill. 7129 */ 7130 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 7131 BPF_SIZE(insn->code), BPF_READ, -1, true, false); 7132 if (!err && load_reg >= 0) 7133 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 7134 BPF_SIZE(insn->code), BPF_READ, load_reg, 7135 true, false); 7136 if (err) 7137 return err; 7138 7139 /* Check whether we can write into the same memory.
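 *
 * For example (a sketch using the BPF_ATOMIC_OP() insn macro), a 64-bit
 * fetch-add on the memory pointed to by R2:
 *
 *   BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0)
 *
 * requires [R2, R2+8) to be readable (checked above, the second time to
 * simulate filling R1 with the old value) and also writable (checked below).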
*/ 7140 err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, 7141 BPF_SIZE(insn->code), BPF_WRITE, -1, true, false); 7142 if (err) 7143 return err; 7144 7145 return 0; 7146 } 7147 7148 /* When register 'regno' is used to read the stack (either directly or through 7149 * a helper function) make sure that it's within stack boundary and, depending 7150 * on the access type, that all elements of the stack are initialized. 7151 * 7152 * 'off' includes 'regno->off', but not its dynamic part (if any). 7153 * 7154 * All registers that have been spilled on the stack in the slots within the 7155 * read offsets are marked as read. 7156 */ 7157 static int check_stack_range_initialized( 7158 struct bpf_verifier_env *env, int regno, int off, 7159 int access_size, bool zero_size_allowed, 7160 enum bpf_access_src type, struct bpf_call_arg_meta *meta) 7161 { 7162 struct bpf_reg_state *reg = reg_state(env, regno); 7163 struct bpf_func_state *state = func(env, reg); 7164 int err, min_off, max_off, i, j, slot, spi; 7165 char *err_extra = type == ACCESS_HELPER ? " indirect" : ""; 7166 enum bpf_access_type bounds_check_type; 7167 /* Some accesses can write anything into the stack, others are 7168 * read-only. 7169 */ 7170 bool clobber = false; 7171 7172 if (access_size == 0 && !zero_size_allowed) { 7173 verbose(env, "invalid zero-sized read\n"); 7174 return -EACCES; 7175 } 7176 7177 if (type == ACCESS_HELPER) { 7178 /* The bounds checks for writes are more permissive than for 7179 * reads. However, if raw_mode is not set, we'll do extra 7180 * checks below. 7181 */ 7182 bounds_check_type = BPF_WRITE; 7183 clobber = true; 7184 } else { 7185 bounds_check_type = BPF_READ; 7186 } 7187 err = check_stack_access_within_bounds(env, regno, off, access_size, 7188 type, bounds_check_type); 7189 if (err) 7190 return err; 7191 7192 7193 if (tnum_is_const(reg->var_off)) { 7194 min_off = max_off = reg->var_off.value + off; 7195 } else { 7196 /* Variable offset is prohibited for unprivileged mode for 7197 * simplicity since it requires corresponding support in 7198 * Spectre masking for stack ALU. 7199 * See also retrieve_ptr_limit(). 7200 */ 7201 if (!env->bypass_spec_v1) { 7202 char tn_buf[48]; 7203 7204 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 7205 verbose(env, "R%d%s variable offset stack access prohibited for !root, var_off=%s\n", 7206 regno, err_extra, tn_buf); 7207 return -EACCES; 7208 } 7209 /* Only an initialized buffer on the stack is allowed to be accessed 7210 * with a variable offset. With an uninitialized buffer it's hard to 7211 * guarantee that the whole memory is marked as initialized on 7212 * helper return, since the specific bounds are unknown, which may 7213 * cause uninitialized stack leaking. 7214 */ 7215 if (meta && meta->raw_mode) 7216 meta = NULL; 7217 7218 min_off = reg->smin_value + off; 7219 max_off = reg->smax_value + off; 7220 } 7221 7222 if (meta && meta->raw_mode) { 7223 /* Ensure we won't be overwriting dynptrs when simulating byte 7224 * by byte access in check_helper_call using meta.access_size. 7225 * This would be a problem if we have a helper in the future 7226 * which takes: 7227 * 7228 * helper(uninit_mem, len, dynptr) 7229 * 7230 * Now, uninit_mem may overlap with the dynptr pointer. Hence, it 7231 * may end up writing to the dynptr itself when touching memory from 7232 * arg 1. This can be relaxed on a case by case basis for known 7233 * safe cases, but reject due to the possibility of aliasing by 7234 * default.
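 *
 * A concrete sketch (some_helper() is hypothetical; bpf_dynptr_from_mem()
 * is the real helper):
 *
 *   struct bpf_dynptr dptr;                  // lives on the stack
 *   bpf_dynptr_from_mem(buf, 8, 0, &dptr);
 *   some_helper(&dptr, sizeof(dptr), &dptr); // uninit_mem aliases the dynptr
 *
 * The byte-by-byte write simulation for the first argument would touch
 * STACK_DYNPTR slots, which the loop below detects and rejects.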
7235 */ 7236 for (i = min_off; i < max_off + access_size; i++) { 7237 int stack_off = -i - 1; 7238 7239 spi = __get_spi(i); 7240 /* raw_mode may write past allocated_stack */ 7241 if (state->allocated_stack <= stack_off) 7242 continue; 7243 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) { 7244 verbose(env, "potential write to dynptr at off=%d disallowed\n", i); 7245 return -EACCES; 7246 } 7247 } 7248 meta->access_size = access_size; 7249 meta->regno = regno; 7250 return 0; 7251 } 7252 7253 for (i = min_off; i < max_off + access_size; i++) { 7254 u8 *stype; 7255 7256 slot = -i - 1; 7257 spi = slot / BPF_REG_SIZE; 7258 if (state->allocated_stack <= slot) 7259 goto err; 7260 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; 7261 if (*stype == STACK_MISC) 7262 goto mark; 7263 if ((*stype == STACK_ZERO) || 7264 (*stype == STACK_INVALID && env->allow_uninit_stack)) { 7265 if (clobber) { 7266 /* helper can write anything into the stack */ 7267 *stype = STACK_MISC; 7268 } 7269 goto mark; 7270 } 7271 7272 if (is_spilled_reg(&state->stack[spi]) && 7273 (state->stack[spi].spilled_ptr.type == SCALAR_VALUE || 7274 env->allow_ptr_leaks)) { 7275 if (clobber) { 7276 __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); 7277 for (j = 0; j < BPF_REG_SIZE; j++) 7278 scrub_spilled_slot(&state->stack[spi].slot_type[j]); 7279 } 7280 goto mark; 7281 } 7282 7283 err: 7284 if (tnum_is_const(reg->var_off)) { 7285 verbose(env, "invalid%s read from stack R%d off %d+%d size %d\n", 7286 err_extra, regno, min_off, i - min_off, access_size); 7287 } else { 7288 char tn_buf[48]; 7289 7290 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 7291 verbose(env, "invalid%s read from stack R%d var_off %s+%d size %d\n", 7292 err_extra, regno, tn_buf, i - min_off, access_size); 7293 } 7294 return -EACCES; 7295 mark: 7296 /* reading any byte out of 8-byte 'spill_slot' will cause 7297 * the whole slot to be marked as 'read' 7298 */ 7299 mark_reg_read(env, &state->stack[spi].spilled_ptr, 7300 state->stack[spi].spilled_ptr.parent, 7301 REG_LIVE_READ64); 7302 /* We do not set REG_LIVE_WRITTEN for the stack slot, as we cannot 7303 * be sure whether the stack slot is written to or not. Hence, 7304 * we must still conservatively propagate reads upwards even if 7305 * the helper may write to the entire memory range. 7306 */ 7307 } 7308 return update_stack_depth(env, state, min_off); 7309 } 7310 7311 static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, 7312 int access_size, bool zero_size_allowed, 7313 struct bpf_call_arg_meta *meta) 7314 { 7315 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 7316 u32 *max_access; 7317 7318 switch (base_type(reg->type)) { 7319 case PTR_TO_PACKET: 7320 case PTR_TO_PACKET_META: 7321 return check_packet_access(env, regno, reg->off, access_size, 7322 zero_size_allowed); 7323 case PTR_TO_MAP_KEY: 7324 if (meta && meta->raw_mode) { 7325 verbose(env, "R%d cannot write into %s\n", regno, 7326 reg_type_str(env, reg->type)); 7327 return -EACCES; 7328 } 7329 return check_mem_region_access(env, regno, reg->off, access_size, 7330 reg->map_ptr->key_size, false); 7331 case PTR_TO_MAP_VALUE: 7332 if (check_map_access_type(env, regno, reg->off, access_size, 7333 meta && meta->raw_mode ?
BPF_WRITE : 7334 BPF_READ)) 7335 return -EACCES; 7336 return check_map_access(env, regno, reg->off, access_size, 7337 zero_size_allowed, ACCESS_HELPER); 7338 case PTR_TO_MEM: 7339 if (type_is_rdonly_mem(reg->type)) { 7340 if (meta && meta->raw_mode) { 7341 verbose(env, "R%d cannot write into %s\n", regno, 7342 reg_type_str(env, reg->type)); 7343 return -EACCES; 7344 } 7345 } 7346 return check_mem_region_access(env, regno, reg->off, 7347 access_size, reg->mem_size, 7348 zero_size_allowed); 7349 case PTR_TO_BUF: 7350 if (type_is_rdonly_mem(reg->type)) { 7351 if (meta && meta->raw_mode) { 7352 verbose(env, "R%d cannot write into %s\n", regno, 7353 reg_type_str(env, reg->type)); 7354 return -EACCES; 7355 } 7356 7357 max_access = &env->prog->aux->max_rdonly_access; 7358 } else { 7359 max_access = &env->prog->aux->max_rdwr_access; 7360 } 7361 return check_buffer_access(env, reg, regno, reg->off, 7362 access_size, zero_size_allowed, 7363 max_access); 7364 case PTR_TO_STACK: 7365 return check_stack_range_initialized( 7366 env, 7367 regno, reg->off, access_size, 7368 zero_size_allowed, ACCESS_HELPER, meta); 7369 case PTR_TO_BTF_ID: 7370 return check_ptr_to_btf_access(env, regs, regno, reg->off, 7371 access_size, BPF_READ, -1); 7372 case PTR_TO_CTX: 7373 /* in case the function doesn't know how to access the context, 7374 * (because we are in a program of type SYSCALL for example), we 7375 * can not statically check its size. 7376 * Dynamically check it now. 7377 */ 7378 if (!env->ops->convert_ctx_access) { 7379 enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ; 7380 int offset = access_size - 1; 7381 7382 /* Allow zero-byte read from PTR_TO_CTX */ 7383 if (access_size == 0) 7384 return zero_size_allowed ? 0 : -EACCES; 7385 7386 return check_mem_access(env, env->insn_idx, regno, offset, BPF_B, 7387 atype, -1, false, false); 7388 } 7389 7390 fallthrough; 7391 default: /* scalar_value or invalid ptr */ 7392 /* Allow zero-byte read from NULL, regardless of pointer type */ 7393 if (zero_size_allowed && access_size == 0 && 7394 register_is_null(reg)) 7395 return 0; 7396 7397 verbose(env, "R%d type=%s ", regno, 7398 reg_type_str(env, reg->type)); 7399 verbose(env, "expected=%s\n", reg_type_str(env, PTR_TO_STACK)); 7400 return -EACCES; 7401 } 7402 } 7403 7404 static int check_mem_size_reg(struct bpf_verifier_env *env, 7405 struct bpf_reg_state *reg, u32 regno, 7406 bool zero_size_allowed, 7407 struct bpf_call_arg_meta *meta) 7408 { 7409 int err; 7410 7411 /* This is used to refine r0 return value bounds for helpers 7412 * that enforce this value as an upper bound on return values. 7413 * See do_refine_retval_range() for helpers that can refine 7414 * the return value. C type of helper is u32 so we pull register 7415 * bound from umax_value however, if negative verifier errors 7416 * out. Only upper bounds can be learned because retval is an 7417 * int type and negative retvals are allowed. 7418 */ 7419 meta->msize_max_value = reg->umax_value; 7420 7421 /* The register is SCALAR_VALUE; the access check 7422 * happens using its boundaries. 7423 */ 7424 if (!tnum_is_const(reg->var_off)) 7425 /* For unprivileged variable accesses, disable raw 7426 * mode so that the program is required to 7427 * initialize all the memory that the helper could 7428 * just partially fill up. 
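 *
 * For example (a sketch in BPF C; the helper is real, the surrounding
 * names are illustrative):
 *
 *   char buf[64] = {};                       // pre-initialized stack buffer
 *   u32 len = bpf_get_prandom_u32() & 63;    // variable, non-constant size
 *   bpf_probe_read_kernel(buf, len, src);
 *
 * Because len is not constant, only part of buf might be overwritten, so
 * raw_mode is dropped and the region is checked as an initialized read.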
7429 */ 7430 meta = NULL; 7431 7432 if (reg->smin_value < 0) { 7433 verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", 7434 regno); 7435 return -EACCES; 7436 } 7437 7438 if (reg->umin_value == 0) { 7439 err = check_helper_mem_access(env, regno - 1, 0, 7440 zero_size_allowed, 7441 meta); 7442 if (err) 7443 return err; 7444 } 7445 7446 if (reg->umax_value >= BPF_MAX_VAR_SIZ) { 7447 verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", 7448 regno); 7449 return -EACCES; 7450 } 7451 err = check_helper_mem_access(env, regno - 1, 7452 reg->umax_value, 7453 zero_size_allowed, meta); 7454 if (!err) 7455 err = mark_chain_precision(env, regno); 7456 return err; 7457 } 7458 7459 int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 7460 u32 regno, u32 mem_size) 7461 { 7462 bool may_be_null = type_may_be_null(reg->type); 7463 struct bpf_reg_state saved_reg; 7464 struct bpf_call_arg_meta meta; 7465 int err; 7466 7467 if (register_is_null(reg)) 7468 return 0; 7469 7470 memset(&meta, 0, sizeof(meta)); 7471 /* Assuming that the register contains a value check if the memory 7472 * access is safe. Temporarily save and restore the register's state as 7473 * the conversion shouldn't be visible to a caller. 7474 */ 7475 if (may_be_null) { 7476 saved_reg = *reg; 7477 mark_ptr_not_null_reg(reg); 7478 } 7479 7480 err = check_helper_mem_access(env, regno, mem_size, true, &meta); 7481 /* Check access for BPF_WRITE */ 7482 meta.raw_mode = true; 7483 err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta); 7484 7485 if (may_be_null) 7486 *reg = saved_reg; 7487 7488 return err; 7489 } 7490 7491 static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, 7492 u32 regno) 7493 { 7494 struct bpf_reg_state *mem_reg = &cur_regs(env)[regno - 1]; 7495 bool may_be_null = type_may_be_null(mem_reg->type); 7496 struct bpf_reg_state saved_reg; 7497 struct bpf_call_arg_meta meta; 7498 int err; 7499 7500 WARN_ON_ONCE(regno < BPF_REG_2 || regno > BPF_REG_5); 7501 7502 memset(&meta, 0, sizeof(meta)); 7503 7504 if (may_be_null) { 7505 saved_reg = *mem_reg; 7506 mark_ptr_not_null_reg(mem_reg); 7507 } 7508 7509 err = check_mem_size_reg(env, reg, regno, true, &meta); 7510 /* Check access for BPF_WRITE */ 7511 meta.raw_mode = true; 7512 err = err ?: check_mem_size_reg(env, reg, regno, true, &meta); 7513 7514 if (may_be_null) 7515 *mem_reg = saved_reg; 7516 return err; 7517 } 7518 7519 /* Implementation details: 7520 * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL. 7521 * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL. 7522 * Two bpf_map_lookups (even with the same key) will have different reg->id. 7523 * Two separate bpf_obj_new will also have different reg->id. 7524 * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier 7525 * clears reg->id after value_or_null->value transition, since the verifier only 7526 * cares about the range of access to valid map value pointer and doesn't care 7527 * about actual address of the map element. 7528 * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps 7529 * reg->id > 0 after value_or_null->value transition. By doing so 7530 * two bpf_map_lookups will be considered two different pointers that 7531 * point to different bpf_spin_locks. Likewise for pointers to allocated objects 7532 * returned from bpf_obj_new. 7533 * The verifier allows taking only one bpf_spin_lock at a time to avoid 7534 * dead-locks. 
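 *
 * A typical user pattern looks like (a sketch; the map value layout is
 * illustrative):
 *
 *   struct elem { struct bpf_spin_lock lock; int cnt; };
 *
 *   val = bpf_map_lookup_elem(&map, &key);
 *   if (val) {
 *           bpf_spin_lock(&val->lock);
 *           val->cnt++;
 *           bpf_spin_unlock(&val->lock);
 *   }
 *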
7535 * Since only one bpf_spin_lock is allowed the checks are simpler than 7536 * reg_is_refcounted() logic. The verifier needs to remember only 7537 * one spin_lock instead of array of acquired_refs. 7538 * cur_state->active_lock remembers which map value element or allocated 7539 * object got locked and clears it after bpf_spin_unlock. 7540 */ 7541 static int process_spin_lock(struct bpf_verifier_env *env, int regno, 7542 bool is_lock) 7543 { 7544 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 7545 struct bpf_verifier_state *cur = env->cur_state; 7546 bool is_const = tnum_is_const(reg->var_off); 7547 u64 val = reg->var_off.value; 7548 struct bpf_map *map = NULL; 7549 struct btf *btf = NULL; 7550 struct btf_record *rec; 7551 7552 if (!is_const) { 7553 verbose(env, 7554 "R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n", 7555 regno); 7556 return -EINVAL; 7557 } 7558 if (reg->type == PTR_TO_MAP_VALUE) { 7559 map = reg->map_ptr; 7560 if (!map->btf) { 7561 verbose(env, 7562 "map '%s' has to have BTF in order to use bpf_spin_lock\n", 7563 map->name); 7564 return -EINVAL; 7565 } 7566 } else { 7567 btf = reg->btf; 7568 } 7569 7570 rec = reg_btf_record(reg); 7571 if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) { 7572 verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local", 7573 map ? map->name : "kptr"); 7574 return -EINVAL; 7575 } 7576 if (rec->spin_lock_off != val + reg->off) { 7577 verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n", 7578 val + reg->off, rec->spin_lock_off); 7579 return -EINVAL; 7580 } 7581 if (is_lock) { 7582 if (cur->active_lock.ptr) { 7583 verbose(env, 7584 "Locking two bpf_spin_locks are not allowed\n"); 7585 return -EINVAL; 7586 } 7587 if (map) 7588 cur->active_lock.ptr = map; 7589 else 7590 cur->active_lock.ptr = btf; 7591 cur->active_lock.id = reg->id; 7592 } else { 7593 void *ptr; 7594 7595 if (map) 7596 ptr = map; 7597 else 7598 ptr = btf; 7599 7600 if (!cur->active_lock.ptr) { 7601 verbose(env, "bpf_spin_unlock without taking a lock\n"); 7602 return -EINVAL; 7603 } 7604 if (cur->active_lock.ptr != ptr || 7605 cur->active_lock.id != reg->id) { 7606 verbose(env, "bpf_spin_unlock of different lock\n"); 7607 return -EINVAL; 7608 } 7609 7610 invalidate_non_owning_refs(env); 7611 7612 cur->active_lock.ptr = NULL; 7613 cur->active_lock.id = 0; 7614 } 7615 return 0; 7616 } 7617 7618 static int process_timer_func(struct bpf_verifier_env *env, int regno, 7619 struct bpf_call_arg_meta *meta) 7620 { 7621 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 7622 bool is_const = tnum_is_const(reg->var_off); 7623 struct bpf_map *map = reg->map_ptr; 7624 u64 val = reg->var_off.value; 7625 7626 if (!is_const) { 7627 verbose(env, 7628 "R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n", 7629 regno); 7630 return -EINVAL; 7631 } 7632 if (!map->btf) { 7633 verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n", 7634 map->name); 7635 return -EINVAL; 7636 } 7637 if (!btf_record_has_field(map->record, BPF_TIMER)) { 7638 verbose(env, "map '%s' has no valid bpf_timer\n", map->name); 7639 return -EINVAL; 7640 } 7641 if (map->record->timer_off != val + reg->off) { 7642 verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n", 7643 val + reg->off, map->record->timer_off); 7644 return -EINVAL; 7645 } 7646 if (meta->map_ptr) { 7647 verbose(env, "verifier bug. Two map pointers in a timer helper\n");
7648 return -EFAULT; 7649 } 7650 meta->map_uid = reg->map_uid; 7651 meta->map_ptr = map; 7652 return 0; 7653 } 7654 7655 static int process_kptr_func(struct bpf_verifier_env *env, int regno, 7656 struct bpf_call_arg_meta *meta) 7657 { 7658 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 7659 struct bpf_map *map_ptr = reg->map_ptr; 7660 struct btf_field *kptr_field; 7661 u32 kptr_off; 7662 7663 if (!tnum_is_const(reg->var_off)) { 7664 verbose(env, 7665 "R%d doesn't have constant offset. kptr has to be at the constant offset\n", 7666 regno); 7667 return -EINVAL; 7668 } 7669 if (!map_ptr->btf) { 7670 verbose(env, "map '%s' has to have BTF in order to use bpf_kptr_xchg\n", 7671 map_ptr->name); 7672 return -EINVAL; 7673 } 7674 if (!btf_record_has_field(map_ptr->record, BPF_KPTR)) { 7675 verbose(env, "map '%s' has no valid kptr\n", map_ptr->name); 7676 return -EINVAL; 7677 } 7678 7679 meta->map_ptr = map_ptr; 7680 kptr_off = reg->off + reg->var_off.value; 7681 kptr_field = btf_record_find(map_ptr->record, kptr_off, BPF_KPTR); 7682 if (!kptr_field) { 7683 verbose(env, "off=%d doesn't point to kptr\n", kptr_off); 7684 return -EACCES; 7685 } 7686 if (kptr_field->type != BPF_KPTR_REF && kptr_field->type != BPF_KPTR_PERCPU) { 7687 verbose(env, "off=%d kptr isn't referenced kptr\n", kptr_off); 7688 return -EACCES; 7689 } 7690 meta->kptr_field = kptr_field; 7691 return 0; 7692 } 7693 7694 /* There are two register types representing a bpf_dynptr, one is PTR_TO_STACK 7695 * which points to a stack slot, and the other is CONST_PTR_TO_DYNPTR. 7696 * 7697 * In both cases we deal with the first 8 bytes, but need to mark the next 8 7698 * bytes as STACK_DYNPTR in case of PTR_TO_STACK. In case of 7699 * CONST_PTR_TO_DYNPTR, we are guaranteed to get the beginning of the object. 7700 * 7701 * Mutability of bpf_dynptr is at two levels, one is at the level of struct 7702 * bpf_dynptr itself, i.e. whether the helper is receiving a pointer to struct 7703 * bpf_dynptr or pointer to const struct bpf_dynptr. In the former case, it can 7704 * mutate the view of the dynptr and also possibly destroy it. In the latter 7705 * case, it cannot mutate the bpf_dynptr itself but it can still mutate the 7706 * memory that dynptr points to. 7707 * 7708 * The verifier will keep track of both levels of mutation (bpf_dynptr's in 7709 * reg->type and the memory's in reg->dynptr.type), but there is no support for 7710 * readonly dynptr view yet, hence only the first case is tracked and checked. 7711 * 7712 * This is consistent with how C applies the const modifier to a struct object, 7713 * where the pointer itself inside bpf_dynptr becomes const but not what it 7714 * points to. 7715 * 7716 * Helpers which do not mutate the bpf_dynptr set MEM_RDONLY in their argument 7717 * type, and declare it as 'const struct bpf_dynptr *' in their prototype.
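 *
 * For example (a sketch; the prototype is paraphrased from the UAPI helper
 * definition):
 *
 *   long bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr *src,
 *                        u32 offset, u64 flags);
 *
 * reads through the dynptr but never changes the dynptr itself, so its
 * dynptr argument carries MEM_RDONLY. A consuming helper such as
 * bpf_ringbuf_submit_dynptr() takes a non-const dynptr and may destroy it.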
7718 */ 7719 static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn_idx, 7720 enum bpf_arg_type arg_type, int clone_ref_obj_id) 7721 { 7722 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 7723 int err; 7724 7725 /* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an 7726 * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*): 7727 */ 7728 if ((arg_type & (MEM_UNINIT | MEM_RDONLY)) == (MEM_UNINIT | MEM_RDONLY)) { 7729 verbose(env, "verifier internal error: misconfigured dynptr helper type flags\n"); 7730 return -EFAULT; 7731 } 7732 7733 /* MEM_UNINIT - Points to memory that is an appropriate candidate for 7734 * constructing a mutable bpf_dynptr object. 7735 * 7736 * Currently, this is only possible with PTR_TO_STACK 7737 * pointing to a region of at least 16 bytes which doesn't 7738 * contain an existing bpf_dynptr. 7739 * 7740 * MEM_RDONLY - Points to an initialized bpf_dynptr that will not be 7741 * mutated or destroyed. However, the memory it points to 7742 * may be mutated. 7743 * 7744 * None - Points to an initialized dynptr that can be mutated and 7745 * destroyed, including mutation of the memory it points 7746 * to. 7747 */ 7748 if (arg_type & MEM_UNINIT) { 7749 int i; 7750 7751 if (!is_dynptr_reg_valid_uninit(env, reg)) { 7752 verbose(env, "Dynptr has to be an uninitialized dynptr\n"); 7753 return -EINVAL; 7754 } 7755 7756 /* we write BPF_DW bits (8 bytes) at a time */ 7757 for (i = 0; i < BPF_DYNPTR_SIZE; i += 8) { 7758 err = check_mem_access(env, insn_idx, regno, 7759 i, BPF_DW, BPF_WRITE, -1, false, false); 7760 if (err) 7761 return err; 7762 } 7763 7764 err = mark_stack_slots_dynptr(env, reg, arg_type, insn_idx, clone_ref_obj_id); 7765 } else /* MEM_RDONLY and None case from above */ { 7766 /* For the reg->type == PTR_TO_STACK case, bpf_dynptr is never const */ 7767 if (reg->type == CONST_PTR_TO_DYNPTR && !(arg_type & MEM_RDONLY)) { 7768 verbose(env, "cannot pass pointer to const bpf_dynptr, the helper mutates it\n"); 7769 return -EINVAL; 7770 } 7771 7772 if (!is_dynptr_reg_valid_init(env, reg)) { 7773 verbose(env, 7774 "Expected an initialized dynptr as arg #%d\n", 7775 regno); 7776 return -EINVAL; 7777 } 7778 7779 /* Fold modifiers (in this case, MEM_RDONLY) when checking expected type */ 7780 if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) { 7781 verbose(env, 7782 "Expected a dynptr of type %s as arg #%d\n", 7783 dynptr_type_str(arg_to_dynptr_type(arg_type)), regno); 7784 return -EINVAL; 7785 } 7786 7787 err = mark_dynptr_read(env, reg); 7788 } 7789 return err; 7790 } 7791 7792 static u32 iter_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg, int spi) 7793 { 7794 struct bpf_func_state *state = func(env, reg); 7795 7796 return state->stack[spi].spilled_ptr.ref_obj_id; 7797 } 7798 7799 static bool is_iter_kfunc(struct bpf_kfunc_call_arg_meta *meta) 7800 { 7801 return meta->kfunc_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY); 7802 } 7803 7804 static bool is_iter_new_kfunc(struct bpf_kfunc_call_arg_meta *meta) 7805 { 7806 return meta->kfunc_flags & KF_ITER_NEW; 7807 } 7808 7809 static bool is_iter_next_kfunc(struct bpf_kfunc_call_arg_meta *meta) 7810 { 7811 return meta->kfunc_flags & KF_ITER_NEXT; 7812 } 7813 7814 static bool is_iter_destroy_kfunc(struct bpf_kfunc_call_arg_meta *meta) 7815 { 7816 return meta->kfunc_flags & KF_ITER_DESTROY; 7817 } 7818 7819 static bool is_kfunc_arg_iter(struct bpf_kfunc_call_arg_meta *meta, int arg) 7820 { 7821 /* btf_check_iter_kfuncs() guarantees that
first argument of any iter 7822 * kfunc is iter state pointer 7823 */ 7824 return arg == 0 && is_iter_kfunc(meta); 7825 } 7826 7827 static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_idx, 7828 struct bpf_kfunc_call_arg_meta *meta) 7829 { 7830 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 7831 const struct btf_type *t; 7832 const struct btf_param *arg; 7833 int spi, err, i, nr_slots; 7834 u32 btf_id; 7835 7836 /* btf_check_iter_kfuncs() ensures we don't need to validate anything here */ 7837 arg = &btf_params(meta->func_proto)[0]; 7838 t = btf_type_skip_modifiers(meta->btf, arg->type, NULL); /* PTR */ 7839 t = btf_type_skip_modifiers(meta->btf, t->type, &btf_id); /* STRUCT */ 7840 nr_slots = t->size / BPF_REG_SIZE; 7841 7842 if (is_iter_new_kfunc(meta)) { 7843 /* bpf_iter_<type>_new() expects pointer to uninit iter state */ 7844 if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) { 7845 verbose(env, "expected uninitialized iter_%s as arg #%d\n", 7846 iter_type_str(meta->btf, btf_id), regno); 7847 return -EINVAL; 7848 } 7849 7850 for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) { 7851 err = check_mem_access(env, insn_idx, regno, 7852 i, BPF_DW, BPF_WRITE, -1, false, false); 7853 if (err) 7854 return err; 7855 } 7856 7857 err = mark_stack_slots_iter(env, meta, reg, insn_idx, meta->btf, btf_id, nr_slots); 7858 if (err) 7859 return err; 7860 } else { 7861 /* iter_next() or iter_destroy() expect initialized iter state */ 7862 err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots); 7863 switch (err) { 7864 case 0: 7865 break; 7866 case -EINVAL: 7867 verbose(env, "expected an initialized iter_%s as arg #%d\n", 7868 iter_type_str(meta->btf, btf_id), regno); 7869 return err; 7870 case -EPROTO: 7871 verbose(env, "expected an RCU CS when using %s\n", meta->func_name); 7872 return err; 7873 default: 7874 return err; 7875 } 7876 7877 spi = iter_get_spi(env, reg, nr_slots); 7878 if (spi < 0) 7879 return spi; 7880 7881 err = mark_iter_read(env, reg, spi, nr_slots); 7882 if (err) 7883 return err; 7884 7885 /* remember meta->iter info for process_iter_next_call() */ 7886 meta->iter.spi = spi; 7887 meta->iter.frameno = reg->frameno; 7888 meta->ref_obj_id = iter_ref_obj_id(env, reg, spi); 7889 7890 if (is_iter_destroy_kfunc(meta)) { 7891 err = unmark_stack_slots_iter(env, reg, nr_slots); 7892 if (err) 7893 return err; 7894 } 7895 } 7896 7897 return 0; 7898 } 7899 7900 /* Look for a previous loop entry at insn_idx: nearest parent state 7901 * stopped at insn_idx with callsites matching those in cur->frame. 7902 */ 7903 static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env, 7904 struct bpf_verifier_state *cur, 7905 int insn_idx) 7906 { 7907 struct bpf_verifier_state_list *sl; 7908 struct bpf_verifier_state *st; 7909 7910 /* Explored states are pushed in stack order, most recent states come first */ 7911 sl = *explored_state(env, insn_idx); 7912 for (; sl; sl = sl->next) { 7913 /* If st->branches != 0 state is a part of current DFS verification path, 7914 * hence cur & st form a loop.
7915 */ 7916 st = &sl->state; 7917 if (st->insn_idx == insn_idx && st->branches && same_callsites(st, cur) && 7918 st->dfs_depth < cur->dfs_depth) 7919 return st; 7920 } 7921 7922 return NULL; 7923 } 7924 7925 static void reset_idmap_scratch(struct bpf_verifier_env *env); 7926 static bool regs_exact(const struct bpf_reg_state *rold, 7927 const struct bpf_reg_state *rcur, 7928 struct bpf_idmap *idmap); 7929 7930 static void maybe_widen_reg(struct bpf_verifier_env *env, 7931 struct bpf_reg_state *rold, struct bpf_reg_state *rcur, 7932 struct bpf_idmap *idmap) 7933 { 7934 if (rold->type != SCALAR_VALUE) 7935 return; 7936 if (rold->type != rcur->type) 7937 return; 7938 if (rold->precise || rcur->precise || regs_exact(rold, rcur, idmap)) 7939 return; 7940 __mark_reg_unknown(env, rcur); 7941 } 7942 7943 static int widen_imprecise_scalars(struct bpf_verifier_env *env, 7944 struct bpf_verifier_state *old, 7945 struct bpf_verifier_state *cur) 7946 { 7947 struct bpf_func_state *fold, *fcur; 7948 int i, fr; 7949 7950 reset_idmap_scratch(env); 7951 for (fr = old->curframe; fr >= 0; fr--) { 7952 fold = old->frame[fr]; 7953 fcur = cur->frame[fr]; 7954 7955 for (i = 0; i < MAX_BPF_REG; i++) 7956 maybe_widen_reg(env, 7957 &fold->regs[i], 7958 &fcur->regs[i], 7959 &env->idmap_scratch); 7960 7961 for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) { 7962 if (!is_spilled_reg(&fold->stack[i]) || 7963 !is_spilled_reg(&fcur->stack[i])) 7964 continue; 7965 7966 maybe_widen_reg(env, 7967 &fold->stack[i].spilled_ptr, 7968 &fcur->stack[i].spilled_ptr, 7969 &env->idmap_scratch); 7970 } 7971 } 7972 return 0; 7973 } 7974 7975 /* process_iter_next_call() is called when verifier gets to iterator's next 7976 * "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer 7977 * to it as just "iter_next()" in comments below. 7978 * 7979 * BPF verifier relies on a crucial contract for any iter_next() 7980 * implementation: it should *eventually* return NULL, and once that happens 7981 * it should keep returning NULL. That is, once iterator exhausts elements to 7982 * iterate, it should never reset or spuriously return new elements. 7983 * 7984 * With the assumption of such contract, process_iter_next_call() simulates 7985 * a fork in the verifier state to validate loop logic correctness and safety 7986 * without having to simulate infinite amount of iterations. 7987 * 7988 * In current state, we first assume that iter_next() returned NULL and 7989 * iterator state is set to DRAINED (BPF_ITER_STATE_DRAINED). In such 7990 * conditions we should not form an infinite loop and should eventually reach 7991 * exit. 7992 * 7993 * Besides that, we also fork current state and enqueue it for later 7994 * verification. In a forked state we keep iterator state as ACTIVE 7995 * (BPF_ITER_STATE_ACTIVE) and assume non-NULL return from iter_next(). We 7996 * also bump iteration depth to prevent erroneous infinite loop detection 7997 * later on (see iter_active_depths_differ() comment for details). In this 7998 * state we assume that we'll eventually loop back to another iter_next() 7999 * calls (it could be in exactly same location or in some other instruction, 8000 * it doesn't matter, we don't make any unnecessary assumptions about this, 8001 * everything revolves around iterator state in a stack slot, not which 8002 * instruction is calling iter_next()). 
When that happens, we either will come 8003 * to iter_next() with equivalent state and can conclude that next iteration 8004 * will proceed in exactly the same way as we just verified, so it's safe to 8005 * assume that loop converges. If not, we'll go on another iteration 8006 * simulation with a different input state, until all possible starting states 8007 * are validated or we reach maximum number of instructions limit. 8008 * 8009 * This way, we will either exhaustively discover all possible input states 8010 * that iterator loop can start with and eventually will converge, or we'll 8011 * effectively regress into bounded loop simulation logic and either reach 8012 * maximum number of instructions if loop is not provably convergent, or there 8013 * is some statically known limit on number of iterations (e.g., if there is 8014 * an explicit `if n > 100 then break;` statement somewhere in the loop). 8015 * 8016 * Iteration convergence logic in is_state_visited() relies on exact 8017 * states comparison, which ignores read and precision marks. 8018 * This is necessary because read and precision marks are not finalized 8019 * while in the loop. Exact comparison might preclude convergence for 8020 * simple programs like below: 8021 * 8022 * i = 0; 8023 * while(iter_next(&it)) 8024 * i++; 8025 * 8026 * At each iteration step i++ would produce a new distinct state and 8027 * eventually instruction processing limit would be reached. 8028 * 8029 * To avoid such behavior speculatively forget (widen) range for 8030 * imprecise scalar registers, if those registers were not precise at the 8031 * end of the previous iteration and do not match exactly. 8032 * 8033 * This is a conservative heuristic that allows to verify wide range of programs, 8034 * however it precludes verification of programs that conjure an 8035 * imprecise value on the first loop iteration and use it as precise on a second. 8036 * For example, the following safe program would fail to verify: 8037 * 8038 * struct bpf_num_iter it; 8039 * int arr[10]; 8040 * int i = 0, a = 0; 8041 * bpf_iter_num_new(&it, 0, 10); 8042 * while (bpf_iter_num_next(&it)) { 8043 * if (a == 0) { 8044 * a = 1; 8045 * i = 7; // Because i changed verifier would forget 8046 * // it's range on second loop entry. 8047 * } else { 8048 * arr[i] = 42; // This would fail to verify. 8049 * } 8050 * } 8051 * bpf_iter_num_destroy(&it); 8052 */ 8053 static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx, 8054 struct bpf_kfunc_call_arg_meta *meta) 8055 { 8056 struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; 8057 struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr; 8058 struct bpf_reg_state *cur_iter, *queued_iter; 8059 int iter_frameno = meta->iter.frameno; 8060 int iter_spi = meta->iter.spi; 8061 8062 BTF_TYPE_EMIT(struct bpf_iter); 8063 8064 cur_iter = &env->cur_state->frame[iter_frameno]->stack[iter_spi].spilled_ptr; 8065 8066 if (cur_iter->iter.state != BPF_ITER_STATE_ACTIVE && 8067 cur_iter->iter.state != BPF_ITER_STATE_DRAINED) { 8068 verbose(env, "verifier internal error: unexpected iterator state %d (%s)\n", 8069 cur_iter->iter.state, iter_state_str(cur_iter->iter.state)); 8070 return -EFAULT; 8071 } 8072 8073 if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) { 8074 /* Because iter_next() call is a checkpoint is_state_visitied() 8075 * should guarantee parent state with same call sites and insn_idx. 
8076 */ 8077 if (!cur_st->parent || cur_st->parent->insn_idx != insn_idx || 8078 !same_callsites(cur_st->parent, cur_st)) { 8079 verbose(env, "bug: bad parent state for iter next call"); 8080 return -EFAULT; 8081 } 8082 /* Note cur_st->parent in the call below, it is necessary to skip 8083 * checkpoint created for cur_st by is_state_visited() 8084 * right at this instruction. 8085 */ 8086 prev_st = find_prev_entry(env, cur_st->parent, insn_idx); 8087 /* branch out active iter state */ 8088 queued_st = push_stack(env, insn_idx + 1, insn_idx, false); 8089 if (!queued_st) 8090 return -ENOMEM; 8091 8092 queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr; 8093 queued_iter->iter.state = BPF_ITER_STATE_ACTIVE; 8094 queued_iter->iter.depth++; 8095 if (prev_st) 8096 widen_imprecise_scalars(env, prev_st, queued_st); 8097 8098 queued_fr = queued_st->frame[queued_st->curframe]; 8099 mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]); 8100 } 8101 8102 /* switch to DRAINED state, but keep the depth unchanged */ 8103 /* mark current iter state as drained and assume returned NULL */ 8104 cur_iter->iter.state = BPF_ITER_STATE_DRAINED; 8105 __mark_reg_const_zero(&cur_fr->regs[BPF_REG_0]); 8106 8107 return 0; 8108 } 8109 8110 static bool arg_type_is_mem_size(enum bpf_arg_type type) 8111 { 8112 return type == ARG_CONST_SIZE || 8113 type == ARG_CONST_SIZE_OR_ZERO; 8114 } 8115 8116 static bool arg_type_is_release(enum bpf_arg_type type) 8117 { 8118 return type & OBJ_RELEASE; 8119 } 8120 8121 static bool arg_type_is_dynptr(enum bpf_arg_type type) 8122 { 8123 return base_type(type) == ARG_PTR_TO_DYNPTR; 8124 } 8125 8126 static int int_ptr_type_to_size(enum bpf_arg_type type) 8127 { 8128 if (type == ARG_PTR_TO_INT) 8129 return sizeof(u32); 8130 else if (type == ARG_PTR_TO_LONG) 8131 return sizeof(u64); 8132 8133 return -EINVAL; 8134 } 8135 8136 static int resolve_map_arg_type(struct bpf_verifier_env *env, 8137 const struct bpf_call_arg_meta *meta, 8138 enum bpf_arg_type *arg_type) 8139 { 8140 if (!meta->map_ptr) { 8141 /* kernel subsystem misconfigured verifier */ 8142 verbose(env, "invalid map_ptr to access map->type\n"); 8143 return -EACCES; 8144 } 8145 8146 switch (meta->map_ptr->map_type) { 8147 case BPF_MAP_TYPE_SOCKMAP: 8148 case BPF_MAP_TYPE_SOCKHASH: 8149 if (*arg_type == ARG_PTR_TO_MAP_VALUE) { 8150 *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON; 8151 } else { 8152 verbose(env, "invalid arg_type for sockmap/sockhash\n"); 8153 return -EINVAL; 8154 } 8155 break; 8156 case BPF_MAP_TYPE_BLOOM_FILTER: 8157 if (meta->func_id == BPF_FUNC_map_peek_elem) 8158 *arg_type = ARG_PTR_TO_MAP_VALUE; 8159 break; 8160 default: 8161 break; 8162 } 8163 return 0; 8164 } 8165 8166 struct bpf_reg_types { 8167 const enum bpf_reg_type types[10]; 8168 u32 *btf_id; 8169 }; 8170 8171 static const struct bpf_reg_types sock_types = { 8172 .types = { 8173 PTR_TO_SOCK_COMMON, 8174 PTR_TO_SOCKET, 8175 PTR_TO_TCP_SOCK, 8176 PTR_TO_XDP_SOCK, 8177 }, 8178 }; 8179 8180 #ifdef CONFIG_NET 8181 static const struct bpf_reg_types btf_id_sock_common_types = { 8182 .types = { 8183 PTR_TO_SOCK_COMMON, 8184 PTR_TO_SOCKET, 8185 PTR_TO_TCP_SOCK, 8186 PTR_TO_XDP_SOCK, 8187 PTR_TO_BTF_ID, 8188 PTR_TO_BTF_ID | PTR_TRUSTED, 8189 }, 8190 .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON], 8191 }; 8192 #endif 8193 8194 static const struct bpf_reg_types mem_types = { 8195 .types = { 8196 PTR_TO_STACK, 8197 PTR_TO_PACKET, 8198 PTR_TO_PACKET_META, 8199 PTR_TO_MAP_KEY, 8200 PTR_TO_MAP_VALUE, 8201 PTR_TO_MEM, 8202 PTR_TO_MEM | MEM_RINGBUF, 8203 
PTR_TO_BUF, 8204 PTR_TO_BTF_ID | PTR_TRUSTED, 8205 }, 8206 }; 8207 8208 static const struct bpf_reg_types int_ptr_types = { 8209 .types = { 8210 PTR_TO_STACK, 8211 PTR_TO_PACKET, 8212 PTR_TO_PACKET_META, 8213 PTR_TO_MAP_KEY, 8214 PTR_TO_MAP_VALUE, 8215 }, 8216 }; 8217 8218 static const struct bpf_reg_types spin_lock_types = { 8219 .types = { 8220 PTR_TO_MAP_VALUE, 8221 PTR_TO_BTF_ID | MEM_ALLOC, 8222 } 8223 }; 8224 8225 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } }; 8226 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } }; 8227 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } }; 8228 static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } }; 8229 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } }; 8230 static const struct bpf_reg_types btf_ptr_types = { 8231 .types = { 8232 PTR_TO_BTF_ID, 8233 PTR_TO_BTF_ID | PTR_TRUSTED, 8234 PTR_TO_BTF_ID | MEM_RCU, 8235 }, 8236 }; 8237 static const struct bpf_reg_types percpu_btf_ptr_types = { 8238 .types = { 8239 PTR_TO_BTF_ID | MEM_PERCPU, 8240 PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU, 8241 PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED, 8242 } 8243 }; 8244 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; 8245 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; 8246 static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; 8247 static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } }; 8248 static const struct bpf_reg_types kptr_types = { .types = { PTR_TO_MAP_VALUE } }; 8249 static const struct bpf_reg_types dynptr_types = { 8250 .types = { 8251 PTR_TO_STACK, 8252 CONST_PTR_TO_DYNPTR, 8253 } 8254 }; 8255 8256 static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { 8257 [ARG_PTR_TO_MAP_KEY] = &mem_types, 8258 [ARG_PTR_TO_MAP_VALUE] = &mem_types, 8259 [ARG_CONST_SIZE] = &scalar_types, 8260 [ARG_CONST_SIZE_OR_ZERO] = &scalar_types, 8261 [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types, 8262 [ARG_CONST_MAP_PTR] = &const_map_ptr_types, 8263 [ARG_PTR_TO_CTX] = &context_types, 8264 [ARG_PTR_TO_SOCK_COMMON] = &sock_types, 8265 #ifdef CONFIG_NET 8266 [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types, 8267 #endif 8268 [ARG_PTR_TO_SOCKET] = &fullsock_types, 8269 [ARG_PTR_TO_BTF_ID] = &btf_ptr_types, 8270 [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, 8271 [ARG_PTR_TO_MEM] = &mem_types, 8272 [ARG_PTR_TO_RINGBUF_MEM] = &ringbuf_mem_types, 8273 [ARG_PTR_TO_INT] = &int_ptr_types, 8274 [ARG_PTR_TO_LONG] = &int_ptr_types, 8275 [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, 8276 [ARG_PTR_TO_FUNC] = &func_ptr_types, 8277 [ARG_PTR_TO_STACK] = &stack_ptr_types, 8278 [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, 8279 [ARG_PTR_TO_TIMER] = &timer_types, 8280 [ARG_PTR_TO_KPTR] = &kptr_types, 8281 [ARG_PTR_TO_DYNPTR] = &dynptr_types, 8282 }; 8283 8284 static int check_reg_type(struct bpf_verifier_env *env, u32 regno, 8285 enum bpf_arg_type arg_type, 8286 const u32 *arg_btf_id, 8287 struct bpf_call_arg_meta *meta) 8288 { 8289 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 8290 enum bpf_reg_type expected, type = reg->type; 8291 const struct bpf_reg_types *compatible; 8292 int i, j; 8293 8294 compatible = compatible_reg_types[base_type(arg_type)]; 8295 if (!compatible) { 8296 verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type); 8297
return -EFAULT; 8298 } 8299 8300 /* ARG_PTR_TO_MEM + RDONLY is compatible with PTR_TO_MEM and PTR_TO_MEM + RDONLY, 8301 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM and NOT with PTR_TO_MEM + RDONLY 8302 * 8303 * Same for MAYBE_NULL: 8304 * 8305 * ARG_PTR_TO_MEM + MAYBE_NULL is compatible with PTR_TO_MEM and PTR_TO_MEM + MAYBE_NULL, 8306 * but ARG_PTR_TO_MEM is compatible only with PTR_TO_MEM but NOT with PTR_TO_MEM + MAYBE_NULL 8307 * 8308 * ARG_PTR_TO_MEM is compatible with PTR_TO_MEM that is tagged with a dynptr type. 8309 * 8310 * Therefore we fold these flags depending on the arg_type before comparison. 8311 */ 8312 if (arg_type & MEM_RDONLY) 8313 type &= ~MEM_RDONLY; 8314 if (arg_type & PTR_MAYBE_NULL) 8315 type &= ~PTR_MAYBE_NULL; 8316 if (base_type(arg_type) == ARG_PTR_TO_MEM) 8317 type &= ~DYNPTR_TYPE_FLAG_MASK; 8318 8319 if (meta->func_id == BPF_FUNC_kptr_xchg && type_is_alloc(type)) { 8320 type &= ~MEM_ALLOC; 8321 type &= ~MEM_PERCPU; 8322 } 8323 8324 for (i = 0; i < ARRAY_SIZE(compatible->types); i++) { 8325 expected = compatible->types[i]; 8326 if (expected == NOT_INIT) 8327 break; 8328 8329 if (type == expected) 8330 goto found; 8331 } 8332 8333 verbose(env, "R%d type=%s expected=", regno, reg_type_str(env, reg->type)); 8334 for (j = 0; j + 1 < i; j++) 8335 verbose(env, "%s, ", reg_type_str(env, compatible->types[j])); 8336 verbose(env, "%s\n", reg_type_str(env, compatible->types[j])); 8337 return -EACCES; 8338 8339 found: 8340 if (base_type(reg->type) != PTR_TO_BTF_ID) 8341 return 0; 8342 8343 if (compatible == &mem_types) { 8344 if (!(arg_type & MEM_RDONLY)) { 8345 verbose(env, 8346 "%s() may write into memory pointed by R%d type=%s\n", 8347 func_id_name(meta->func_id), 8348 regno, reg_type_str(env, reg->type)); 8349 return -EACCES; 8350 } 8351 return 0; 8352 } 8353 8354 switch ((int)reg->type) { 8355 case PTR_TO_BTF_ID: 8356 case PTR_TO_BTF_ID | PTR_TRUSTED: 8357 case PTR_TO_BTF_ID | MEM_RCU: 8358 case PTR_TO_BTF_ID | PTR_MAYBE_NULL: 8359 case PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU: 8360 { 8361 /* For bpf_sk_release, it needs to match against first member 8362 * 'struct sock_common', hence make an exception for it. This 8363 * allows bpf_sk_release to work for multiple socket types. 
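		 * E.g. (a sketch) a referenced 'struct tcp_sock *' can be passed
		 * to bpf_sk_release(), whose argument BTF ID is 'struct sock_common':
		 * with strict matching disabled, btf_struct_ids_match() accepts the
		 * walk down to the first member.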
8364 */ 8365 bool strict_type_match = arg_type_is_release(arg_type) && 8366 meta->func_id != BPF_FUNC_sk_release; 8367 8368 if (type_may_be_null(reg->type) && 8369 (!type_may_be_null(arg_type) || arg_type_is_release(arg_type))) { 8370 verbose(env, "Possibly NULL pointer passed to helper arg%d\n", regno); 8371 return -EACCES; 8372 } 8373 8374 if (!arg_btf_id) { 8375 if (!compatible->btf_id) { 8376 verbose(env, "verifier internal error: missing arg compatible BTF ID\n"); 8377 return -EFAULT; 8378 } 8379 arg_btf_id = compatible->btf_id; 8380 } 8381 8382 if (meta->func_id == BPF_FUNC_kptr_xchg) { 8383 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) 8384 return -EACCES; 8385 } else { 8386 if (arg_btf_id == BPF_PTR_POISON) { 8387 verbose(env, "verifier internal error:"); 8388 verbose(env, "R%d has non-overwritten BPF_PTR_POISON type\n", 8389 regno); 8390 return -EACCES; 8391 } 8392 8393 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off, 8394 btf_vmlinux, *arg_btf_id, 8395 strict_type_match)) { 8396 verbose(env, "R%d is of type %s but %s is expected\n", 8397 regno, btf_type_name(reg->btf, reg->btf_id), 8398 btf_type_name(btf_vmlinux, *arg_btf_id)); 8399 return -EACCES; 8400 } 8401 } 8402 break; 8403 } 8404 case PTR_TO_BTF_ID | MEM_ALLOC: 8405 case PTR_TO_BTF_ID | MEM_PERCPU | MEM_ALLOC: 8406 if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock && 8407 meta->func_id != BPF_FUNC_kptr_xchg) { 8408 verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n"); 8409 return -EFAULT; 8410 } 8411 if (meta->func_id == BPF_FUNC_kptr_xchg) { 8412 if (map_kptr_match_type(env, meta->kptr_field, reg, regno)) 8413 return -EACCES; 8414 } 8415 break; 8416 case PTR_TO_BTF_ID | MEM_PERCPU: 8417 case PTR_TO_BTF_ID | MEM_PERCPU | MEM_RCU: 8418 case PTR_TO_BTF_ID | MEM_PERCPU | PTR_TRUSTED: 8419 /* Handled by helper specific checks */ 8420 break; 8421 default: 8422 verbose(env, "verifier internal error: invalid PTR_TO_BTF_ID register for type match\n"); 8423 return -EFAULT; 8424 } 8425 return 0; 8426 } 8427 8428 static struct btf_field * 8429 reg_find_field_offset(const struct bpf_reg_state *reg, s32 off, u32 fields) 8430 { 8431 struct btf_field *field; 8432 struct btf_record *rec; 8433 8434 rec = reg_btf_record(reg); 8435 if (!rec) 8436 return NULL; 8437 8438 field = btf_record_find(rec, off, fields); 8439 if (!field) 8440 return NULL; 8441 8442 return field; 8443 } 8444 8445 int check_func_arg_reg_off(struct bpf_verifier_env *env, 8446 const struct bpf_reg_state *reg, int regno, 8447 enum bpf_arg_type arg_type) 8448 { 8449 u32 type = reg->type; 8450 8451 /* When referenced register is passed to release function, its fixed 8452 * offset must be 0. 8453 * 8454 * We will check arg_type_is_release reg has ref_obj_id when storing 8455 * meta->release_regno. 8456 */ 8457 if (arg_type_is_release(arg_type)) { 8458 /* ARG_PTR_TO_DYNPTR with OBJ_RELEASE is a bit special, as it 8459 * may not directly point to the object being released, but to 8460 * dynptr pointing to such object, which might be at some offset 8461 * on the stack. In that case, we simply to fallback to the 8462 * default handling. 8463 */ 8464 if (arg_type_is_dynptr(arg_type) && type == PTR_TO_STACK) 8465 return 0; 8466 8467 /* Doing check_ptr_off_reg check for the offset will catch this 8468 * because fixed_off_ok is false, but checking here allows us 8469 * to give the user a better error message. 
*/ 8471 if (reg->off) { 8472 verbose(env, "R%d must have zero offset when passed to release func or trusted arg to kfunc\n", 8473 regno); 8474 return -EINVAL; 8475 } 8476 return __check_ptr_off_reg(env, reg, regno, false); 8477 } 8478 8479 switch (type) { 8480 /* Pointer types where both fixed and variable offset is explicitly allowed: */ 8481 case PTR_TO_STACK: 8482 case PTR_TO_PACKET: 8483 case PTR_TO_PACKET_META: 8484 case PTR_TO_MAP_KEY: 8485 case PTR_TO_MAP_VALUE: 8486 case PTR_TO_MEM: 8487 case PTR_TO_MEM | MEM_RDONLY: 8488 case PTR_TO_MEM | MEM_RINGBUF: 8489 case PTR_TO_BUF: 8490 case PTR_TO_BUF | MEM_RDONLY: 8491 case SCALAR_VALUE: 8492 return 0; 8493 /* All the rest must be rejected, except PTR_TO_BTF_ID which allows 8494 * fixed offset. 8495 */ 8496 case PTR_TO_BTF_ID: 8497 case PTR_TO_BTF_ID | MEM_ALLOC: 8498 case PTR_TO_BTF_ID | PTR_TRUSTED: 8499 case PTR_TO_BTF_ID | MEM_RCU: 8500 case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF: 8501 case PTR_TO_BTF_ID | MEM_ALLOC | NON_OWN_REF | MEM_RCU: 8502 /* When referenced PTR_TO_BTF_ID is passed to release function, 8503 * its fixed offset must be 0. In the other cases, fixed offset 8504 * can be non-zero. This was already checked above. So pass 8505 * fixed_off_ok as true to allow fixed offset for all other 8506 * cases. var_off always must be 0 for PTR_TO_BTF_ID, hence we 8507 * still need to do checks instead of returning. 8508 */ 8509 return __check_ptr_off_reg(env, reg, regno, true); 8510 default: 8511 return __check_ptr_off_reg(env, reg, regno, false); 8512 } 8513 } 8514 8515 static struct bpf_reg_state *get_dynptr_arg_reg(struct bpf_verifier_env *env, 8516 const struct bpf_func_proto *fn, 8517 struct bpf_reg_state *regs) 8518 { 8519 struct bpf_reg_state *state = NULL; 8520 int i; 8521 8522 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) 8523 if (arg_type_is_dynptr(fn->arg_type[i])) { 8524 if (state) { 8525 verbose(env, "verifier internal error: multiple dynptr args\n"); 8526 return NULL; 8527 } 8528 state = &regs[BPF_REG_1 + i]; 8529 } 8530 8531 if (!state) 8532 verbose(env, "verifier internal error: no dynptr arg found\n"); 8533 8534 return state; 8535 } 8536 8537 static int dynptr_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 8538 { 8539 struct bpf_func_state *state = func(env, reg); 8540 int spi; 8541 8542 if (reg->type == CONST_PTR_TO_DYNPTR) 8543 return reg->id; 8544 spi = dynptr_get_spi(env, reg); 8545 if (spi < 0) 8546 return spi; 8547 return state->stack[spi].spilled_ptr.id; 8548 } 8549 8550 static int dynptr_ref_obj_id(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 8551 { 8552 struct bpf_func_state *state = func(env, reg); 8553 int spi; 8554 8555 if (reg->type == CONST_PTR_TO_DYNPTR) 8556 return reg->ref_obj_id; 8557 spi = dynptr_get_spi(env, reg); 8558 if (spi < 0) 8559 return spi; 8560 return state->stack[spi].spilled_ptr.ref_obj_id; 8561 } 8562 8563 static enum bpf_dynptr_type dynptr_get_type(struct bpf_verifier_env *env, 8564 struct bpf_reg_state *reg) 8565 { 8566 struct bpf_func_state *state = func(env, reg); 8567 int spi; 8568 8569 if (reg->type == CONST_PTR_TO_DYNPTR) 8570 return reg->dynptr.type; 8571 8572 spi = __get_spi(reg->off); 8573 if (spi < 0) { 8574 verbose(env, "verifier internal error: invalid spi when querying dynptr type\n"); 8575 return BPF_DYNPTR_TYPE_INVALID; 8576 } 8577 8578 return state->stack[spi].spilled_ptr.dynptr.type; 8579 } 8580 8581 static int check_func_arg(struct bpf_verifier_env *env, u32 arg, 8582 struct bpf_call_arg_meta *meta, 8583 const struct bpf_func_proto *fn,
8584 int insn_idx) 8585 { 8586 u32 regno = BPF_REG_1 + arg; 8587 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; 8588 enum bpf_arg_type arg_type = fn->arg_type[arg]; 8589 enum bpf_reg_type type = reg->type; 8590 u32 *arg_btf_id = NULL; 8591 int err = 0; 8592 8593 if (arg_type == ARG_DONTCARE) 8594 return 0; 8595 8596 err = check_reg_arg(env, regno, SRC_OP); 8597 if (err) 8598 return err; 8599 8600 if (arg_type == ARG_ANYTHING) { 8601 if (is_pointer_value(env, regno)) { 8602 verbose(env, "R%d leaks addr into helper function\n", 8603 regno); 8604 return -EACCES; 8605 } 8606 return 0; 8607 } 8608 8609 if (type_is_pkt_pointer(type) && 8610 !may_access_direct_pkt_data(env, meta, BPF_READ)) { 8611 verbose(env, "helper access to the packet is not allowed\n"); 8612 return -EACCES; 8613 } 8614 8615 if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) { 8616 err = resolve_map_arg_type(env, meta, &arg_type); 8617 if (err) 8618 return err; 8619 } 8620 8621 if (register_is_null(reg) && type_may_be_null(arg_type)) 8622 /* A NULL register has a SCALAR_VALUE type, so skip 8623 * type checking. 8624 */ 8625 goto skip_type_check; 8626 8627 /* arg_btf_id and arg_size are in a union. */ 8628 if (base_type(arg_type) == ARG_PTR_TO_BTF_ID || 8629 base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK) 8630 arg_btf_id = fn->arg_btf_id[arg]; 8631 8632 err = check_reg_type(env, regno, arg_type, arg_btf_id, meta); 8633 if (err) 8634 return err; 8635 8636 err = check_func_arg_reg_off(env, reg, regno, arg_type); 8637 if (err) 8638 return err; 8639 8640 skip_type_check: 8641 if (arg_type_is_release(arg_type)) { 8642 if (arg_type_is_dynptr(arg_type)) { 8643 struct bpf_func_state *state = func(env, reg); 8644 int spi; 8645 8646 /* Only dynptr created on stack can be released, thus 8647 * the get_spi and stack state checks for spilled_ptr 8648 * should only be done before process_dynptr_func for 8649 * PTR_TO_STACK. 8650 */ 8651 if (reg->type == PTR_TO_STACK) { 8652 spi = dynptr_get_spi(env, reg); 8653 if (spi < 0 || !state->stack[spi].spilled_ptr.ref_obj_id) { 8654 verbose(env, "arg %d is an unacquired reference\n", regno); 8655 return -EINVAL; 8656 } 8657 } else { 8658 verbose(env, "cannot release unowned const bpf_dynptr\n"); 8659 return -EINVAL; 8660 } 8661 } else if (!reg->ref_obj_id && !register_is_null(reg)) { 8662 verbose(env, "R%d must be referenced when passed to release function\n", 8663 regno); 8664 return -EINVAL; 8665 } 8666 if (meta->release_regno) { 8667 verbose(env, "verifier internal error: more than one release argument\n"); 8668 return -EFAULT; 8669 } 8670 meta->release_regno = regno; 8671 } 8672 8673 if (reg->ref_obj_id) { 8674 if (meta->ref_obj_id) { 8675 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 8676 regno, reg->ref_obj_id, 8677 meta->ref_obj_id); 8678 return -EFAULT; 8679 } 8680 meta->ref_obj_id = reg->ref_obj_id; 8681 } 8682 8683 switch (base_type(arg_type)) { 8684 case ARG_CONST_MAP_PTR: 8685 /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ 8686 if (meta->map_ptr) { 8687 /* Use map_uid (which is unique id of inner map) to reject: 8688 * inner_map1 = bpf_map_lookup_elem(outer_map, key1) 8689 * inner_map2 = bpf_map_lookup_elem(outer_map, key2) 8690 * if (inner_map1 && inner_map2) { 8691 * timer = bpf_map_lookup_elem(inner_map1); 8692 * if (timer) 8693 * // mismatch would have been allowed 8694 * bpf_timer_init(timer, inner_map2); 8695 * } 8696 * 8697 * Comparing map_ptr is enough to distinguish normal and outer maps.
8698 */ 8699 if (meta->map_ptr != reg->map_ptr || 8700 meta->map_uid != reg->map_uid) { 8701 verbose(env, 8702 "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n", 8703 meta->map_uid, reg->map_uid); 8704 return -EINVAL; 8705 } 8706 } 8707 meta->map_ptr = reg->map_ptr; 8708 meta->map_uid = reg->map_uid; 8709 break; 8710 case ARG_PTR_TO_MAP_KEY: 8711 /* bpf_map_xxx(..., map_ptr, ..., key) call: 8712 * check that [key, key + map->key_size) are within 8713 * stack limits and initialized 8714 */ 8715 if (!meta->map_ptr) { 8716 /* in function declaration map_ptr must come before 8717 * map_key, so that it's verified and known before 8718 * we have to check map_key here. Otherwise it means 8719 * that kernel subsystem misconfigured verifier 8720 */ 8721 verbose(env, "invalid map_ptr to access map->key\n"); 8722 return -EACCES; 8723 } 8724 err = check_helper_mem_access(env, regno, 8725 meta->map_ptr->key_size, false, 8726 NULL); 8727 break; 8728 case ARG_PTR_TO_MAP_VALUE: 8729 if (type_may_be_null(arg_type) && register_is_null(reg)) 8730 return 0; 8731 8732 /* bpf_map_xxx(..., map_ptr, ..., value) call: 8733 * check [value, value + map->value_size) validity 8734 */ 8735 if (!meta->map_ptr) { 8736 /* kernel subsystem misconfigured verifier */ 8737 verbose(env, "invalid map_ptr to access map->value\n"); 8738 return -EACCES; 8739 } 8740 meta->raw_mode = arg_type & MEM_UNINIT; 8741 err = check_helper_mem_access(env, regno, 8742 meta->map_ptr->value_size, false, 8743 meta); 8744 break; 8745 case ARG_PTR_TO_PERCPU_BTF_ID: 8746 if (!reg->btf_id) { 8747 verbose(env, "Helper has invalid btf_id in R%d\n", regno); 8748 return -EACCES; 8749 } 8750 meta->ret_btf = reg->btf; 8751 meta->ret_btf_id = reg->btf_id; 8752 break; 8753 case ARG_PTR_TO_SPIN_LOCK: 8754 if (in_rbtree_lock_required_cb(env)) { 8755 verbose(env, "can't spin_{lock,unlock} in rbtree cb\n"); 8756 return -EACCES; 8757 } 8758 if (meta->func_id == BPF_FUNC_spin_lock) { 8759 err = process_spin_lock(env, regno, true); 8760 if (err) 8761 return err; 8762 } else if (meta->func_id == BPF_FUNC_spin_unlock) { 8763 err = process_spin_lock(env, regno, false); 8764 if (err) 8765 return err; 8766 } else { 8767 verbose(env, "verifier internal error\n"); 8768 return -EFAULT; 8769 } 8770 break; 8771 case ARG_PTR_TO_TIMER: 8772 err = process_timer_func(env, regno, meta); 8773 if (err) 8774 return err; 8775 break; 8776 case ARG_PTR_TO_FUNC: 8777 meta->subprogno = reg->subprogno; 8778 break; 8779 case ARG_PTR_TO_MEM: 8780 /* The access to this pointer is only checked when we hit the 8781 * next is_mem_size argument below. 
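 * For example (a sketch), in
 *
 *   bpf_perf_event_output(ctx, &map, flags, data, size)
 *
 * the 'data' pointer (ARG_PTR_TO_MEM) is only validated once the paired
 * 'size' argument (ARG_CONST_SIZE_OR_ZERO) is processed by
 * check_mem_size_reg().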
8782 */ 8783 meta->raw_mode = arg_type & MEM_UNINIT; 8784 if (arg_type & MEM_FIXED_SIZE) { 8785 err = check_helper_mem_access(env, regno, 8786 fn->arg_size[arg], false, 8787 meta); 8788 } 8789 break; 8790 case ARG_CONST_SIZE: 8791 err = check_mem_size_reg(env, reg, regno, false, meta); 8792 break; 8793 case ARG_CONST_SIZE_OR_ZERO: 8794 err = check_mem_size_reg(env, reg, regno, true, meta); 8795 break; 8796 case ARG_PTR_TO_DYNPTR: 8797 err = process_dynptr_func(env, regno, insn_idx, arg_type, 0); 8798 if (err) 8799 return err; 8800 break; 8801 case ARG_CONST_ALLOC_SIZE_OR_ZERO: 8802 if (!tnum_is_const(reg->var_off)) { 8803 verbose(env, "R%d is not a known constant'\n", 8804 regno); 8805 return -EACCES; 8806 } 8807 meta->mem_size = reg->var_off.value; 8808 err = mark_chain_precision(env, regno); 8809 if (err) 8810 return err; 8811 break; 8812 case ARG_PTR_TO_INT: 8813 case ARG_PTR_TO_LONG: 8814 { 8815 int size = int_ptr_type_to_size(arg_type); 8816 8817 err = check_helper_mem_access(env, regno, size, false, meta); 8818 if (err) 8819 return err; 8820 err = check_ptr_alignment(env, reg, 0, size, true); 8821 break; 8822 } 8823 case ARG_PTR_TO_CONST_STR: 8824 { 8825 struct bpf_map *map = reg->map_ptr; 8826 int map_off; 8827 u64 map_addr; 8828 char *str_ptr; 8829 8830 if (!bpf_map_is_rdonly(map)) { 8831 verbose(env, "R%d does not point to a readonly map'\n", regno); 8832 return -EACCES; 8833 } 8834 8835 if (!tnum_is_const(reg->var_off)) { 8836 verbose(env, "R%d is not a constant address'\n", regno); 8837 return -EACCES; 8838 } 8839 8840 if (!map->ops->map_direct_value_addr) { 8841 verbose(env, "no direct value access support for this map type\n"); 8842 return -EACCES; 8843 } 8844 8845 err = check_map_access(env, regno, reg->off, 8846 map->value_size - reg->off, false, 8847 ACCESS_HELPER); 8848 if (err) 8849 return err; 8850 8851 map_off = reg->off + reg->var_off.value; 8852 err = map->ops->map_direct_value_addr(map, &map_addr, map_off); 8853 if (err) { 8854 verbose(env, "direct value access on string failed\n"); 8855 return err; 8856 } 8857 8858 str_ptr = (char *)(long)(map_addr); 8859 if (!strnchr(str_ptr + map_off, map->value_size - map_off, 0)) { 8860 verbose(env, "string is not zero-terminated\n"); 8861 return -EINVAL; 8862 } 8863 break; 8864 } 8865 case ARG_PTR_TO_KPTR: 8866 err = process_kptr_func(env, regno, meta); 8867 if (err) 8868 return err; 8869 break; 8870 } 8871 8872 return err; 8873 } 8874 8875 static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id) 8876 { 8877 enum bpf_attach_type eatype = env->prog->expected_attach_type; 8878 enum bpf_prog_type type = resolve_prog_type(env->prog); 8879 8880 if (func_id != BPF_FUNC_map_update_elem) 8881 return false; 8882 8883 /* It's not possible to get access to a locked struct sock in these 8884 * contexts, so updating is safe. 
8885 */ 8886 switch (type) { 8887 case BPF_PROG_TYPE_TRACING: 8888 if (eatype == BPF_TRACE_ITER) 8889 return true; 8890 break; 8891 case BPF_PROG_TYPE_SOCKET_FILTER: 8892 case BPF_PROG_TYPE_SCHED_CLS: 8893 case BPF_PROG_TYPE_SCHED_ACT: 8894 case BPF_PROG_TYPE_XDP: 8895 case BPF_PROG_TYPE_SK_REUSEPORT: 8896 case BPF_PROG_TYPE_FLOW_DISSECTOR: 8897 case BPF_PROG_TYPE_SK_LOOKUP: 8898 return true; 8899 default: 8900 break; 8901 } 8902 8903 verbose(env, "cannot update sockmap in this context\n"); 8904 return false; 8905 } 8906 8907 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env) 8908 { 8909 return env->prog->jit_requested && 8910 bpf_jit_supports_subprog_tailcalls(); 8911 } 8912 8913 static int check_map_func_compatibility(struct bpf_verifier_env *env, 8914 struct bpf_map *map, int func_id) 8915 { 8916 if (!map) 8917 return 0; 8918 8919 /* We need a two way check, first is from map perspective ... */ 8920 switch (map->map_type) { 8921 case BPF_MAP_TYPE_PROG_ARRAY: 8922 if (func_id != BPF_FUNC_tail_call) 8923 goto error; 8924 break; 8925 case BPF_MAP_TYPE_PERF_EVENT_ARRAY: 8926 if (func_id != BPF_FUNC_perf_event_read && 8927 func_id != BPF_FUNC_perf_event_output && 8928 func_id != BPF_FUNC_skb_output && 8929 func_id != BPF_FUNC_perf_event_read_value && 8930 func_id != BPF_FUNC_xdp_output) 8931 goto error; 8932 break; 8933 case BPF_MAP_TYPE_RINGBUF: 8934 if (func_id != BPF_FUNC_ringbuf_output && 8935 func_id != BPF_FUNC_ringbuf_reserve && 8936 func_id != BPF_FUNC_ringbuf_query && 8937 func_id != BPF_FUNC_ringbuf_reserve_dynptr && 8938 func_id != BPF_FUNC_ringbuf_submit_dynptr && 8939 func_id != BPF_FUNC_ringbuf_discard_dynptr) 8940 goto error; 8941 break; 8942 case BPF_MAP_TYPE_USER_RINGBUF: 8943 if (func_id != BPF_FUNC_user_ringbuf_drain) 8944 goto error; 8945 break; 8946 case BPF_MAP_TYPE_STACK_TRACE: 8947 if (func_id != BPF_FUNC_get_stackid) 8948 goto error; 8949 break; 8950 case BPF_MAP_TYPE_CGROUP_ARRAY: 8951 if (func_id != BPF_FUNC_skb_under_cgroup && 8952 func_id != BPF_FUNC_current_task_under_cgroup) 8953 goto error; 8954 break; 8955 case BPF_MAP_TYPE_CGROUP_STORAGE: 8956 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: 8957 if (func_id != BPF_FUNC_get_local_storage) 8958 goto error; 8959 break; 8960 case BPF_MAP_TYPE_DEVMAP: 8961 case BPF_MAP_TYPE_DEVMAP_HASH: 8962 if (func_id != BPF_FUNC_redirect_map && 8963 func_id != BPF_FUNC_map_lookup_elem) 8964 goto error; 8965 break; 8966 /* Restrict bpf side of cpumap and xskmap, open when use-cases 8967 * appear. 
8968 */ 8969 case BPF_MAP_TYPE_CPUMAP: 8970 if (func_id != BPF_FUNC_redirect_map) 8971 goto error; 8972 break; 8973 case BPF_MAP_TYPE_XSKMAP: 8974 if (func_id != BPF_FUNC_redirect_map && 8975 func_id != BPF_FUNC_map_lookup_elem) 8976 goto error; 8977 break; 8978 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 8979 case BPF_MAP_TYPE_HASH_OF_MAPS: 8980 if (func_id != BPF_FUNC_map_lookup_elem) 8981 goto error; 8982 break; 8983 case BPF_MAP_TYPE_SOCKMAP: 8984 if (func_id != BPF_FUNC_sk_redirect_map && 8985 func_id != BPF_FUNC_sock_map_update && 8986 func_id != BPF_FUNC_map_delete_elem && 8987 func_id != BPF_FUNC_msg_redirect_map && 8988 func_id != BPF_FUNC_sk_select_reuseport && 8989 func_id != BPF_FUNC_map_lookup_elem && 8990 !may_update_sockmap(env, func_id)) 8991 goto error; 8992 break; 8993 case BPF_MAP_TYPE_SOCKHASH: 8994 if (func_id != BPF_FUNC_sk_redirect_hash && 8995 func_id != BPF_FUNC_sock_hash_update && 8996 func_id != BPF_FUNC_map_delete_elem && 8997 func_id != BPF_FUNC_msg_redirect_hash && 8998 func_id != BPF_FUNC_sk_select_reuseport && 8999 func_id != BPF_FUNC_map_lookup_elem && 9000 !may_update_sockmap(env, func_id)) 9001 goto error; 9002 break; 9003 case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: 9004 if (func_id != BPF_FUNC_sk_select_reuseport) 9005 goto error; 9006 break; 9007 case BPF_MAP_TYPE_QUEUE: 9008 case BPF_MAP_TYPE_STACK: 9009 if (func_id != BPF_FUNC_map_peek_elem && 9010 func_id != BPF_FUNC_map_pop_elem && 9011 func_id != BPF_FUNC_map_push_elem) 9012 goto error; 9013 break; 9014 case BPF_MAP_TYPE_SK_STORAGE: 9015 if (func_id != BPF_FUNC_sk_storage_get && 9016 func_id != BPF_FUNC_sk_storage_delete && 9017 func_id != BPF_FUNC_kptr_xchg) 9018 goto error; 9019 break; 9020 case BPF_MAP_TYPE_INODE_STORAGE: 9021 if (func_id != BPF_FUNC_inode_storage_get && 9022 func_id != BPF_FUNC_inode_storage_delete && 9023 func_id != BPF_FUNC_kptr_xchg) 9024 goto error; 9025 break; 9026 case BPF_MAP_TYPE_TASK_STORAGE: 9027 if (func_id != BPF_FUNC_task_storage_get && 9028 func_id != BPF_FUNC_task_storage_delete && 9029 func_id != BPF_FUNC_kptr_xchg) 9030 goto error; 9031 break; 9032 case BPF_MAP_TYPE_CGRP_STORAGE: 9033 if (func_id != BPF_FUNC_cgrp_storage_get && 9034 func_id != BPF_FUNC_cgrp_storage_delete && 9035 func_id != BPF_FUNC_kptr_xchg) 9036 goto error; 9037 break; 9038 case BPF_MAP_TYPE_BLOOM_FILTER: 9039 if (func_id != BPF_FUNC_map_peek_elem && 9040 func_id != BPF_FUNC_map_push_elem) 9041 goto error; 9042 break; 9043 default: 9044 break; 9045 } 9046 9047 /* ... and second from the function itself. 
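 * e.g. BPF_MAP_TYPE_PROG_ARRAY only accepts bpf_tail_call() above, and
 * bpf_tail_call() below only accepts a BPF_MAP_TYPE_PROG_ARRAY.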
*/ 9048 switch (func_id) { 9049 case BPF_FUNC_tail_call: 9050 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 9051 goto error; 9052 if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) { 9053 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 9054 return -EINVAL; 9055 } 9056 break; 9057 case BPF_FUNC_perf_event_read: 9058 case BPF_FUNC_perf_event_output: 9059 case BPF_FUNC_perf_event_read_value: 9060 case BPF_FUNC_skb_output: 9061 case BPF_FUNC_xdp_output: 9062 if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) 9063 goto error; 9064 break; 9065 case BPF_FUNC_ringbuf_output: 9066 case BPF_FUNC_ringbuf_reserve: 9067 case BPF_FUNC_ringbuf_query: 9068 case BPF_FUNC_ringbuf_reserve_dynptr: 9069 case BPF_FUNC_ringbuf_submit_dynptr: 9070 case BPF_FUNC_ringbuf_discard_dynptr: 9071 if (map->map_type != BPF_MAP_TYPE_RINGBUF) 9072 goto error; 9073 break; 9074 case BPF_FUNC_user_ringbuf_drain: 9075 if (map->map_type != BPF_MAP_TYPE_USER_RINGBUF) 9076 goto error; 9077 break; 9078 case BPF_FUNC_get_stackid: 9079 if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) 9080 goto error; 9081 break; 9082 case BPF_FUNC_current_task_under_cgroup: 9083 case BPF_FUNC_skb_under_cgroup: 9084 if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) 9085 goto error; 9086 break; 9087 case BPF_FUNC_redirect_map: 9088 if (map->map_type != BPF_MAP_TYPE_DEVMAP && 9089 map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && 9090 map->map_type != BPF_MAP_TYPE_CPUMAP && 9091 map->map_type != BPF_MAP_TYPE_XSKMAP) 9092 goto error; 9093 break; 9094 case BPF_FUNC_sk_redirect_map: 9095 case BPF_FUNC_msg_redirect_map: 9096 case BPF_FUNC_sock_map_update: 9097 if (map->map_type != BPF_MAP_TYPE_SOCKMAP) 9098 goto error; 9099 break; 9100 case BPF_FUNC_sk_redirect_hash: 9101 case BPF_FUNC_msg_redirect_hash: 9102 case BPF_FUNC_sock_hash_update: 9103 if (map->map_type != BPF_MAP_TYPE_SOCKHASH) 9104 goto error; 9105 break; 9106 case BPF_FUNC_get_local_storage: 9107 if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && 9108 map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 9109 goto error; 9110 break; 9111 case BPF_FUNC_sk_select_reuseport: 9112 if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY && 9113 map->map_type != BPF_MAP_TYPE_SOCKMAP && 9114 map->map_type != BPF_MAP_TYPE_SOCKHASH) 9115 goto error; 9116 break; 9117 case BPF_FUNC_map_pop_elem: 9118 if (map->map_type != BPF_MAP_TYPE_QUEUE && 9119 map->map_type != BPF_MAP_TYPE_STACK) 9120 goto error; 9121 break; 9122 case BPF_FUNC_map_peek_elem: 9123 case BPF_FUNC_map_push_elem: 9124 if (map->map_type != BPF_MAP_TYPE_QUEUE && 9125 map->map_type != BPF_MAP_TYPE_STACK && 9126 map->map_type != BPF_MAP_TYPE_BLOOM_FILTER) 9127 goto error; 9128 break; 9129 case BPF_FUNC_map_lookup_percpu_elem: 9130 if (map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && 9131 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && 9132 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH) 9133 goto error; 9134 break; 9135 case BPF_FUNC_sk_storage_get: 9136 case BPF_FUNC_sk_storage_delete: 9137 if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) 9138 goto error; 9139 break; 9140 case BPF_FUNC_inode_storage_get: 9141 case BPF_FUNC_inode_storage_delete: 9142 if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE) 9143 goto error; 9144 break; 9145 case BPF_FUNC_task_storage_get: 9146 case BPF_FUNC_task_storage_delete: 9147 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) 9148 goto error; 9149 break; 9150 case BPF_FUNC_cgrp_storage_get: 9151 case BPF_FUNC_cgrp_storage_delete: 9152 if (map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) 
9153 goto error; 9154 break; 9155 default: 9156 break; 9157 } 9158 9159 return 0; 9160 error: 9161 verbose(env, "cannot pass map_type %d into func %s#%d\n", 9162 map->map_type, func_id_name(func_id), func_id); 9163 return -EINVAL; 9164 } 9165 9166 static bool check_raw_mode_ok(const struct bpf_func_proto *fn) 9167 { 9168 int count = 0; 9169 9170 if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) 9171 count++; 9172 if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) 9173 count++; 9174 if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) 9175 count++; 9176 if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) 9177 count++; 9178 if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) 9179 count++; 9180 9181 /* We only support one arg being in raw mode at the moment, 9182 * which is sufficient for the helper functions we have 9183 * right now. 9184 */ 9185 return count <= 1; 9186 } 9187 9188 static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg) 9189 { 9190 bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE; 9191 bool has_size = fn->arg_size[arg] != 0; 9192 bool is_next_size = false; 9193 9194 if (arg + 1 < ARRAY_SIZE(fn->arg_type)) 9195 is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]); 9196 9197 if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM) 9198 return is_next_size; 9199 9200 return has_size == is_next_size || is_next_size == is_fixed; 9201 } 9202 9203 static bool check_arg_pair_ok(const struct bpf_func_proto *fn) 9204 { 9205 /* bpf_xxx(..., buf, len) call will access 'len' 9206 * bytes from memory 'buf'. Both arg types need 9207 * to be paired, so make sure there's no buggy 9208 * helper function specification. 9209 */ 9210 if (arg_type_is_mem_size(fn->arg1_type) || 9211 check_args_pair_invalid(fn, 0) || 9212 check_args_pair_invalid(fn, 1) || 9213 check_args_pair_invalid(fn, 2) || 9214 check_args_pair_invalid(fn, 3) || 9215 check_args_pair_invalid(fn, 4)) 9216 return false; 9217 9218 return true; 9219 } 9220 9221 static bool check_btf_id_ok(const struct bpf_func_proto *fn) 9222 { 9223 int i; 9224 9225 for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) { 9226 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID) 9227 return !!fn->arg_btf_id[i]; 9228 if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK) 9229 return fn->arg_btf_id[i] == BPF_PTR_POISON; 9230 if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] && 9231 /* arg_btf_id and arg_size are in a union. */ 9232 (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM || 9233 !(fn->arg_type[i] & MEM_FIXED_SIZE))) 9234 return false; 9235 } 9236 9237 return true; 9238 } 9239 9240 static int check_func_proto(const struct bpf_func_proto *fn, int func_id) 9241 { 9242 return check_raw_mode_ok(fn) && 9243 check_arg_pair_ok(fn) && 9244 check_btf_id_ok(fn) ? 0 : -EINVAL; 9245 } 9246 9247 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 9248 * are now invalid, so turn them into unknown SCALAR_VALUE. 9249 * 9250 * This also applies to dynptr slices belonging to skb and xdp dynptrs, 9251 * since these slices point to packet data. 
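 * e.g. a PTR_TO_PACKET that was saved before a bpf_skb_pull_data() call
 * must not be dereferenced afterwards, since that helper may reallocate
 * the packet data.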
9252 */ 9253 static void clear_all_pkt_pointers(struct bpf_verifier_env *env) 9254 { 9255 struct bpf_func_state *state; 9256 struct bpf_reg_state *reg; 9257 9258 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ 9259 if (reg_is_pkt_pointer_any(reg) || reg_is_dynptr_slice_pkt(reg)) 9260 mark_reg_invalid(env, reg); 9261 })); 9262 } 9263 9264 enum { 9265 AT_PKT_END = -1, 9266 BEYOND_PKT_END = -2, 9267 }; 9268 9269 static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open) 9270 { 9271 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 9272 struct bpf_reg_state *reg = &state->regs[regn]; 9273 9274 if (reg->type != PTR_TO_PACKET) 9275 /* PTR_TO_PACKET_META is not supported yet */ 9276 return; 9277 9278 /* The 'reg' is pkt > pkt_end or pkt >= pkt_end. 9279 * How far beyond pkt_end it goes is unknown. 9280 * if (!range_open) it's the case of pkt >= pkt_end 9281 * if (range_open) it's the case of pkt > pkt_end 9282 * hence this pointer is at least 1 byte bigger than pkt_end 9283 */ 9284 if (range_open) 9285 reg->range = BEYOND_PKT_END; 9286 else 9287 reg->range = AT_PKT_END; 9288 } 9289 9290 /* The pointer with the specified id has released its reference to kernel 9291 * resources. Identify all copies of the same pointer and clear the reference. 9292 */ 9293 static int release_reference(struct bpf_verifier_env *env, 9294 int ref_obj_id) 9295 { 9296 struct bpf_func_state *state; 9297 struct bpf_reg_state *reg; 9298 int err; 9299 9300 err = release_reference_state(cur_func(env), ref_obj_id); 9301 if (err) 9302 return err; 9303 9304 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ 9305 if (reg->ref_obj_id == ref_obj_id) 9306 mark_reg_invalid(env, reg); 9307 })); 9308 9309 return 0; 9310 } 9311 9312 static void invalidate_non_owning_refs(struct bpf_verifier_env *env) 9313 { 9314 struct bpf_func_state *unused; 9315 struct bpf_reg_state *reg; 9316 9317 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ 9318 if (type_is_non_owning_ref(reg->type)) 9319 mark_reg_invalid(env, reg); 9320 })); 9321 } 9322 9323 static void clear_caller_saved_regs(struct bpf_verifier_env *env, 9324 struct bpf_reg_state *regs) 9325 { 9326 int i; 9327 9328 /* after the call registers r0 - r5 were scratched */ 9329 for (i = 0; i < CALLER_SAVED_REGS; i++) { 9330 mark_reg_not_init(env, regs, caller_saved[i]); 9331 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 9332 } 9333 } 9334 9335 typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env, 9336 struct bpf_func_state *caller, 9337 struct bpf_func_state *callee, 9338 int insn_idx); 9339 9340 static int set_callee_state(struct bpf_verifier_env *env, 9341 struct bpf_func_state *caller, 9342 struct bpf_func_state *callee, int insn_idx); 9343 9344 static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 9345 int *insn_idx, int subprog, 9346 set_callee_state_fn set_callee_state_cb) 9347 { 9348 struct bpf_verifier_state *state = env->cur_state; 9349 struct bpf_func_state *caller, *callee; 9350 int err; 9351 9352 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 9353 verbose(env, "the call stack of %d frames is too deep\n", 9354 state->curframe + 2); 9355 return -E2BIG; 9356 } 9357 9358 caller = state->frame[state->curframe]; 9359 if (state->frame[state->curframe + 1]) { 9360 verbose(env, "verifier bug. 
Frame %d already allocated\n", 9361 state->curframe + 1); 9362 return -EFAULT; 9363 } 9364 9365 err = btf_check_subprog_call(env, subprog, caller->regs); 9366 if (err == -EFAULT) 9367 return err; 9368 if (subprog_is_global(env, subprog)) { 9369 if (err) { 9370 verbose(env, "Caller passes invalid args into func#%d\n", 9371 subprog); 9372 return err; 9373 } else { 9374 if (env->log.level & BPF_LOG_LEVEL) 9375 verbose(env, 9376 "Func#%d is global and valid. Skipping.\n", 9377 subprog); 9378 clear_caller_saved_regs(env, caller->regs); 9379 9380 /* All global functions return a 64-bit SCALAR_VALUE */ 9381 mark_reg_unknown(env, caller->regs, BPF_REG_0); 9382 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 9383 9384 /* continue with next insn after call */ 9385 return 0; 9386 } 9387 } 9388 9389 /* set_callee_state is used for direct subprog calls, but we are 9390 * interested in validating only BPF helpers that can call subprogs as 9391 * callbacks 9392 */ 9393 if (set_callee_state_cb != set_callee_state) { 9394 env->subprog_info[subprog].is_cb = true; 9395 if (bpf_pseudo_kfunc_call(insn) && 9396 !is_callback_calling_kfunc(insn->imm)) { 9397 verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", 9398 func_id_name(insn->imm), insn->imm); 9399 return -EFAULT; 9400 } else if (!bpf_pseudo_kfunc_call(insn) && 9401 !is_callback_calling_function(insn->imm)) { /* helper */ 9402 verbose(env, "verifier bug: helper %s#%d not marked as callback-calling\n", 9403 func_id_name(insn->imm), insn->imm); 9404 return -EFAULT; 9405 } 9406 } 9407 9408 if (insn->code == (BPF_JMP | BPF_CALL) && 9409 insn->src_reg == 0 && 9410 insn->imm == BPF_FUNC_timer_set_callback) { 9411 struct bpf_verifier_state *async_cb; 9412 9413 /* there is no real recursion here. timer callbacks are async */ 9414 env->subprog_info[subprog].is_async_cb = true; 9415 async_cb = push_async_cb(env, env->subprog_info[subprog].start, 9416 *insn_idx, subprog); 9417 if (!async_cb) 9418 return -EFAULT; 9419 callee = async_cb->frame[0]; 9420 callee->async_entry_cnt = caller->async_entry_cnt + 1; 9421 9422 /* Convert bpf_timer_set_callback() args into timer callback args */ 9423 err = set_callee_state_cb(env, caller, callee, *insn_idx); 9424 if (err) 9425 return err; 9426 9427 clear_caller_saved_regs(env, caller->regs); 9428 mark_reg_unknown(env, caller->regs, BPF_REG_0); 9429 caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 9430 /* continue with next insn after call */ 9431 return 0; 9432 } 9433 9434 callee = kzalloc(sizeof(*callee), GFP_KERNEL); 9435 if (!callee) 9436 return -ENOMEM; 9437 state->frame[state->curframe + 1] = callee; 9438 9439 /* callee cannot access r0, r6 - r9 for reading and has to write 9440 * into its own stack before reading from it. 
9441 * callee can read/write into caller's stack 9442 */ 9443 init_func_state(env, callee, 9444 /* remember the callsite, it will be used by bpf_exit */ 9445 *insn_idx /* callsite */, 9446 state->curframe + 1 /* frameno within this callchain */, 9447 subprog /* subprog number within this prog */); 9448 9449 /* Transfer references to the callee */ 9450 err = copy_reference_state(callee, caller); 9451 if (err) 9452 goto err_out; 9453 9454 err = set_callee_state_cb(env, caller, callee, *insn_idx); 9455 if (err) 9456 goto err_out; 9457 9458 clear_caller_saved_regs(env, caller->regs); 9459 9460 /* only increment it after check_reg_arg() finished */ 9461 state->curframe++; 9462 9463 /* and go analyze first insn of the callee */ 9464 *insn_idx = env->subprog_info[subprog].start - 1; 9465 9466 if (env->log.level & BPF_LOG_LEVEL) { 9467 verbose(env, "caller:\n"); 9468 print_verifier_state(env, caller, true); 9469 verbose(env, "callee:\n"); 9470 print_verifier_state(env, callee, true); 9471 } 9472 return 0; 9473 9474 err_out: 9475 free_func_state(callee); 9476 state->frame[state->curframe + 1] = NULL; 9477 return err; 9478 } 9479 9480 int map_set_for_each_callback_args(struct bpf_verifier_env *env, 9481 struct bpf_func_state *caller, 9482 struct bpf_func_state *callee) 9483 { 9484 /* bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, 9485 * void *callback_ctx, u64 flags); 9486 * callback_fn(struct bpf_map *map, void *key, void *value, 9487 * void *callback_ctx); 9488 */ 9489 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; 9490 9491 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; 9492 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 9493 callee->regs[BPF_REG_2].map_ptr = caller->regs[BPF_REG_1].map_ptr; 9494 9495 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; 9496 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); 9497 callee->regs[BPF_REG_3].map_ptr = caller->regs[BPF_REG_1].map_ptr; 9498 9499 /* pointer to stack or null */ 9500 callee->regs[BPF_REG_4] = caller->regs[BPF_REG_3]; 9501 9502 /* unused */ 9503 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9504 return 0; 9505 } 9506 9507 static int set_callee_state(struct bpf_verifier_env *env, 9508 struct bpf_func_state *caller, 9509 struct bpf_func_state *callee, int insn_idx) 9510 { 9511 int i; 9512 9513 /* copy r1 - r5 args that callee can access. The copy includes parent 9514 * pointers, which connects us up to the liveness chain 9515 */ 9516 for (i = BPF_REG_1; i <= BPF_REG_5; i++) 9517 callee->regs[i] = caller->regs[i]; 9518 return 0; 9519 } 9520 9521 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 9522 int *insn_idx) 9523 { 9524 int subprog, target_insn; 9525 9526 target_insn = *insn_idx + insn->imm + 1; 9527 subprog = find_subprog(env, target_insn); 9528 if (subprog < 0) { 9529 verbose(env, "verifier bug. 
No program starts at insn %d\n", 9530 target_insn); 9531 return -EFAULT; 9532 } 9533 9534 return __check_func_call(env, insn, insn_idx, subprog, set_callee_state); 9535 } 9536 9537 static int set_map_elem_callback_state(struct bpf_verifier_env *env, 9538 struct bpf_func_state *caller, 9539 struct bpf_func_state *callee, 9540 int insn_idx) 9541 { 9542 struct bpf_insn_aux_data *insn_aux = &env->insn_aux_data[insn_idx]; 9543 struct bpf_map *map; 9544 int err; 9545 9546 if (bpf_map_ptr_poisoned(insn_aux)) { 9547 verbose(env, "tail_call abusing map_ptr\n"); 9548 return -EINVAL; 9549 } 9550 9551 map = BPF_MAP_PTR(insn_aux->map_ptr_state); 9552 if (!map->ops->map_set_for_each_callback_args || 9553 !map->ops->map_for_each_callback) { 9554 verbose(env, "callback function not allowed for map\n"); 9555 return -ENOTSUPP; 9556 } 9557 9558 err = map->ops->map_set_for_each_callback_args(env, caller, callee); 9559 if (err) 9560 return err; 9561 9562 callee->in_callback_fn = true; 9563 callee->callback_ret_range = tnum_range(0, 1); 9564 return 0; 9565 } 9566 9567 static int set_loop_callback_state(struct bpf_verifier_env *env, 9568 struct bpf_func_state *caller, 9569 struct bpf_func_state *callee, 9570 int insn_idx) 9571 { 9572 /* bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, 9573 * u64 flags); 9574 * callback_fn(u32 index, void *callback_ctx); 9575 */ 9576 callee->regs[BPF_REG_1].type = SCALAR_VALUE; 9577 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; 9578 9579 /* unused */ 9580 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); 9581 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9582 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9583 9584 callee->in_callback_fn = true; 9585 callee->callback_ret_range = tnum_range(0, 1); 9586 return 0; 9587 } 9588 9589 static int set_timer_callback_state(struct bpf_verifier_env *env, 9590 struct bpf_func_state *caller, 9591 struct bpf_func_state *callee, 9592 int insn_idx) 9593 { 9594 struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; 9595 9596 /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn); 9597 * callback_fn(struct bpf_map *map, void *key, void *value); 9598 */ 9599 callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; 9600 __mark_reg_known_zero(&callee->regs[BPF_REG_1]); 9601 callee->regs[BPF_REG_1].map_ptr = map_ptr; 9602 9603 callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; 9604 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 9605 callee->regs[BPF_REG_2].map_ptr = map_ptr; 9606 9607 callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; 9608 __mark_reg_known_zero(&callee->regs[BPF_REG_3]); 9609 callee->regs[BPF_REG_3].map_ptr = map_ptr; 9610 9611 /* unused */ 9612 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9613 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9614 callee->in_async_callback_fn = true; 9615 callee->callback_ret_range = tnum_range(0, 1); 9616 return 0; 9617 } 9618 9619 static int set_find_vma_callback_state(struct bpf_verifier_env *env, 9620 struct bpf_func_state *caller, 9621 struct bpf_func_state *callee, 9622 int insn_idx) 9623 { 9624 /* bpf_find_vma(struct task_struct *task, u64 addr, 9625 * void *callback_fn, void *callback_ctx, u64 flags) 9626 * (callback_fn)(struct task_struct *task, 9627 * struct vm_area_struct *vma, void *callback_ctx); 9628 */ 9629 callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1]; 9630 9631 callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID; 9632 __mark_reg_known_zero(&callee->regs[BPF_REG_2]); 9633 callee->regs[BPF_REG_2].btf = btf_vmlinux; 9634 
callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA], 9635 9636 /* pointer to stack or null */ 9637 callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4]; 9638 9639 /* unused */ 9640 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9641 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9642 callee->in_callback_fn = true; 9643 callee->callback_ret_range = tnum_range(0, 1); 9644 return 0; 9645 } 9646 9647 static int set_user_ringbuf_callback_state(struct bpf_verifier_env *env, 9648 struct bpf_func_state *caller, 9649 struct bpf_func_state *callee, 9650 int insn_idx) 9651 { 9652 /* bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void 9653 * callback_ctx, u64 flags); 9654 * callback_fn(const struct bpf_dynptr_t* dynptr, void *callback_ctx); 9655 */ 9656 __mark_reg_not_init(env, &callee->regs[BPF_REG_0]); 9657 mark_dynptr_cb_reg(env, &callee->regs[BPF_REG_1], BPF_DYNPTR_TYPE_LOCAL); 9658 callee->regs[BPF_REG_2] = caller->regs[BPF_REG_3]; 9659 9660 /* unused */ 9661 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); 9662 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9663 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9664 9665 callee->in_callback_fn = true; 9666 callee->callback_ret_range = tnum_range(0, 1); 9667 return 0; 9668 } 9669 9670 static int set_rbtree_add_callback_state(struct bpf_verifier_env *env, 9671 struct bpf_func_state *caller, 9672 struct bpf_func_state *callee, 9673 int insn_idx) 9674 { 9675 /* void bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, 9676 * bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)); 9677 * 9678 * 'struct bpf_rb_node *node' arg to bpf_rbtree_add_impl is the same PTR_TO_BTF_ID w/ offset 9679 * that 'less' callback args will be receiving. However, 'node' arg was release_reference'd 9680 * by this point, so look at 'root' 9681 */ 9682 struct btf_field *field; 9683 9684 field = reg_find_field_offset(&caller->regs[BPF_REG_1], caller->regs[BPF_REG_1].off, 9685 BPF_RB_ROOT); 9686 if (!field || !field->graph_root.value_btf_id) 9687 return -EFAULT; 9688 9689 mark_reg_graph_node(callee->regs, BPF_REG_1, &field->graph_root); 9690 ref_set_non_owning(env, &callee->regs[BPF_REG_1]); 9691 mark_reg_graph_node(callee->regs, BPF_REG_2, &field->graph_root); 9692 ref_set_non_owning(env, &callee->regs[BPF_REG_2]); 9693 9694 __mark_reg_not_init(env, &callee->regs[BPF_REG_3]); 9695 __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); 9696 __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); 9697 callee->in_callback_fn = true; 9698 callee->callback_ret_range = tnum_range(0, 1); 9699 return 0; 9700 } 9701 9702 static bool is_rbtree_lock_required_kfunc(u32 btf_id); 9703 9704 /* Are we currently verifying the callback for a rbtree helper that must 9705 * be called with lock held? 
If so, no need to complain about unreleased 9706 * lock 9707 */ 9708 static bool in_rbtree_lock_required_cb(struct bpf_verifier_env *env) 9709 { 9710 struct bpf_verifier_state *state = env->cur_state; 9711 struct bpf_insn *insn = env->prog->insnsi; 9712 struct bpf_func_state *callee; 9713 int kfunc_btf_id; 9714 9715 if (!state->curframe) 9716 return false; 9717 9718 callee = state->frame[state->curframe]; 9719 9720 if (!callee->in_callback_fn) 9721 return false; 9722 9723 kfunc_btf_id = insn[callee->callsite].imm; 9724 return is_rbtree_lock_required_kfunc(kfunc_btf_id); 9725 } 9726 9727 static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) 9728 { 9729 struct bpf_verifier_state *state = env->cur_state; 9730 struct bpf_func_state *caller, *callee; 9731 struct bpf_reg_state *r0; 9732 int err; 9733 9734 callee = state->frame[state->curframe]; 9735 r0 = &callee->regs[BPF_REG_0]; 9736 if (r0->type == PTR_TO_STACK) { 9737 /* technically it's ok to return caller's stack pointer 9738 * (or caller's caller's pointer) back to the caller, 9739 * since these pointers are valid. Only current stack 9740 * pointer will be invalid as soon as function exits, 9741 * but let's be conservative 9742 */ 9743 verbose(env, "cannot return stack pointer to the caller\n"); 9744 return -EINVAL; 9745 } 9746 9747 caller = state->frame[state->curframe - 1]; 9748 if (callee->in_callback_fn) { 9749 /* enforce R0 return value range [0, 1]. */ 9750 struct tnum range = callee->callback_ret_range; 9751 9752 if (r0->type != SCALAR_VALUE) { 9753 verbose(env, "R0 not a scalar value\n"); 9754 return -EACCES; 9755 } 9756 if (!tnum_in(range, r0->var_off)) { 9757 verbose_invalid_scalar(env, r0, &range, "callback return", "R0"); 9758 return -EINVAL; 9759 } 9760 } else { 9761 /* return to the caller whatever r0 had in the callee */ 9762 caller->regs[BPF_REG_0] = *r0; 9763 } 9764 9765 /* callback_fn frame should have released its own additions to parent's 9766 * reference state at this point, or check_reference_leak would 9767 * complain, hence it must be the same as the caller. There is no need 9768 * to copy it back. 9769 */ 9770 if (!callee->in_callback_fn) { 9771 /* Transfer references to the caller */ 9772 err = copy_reference_state(caller, callee); 9773 if (err) 9774 return err; 9775 } 9776 9777 *insn_idx = callee->callsite + 1; 9778 if (env->log.level & BPF_LOG_LEVEL) { 9779 verbose(env, "returning from callee:\n"); 9780 print_verifier_state(env, callee, true); 9781 verbose(env, "to caller at %d:\n", *insn_idx); 9782 print_verifier_state(env, caller, true); 9783 } 9784 /* clear everything in the callee. In case of exceptional exits using 9785 * bpf_throw, this will be done by copy_verifier_state for extra frames. 
*/
9786 free_func_state(callee);
9787 state->frame[state->curframe--] = NULL;
9788 return 0;
9789 }
9790
9791 static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
9792 int func_id,
9793 struct bpf_call_arg_meta *meta)
9794 {
9795 struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
9796
9797 if (ret_type != RET_INTEGER)
9798 return;
9799
9800 switch (func_id) {
9801 case BPF_FUNC_get_stack:
9802 case BPF_FUNC_get_task_stack:
9803 case BPF_FUNC_probe_read_str:
9804 case BPF_FUNC_probe_read_kernel_str:
9805 case BPF_FUNC_probe_read_user_str:
9806 ret_reg->smax_value = meta->msize_max_value;
9807 ret_reg->s32_max_value = meta->msize_max_value;
9808 ret_reg->smin_value = -MAX_ERRNO;
9809 ret_reg->s32_min_value = -MAX_ERRNO;
9810 reg_bounds_sync(ret_reg);
9811 break;
9812 case BPF_FUNC_get_smp_processor_id:
9813 ret_reg->umax_value = nr_cpu_ids - 1;
9814 ret_reg->u32_max_value = nr_cpu_ids - 1;
9815 ret_reg->smax_value = nr_cpu_ids - 1;
9816 ret_reg->s32_max_value = nr_cpu_ids - 1;
9817 ret_reg->umin_value = 0;
9818 ret_reg->u32_min_value = 0;
9819 ret_reg->smin_value = 0;
9820 ret_reg->s32_min_value = 0;
9821 reg_bounds_sync(ret_reg);
9822 break;
9823 }
9824 }
9825
9826 static int
9827 record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
9828 int func_id, int insn_idx)
9829 {
9830 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
9831 struct bpf_map *map = meta->map_ptr;
9832
9833 if (func_id != BPF_FUNC_tail_call &&
9834 func_id != BPF_FUNC_map_lookup_elem &&
9835 func_id != BPF_FUNC_map_update_elem &&
9836 func_id != BPF_FUNC_map_delete_elem &&
9837 func_id != BPF_FUNC_map_push_elem &&
9838 func_id != BPF_FUNC_map_pop_elem &&
9839 func_id != BPF_FUNC_map_peek_elem &&
9840 func_id != BPF_FUNC_for_each_map_elem &&
9841 func_id != BPF_FUNC_redirect_map &&
9842 func_id != BPF_FUNC_map_lookup_percpu_elem)
9843 return 0;
9844
9845 if (map == NULL) {
9846 verbose(env, "kernel subsystem misconfigured verifier\n");
9847 return -EINVAL;
9848 }
9849
9850 /* In case of read-only, some additional restrictions
9851 * need to be applied in order to prevent altering the
9852 * state of the map from program side.
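 * e.g. bpf_map_lookup_elem() on a BPF_F_RDONLY_PROG map stays allowed,
 * while bpf_map_update_elem()/bpf_map_delete_elem() on it are rejected
 * below.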
9853 */
9854 if ((map->map_flags & BPF_F_RDONLY_PROG) &&
9855 (func_id == BPF_FUNC_map_delete_elem ||
9856 func_id == BPF_FUNC_map_update_elem ||
9857 func_id == BPF_FUNC_map_push_elem ||
9858 func_id == BPF_FUNC_map_pop_elem)) {
9859 verbose(env, "write into map forbidden\n");
9860 return -EACCES;
9861 }
9862
9863 if (!BPF_MAP_PTR(aux->map_ptr_state))
9864 bpf_map_ptr_store(aux, meta->map_ptr,
9865 !meta->map_ptr->bypass_spec_v1);
9866 else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr)
9867 bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
9868 !meta->map_ptr->bypass_spec_v1);
9869 return 0;
9870 }
9871
9872 static int
9873 record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
9874 int func_id, int insn_idx)
9875 {
9876 struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
9877 struct bpf_reg_state *regs = cur_regs(env), *reg;
9878 struct bpf_map *map = meta->map_ptr;
9879 u64 val, max;
9880 int err;
9881
9882 if (func_id != BPF_FUNC_tail_call)
9883 return 0;
9884 if (!map || map->map_type != BPF_MAP_TYPE_PROG_ARRAY) {
9885 verbose(env, "kernel subsystem misconfigured verifier\n");
9886 return -EINVAL;
9887 }
9888
9889 reg = &regs[BPF_REG_3];
9890 val = reg->var_off.value;
9891 max = map->max_entries;
9892
9893 if (!(register_is_const(reg) && val < max)) {
9894 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
9895 return 0;
9896 }
9897
9898 err = mark_chain_precision(env, BPF_REG_3);
9899 if (err)
9900 return err;
9901 if (bpf_map_key_unseen(aux))
9902 bpf_map_key_store(aux, val);
9903 else if (!bpf_map_key_poisoned(aux) &&
9904 bpf_map_key_immediate(aux) != val)
9905 bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
9906 return 0;
9907 }
9908
9909 static int check_reference_leak(struct bpf_verifier_env *env, bool exception_exit)
9910 {
9911 struct bpf_func_state *state = cur_func(env);
9912 bool refs_lingering = false;
9913 int i;
9914
9915 if (!exception_exit && state->frameno && !state->in_callback_fn)
9916 return 0;
9917
9918 for (i = 0; i < state->acquired_refs; i++) {
9919 if (!exception_exit && state->in_callback_fn && state->refs[i].callback_ref != state->frameno)
9920 continue;
9921 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
9922 state->refs[i].id, state->refs[i].insn_idx);
9923 refs_lingering = true;
9924 }
9925 return refs_lingering ? -EINVAL : 0;
9926 }
9927
9928 static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
9929 struct bpf_reg_state *regs)
9930 {
9931 struct bpf_reg_state *fmt_reg = &regs[BPF_REG_3];
9932 struct bpf_reg_state *data_len_reg = &regs[BPF_REG_5];
9933 struct bpf_map *fmt_map = fmt_reg->map_ptr;
9934 struct bpf_bprintf_data data = {};
9935 int err, fmt_map_off, num_args;
9936 u64 fmt_addr;
9937 char *fmt;
9938
9939 /* data must be an array of u64 */
9940 if (data_len_reg->var_off.value % 8)
9941 return -EINVAL;
9942 num_args = data_len_reg->var_off.value / 8;
9943
9944 /* fmt being ARG_PTR_TO_CONST_STR guarantees that var_off is const
9945 * and map_direct_value_addr is set.
9946 */
9947 fmt_map_off = fmt_reg->off + fmt_reg->var_off.value;
9948 err = fmt_map->ops->map_direct_value_addr(fmt_map, &fmt_addr,
9949 fmt_map_off);
9950 if (err) {
9951 verbose(env, "verifier bug\n");
9952 return -EFAULT;
9953 }
9954 fmt = (char *)(long)fmt_addr + fmt_map_off;
9955
9956 /* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we
9957 * can focus on validating the format specifiers.
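 * (zero termination was already enforced by the ARG_PTR_TO_CONST_STR
 * handling in check_func_arg())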
9958 */ 9959 err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data); 9960 if (err < 0) 9961 verbose(env, "Invalid format string\n"); 9962 9963 return err; 9964 } 9965 9966 static int check_get_func_ip(struct bpf_verifier_env *env) 9967 { 9968 enum bpf_prog_type type = resolve_prog_type(env->prog); 9969 int func_id = BPF_FUNC_get_func_ip; 9970 9971 if (type == BPF_PROG_TYPE_TRACING) { 9972 if (!bpf_prog_has_trampoline(env->prog)) { 9973 verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n", 9974 func_id_name(func_id), func_id); 9975 return -ENOTSUPP; 9976 } 9977 return 0; 9978 } else if (type == BPF_PROG_TYPE_KPROBE) { 9979 return 0; 9980 } 9981 9982 verbose(env, "func %s#%d not supported for program type %d\n", 9983 func_id_name(func_id), func_id, type); 9984 return -ENOTSUPP; 9985 } 9986 9987 static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) 9988 { 9989 return &env->insn_aux_data[env->insn_idx]; 9990 } 9991 9992 static bool loop_flag_is_zero(struct bpf_verifier_env *env) 9993 { 9994 struct bpf_reg_state *regs = cur_regs(env); 9995 struct bpf_reg_state *reg = ®s[BPF_REG_4]; 9996 bool reg_is_null = register_is_null(reg); 9997 9998 if (reg_is_null) 9999 mark_chain_precision(env, BPF_REG_4); 10000 10001 return reg_is_null; 10002 } 10003 10004 static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno) 10005 { 10006 struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state; 10007 10008 if (!state->initialized) { 10009 state->initialized = 1; 10010 state->fit_for_inline = loop_flag_is_zero(env); 10011 state->callback_subprogno = subprogno; 10012 return; 10013 } 10014 10015 if (!state->fit_for_inline) 10016 return; 10017 10018 state->fit_for_inline = (loop_flag_is_zero(env) && 10019 state->callback_subprogno == subprogno); 10020 } 10021 10022 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 10023 int *insn_idx_p) 10024 { 10025 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 10026 bool returns_cpu_specific_alloc_ptr = false; 10027 const struct bpf_func_proto *fn = NULL; 10028 enum bpf_return_type ret_type; 10029 enum bpf_type_flag ret_flag; 10030 struct bpf_reg_state *regs; 10031 struct bpf_call_arg_meta meta; 10032 int insn_idx = *insn_idx_p; 10033 bool changes_data; 10034 int i, err, func_id; 10035 10036 /* find function prototype */ 10037 func_id = insn->imm; 10038 if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { 10039 verbose(env, "invalid func %s#%d\n", func_id_name(func_id), 10040 func_id); 10041 return -EINVAL; 10042 } 10043 10044 if (env->ops->get_func_proto) 10045 fn = env->ops->get_func_proto(func_id, env->prog); 10046 if (!fn) { 10047 verbose(env, "unknown func %s#%d\n", func_id_name(func_id), 10048 func_id); 10049 return -EINVAL; 10050 } 10051 10052 /* eBPF programs must be GPL compatible to use GPL-ed functions */ 10053 if (!env->prog->gpl_compatible && fn->gpl_only) { 10054 verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); 10055 return -EINVAL; 10056 } 10057 10058 if (fn->allowed && !fn->allowed(env->prog)) { 10059 verbose(env, "helper call is not allowed in probe\n"); 10060 return -EINVAL; 10061 } 10062 10063 if (!env->prog->aux->sleepable && fn->might_sleep) { 10064 verbose(env, "helper call might sleep in a non-sleepable prog\n"); 10065 return -EINVAL; 10066 } 10067 10068 /* With LD_ABS/IND some JITs save/restore skb from r1. 
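 * Helpers that may change packet data are therefore expected to take the
 * ctx (skb) as their first argument; the check below flags a helper
 * prototype that breaks this convention.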
*/
10069 changes_data = bpf_helper_changes_pkt_data(fn->func);
10070 if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
10071 verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
10072 func_id_name(func_id), func_id);
10073 return -EINVAL;
10074 }
10075
10076 memset(&meta, 0, sizeof(meta));
10077 meta.pkt_access = fn->pkt_access;
10078
10079 err = check_func_proto(fn, func_id);
10080 if (err) {
10081 verbose(env, "kernel subsystem misconfigured func %s#%d\n",
10082 func_id_name(func_id), func_id);
10083 return err;
10084 }
10085
10086 if (env->cur_state->active_rcu_lock) {
10087 if (fn->might_sleep) {
10088 verbose(env, "sleepable helper %s#%d in rcu_read_lock region\n",
10089 func_id_name(func_id), func_id);
10090 return -EINVAL;
10091 }
10092
10093 if (env->prog->aux->sleepable && is_storage_get_function(func_id))
10094 env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
10095 }
10096
10097 meta.func_id = func_id;
10098 /* check args */
10099 for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) {
10100 err = check_func_arg(env, i, &meta, fn, insn_idx);
10101 if (err)
10102 return err;
10103 }
10104
10105 err = record_func_map(env, &meta, func_id, insn_idx);
10106 if (err)
10107 return err;
10108
10109 err = record_func_key(env, &meta, func_id, insn_idx);
10110 if (err)
10111 return err;
10112
10113 /* Mark slots with STACK_MISC in case of raw mode, stack offset
10114 * is inferred from register state.
10115 */
10116 for (i = 0; i < meta.access_size; i++) {
10117 err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
10118 BPF_WRITE, -1, false, false);
10119 if (err)
10120 return err;
10121 }
10122
10123 regs = cur_regs(env);
10124
10125 if (meta.release_regno) {
10126 err = -EINVAL;
10127 /* This can only be set for PTR_TO_STACK, as CONST_PTR_TO_DYNPTR cannot
10128 * be released by any dynptr helper. Hence, unmark_stack_slots_dynptr
10129 * is safe to do directly.
10130 */
10131 if (arg_type_is_dynptr(fn->arg_type[meta.release_regno - BPF_REG_1])) {
10132 if (regs[meta.release_regno].type == CONST_PTR_TO_DYNPTR) {
10133 verbose(env, "verifier internal error: CONST_PTR_TO_DYNPTR cannot be released\n");
10134 return -EFAULT;
10135 }
10136 err = unmark_stack_slots_dynptr(env, &regs[meta.release_regno]);
10137 } else if (func_id == BPF_FUNC_kptr_xchg && meta.ref_obj_id) {
10138 u32 ref_obj_id = meta.ref_obj_id;
10139 bool in_rcu = in_rcu_cs(env);
10140 struct bpf_func_state *state;
10141 struct bpf_reg_state *reg;
10142
10143 err = release_reference_state(cur_func(env), ref_obj_id);
10144 if (!err) {
10145 bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
10146 if (reg->ref_obj_id == ref_obj_id) {
10147 if (in_rcu && (reg->type & MEM_ALLOC) && (reg->type & MEM_PERCPU)) {
10148 reg->ref_obj_id = 0;
10149 reg->type &= ~MEM_ALLOC;
10150 reg->type |= MEM_RCU;
10151 } else {
10152 mark_reg_invalid(env, reg);
10153 }
10154 }
10155 }));
10156 }
10157 } else if (meta.ref_obj_id) {
10158 err = release_reference(env, meta.ref_obj_id);
10159 } else if (register_is_null(&regs[meta.release_regno])) {
10160 /* meta.ref_obj_id can only be 0 if register that is meant to be
10161 * released is NULL, which must be > R0.
10162 */
10163 err = 0;
10164 }
10165 if (err) {
10166 verbose(env, "func %s#%d reference has not been acquired before\n",
10167 func_id_name(func_id), func_id);
10168 return err;
10169 }
10170 }
10171
10172 switch (func_id) {
10173 case BPF_FUNC_tail_call:
10174 err = check_reference_leak(env, false);
10175 if (err) {
10176 verbose(env, "tail_call would lead to reference leak\n");
10177 return err;
10178 }
10179 break;
10180 case BPF_FUNC_get_local_storage:
10181 /* check that flags argument in get_local_storage(map, flags) is 0,
10182 * this is required because get_local_storage() can't return an error.
10183 */
10184 if (!register_is_null(&regs[BPF_REG_2])) {
10185 verbose(env, "get_local_storage() doesn't support non-zero flags\n");
10186 return -EINVAL;
10187 }
10188 break;
10189 case BPF_FUNC_for_each_map_elem:
10190 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
10191 set_map_elem_callback_state);
10192 break;
10193 case BPF_FUNC_timer_set_callback:
10194 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
10195 set_timer_callback_state);
10196 break;
10197 case BPF_FUNC_find_vma:
10198 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
10199 set_find_vma_callback_state);
10200 break;
10201 case BPF_FUNC_snprintf:
10202 err = check_bpf_snprintf_call(env, regs);
10203 break;
10204 case BPF_FUNC_loop:
10205 update_loop_inline_state(env, meta.subprogno);
10206 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
10207 set_loop_callback_state);
10208 break;
10209 case BPF_FUNC_dynptr_from_mem:
10210 if (regs[BPF_REG_1].type != PTR_TO_MAP_VALUE) {
10211 verbose(env, "Unsupported reg type %s for bpf_dynptr_from_mem data\n",
10212 reg_type_str(env, regs[BPF_REG_1].type));
10213 return -EACCES;
10214 }
10215 break;
10216 case BPF_FUNC_set_retval:
10217 if (prog_type == BPF_PROG_TYPE_LSM &&
10218 env->prog->expected_attach_type == BPF_LSM_CGROUP) {
10219 if (!env->prog->aux->attach_func_proto->type) {
10220 /* Make sure programs that attach to void
10221 * hooks don't try to modify return value.
10222 */ 10223 verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n"); 10224 return -EINVAL; 10225 } 10226 } 10227 break; 10228 case BPF_FUNC_dynptr_data: 10229 { 10230 struct bpf_reg_state *reg; 10231 int id, ref_obj_id; 10232 10233 reg = get_dynptr_arg_reg(env, fn, regs); 10234 if (!reg) 10235 return -EFAULT; 10236 10237 10238 if (meta.dynptr_id) { 10239 verbose(env, "verifier internal error: meta.dynptr_id already set\n"); 10240 return -EFAULT; 10241 } 10242 if (meta.ref_obj_id) { 10243 verbose(env, "verifier internal error: meta.ref_obj_id already set\n"); 10244 return -EFAULT; 10245 } 10246 10247 id = dynptr_id(env, reg); 10248 if (id < 0) { 10249 verbose(env, "verifier internal error: failed to obtain dynptr id\n"); 10250 return id; 10251 } 10252 10253 ref_obj_id = dynptr_ref_obj_id(env, reg); 10254 if (ref_obj_id < 0) { 10255 verbose(env, "verifier internal error: failed to obtain dynptr ref_obj_id\n"); 10256 return ref_obj_id; 10257 } 10258 10259 meta.dynptr_id = id; 10260 meta.ref_obj_id = ref_obj_id; 10261 10262 break; 10263 } 10264 case BPF_FUNC_dynptr_write: 10265 { 10266 enum bpf_dynptr_type dynptr_type; 10267 struct bpf_reg_state *reg; 10268 10269 reg = get_dynptr_arg_reg(env, fn, regs); 10270 if (!reg) 10271 return -EFAULT; 10272 10273 dynptr_type = dynptr_get_type(env, reg); 10274 if (dynptr_type == BPF_DYNPTR_TYPE_INVALID) 10275 return -EFAULT; 10276 10277 if (dynptr_type == BPF_DYNPTR_TYPE_SKB) 10278 /* this will trigger clear_all_pkt_pointers(), which will 10279 * invalidate all dynptr slices associated with the skb 10280 */ 10281 changes_data = true; 10282 10283 break; 10284 } 10285 case BPF_FUNC_per_cpu_ptr: 10286 case BPF_FUNC_this_cpu_ptr: 10287 { 10288 struct bpf_reg_state *reg = ®s[BPF_REG_1]; 10289 const struct btf_type *type; 10290 10291 if (reg->type & MEM_RCU) { 10292 type = btf_type_by_id(reg->btf, reg->btf_id); 10293 if (!type || !btf_type_is_struct(type)) { 10294 verbose(env, "Helper has invalid btf/btf_id in R1\n"); 10295 return -EFAULT; 10296 } 10297 returns_cpu_specific_alloc_ptr = true; 10298 env->insn_aux_data[insn_idx].call_with_percpu_alloc_ptr = true; 10299 } 10300 break; 10301 } 10302 case BPF_FUNC_user_ringbuf_drain: 10303 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, 10304 set_user_ringbuf_callback_state); 10305 break; 10306 } 10307 10308 if (err) 10309 return err; 10310 10311 /* reset caller saved regs */ 10312 for (i = 0; i < CALLER_SAVED_REGS; i++) { 10313 mark_reg_not_init(env, regs, caller_saved[i]); 10314 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); 10315 } 10316 10317 /* helper call returns 64-bit value. 
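 * The 32-bit subregister of R0 is therefore considered defined as well,
 * so no zero-extension needs to be patched in for it.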
*/ 10318 regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; 10319 10320 /* update return register (already marked as written above) */ 10321 ret_type = fn->ret_type; 10322 ret_flag = type_flag(ret_type); 10323 10324 switch (base_type(ret_type)) { 10325 case RET_INTEGER: 10326 /* sets type to SCALAR_VALUE */ 10327 mark_reg_unknown(env, regs, BPF_REG_0); 10328 break; 10329 case RET_VOID: 10330 regs[BPF_REG_0].type = NOT_INIT; 10331 break; 10332 case RET_PTR_TO_MAP_VALUE: 10333 /* There is no offset yet applied, variable or fixed */ 10334 mark_reg_known_zero(env, regs, BPF_REG_0); 10335 /* remember map_ptr, so that check_map_access() 10336 * can check 'value_size' boundary of memory access 10337 * to map element returned from bpf_map_lookup_elem() 10338 */ 10339 if (meta.map_ptr == NULL) { 10340 verbose(env, 10341 "kernel subsystem misconfigured verifier\n"); 10342 return -EINVAL; 10343 } 10344 regs[BPF_REG_0].map_ptr = meta.map_ptr; 10345 regs[BPF_REG_0].map_uid = meta.map_uid; 10346 regs[BPF_REG_0].type = PTR_TO_MAP_VALUE | ret_flag; 10347 if (!type_may_be_null(ret_type) && 10348 btf_record_has_field(meta.map_ptr->record, BPF_SPIN_LOCK)) { 10349 regs[BPF_REG_0].id = ++env->id_gen; 10350 } 10351 break; 10352 case RET_PTR_TO_SOCKET: 10353 mark_reg_known_zero(env, regs, BPF_REG_0); 10354 regs[BPF_REG_0].type = PTR_TO_SOCKET | ret_flag; 10355 break; 10356 case RET_PTR_TO_SOCK_COMMON: 10357 mark_reg_known_zero(env, regs, BPF_REG_0); 10358 regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON | ret_flag; 10359 break; 10360 case RET_PTR_TO_TCP_SOCK: 10361 mark_reg_known_zero(env, regs, BPF_REG_0); 10362 regs[BPF_REG_0].type = PTR_TO_TCP_SOCK | ret_flag; 10363 break; 10364 case RET_PTR_TO_MEM: 10365 mark_reg_known_zero(env, regs, BPF_REG_0); 10366 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; 10367 regs[BPF_REG_0].mem_size = meta.mem_size; 10368 break; 10369 case RET_PTR_TO_MEM_OR_BTF_ID: 10370 { 10371 const struct btf_type *t; 10372 10373 mark_reg_known_zero(env, regs, BPF_REG_0); 10374 t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL); 10375 if (!btf_type_is_struct(t)) { 10376 u32 tsize; 10377 const struct btf_type *ret; 10378 const char *tname; 10379 10380 /* resolve the type size of ksym. */ 10381 ret = btf_resolve_size(meta.ret_btf, t, &tsize); 10382 if (IS_ERR(ret)) { 10383 tname = btf_name_by_offset(meta.ret_btf, t->name_off); 10384 verbose(env, "unable to resolve the size of type '%s': %ld\n", 10385 tname, PTR_ERR(ret)); 10386 return -EINVAL; 10387 } 10388 regs[BPF_REG_0].type = PTR_TO_MEM | ret_flag; 10389 regs[BPF_REG_0].mem_size = tsize; 10390 } else { 10391 if (returns_cpu_specific_alloc_ptr) { 10392 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC | MEM_RCU; 10393 } else { 10394 /* MEM_RDONLY may be carried from ret_flag, but it 10395 * doesn't apply on PTR_TO_BTF_ID. Fold it, otherwise 10396 * it will confuse the check of PTR_TO_BTF_ID in 10397 * check_mem_access(). 
10398 */ 10399 ret_flag &= ~MEM_RDONLY; 10400 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; 10401 } 10402 10403 regs[BPF_REG_0].btf = meta.ret_btf; 10404 regs[BPF_REG_0].btf_id = meta.ret_btf_id; 10405 } 10406 break; 10407 } 10408 case RET_PTR_TO_BTF_ID: 10409 { 10410 struct btf *ret_btf; 10411 int ret_btf_id; 10412 10413 mark_reg_known_zero(env, regs, BPF_REG_0); 10414 regs[BPF_REG_0].type = PTR_TO_BTF_ID | ret_flag; 10415 if (func_id == BPF_FUNC_kptr_xchg) { 10416 ret_btf = meta.kptr_field->kptr.btf; 10417 ret_btf_id = meta.kptr_field->kptr.btf_id; 10418 if (!btf_is_kernel(ret_btf)) { 10419 regs[BPF_REG_0].type |= MEM_ALLOC; 10420 if (meta.kptr_field->type == BPF_KPTR_PERCPU) 10421 regs[BPF_REG_0].type |= MEM_PERCPU; 10422 } 10423 } else { 10424 if (fn->ret_btf_id == BPF_PTR_POISON) { 10425 verbose(env, "verifier internal error:"); 10426 verbose(env, "func %s has non-overwritten BPF_PTR_POISON return type\n", 10427 func_id_name(func_id)); 10428 return -EINVAL; 10429 } 10430 ret_btf = btf_vmlinux; 10431 ret_btf_id = *fn->ret_btf_id; 10432 } 10433 if (ret_btf_id == 0) { 10434 verbose(env, "invalid return type %u of func %s#%d\n", 10435 base_type(ret_type), func_id_name(func_id), 10436 func_id); 10437 return -EINVAL; 10438 } 10439 regs[BPF_REG_0].btf = ret_btf; 10440 regs[BPF_REG_0].btf_id = ret_btf_id; 10441 break; 10442 } 10443 default: 10444 verbose(env, "unknown return type %u of func %s#%d\n", 10445 base_type(ret_type), func_id_name(func_id), func_id); 10446 return -EINVAL; 10447 } 10448 10449 if (type_may_be_null(regs[BPF_REG_0].type)) 10450 regs[BPF_REG_0].id = ++env->id_gen; 10451 10452 if (helper_multiple_ref_obj_use(func_id, meta.map_ptr)) { 10453 verbose(env, "verifier internal error: func %s#%d sets ref_obj_id more than once\n", 10454 func_id_name(func_id), func_id); 10455 return -EFAULT; 10456 } 10457 10458 if (is_dynptr_ref_function(func_id)) 10459 regs[BPF_REG_0].dynptr_id = meta.dynptr_id; 10460 10461 if (is_ptr_cast_function(func_id) || is_dynptr_ref_function(func_id)) { 10462 /* For release_reference() */ 10463 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 10464 } else if (is_acquire_function(func_id, meta.map_ptr)) { 10465 int id = acquire_reference_state(env, insn_idx); 10466 10467 if (id < 0) 10468 return id; 10469 /* For mark_ptr_or_null_reg() */ 10470 regs[BPF_REG_0].id = id; 10471 /* For release_reference() */ 10472 regs[BPF_REG_0].ref_obj_id = id; 10473 } 10474 10475 do_refine_retval_range(regs, fn->ret_type, func_id, &meta); 10476 10477 err = check_map_func_compatibility(env, meta.map_ptr, func_id); 10478 if (err) 10479 return err; 10480 10481 if ((func_id == BPF_FUNC_get_stack || 10482 func_id == BPF_FUNC_get_task_stack) && 10483 !env->prog->has_callchain_buf) { 10484 const char *err_str; 10485 10486 #ifdef CONFIG_PERF_EVENTS 10487 err = get_callchain_buffers(sysctl_perf_event_max_stack); 10488 err_str = "cannot get callchain buffer for func %s#%d\n"; 10489 #else 10490 err = -ENOTSUPP; 10491 err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; 10492 #endif 10493 if (err) { 10494 verbose(env, err_str, func_id_name(func_id), func_id); 10495 return err; 10496 } 10497 10498 env->prog->has_callchain_buf = true; 10499 } 10500 10501 if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) 10502 env->prog->call_get_stack = true; 10503 10504 if (func_id == BPF_FUNC_get_func_ip) { 10505 if (check_get_func_ip(env)) 10506 return -ENOTSUPP; 10507 env->prog->call_get_func_ip = true; 10508 } 10509 10510 if (changes_data) 10511 
clear_all_pkt_pointers(env); 10512 return 0; 10513 } 10514 10515 /* mark_btf_func_reg_size() is used when the reg size is determined by 10516 * the BTF func_proto's return value size and argument. 10517 */ 10518 static void mark_btf_func_reg_size(struct bpf_verifier_env *env, u32 regno, 10519 size_t reg_size) 10520 { 10521 struct bpf_reg_state *reg = &cur_regs(env)[regno]; 10522 10523 if (regno == BPF_REG_0) { 10524 /* Function return value */ 10525 reg->live |= REG_LIVE_WRITTEN; 10526 reg->subreg_def = reg_size == sizeof(u64) ? 10527 DEF_NOT_SUBREG : env->insn_idx + 1; 10528 } else { 10529 /* Function argument */ 10530 if (reg_size == sizeof(u64)) { 10531 mark_insn_zext(env, reg); 10532 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); 10533 } else { 10534 mark_reg_read(env, reg, reg->parent, REG_LIVE_READ32); 10535 } 10536 } 10537 } 10538 10539 static bool is_kfunc_acquire(struct bpf_kfunc_call_arg_meta *meta) 10540 { 10541 return meta->kfunc_flags & KF_ACQUIRE; 10542 } 10543 10544 static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta) 10545 { 10546 return meta->kfunc_flags & KF_RELEASE; 10547 } 10548 10549 static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta) 10550 { 10551 return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta); 10552 } 10553 10554 static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta) 10555 { 10556 return meta->kfunc_flags & KF_SLEEPABLE; 10557 } 10558 10559 static bool is_kfunc_destructive(struct bpf_kfunc_call_arg_meta *meta) 10560 { 10561 return meta->kfunc_flags & KF_DESTRUCTIVE; 10562 } 10563 10564 static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta) 10565 { 10566 return meta->kfunc_flags & KF_RCU; 10567 } 10568 10569 static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta) 10570 { 10571 return meta->kfunc_flags & KF_RCU_PROTECTED; 10572 } 10573 10574 static bool __kfunc_param_match_suffix(const struct btf *btf, 10575 const struct btf_param *arg, 10576 const char *suffix) 10577 { 10578 int suffix_len = strlen(suffix), len; 10579 const char *param_name; 10580 10581 /* In the future, this can be ported to use BTF tagging */ 10582 param_name = btf_name_by_offset(btf, arg->name_off); 10583 if (str_is_empty(param_name)) 10584 return false; 10585 len = strlen(param_name); 10586 if (len < suffix_len) 10587 return false; 10588 param_name += len - suffix_len; 10589 return !strncmp(param_name, suffix, suffix_len); 10590 } 10591 10592 static bool is_kfunc_arg_mem_size(const struct btf *btf, 10593 const struct btf_param *arg, 10594 const struct bpf_reg_state *reg) 10595 { 10596 const struct btf_type *t; 10597 10598 t = btf_type_skip_modifiers(btf, arg->type, NULL); 10599 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) 10600 return false; 10601 10602 return __kfunc_param_match_suffix(btf, arg, "__sz"); 10603 } 10604 10605 static bool is_kfunc_arg_const_mem_size(const struct btf *btf, 10606 const struct btf_param *arg, 10607 const struct bpf_reg_state *reg) 10608 { 10609 const struct btf_type *t; 10610 10611 t = btf_type_skip_modifiers(btf, arg->type, NULL); 10612 if (!btf_type_is_scalar(t) || reg->type != SCALAR_VALUE) 10613 return false; 10614 10615 return __kfunc_param_match_suffix(btf, arg, "__szk"); 10616 } 10617 10618 static bool is_kfunc_arg_optional(const struct btf *btf, const struct btf_param *arg) 10619 { 10620 return __kfunc_param_match_suffix(btf, arg, "__opt"); 10621 } 10622 10623 static bool is_kfunc_arg_constant(const struct btf *btf, const struct btf_param *arg) 
10624 { 10625 return __kfunc_param_match_suffix(btf, arg, "__k"); 10626 } 10627 10628 static bool is_kfunc_arg_ignore(const struct btf *btf, const struct btf_param *arg) 10629 { 10630 return __kfunc_param_match_suffix(btf, arg, "__ign"); 10631 } 10632 10633 static bool is_kfunc_arg_alloc_obj(const struct btf *btf, const struct btf_param *arg) 10634 { 10635 return __kfunc_param_match_suffix(btf, arg, "__alloc"); 10636 } 10637 10638 static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *arg) 10639 { 10640 return __kfunc_param_match_suffix(btf, arg, "__uninit"); 10641 } 10642 10643 static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg) 10644 { 10645 return __kfunc_param_match_suffix(btf, arg, "__refcounted_kptr"); 10646 } 10647 10648 static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param *arg) 10649 { 10650 return __kfunc_param_match_suffix(btf, arg, "__nullable"); 10651 } 10652 10653 static bool is_kfunc_arg_scalar_with_name(const struct btf *btf, 10654 const struct btf_param *arg, 10655 const char *name) 10656 { 10657 int len, target_len = strlen(name); 10658 const char *param_name; 10659 10660 param_name = btf_name_by_offset(btf, arg->name_off); 10661 if (str_is_empty(param_name)) 10662 return false; 10663 len = strlen(param_name); 10664 if (len != target_len) 10665 return false; 10666 if (strcmp(param_name, name)) 10667 return false; 10668 10669 return true; 10670 } 10671 10672 enum { 10673 KF_ARG_DYNPTR_ID, 10674 KF_ARG_LIST_HEAD_ID, 10675 KF_ARG_LIST_NODE_ID, 10676 KF_ARG_RB_ROOT_ID, 10677 KF_ARG_RB_NODE_ID, 10678 }; 10679 10680 BTF_ID_LIST(kf_arg_btf_ids) 10681 BTF_ID(struct, bpf_dynptr_kern) 10682 BTF_ID(struct, bpf_list_head) 10683 BTF_ID(struct, bpf_list_node) 10684 BTF_ID(struct, bpf_rb_root) 10685 BTF_ID(struct, bpf_rb_node) 10686 10687 static bool __is_kfunc_ptr_arg_type(const struct btf *btf, 10688 const struct btf_param *arg, int type) 10689 { 10690 const struct btf_type *t; 10691 u32 res_id; 10692 10693 t = btf_type_skip_modifiers(btf, arg->type, NULL); 10694 if (!t) 10695 return false; 10696 if (!btf_type_is_ptr(t)) 10697 return false; 10698 t = btf_type_skip_modifiers(btf, t->type, &res_id); 10699 if (!t) 10700 return false; 10701 return btf_types_are_same(btf, res_id, btf_vmlinux, kf_arg_btf_ids[type]); 10702 } 10703 10704 static bool is_kfunc_arg_dynptr(const struct btf *btf, const struct btf_param *arg) 10705 { 10706 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_DYNPTR_ID); 10707 } 10708 10709 static bool is_kfunc_arg_list_head(const struct btf *btf, const struct btf_param *arg) 10710 { 10711 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_HEAD_ID); 10712 } 10713 10714 static bool is_kfunc_arg_list_node(const struct btf *btf, const struct btf_param *arg) 10715 { 10716 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_LIST_NODE_ID); 10717 } 10718 10719 static bool is_kfunc_arg_rbtree_root(const struct btf *btf, const struct btf_param *arg) 10720 { 10721 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_ROOT_ID); 10722 } 10723 10724 static bool is_kfunc_arg_rbtree_node(const struct btf *btf, const struct btf_param *arg) 10725 { 10726 return __is_kfunc_ptr_arg_type(btf, arg, KF_ARG_RB_NODE_ID); 10727 } 10728 10729 static bool is_kfunc_arg_callback(struct bpf_verifier_env *env, const struct btf *btf, 10730 const struct btf_param *arg) 10731 { 10732 const struct btf_type *t; 10733 10734 t = btf_type_resolve_func_ptr(btf, arg->type, NULL); 10735 if (!t) 10736 return false; 10737 10738 
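/* arg->type resolved to a function pointer in BTF, so the argument is a callback. Illustrative example (not from this file): the 'less' parameter of bpf_rbtree_add_impl(), declared as bool (*less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), is later handled as KF_ARG_PTR_TO_CALLBACK. */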
return true; 10739 } 10740 10741 /* Returns true if struct is composed of scalars, 4 levels of nesting allowed */ 10742 static bool __btf_type_is_scalar_struct(struct bpf_verifier_env *env, 10743 const struct btf *btf, 10744 const struct btf_type *t, int rec) 10745 { 10746 const struct btf_type *member_type; 10747 const struct btf_member *member; 10748 u32 i; 10749 10750 if (!btf_type_is_struct(t)) 10751 return false; 10752 10753 for_each_member(i, t, member) { 10754 const struct btf_array *array; 10755 10756 member_type = btf_type_skip_modifiers(btf, member->type, NULL); 10757 if (btf_type_is_struct(member_type)) { 10758 if (rec >= 3) { 10759 verbose(env, "max struct nesting depth exceeded\n"); 10760 return false; 10761 } 10762 if (!__btf_type_is_scalar_struct(env, btf, member_type, rec + 1)) 10763 return false; 10764 continue; 10765 } 10766 if (btf_type_is_array(member_type)) { 10767 array = btf_array(member_type); 10768 if (!array->nelems) 10769 return false; 10770 member_type = btf_type_skip_modifiers(btf, array->type, NULL); 10771 if (!btf_type_is_scalar(member_type)) 10772 return false; 10773 continue; 10774 } 10775 if (!btf_type_is_scalar(member_type)) 10776 return false; 10777 } 10778 return true; 10779 } 10780 10781 enum kfunc_ptr_arg_type { 10782 KF_ARG_PTR_TO_CTX, 10783 KF_ARG_PTR_TO_ALLOC_BTF_ID, /* Allocated object */ 10784 KF_ARG_PTR_TO_REFCOUNTED_KPTR, /* Refcounted local kptr */ 10785 KF_ARG_PTR_TO_DYNPTR, 10786 KF_ARG_PTR_TO_ITER, 10787 KF_ARG_PTR_TO_LIST_HEAD, 10788 KF_ARG_PTR_TO_LIST_NODE, 10789 KF_ARG_PTR_TO_BTF_ID, /* Also covers reg2btf_ids conversions */ 10790 KF_ARG_PTR_TO_MEM, 10791 KF_ARG_PTR_TO_MEM_SIZE, /* Size derived from next argument, skip it */ 10792 KF_ARG_PTR_TO_CALLBACK, 10793 KF_ARG_PTR_TO_RB_ROOT, 10794 KF_ARG_PTR_TO_RB_NODE, 10795 KF_ARG_PTR_TO_NULL, 10796 }; 10797 10798 enum special_kfunc_type { 10799 KF_bpf_obj_new_impl, 10800 KF_bpf_obj_drop_impl, 10801 KF_bpf_refcount_acquire_impl, 10802 KF_bpf_list_push_front_impl, 10803 KF_bpf_list_push_back_impl, 10804 KF_bpf_list_pop_front, 10805 KF_bpf_list_pop_back, 10806 KF_bpf_cast_to_kern_ctx, 10807 KF_bpf_rdonly_cast, 10808 KF_bpf_rcu_read_lock, 10809 KF_bpf_rcu_read_unlock, 10810 KF_bpf_rbtree_remove, 10811 KF_bpf_rbtree_add_impl, 10812 KF_bpf_rbtree_first, 10813 KF_bpf_dynptr_from_skb, 10814 KF_bpf_dynptr_from_xdp, 10815 KF_bpf_dynptr_slice, 10816 KF_bpf_dynptr_slice_rdwr, 10817 KF_bpf_dynptr_clone, 10818 KF_bpf_percpu_obj_new_impl, 10819 KF_bpf_percpu_obj_drop_impl, 10820 KF_bpf_throw, 10821 KF_bpf_iter_css_task_new, 10822 }; 10823 10824 BTF_SET_START(special_kfunc_set) 10825 BTF_ID(func, bpf_obj_new_impl) 10826 BTF_ID(func, bpf_obj_drop_impl) 10827 BTF_ID(func, bpf_refcount_acquire_impl) 10828 BTF_ID(func, bpf_list_push_front_impl) 10829 BTF_ID(func, bpf_list_push_back_impl) 10830 BTF_ID(func, bpf_list_pop_front) 10831 BTF_ID(func, bpf_list_pop_back) 10832 BTF_ID(func, bpf_cast_to_kern_ctx) 10833 BTF_ID(func, bpf_rdonly_cast) 10834 BTF_ID(func, bpf_rbtree_remove) 10835 BTF_ID(func, bpf_rbtree_add_impl) 10836 BTF_ID(func, bpf_rbtree_first) 10837 BTF_ID(func, bpf_dynptr_from_skb) 10838 BTF_ID(func, bpf_dynptr_from_xdp) 10839 BTF_ID(func, bpf_dynptr_slice) 10840 BTF_ID(func, bpf_dynptr_slice_rdwr) 10841 BTF_ID(func, bpf_dynptr_clone) 10842 BTF_ID(func, bpf_percpu_obj_new_impl) 10843 BTF_ID(func, bpf_percpu_obj_drop_impl) 10844 BTF_ID(func, bpf_throw) 10845 #ifdef CONFIG_CGROUPS 10846 BTF_ID(func, bpf_iter_css_task_new) 10847 #endif 10848 BTF_SET_END(special_kfunc_set) 10849 10850 
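/* The set above answers a fast membership question ("is this kfunc special at all?"), while the list below gives each special kfunc a stable index, keyed by enum special_kfunc_type, so it can be compared against meta.func_id directly. A minimal sketch of the pattern used throughout check_kfunc_call():
 *
 *	if (meta.btf == btf_vmlinux &&
 *	    btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
 *		if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl])
 *			... handle bpf_obj_new() specially ...
 *	}
 *
 * Note the list also carries entries (e.g. bpf_rcu_read_lock/unlock) that are not in the set above.
 */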
BTF_ID_LIST(special_kfunc_list) 10851 BTF_ID(func, bpf_obj_new_impl) 10852 BTF_ID(func, bpf_obj_drop_impl) 10853 BTF_ID(func, bpf_refcount_acquire_impl) 10854 BTF_ID(func, bpf_list_push_front_impl) 10855 BTF_ID(func, bpf_list_push_back_impl) 10856 BTF_ID(func, bpf_list_pop_front) 10857 BTF_ID(func, bpf_list_pop_back) 10858 BTF_ID(func, bpf_cast_to_kern_ctx) 10859 BTF_ID(func, bpf_rdonly_cast) 10860 BTF_ID(func, bpf_rcu_read_lock) 10861 BTF_ID(func, bpf_rcu_read_unlock) 10862 BTF_ID(func, bpf_rbtree_remove) 10863 BTF_ID(func, bpf_rbtree_add_impl) 10864 BTF_ID(func, bpf_rbtree_first) 10865 BTF_ID(func, bpf_dynptr_from_skb) 10866 BTF_ID(func, bpf_dynptr_from_xdp) 10867 BTF_ID(func, bpf_dynptr_slice) 10868 BTF_ID(func, bpf_dynptr_slice_rdwr) 10869 BTF_ID(func, bpf_dynptr_clone) 10870 BTF_ID(func, bpf_percpu_obj_new_impl) 10871 BTF_ID(func, bpf_percpu_obj_drop_impl) 10872 BTF_ID(func, bpf_throw) 10873 #ifdef CONFIG_CGROUPS 10874 BTF_ID(func, bpf_iter_css_task_new) 10875 #else 10876 BTF_ID_UNUSED 10877 #endif 10878 10879 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta) 10880 { 10881 if (meta->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && 10882 meta->arg_owning_ref) { 10883 return false; 10884 } 10885 10886 return meta->kfunc_flags & KF_RET_NULL; 10887 } 10888 10889 static bool is_kfunc_bpf_rcu_read_lock(struct bpf_kfunc_call_arg_meta *meta) 10890 { 10891 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_lock]; 10892 } 10893 10894 static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta) 10895 { 10896 return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock]; 10897 } 10898 10899 static enum kfunc_ptr_arg_type 10900 get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, 10901 struct bpf_kfunc_call_arg_meta *meta, 10902 const struct btf_type *t, const struct btf_type *ref_t, 10903 const char *ref_tname, const struct btf_param *args, 10904 int argno, int nargs) 10905 { 10906 u32 regno = argno + 1; 10907 struct bpf_reg_state *regs = cur_regs(env); 10908 struct bpf_reg_state *reg = &regs[regno]; 10909 bool arg_mem_size = false; 10910 10911 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) 10912 return KF_ARG_PTR_TO_CTX; 10913 10914 /* In this function, we verify the kfunc's BTF as per the argument type, 10915 * leaving the rest of the verification with respect to the register 10916 * type to our caller. When a set of conditions hold in the BTF type of 10917 * arguments, we resolve it to a known kfunc_ptr_arg_type.
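 * For instance (illustrative): an argument whose BTF type is 'struct bpf_dynptr_kern *' resolves to KF_ARG_PTR_TO_DYNPTR below, and a scalar argument whose name carries a "__sz" suffix turns the preceding pointer into a KF_ARG_PTR_TO_MEM_SIZE pair, regardless of what the register currently holds.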
10918 */ 10919 if (btf_get_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno)) 10920 return KF_ARG_PTR_TO_CTX; 10921 10922 if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno])) 10923 return KF_ARG_PTR_TO_ALLOC_BTF_ID; 10924 10925 if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno])) 10926 return KF_ARG_PTR_TO_REFCOUNTED_KPTR; 10927 10928 if (is_kfunc_arg_dynptr(meta->btf, &args[argno])) 10929 return KF_ARG_PTR_TO_DYNPTR; 10930 10931 if (is_kfunc_arg_iter(meta, argno)) 10932 return KF_ARG_PTR_TO_ITER; 10933 10934 if (is_kfunc_arg_list_head(meta->btf, &args[argno])) 10935 return KF_ARG_PTR_TO_LIST_HEAD; 10936 10937 if (is_kfunc_arg_list_node(meta->btf, &args[argno])) 10938 return KF_ARG_PTR_TO_LIST_NODE; 10939 10940 if (is_kfunc_arg_rbtree_root(meta->btf, &args[argno])) 10941 return KF_ARG_PTR_TO_RB_ROOT; 10942 10943 if (is_kfunc_arg_rbtree_node(meta->btf, &args[argno])) 10944 return KF_ARG_PTR_TO_RB_NODE; 10945 10946 if ((base_type(reg->type) == PTR_TO_BTF_ID || reg2btf_ids[base_type(reg->type)])) { 10947 if (!btf_type_is_struct(ref_t)) { 10948 verbose(env, "kernel function %s args#%d pointer type %s %s is not supported\n", 10949 meta->func_name, argno, btf_type_str(ref_t), ref_tname); 10950 return -EINVAL; 10951 } 10952 return KF_ARG_PTR_TO_BTF_ID; 10953 } 10954 10955 if (is_kfunc_arg_callback(env, meta->btf, &args[argno])) 10956 return KF_ARG_PTR_TO_CALLBACK; 10957 10958 if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg)) 10959 return KF_ARG_PTR_TO_NULL; 10960 10961 if (argno + 1 < nargs && 10962 (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) || 10963 is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))) 10964 arg_mem_size = true; 10965 10966 /* This is the catch all argument type of register types supported by 10967 * check_helper_mem_access. However, we only allow when argument type is 10968 * pointer to scalar, or struct composed (recursively) of scalars. When 10969 * arg_mem_size is true, the pointer can be void *. 10970 */ 10971 if (!btf_type_is_scalar(ref_t) && !__btf_type_is_scalar_struct(env, meta->btf, ref_t, 0) && 10972 (arg_mem_size ? !btf_type_is_void(ref_t) : 1)) { 10973 verbose(env, "arg#%d pointer type %s %s must point to %sscalar, or struct with scalar\n", 10974 argno, btf_type_str(ref_t), ref_tname, arg_mem_size ? "void, " : ""); 10975 return -EINVAL; 10976 } 10977 return arg_mem_size ? KF_ARG_PTR_TO_MEM_SIZE : KF_ARG_PTR_TO_MEM; 10978 } 10979 10980 static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, 10981 struct bpf_reg_state *reg, 10982 const struct btf_type *ref_t, 10983 const char *ref_tname, u32 ref_id, 10984 struct bpf_kfunc_call_arg_meta *meta, 10985 int argno) 10986 { 10987 const struct btf_type *reg_ref_t; 10988 bool strict_type_match = false; 10989 const struct btf *reg_btf; 10990 const char *reg_ref_tname; 10991 u32 reg_ref_id; 10992 10993 if (base_type(reg->type) == PTR_TO_BTF_ID) { 10994 reg_btf = reg->btf; 10995 reg_ref_id = reg->btf_id; 10996 } else { 10997 reg_btf = btf_vmlinux; 10998 reg_ref_id = *reg2btf_ids[base_type(reg->type)]; 10999 } 11000 11001 /* Enforce strict type matching for calls to kfuncs that are acquiring 11002 * or releasing a reference, or are no-cast aliases. We do _not_ 11003 * enforce strict matching for plain KF_TRUSTED_ARGS kfuncs by default, 11004 * as we want to enable BPF programs to pass types that are bitwise 11005 * equivalent without forcing them to explicitly cast with something 11006 * like bpf_cast_to_kern_ctx().
11007 * 11008 * For example, say we had a type like the following: 11009 * 11010 * struct bpf_cpumask { 11011 * cpumask_t cpumask; 11012 * refcount_t usage; 11013 * }; 11014 * 11015 * Note that as specified in <linux/cpumask.h>, cpumask_t is typedef'ed 11016 * to a struct cpumask, so it would be safe to pass a struct 11017 * bpf_cpumask * to a kfunc expecting a struct cpumask *. 11018 * 11019 * The philosophy here is similar to how we allow scalars of different 11020 * types to be passed to kfuncs as long as the size is the same. The 11021 * only difference here is that we're simply allowing 11022 * btf_struct_ids_match() to walk the struct at the 0th offset, and 11023 * resolve types. 11024 */ 11025 if (is_kfunc_acquire(meta) || 11026 (is_kfunc_release(meta) && reg->ref_obj_id) || 11027 btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id)) 11028 strict_type_match = true; 11029 11030 WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off); 11031 11032 reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id); 11033 reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off); 11034 if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) { 11035 verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n", 11036 meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1, 11037 btf_type_str(reg_ref_t), reg_ref_tname); 11038 return -EINVAL; 11039 } 11040 return 0; 11041 } 11042 11043 static int ref_set_non_owning(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 11044 { 11045 struct bpf_verifier_state *state = env->cur_state; 11046 struct btf_record *rec = reg_btf_record(reg); 11047 11048 if (!state->active_lock.ptr) { 11049 verbose(env, "verifier internal error: ref_set_non_owning w/o active lock\n"); 11050 return -EFAULT; 11051 } 11052 11053 if (type_flag(reg->type) & NON_OWN_REF) { 11054 verbose(env, "verifier internal error: NON_OWN_REF already set\n"); 11055 return -EFAULT; 11056 } 11057 11058 reg->type |= NON_OWN_REF; 11059 if (rec->refcount_off >= 0) 11060 reg->type |= MEM_RCU; 11061 11062 return 0; 11063 } 11064 11065 static int ref_convert_owning_non_owning(struct bpf_verifier_env *env, u32 ref_obj_id) 11066 { 11067 struct bpf_func_state *state, *unused; 11068 struct bpf_reg_state *reg; 11069 int i; 11070 11071 state = cur_func(env); 11072 11073 if (!ref_obj_id) { 11074 verbose(env, "verifier internal error: ref_obj_id is zero for " 11075 "owning -> non-owning conversion\n"); 11076 return -EFAULT; 11077 } 11078 11079 for (i = 0; i < state->acquired_refs; i++) { 11080 if (state->refs[i].id != ref_obj_id) 11081 continue; 11082 11083 /* Clear ref_obj_id here so release_reference doesn't clobber 11084 * the whole reg 11085 */ 11086 bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({ 11087 if (reg->ref_obj_id == ref_obj_id) { 11088 reg->ref_obj_id = 0; 11089 ref_set_non_owning(env, reg); 11090 } 11091 })); 11092 return 0; 11093 } 11094 11095 verbose(env, "verifier internal error: ref state missing for ref_obj_id\n"); 11096 return -EFAULT; 11097 } 11098 11099 /* Implementation details: 11100 * 11101 * Each register points to some region of memory, which we define as an 11102 * allocation. Each allocation may embed a bpf_spin_lock which protects any 11103 * special BPF objects (bpf_list_head, bpf_rb_root, etc.) part of the same 11104 * allocation. The lock and the data it protects are colocated in the same 11105 * memory region.
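 *
 * A sketch of such an allocation as a BPF program would declare it (the
 * __contains annotation here is illustrative, from bpf_experimental.h, and
 * 'struct elem'/'struct foo' are hypothetical program types):
 *
 *	struct elem {
 *		struct bpf_spin_lock lock;
 *		struct bpf_list_head head __contains(foo, node);
 *	};
 *
 * Whether 'elem' is a map value or returned by bpf_obj_new(), the lock and
 * the list head it protects share one allocation, and the rules below tie
 * them together via (active_lock.ptr, active_lock.id).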
11106 * 11107 * Hence, everytime a register holds a pointer value pointing to such 11108 * allocation, the verifier preserves a unique reg->id for it. 11109 * 11110 * The verifier remembers the lock 'ptr' and the lock 'id' whenever 11111 * bpf_spin_lock is called. 11112 * 11113 * To enable this, lock state in the verifier captures two values: 11114 * active_lock.ptr = Register's type specific pointer 11115 * active_lock.id = A unique ID for each register pointer value 11116 * 11117 * Currently, PTR_TO_MAP_VALUE and PTR_TO_BTF_ID | MEM_ALLOC are the two 11118 * supported register types. 11119 * 11120 * The active_lock.ptr in case of map values is the reg->map_ptr, and in case of 11121 * allocated objects is the reg->btf pointer. 11122 * 11123 * The active_lock.id is non-unique for maps supporting direct_value_addr, as we 11124 * can establish the provenance of the map value statically for each distinct 11125 * lookup into such maps. They always contain a single map value hence unique 11126 * IDs for each pseudo load pessimizes the algorithm and rejects valid programs. 11127 * 11128 * So, in case of global variables, they use array maps with max_entries = 1, 11129 * hence their active_lock.ptr becomes map_ptr and id = 0 (since they all point 11130 * into the same map value as max_entries is 1, as described above). 11131 * 11132 * In case of inner map lookups, the inner map pointer has same map_ptr as the 11133 * outer map pointer (in verifier context), but each lookup into an inner map 11134 * assigns a fresh reg->id to the lookup, so while lookups into distinct inner 11135 * maps from the same outer map share the same map_ptr as active_lock.ptr, they 11136 * will get different reg->id assigned to each lookup, hence different 11137 * active_lock.id. 11138 * 11139 * In case of allocated objects, active_lock.ptr is the reg->btf, and the 11140 * reg->id is a unique ID preserved after the NULL pointer check on the pointer 11141 * returned from bpf_obj_new. Each allocation receives a new reg->id. 
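 *
 * Concretely, the check below rejects mixing allocations, e.g. (sketch,
 * using the hypothetical 'struct elem' above):
 *
 *	bpf_spin_lock(&elem_a->lock);
 *	bpf_list_push_front(&elem_b->head, &n->node);	// different allocation
 *
 * active_lock.{ptr,id} was recorded for elem_a, so the push into elem_b's
 * list triggers the "held lock and object are not in the same allocation"
 * error below.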
11142 */ 11143 static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_reg_state *reg) 11144 { 11145 void *ptr; 11146 u32 id; 11147 11148 switch ((int)reg->type) { 11149 case PTR_TO_MAP_VALUE: 11150 ptr = reg->map_ptr; 11151 break; 11152 case PTR_TO_BTF_ID | MEM_ALLOC: 11153 ptr = reg->btf; 11154 break; 11155 default: 11156 verbose(env, "verifier internal error: unknown reg type for lock check\n"); 11157 return -EFAULT; 11158 } 11159 id = reg->id; 11160 11161 if (!env->cur_state->active_lock.ptr) 11162 return -EINVAL; 11163 if (env->cur_state->active_lock.ptr != ptr || 11164 env->cur_state->active_lock.id != id) { 11165 verbose(env, "held lock and object are not in the same allocation\n"); 11166 return -EINVAL; 11167 } 11168 return 0; 11169 } 11170 11171 static bool is_bpf_list_api_kfunc(u32 btf_id) 11172 { 11173 return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || 11174 btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] || 11175 btf_id == special_kfunc_list[KF_bpf_list_pop_front] || 11176 btf_id == special_kfunc_list[KF_bpf_list_pop_back]; 11177 } 11178 11179 static bool is_bpf_rbtree_api_kfunc(u32 btf_id) 11180 { 11181 return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] || 11182 btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || 11183 btf_id == special_kfunc_list[KF_bpf_rbtree_first]; 11184 } 11185 11186 static bool is_bpf_graph_api_kfunc(u32 btf_id) 11187 { 11188 return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) || 11189 btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]; 11190 } 11191 11192 static bool is_callback_calling_kfunc(u32 btf_id) 11193 { 11194 return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]; 11195 } 11196 11197 static bool is_bpf_throw_kfunc(struct bpf_insn *insn) 11198 { 11199 return bpf_pseudo_kfunc_call(insn) && insn->off == 0 && 11200 insn->imm == special_kfunc_list[KF_bpf_throw]; 11201 } 11202 11203 static bool is_rbtree_lock_required_kfunc(u32 btf_id) 11204 { 11205 return is_bpf_rbtree_api_kfunc(btf_id); 11206 } 11207 11208 static bool check_kfunc_is_graph_root_api(struct bpf_verifier_env *env, 11209 enum btf_field_type head_field_type, 11210 u32 kfunc_btf_id) 11211 { 11212 bool ret; 11213 11214 switch (head_field_type) { 11215 case BPF_LIST_HEAD: 11216 ret = is_bpf_list_api_kfunc(kfunc_btf_id); 11217 break; 11218 case BPF_RB_ROOT: 11219 ret = is_bpf_rbtree_api_kfunc(kfunc_btf_id); 11220 break; 11221 default: 11222 verbose(env, "verifier internal error: unexpected graph root argument type %s\n", 11223 btf_field_type_name(head_field_type)); 11224 return false; 11225 } 11226 11227 if (!ret) 11228 verbose(env, "verifier internal error: %s head arg for unknown kfunc\n", 11229 btf_field_type_name(head_field_type)); 11230 return ret; 11231 } 11232 11233 static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env, 11234 enum btf_field_type node_field_type, 11235 u32 kfunc_btf_id) 11236 { 11237 bool ret; 11238 11239 switch (node_field_type) { 11240 case BPF_LIST_NODE: 11241 ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || 11242 kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl]); 11243 break; 11244 case BPF_RB_NODE: 11245 ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || 11246 kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]); 11247 break; 11248 default: 11249 verbose(env, "verifier internal error: unexpected graph node argument type %s\n", 11250 btf_field_type_name(node_field_type)); 11251 return false; 
11252 } 11253 11254 if (!ret) 11255 verbose(env, "verifier internal error: %s node arg for unknown kfunc\n", 11256 btf_field_type_name(node_field_type)); 11257 return ret; 11258 } 11259 11260 static int 11261 __process_kf_arg_ptr_to_graph_root(struct bpf_verifier_env *env, 11262 struct bpf_reg_state *reg, u32 regno, 11263 struct bpf_kfunc_call_arg_meta *meta, 11264 enum btf_field_type head_field_type, 11265 struct btf_field **head_field) 11266 { 11267 const char *head_type_name; 11268 struct btf_field *field; 11269 struct btf_record *rec; 11270 u32 head_off; 11271 11272 if (meta->btf != btf_vmlinux) { 11273 verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n"); 11274 return -EFAULT; 11275 } 11276 11277 if (!check_kfunc_is_graph_root_api(env, head_field_type, meta->func_id)) 11278 return -EFAULT; 11279 11280 head_type_name = btf_field_type_name(head_field_type); 11281 if (!tnum_is_const(reg->var_off)) { 11282 verbose(env, 11283 "R%d doesn't have constant offset. %s has to be at the constant offset\n", 11284 regno, head_type_name); 11285 return -EINVAL; 11286 } 11287 11288 rec = reg_btf_record(reg); 11289 head_off = reg->off + reg->var_off.value; 11290 field = btf_record_find(rec, head_off, head_field_type); 11291 if (!field) { 11292 verbose(env, "%s not found at offset=%u\n", head_type_name, head_off); 11293 return -EINVAL; 11294 } 11295 11296 /* All functions require bpf_list_head to be protected using a bpf_spin_lock */ 11297 if (check_reg_allocation_locked(env, reg)) { 11298 verbose(env, "bpf_spin_lock at off=%d must be held for %s\n", 11299 rec->spin_lock_off, head_type_name); 11300 return -EINVAL; 11301 } 11302 11303 if (*head_field) { 11304 verbose(env, "verifier internal error: repeating %s arg\n", head_type_name); 11305 return -EFAULT; 11306 } 11307 *head_field = field; 11308 return 0; 11309 } 11310 11311 static int process_kf_arg_ptr_to_list_head(struct bpf_verifier_env *env, 11312 struct bpf_reg_state *reg, u32 regno, 11313 struct bpf_kfunc_call_arg_meta *meta) 11314 { 11315 return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_LIST_HEAD, 11316 &meta->arg_list_head.field); 11317 } 11318 11319 static int process_kf_arg_ptr_to_rbtree_root(struct bpf_verifier_env *env, 11320 struct bpf_reg_state *reg, u32 regno, 11321 struct bpf_kfunc_call_arg_meta *meta) 11322 { 11323 return __process_kf_arg_ptr_to_graph_root(env, reg, regno, meta, BPF_RB_ROOT, 11324 &meta->arg_rbtree_root.field); 11325 } 11326 11327 static int 11328 __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env, 11329 struct bpf_reg_state *reg, u32 regno, 11330 struct bpf_kfunc_call_arg_meta *meta, 11331 enum btf_field_type head_field_type, 11332 enum btf_field_type node_field_type, 11333 struct btf_field **node_field) 11334 { 11335 const char *node_type_name; 11336 const struct btf_type *et, *t; 11337 struct btf_field *field; 11338 u32 node_off; 11339 11340 if (meta->btf != btf_vmlinux) { 11341 verbose(env, "verifier internal error: unexpected btf mismatch in kfunc call\n"); 11342 return -EFAULT; 11343 } 11344 11345 if (!check_kfunc_is_graph_node_api(env, node_field_type, meta->func_id)) 11346 return -EFAULT; 11347 11348 node_type_name = btf_field_type_name(node_field_type); 11349 if (!tnum_is_const(reg->var_off)) { 11350 verbose(env, 11351 "R%d doesn't have constant offset. 
%s has to be at the constant offset\n", 11352 regno, node_type_name); 11353 return -EINVAL; 11354 } 11355 11356 node_off = reg->off + reg->var_off.value; 11357 field = reg_find_field_offset(reg, node_off, node_field_type); 11358 if (!field || field->offset != node_off) { 11359 verbose(env, "%s not found at offset=%u\n", node_type_name, node_off); 11360 return -EINVAL; 11361 } 11362 11363 field = *node_field; 11364 11365 et = btf_type_by_id(field->graph_root.btf, field->graph_root.value_btf_id); 11366 t = btf_type_by_id(reg->btf, reg->btf_id); 11367 if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, 0, field->graph_root.btf, 11368 field->graph_root.value_btf_id, true)) { 11369 verbose(env, "operation on %s expects arg#1 %s at offset=%d " 11370 "in struct %s, but arg is at offset=%d in struct %s\n", 11371 btf_field_type_name(head_field_type), 11372 btf_field_type_name(node_field_type), 11373 field->graph_root.node_offset, 11374 btf_name_by_offset(field->graph_root.btf, et->name_off), 11375 node_off, btf_name_by_offset(reg->btf, t->name_off)); 11376 return -EINVAL; 11377 } 11378 meta->arg_btf = reg->btf; 11379 meta->arg_btf_id = reg->btf_id; 11380 11381 if (node_off != field->graph_root.node_offset) { 11382 verbose(env, "arg#1 offset=%d, but expected %s at offset=%d in struct %s\n", 11383 node_off, btf_field_type_name(node_field_type), 11384 field->graph_root.node_offset, 11385 btf_name_by_offset(field->graph_root.btf, et->name_off)); 11386 return -EINVAL; 11387 } 11388 11389 return 0; 11390 } 11391 11392 static int process_kf_arg_ptr_to_list_node(struct bpf_verifier_env *env, 11393 struct bpf_reg_state *reg, u32 regno, 11394 struct bpf_kfunc_call_arg_meta *meta) 11395 { 11396 return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta, 11397 BPF_LIST_HEAD, BPF_LIST_NODE, 11398 &meta->arg_list_head.field); 11399 } 11400 11401 static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env, 11402 struct bpf_reg_state *reg, u32 regno, 11403 struct bpf_kfunc_call_arg_meta *meta) 11404 { 11405 return __process_kf_arg_ptr_to_graph_node(env, reg, regno, meta, 11406 BPF_RB_ROOT, BPF_RB_NODE, 11407 &meta->arg_rbtree_root.field); 11408 } 11409 11410 /* 11411 * css_task iter allowlist is needed to avoid dead locking on css_set_lock. 11412 * LSM hooks and iters (both sleepable and non-sleepable) are safe. 11413 * Any sleepable progs are also safe since bpf_check_attach_target() enforce 11414 * them can only be attached to some specific hook points. 
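 * For example, a BPF_PROG_TYPE_LSM program or a tracing program attached as
 * BPF_TRACE_ITER may create the iterator even when non-sleepable, whereas a
 * non-sleepable kprobe program is rejected by the check below.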
11415 */ 11416 static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env) 11417 { 11418 enum bpf_prog_type prog_type = resolve_prog_type(env->prog); 11419 11420 switch (prog_type) { 11421 case BPF_PROG_TYPE_LSM: 11422 return true; 11423 case BPF_PROG_TYPE_TRACING: 11424 if (env->prog->expected_attach_type == BPF_TRACE_ITER) 11425 return true; 11426 fallthrough; 11427 default: 11428 return env->prog->aux->sleepable; 11429 } 11430 } 11431 11432 static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta, 11433 int insn_idx) 11434 { 11435 const char *func_name = meta->func_name, *ref_tname; 11436 const struct btf *btf = meta->btf; 11437 const struct btf_param *args; 11438 struct btf_record *rec; 11439 u32 i, nargs; 11440 int ret; 11441 11442 args = (const struct btf_param *)(meta->func_proto + 1); 11443 nargs = btf_type_vlen(meta->func_proto); 11444 if (nargs > MAX_BPF_FUNC_REG_ARGS) { 11445 verbose(env, "Function %s has %d > %d args\n", func_name, nargs, 11446 MAX_BPF_FUNC_REG_ARGS); 11447 return -EINVAL; 11448 } 11449 11450 /* Check that BTF function arguments match actual types that the 11451 * verifier sees. 11452 */ 11453 for (i = 0; i < nargs; i++) { 11454 struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[i + 1]; 11455 const struct btf_type *t, *ref_t, *resolve_ret; 11456 enum bpf_arg_type arg_type = ARG_DONTCARE; 11457 u32 regno = i + 1, ref_id, type_size; 11458 bool is_ret_buf_sz = false; 11459 int kf_arg_type; 11460 11461 t = btf_type_skip_modifiers(btf, args[i].type, NULL); 11462 11463 if (is_kfunc_arg_ignore(btf, &args[i])) 11464 continue; 11465 11466 if (btf_type_is_scalar(t)) { 11467 if (reg->type != SCALAR_VALUE) { 11468 verbose(env, "R%d is not a scalar\n", regno); 11469 return -EINVAL; 11470 } 11471 11472 if (is_kfunc_arg_constant(meta->btf, &args[i])) { 11473 if (meta->arg_constant.found) { 11474 verbose(env, "verifier internal error: only one constant argument permitted\n"); 11475 return -EFAULT; 11476 } 11477 if (!tnum_is_const(reg->var_off)) { 11478 verbose(env, "R%d must be a known constant\n", regno); 11479 return -EINVAL; 11480 } 11481 ret = mark_chain_precision(env, regno); 11482 if (ret < 0) 11483 return ret; 11484 meta->arg_constant.found = true; 11485 meta->arg_constant.value = reg->var_off.value; 11486 } else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdonly_buf_size")) { 11487 meta->r0_rdonly = true; 11488 is_ret_buf_sz = true; 11489 } else if (is_kfunc_arg_scalar_with_name(btf, &args[i], "rdwr_buf_size")) { 11490 is_ret_buf_sz = true; 11491 } 11492 11493 if (is_ret_buf_sz) { 11494 if (meta->r0_size) { 11495 verbose(env, "2 or more rdonly/rdwr_buf_size parameters for kfunc"); 11496 return -EINVAL; 11497 } 11498 11499 if (!tnum_is_const(reg->var_off)) { 11500 verbose(env, "R%d is not a const\n", regno); 11501 return -EINVAL; 11502 } 11503 11504 meta->r0_size = reg->var_off.value; 11505 ret = mark_chain_precision(env, regno); 11506 if (ret) 11507 return ret; 11508 } 11509 continue; 11510 } 11511 11512 if (!btf_type_is_ptr(t)) { 11513 verbose(env, "Unrecognized arg#%d type %s\n", i, btf_type_str(t)); 11514 return -EINVAL; 11515 } 11516 11517 if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) && 11518 (register_is_null(reg) || type_may_be_null(reg->type)) && 11519 !is_kfunc_arg_nullable(meta->btf, &args[i])) { 11520 verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i); 11521 return -EACCES; 11522 } 11523 11524 if (reg->ref_obj_id) { 11525 if (is_kfunc_release(meta) && meta->ref_obj_id) {
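/* A release kfunc is tracked with a single referenced argument; finding a
 * second register with a ref_obj_id here indicates verifier bookkeeping went
 * wrong, hence -EFAULT rather than a program error.
 */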
11526 verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", 11527 regno, reg->ref_obj_id, 11528 meta->ref_obj_id); 11529 return -EFAULT; 11530 } 11531 meta->ref_obj_id = reg->ref_obj_id; 11532 if (is_kfunc_release(meta)) 11533 meta->release_regno = regno; 11534 } 11535 11536 ref_t = btf_type_skip_modifiers(btf, t->type, &ref_id); 11537 ref_tname = btf_name_by_offset(btf, ref_t->name_off); 11538 11539 kf_arg_type = get_kfunc_ptr_arg_type(env, meta, t, ref_t, ref_tname, args, i, nargs); 11540 if (kf_arg_type < 0) 11541 return kf_arg_type; 11542 11543 switch (kf_arg_type) { 11544 case KF_ARG_PTR_TO_NULL: 11545 continue; 11546 case KF_ARG_PTR_TO_ALLOC_BTF_ID: 11547 case KF_ARG_PTR_TO_BTF_ID: 11548 if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta)) 11549 break; 11550 11551 if (!is_trusted_reg(reg)) { 11552 if (!is_kfunc_rcu(meta)) { 11553 verbose(env, "R%d must be referenced or trusted\n", regno); 11554 return -EINVAL; 11555 } 11556 if (!is_rcu_reg(reg)) { 11557 verbose(env, "R%d must be a rcu pointer\n", regno); 11558 return -EINVAL; 11559 } 11560 } 11561 11562 fallthrough; 11563 case KF_ARG_PTR_TO_CTX: 11564 /* Trusted arguments have the same offset checks as release arguments */ 11565 arg_type |= OBJ_RELEASE; 11566 break; 11567 case KF_ARG_PTR_TO_DYNPTR: 11568 case KF_ARG_PTR_TO_ITER: 11569 case KF_ARG_PTR_TO_LIST_HEAD: 11570 case KF_ARG_PTR_TO_LIST_NODE: 11571 case KF_ARG_PTR_TO_RB_ROOT: 11572 case KF_ARG_PTR_TO_RB_NODE: 11573 case KF_ARG_PTR_TO_MEM: 11574 case KF_ARG_PTR_TO_MEM_SIZE: 11575 case KF_ARG_PTR_TO_CALLBACK: 11576 case KF_ARG_PTR_TO_REFCOUNTED_KPTR: 11577 /* Trusted by default */ 11578 break; 11579 default: 11580 WARN_ON_ONCE(1); 11581 return -EFAULT; 11582 } 11583 11584 if (is_kfunc_release(meta) && reg->ref_obj_id) 11585 arg_type |= OBJ_RELEASE; 11586 ret = check_func_arg_reg_off(env, reg, regno, arg_type); 11587 if (ret < 0) 11588 return ret; 11589 11590 switch (kf_arg_type) { 11591 case KF_ARG_PTR_TO_CTX: 11592 if (reg->type != PTR_TO_CTX) { 11593 verbose(env, "arg#%d expected pointer to ctx, but got %s\n", i, btf_type_str(t)); 11594 return -EINVAL; 11595 } 11596 11597 if (meta->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { 11598 ret = get_kern_ctx_btf_id(&env->log, resolve_prog_type(env->prog)); 11599 if (ret < 0) 11600 return -EINVAL; 11601 meta->ret_btf_id = ret; 11602 } 11603 break; 11604 case KF_ARG_PTR_TO_ALLOC_BTF_ID: 11605 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) { 11606 if (meta->func_id != special_kfunc_list[KF_bpf_obj_drop_impl]) { 11607 verbose(env, "arg#%d expected for bpf_obj_drop_impl()\n", i); 11608 return -EINVAL; 11609 } 11610 } else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC | MEM_PERCPU)) { 11611 if (meta->func_id != special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) { 11612 verbose(env, "arg#%d expected for bpf_percpu_obj_drop_impl()\n", i); 11613 return -EINVAL; 11614 } 11615 } else { 11616 verbose(env, "arg#%d expected pointer to allocated object\n", i); 11617 return -EINVAL; 11618 } 11619 if (!reg->ref_obj_id) { 11620 verbose(env, "allocated object must be referenced\n"); 11621 return -EINVAL; 11622 } 11623 if (meta->btf == btf_vmlinux) { 11624 meta->arg_btf = reg->btf; 11625 meta->arg_btf_id = reg->btf_id; 11626 } 11627 break; 11628 case KF_ARG_PTR_TO_DYNPTR: 11629 { 11630 enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR; 11631 int clone_ref_obj_id = 0; 11632 11633 if (reg->type != PTR_TO_STACK && 11634 reg->type != CONST_PTR_TO_DYNPTR) { 11635 verbose(env, "arg#%d expected pointer to stack 
or dynptr_ptr\n", i); 11636 return -EINVAL; 11637 } 11638 11639 if (reg->type == CONST_PTR_TO_DYNPTR) 11640 dynptr_arg_type |= MEM_RDONLY; 11641 11642 if (is_kfunc_arg_uninit(btf, &args[i])) 11643 dynptr_arg_type |= MEM_UNINIT; 11644 11645 if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) { 11646 dynptr_arg_type |= DYNPTR_TYPE_SKB; 11647 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_from_xdp]) { 11648 dynptr_arg_type |= DYNPTR_TYPE_XDP; 11649 } else if (meta->func_id == special_kfunc_list[KF_bpf_dynptr_clone] && 11650 (dynptr_arg_type & MEM_UNINIT)) { 11651 enum bpf_dynptr_type parent_type = meta->initialized_dynptr.type; 11652 11653 if (parent_type == BPF_DYNPTR_TYPE_INVALID) { 11654 verbose(env, "verifier internal error: no dynptr type for parent of clone\n"); 11655 return -EFAULT; 11656 } 11657 11658 dynptr_arg_type |= (unsigned int)get_dynptr_type_flag(parent_type); 11659 clone_ref_obj_id = meta->initialized_dynptr.ref_obj_id; 11660 if (dynptr_type_refcounted(parent_type) && !clone_ref_obj_id) { 11661 verbose(env, "verifier internal error: missing ref obj id for parent of clone\n"); 11662 return -EFAULT; 11663 } 11664 } 11665 11666 ret = process_dynptr_func(env, regno, insn_idx, dynptr_arg_type, clone_ref_obj_id); 11667 if (ret < 0) 11668 return ret; 11669 11670 if (!(dynptr_arg_type & MEM_UNINIT)) { 11671 int id = dynptr_id(env, reg); 11672 11673 if (id < 0) { 11674 verbose(env, "verifier internal error: failed to obtain dynptr id\n"); 11675 return id; 11676 } 11677 meta->initialized_dynptr.id = id; 11678 meta->initialized_dynptr.type = dynptr_get_type(env, reg); 11679 meta->initialized_dynptr.ref_obj_id = dynptr_ref_obj_id(env, reg); 11680 } 11681 11682 break; 11683 } 11684 case KF_ARG_PTR_TO_ITER: 11685 if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) { 11686 if (!check_css_task_iter_allowlist(env)) { 11687 verbose(env, "css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs\n"); 11688 return -EINVAL; 11689 } 11690 } 11691 ret = process_iter_arg(env, regno, insn_idx, meta); 11692 if (ret < 0) 11693 return ret; 11694 break; 11695 case KF_ARG_PTR_TO_LIST_HEAD: 11696 if (reg->type != PTR_TO_MAP_VALUE && 11697 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { 11698 verbose(env, "arg#%d expected pointer to map value or allocated object\n", i); 11699 return -EINVAL; 11700 } 11701 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { 11702 verbose(env, "allocated object must be referenced\n"); 11703 return -EINVAL; 11704 } 11705 ret = process_kf_arg_ptr_to_list_head(env, reg, regno, meta); 11706 if (ret < 0) 11707 return ret; 11708 break; 11709 case KF_ARG_PTR_TO_RB_ROOT: 11710 if (reg->type != PTR_TO_MAP_VALUE && 11711 reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { 11712 verbose(env, "arg#%d expected pointer to map value or allocated object\n", i); 11713 return -EINVAL; 11714 } 11715 if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC) && !reg->ref_obj_id) { 11716 verbose(env, "allocated object must be referenced\n"); 11717 return -EINVAL; 11718 } 11719 ret = process_kf_arg_ptr_to_rbtree_root(env, reg, regno, meta); 11720 if (ret < 0) 11721 return ret; 11722 break; 11723 case KF_ARG_PTR_TO_LIST_NODE: 11724 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { 11725 verbose(env, "arg#%d expected pointer to allocated object\n", i); 11726 return -EINVAL; 11727 } 11728 if (!reg->ref_obj_id) { 11729 verbose(env, "allocated object must be referenced\n"); 11730 return -EINVAL; 11731 } 11732 ret = process_kf_arg_ptr_to_list_node(env, reg, 
regno, meta); 11733 if (ret < 0) 11734 return ret; 11735 break; 11736 case KF_ARG_PTR_TO_RB_NODE: 11737 if (meta->func_id == special_kfunc_list[KF_bpf_rbtree_remove]) { 11738 if (!type_is_non_owning_ref(reg->type) || reg->ref_obj_id) { 11739 verbose(env, "rbtree_remove node input must be non-owning ref\n"); 11740 return -EINVAL; 11741 } 11742 if (in_rbtree_lock_required_cb(env)) { 11743 verbose(env, "rbtree_remove not allowed in rbtree cb\n"); 11744 return -EINVAL; 11745 } 11746 } else { 11747 if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) { 11748 verbose(env, "arg#%d expected pointer to allocated object\n", i); 11749 return -EINVAL; 11750 } 11751 if (!reg->ref_obj_id) { 11752 verbose(env, "allocated object must be referenced\n"); 11753 return -EINVAL; 11754 } 11755 } 11756 11757 ret = process_kf_arg_ptr_to_rbtree_node(env, reg, regno, meta); 11758 if (ret < 0) 11759 return ret; 11760 break; 11761 case KF_ARG_PTR_TO_BTF_ID: 11762 /* Only base_type is checked, further checks are done here */ 11763 if ((base_type(reg->type) != PTR_TO_BTF_ID || 11764 (bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) && 11765 !reg2btf_ids[base_type(reg->type)]) { 11766 verbose(env, "arg#%d is %s ", i, reg_type_str(env, reg->type)); 11767 verbose(env, "expected %s or socket\n", 11768 reg_type_str(env, base_type(reg->type) | 11769 (type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS))); 11770 return -EINVAL; 11771 } 11772 ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i); 11773 if (ret < 0) 11774 return ret; 11775 break; 11776 case KF_ARG_PTR_TO_MEM: 11777 resolve_ret = btf_resolve_size(btf, ref_t, &type_size); 11778 if (IS_ERR(resolve_ret)) { 11779 verbose(env, "arg#%d reference type('%s %s') size cannot be determined: %ld\n", 11780 i, btf_type_str(ref_t), ref_tname, PTR_ERR(resolve_ret)); 11781 return -EINVAL; 11782 } 11783 ret = check_mem_reg(env, reg, regno, type_size); 11784 if (ret < 0) 11785 return ret; 11786 break; 11787 case KF_ARG_PTR_TO_MEM_SIZE: 11788 { 11789 struct bpf_reg_state *buff_reg = &regs[regno]; 11790 const struct btf_param *buff_arg = &args[i]; 11791 struct bpf_reg_state *size_reg = &regs[regno + 1]; 11792 const struct btf_param *size_arg = &args[i + 1]; 11793 11794 if (!register_is_null(buff_reg) || !is_kfunc_arg_optional(meta->btf, buff_arg)) { 11795 ret = check_kfunc_mem_size_reg(env, size_reg, regno + 1); 11796 if (ret < 0) { 11797 verbose(env, "arg#%d arg#%d memory, len pair leads to invalid memory access\n", i, i + 1); 11798 return ret; 11799 } 11800 } 11801 11802 if (is_kfunc_arg_const_mem_size(meta->btf, size_arg, size_reg)) { 11803 if (meta->arg_constant.found) { 11804 verbose(env, "verifier internal error: only one constant argument permitted\n"); 11805 return -EFAULT; 11806 } 11807 if (!tnum_is_const(size_reg->var_off)) { 11808 verbose(env, "R%d must be a known constant\n", regno + 1); 11809 return -EINVAL; 11810 } 11811 meta->arg_constant.found = true; 11812 meta->arg_constant.value = size_reg->var_off.value; 11813 } 11814 11815 /* Skip next '__sz' or '__szk' argument */ 11816 i++; 11817 break; 11818 } 11819 case KF_ARG_PTR_TO_CALLBACK: 11820 if (reg->type != PTR_TO_FUNC) { 11821 verbose(env, "arg%d expected pointer to func\n", i); 11822 return -EINVAL; 11823 } 11824 meta->subprogno = reg->subprogno; 11825 break; 11826 case KF_ARG_PTR_TO_REFCOUNTED_KPTR: 11827 if (!type_is_ptr_alloc_obj(reg->type)) { 11828 verbose(env, "arg#%d is neither owning or non-owning ref\n", i); 11829 return -EINVAL; 11830 } 11831 if (!type_is_non_owning_ref(reg->type))
11832 meta->arg_owning_ref = true; 11833 11834 rec = reg_btf_record(reg); 11835 if (!rec) { 11836 verbose(env, "verifier internal error: Couldn't find btf_record\n"); 11837 return -EFAULT; 11838 } 11839 11840 if (rec->refcount_off < 0) { 11841 verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i); 11842 return -EINVAL; 11843 } 11844 11845 meta->arg_btf = reg->btf; 11846 meta->arg_btf_id = reg->btf_id; 11847 break; 11848 } 11849 } 11850 11851 if (is_kfunc_release(meta) && !meta->release_regno) { 11852 verbose(env, "release kernel function %s expects refcounted PTR_TO_BTF_ID\n", 11853 func_name); 11854 return -EINVAL; 11855 } 11856 11857 return 0; 11858 } 11859 11860 static int fetch_kfunc_meta(struct bpf_verifier_env *env, 11861 struct bpf_insn *insn, 11862 struct bpf_kfunc_call_arg_meta *meta, 11863 const char **kfunc_name) 11864 { 11865 const struct btf_type *func, *func_proto; 11866 u32 func_id, *kfunc_flags; 11867 const char *func_name; 11868 struct btf *desc_btf; 11869 11870 if (kfunc_name) 11871 *kfunc_name = NULL; 11872 11873 if (!insn->imm) 11874 return -EINVAL; 11875 11876 desc_btf = find_kfunc_desc_btf(env, insn->off); 11877 if (IS_ERR(desc_btf)) 11878 return PTR_ERR(desc_btf); 11879 11880 func_id = insn->imm; 11881 func = btf_type_by_id(desc_btf, func_id); 11882 func_name = btf_name_by_offset(desc_btf, func->name_off); 11883 if (kfunc_name) 11884 *kfunc_name = func_name; 11885 func_proto = btf_type_by_id(desc_btf, func->type); 11886 11887 kfunc_flags = btf_kfunc_id_set_contains(desc_btf, func_id, env->prog); 11888 if (!kfunc_flags) { 11889 return -EACCES; 11890 } 11891 11892 memset(meta, 0, sizeof(*meta)); 11893 meta->btf = desc_btf; 11894 meta->func_id = func_id; 11895 meta->kfunc_flags = *kfunc_flags; 11896 meta->func_proto = func_proto; 11897 meta->func_name = func_name; 11898 11899 return 0; 11900 } 11901 11902 static int check_return_code(struct bpf_verifier_env *env, int regno); 11903 11904 static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 11905 int *insn_idx_p) 11906 { 11907 const struct btf_type *t, *ptr_type; 11908 u32 i, nargs, ptr_type_id, release_ref_obj_id; 11909 struct bpf_reg_state *regs = cur_regs(env); 11910 const char *func_name, *ptr_type_name; 11911 bool sleepable, rcu_lock, rcu_unlock; 11912 struct bpf_kfunc_call_arg_meta meta; 11913 struct bpf_insn_aux_data *insn_aux; 11914 int err, insn_idx = *insn_idx_p; 11915 const struct btf_param *args; 11916 const struct btf_type *ret_t; 11917 struct btf *desc_btf; 11918 11919 /* skip for now, but return error when we find this in fixup_kfunc_call */ 11920 if (!insn->imm) 11921 return 0; 11922 11923 err = fetch_kfunc_meta(env, insn, &meta, &func_name); 11924 if (err == -EACCES && func_name) 11925 verbose(env, "calling kernel function %s is not allowed\n", func_name); 11926 if (err) 11927 return err; 11928 desc_btf = meta.btf; 11929 insn_aux = &env->insn_aux_data[insn_idx]; 11930 11931 insn_aux->is_iter_next = is_iter_next_kfunc(&meta); 11932 11933 if (is_kfunc_destructive(&meta) && !capable(CAP_SYS_BOOT)) { 11934 verbose(env, "destructive kfunc calls require CAP_SYS_BOOT capability\n"); 11935 return -EACCES; 11936 } 11937 11938 sleepable = is_kfunc_sleepable(&meta); 11939 if (sleepable && !env->prog->aux->sleepable) { 11940 verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name); 11941 return -EACCES; 11942 } 11943 11944 rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta); 11945 rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta); 11946 11947 if 
(env->cur_state->active_rcu_lock) { 11948 struct bpf_func_state *state; 11949 struct bpf_reg_state *reg; 11950 u32 clear_mask = (1 << STACK_SPILL) | (1 << STACK_ITER); 11951 11952 if (in_rbtree_lock_required_cb(env) && (rcu_lock || rcu_unlock)) { 11953 verbose(env, "Calling bpf_rcu_read_{lock,unlock} in unnecessary rbtree callback\n"); 11954 return -EACCES; 11955 } 11956 11957 if (rcu_lock) { 11958 verbose(env, "nested rcu read lock (kernel function %s)\n", func_name); 11959 return -EINVAL; 11960 } else if (rcu_unlock) { 11961 bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, clear_mask, ({ 11962 if (reg->type & MEM_RCU) { 11963 reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL); 11964 reg->type |= PTR_UNTRUSTED; 11965 } 11966 })); 11967 env->cur_state->active_rcu_lock = false; 11968 } else if (sleepable) { 11969 verbose(env, "kernel func %s is sleepable within rcu_read_lock region\n", func_name); 11970 return -EACCES; 11971 } 11972 } else if (rcu_lock) { 11973 env->cur_state->active_rcu_lock = true; 11974 } else if (rcu_unlock) { 11975 verbose(env, "unmatched rcu read unlock (kernel function %s)\n", func_name); 11976 return -EINVAL; 11977 } 11978 11979 /* Check the arguments */ 11980 err = check_kfunc_args(env, &meta, insn_idx); 11981 if (err < 0) 11982 return err; 11983 /* In case of release function, we get register number of refcounted 11984 * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now. 11985 */ 11986 if (meta.release_regno) { 11987 err = release_reference(env, regs[meta.release_regno].ref_obj_id); 11988 if (err) { 11989 verbose(env, "kfunc %s#%d reference has not been acquired before\n", 11990 func_name, meta.func_id); 11991 return err; 11992 } 11993 } 11994 11995 if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || 11996 meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || 11997 meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { 11998 release_ref_obj_id = regs[BPF_REG_2].ref_obj_id; 11999 insn_aux->insert_off = regs[BPF_REG_2].off; 12000 insn_aux->kptr_struct_meta = btf_find_struct_meta(meta.arg_btf, meta.arg_btf_id); 12001 err = ref_convert_owning_non_owning(env, release_ref_obj_id); 12002 if (err) { 12003 verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n", 12004 func_name, meta.func_id); 12005 return err; 12006 } 12007 12008 err = release_reference(env, release_ref_obj_id); 12009 if (err) { 12010 verbose(env, "kfunc %s#%d reference has not been acquired before\n", 12011 func_name, meta.func_id); 12012 return err; 12013 } 12014 } 12015 12016 if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { 12017 err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, 12018 set_rbtree_add_callback_state); 12019 if (err) { 12020 verbose(env, "kfunc %s#%d failed callback verification\n", 12021 func_name, meta.func_id); 12022 return err; 12023 } 12024 } 12025 12026 if (meta.func_id == special_kfunc_list[KF_bpf_throw]) { 12027 if (!bpf_jit_supports_exceptions()) { 12028 verbose(env, "JIT does not support calling kfunc %s#%d\n", 12029 func_name, meta.func_id); 12030 return -ENOTSUPP; 12031 } 12032 env->seen_exception = true; 12033 12034 /* In the case of the default callback, the cookie value passed 12035 * to bpf_throw becomes the return value of the program. 
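 * For example (sketch): bpf_throw(1) in a program without a custom exception
 * callback unwinds the stack and makes the program return 1, which is why the
 * cookie in R1 is validated with check_return_code() just below.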
12036 */ 12037 if (!env->exception_callback_subprog) { 12038 err = check_return_code(env, BPF_REG_1); 12039 if (err < 0) 12040 return err; 12041 } 12042 } 12043 12044 for (i = 0; i < CALLER_SAVED_REGS; i++) 12045 mark_reg_not_init(env, regs, caller_saved[i]); 12046 12047 /* Check return type */ 12048 t = btf_type_skip_modifiers(desc_btf, meta.func_proto->type, NULL); 12049 12050 if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) { 12051 /* Only exception is bpf_obj_new_impl */ 12052 if (meta.btf != btf_vmlinux || 12053 (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] && 12054 meta.func_id != special_kfunc_list[KF_bpf_percpu_obj_new_impl] && 12055 meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) { 12056 verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n"); 12057 return -EINVAL; 12058 } 12059 } 12060 12061 if (btf_type_is_scalar(t)) { 12062 mark_reg_unknown(env, regs, BPF_REG_0); 12063 mark_btf_func_reg_size(env, BPF_REG_0, t->size); 12064 } else if (btf_type_is_ptr(t)) { 12065 ptr_type = btf_type_skip_modifiers(desc_btf, t->type, &ptr_type_id); 12066 12067 if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) { 12068 if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] || 12069 meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { 12070 struct btf_struct_meta *struct_meta; 12071 struct btf *ret_btf; 12072 u32 ret_btf_id; 12073 12074 if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl] && !bpf_global_ma_set) 12075 return -ENOMEM; 12076 12077 if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && !bpf_global_percpu_ma_set) 12078 return -ENOMEM; 12079 12080 if (((u64)(u32)meta.arg_constant.value) != meta.arg_constant.value) { 12081 verbose(env, "local type ID argument must be in range [0, U32_MAX]\n"); 12082 return -EINVAL; 12083 } 12084 12085 ret_btf = env->prog->aux->btf; 12086 ret_btf_id = meta.arg_constant.value; 12087 12088 /* This may be NULL due to user not supplying a BTF */ 12089 if (!ret_btf) { 12090 verbose(env, "bpf_obj_new/bpf_percpu_obj_new requires prog BTF\n"); 12091 return -EINVAL; 12092 } 12093 12094 ret_t = btf_type_by_id(ret_btf, ret_btf_id); 12095 if (!ret_t || !__btf_type_is_struct(ret_t)) { 12096 verbose(env, "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct\n"); 12097 return -EINVAL; 12098 } 12099 12100 struct_meta = btf_find_struct_meta(ret_btf, ret_btf_id); 12101 if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { 12102 if (!__btf_type_is_scalar_struct(env, ret_btf, ret_t, 0)) { 12103 verbose(env, "bpf_percpu_obj_new type ID argument must be of a struct of scalars\n"); 12104 return -EINVAL; 12105 } 12106 12107 if (struct_meta) { 12108 verbose(env, "bpf_percpu_obj_new type ID argument must not contain special fields\n"); 12109 return -EINVAL; 12110 } 12111 } 12112 12113 mark_reg_known_zero(env, regs, BPF_REG_0); 12114 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; 12115 regs[BPF_REG_0].btf = ret_btf; 12116 regs[BPF_REG_0].btf_id = ret_btf_id; 12117 if (meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) 12118 regs[BPF_REG_0].type |= MEM_PERCPU; 12119 12120 insn_aux->obj_new_size = ret_t->size; 12121 insn_aux->kptr_struct_meta = struct_meta; 12122 } else if (meta.func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { 12123 mark_reg_known_zero(env, regs, BPF_REG_0); 12124 regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; 12125 regs[BPF_REG_0].btf = meta.arg_btf; 12126 
regs[BPF_REG_0].btf_id = meta.arg_btf_id; 12127 12128 insn_aux->kptr_struct_meta = 12129 btf_find_struct_meta(meta.arg_btf, 12130 meta.arg_btf_id); 12131 } else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] || 12132 meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) { 12133 struct btf_field *field = meta.arg_list_head.field; 12134 12135 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); 12136 } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove] || 12137 meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) { 12138 struct btf_field *field = meta.arg_rbtree_root.field; 12139 12140 mark_reg_graph_node(regs, BPF_REG_0, &field->graph_root); 12141 } else if (meta.func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) { 12142 mark_reg_known_zero(env, regs, BPF_REG_0); 12143 regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_TRUSTED; 12144 regs[BPF_REG_0].btf = desc_btf; 12145 regs[BPF_REG_0].btf_id = meta.ret_btf_id; 12146 } else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { 12147 ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value); 12148 if (!ret_t || !btf_type_is_struct(ret_t)) { 12149 verbose(env, 12150 "kfunc bpf_rdonly_cast type ID argument must be of a struct\n"); 12151 return -EINVAL; 12152 } 12153 12154 mark_reg_known_zero(env, regs, BPF_REG_0); 12155 regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED; 12156 regs[BPF_REG_0].btf = desc_btf; 12157 regs[BPF_REG_0].btf_id = meta.arg_constant.value; 12158 } else if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice] || 12159 meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice_rdwr]) { 12160 enum bpf_type_flag type_flag = get_dynptr_type_flag(meta.initialized_dynptr.type); 12161 12162 mark_reg_known_zero(env, regs, BPF_REG_0); 12163 12164 if (!meta.arg_constant.found) { 12165 verbose(env, "verifier internal error: bpf_dynptr_slice(_rdwr) no constant size\n"); 12166 return -EFAULT; 12167 } 12168 12169 regs[BPF_REG_0].mem_size = meta.arg_constant.value; 12170 12171 /* PTR_MAYBE_NULL will be added when is_kfunc_ret_null is checked */ 12172 regs[BPF_REG_0].type = PTR_TO_MEM | type_flag; 12173 12174 if (meta.func_id == special_kfunc_list[KF_bpf_dynptr_slice]) { 12175 regs[BPF_REG_0].type |= MEM_RDONLY; 12176 } else { 12177 /* this will set env->seen_direct_write to true */ 12178 if (!may_access_direct_pkt_data(env, NULL, BPF_WRITE)) { 12179 verbose(env, "the prog does not allow writes to packet data\n"); 12180 return -EINVAL; 12181 } 12182 } 12183 12184 if (!meta.initialized_dynptr.id) { 12185 verbose(env, "verifier internal error: no dynptr id\n"); 12186 return -EFAULT; 12187 } 12188 regs[BPF_REG_0].dynptr_id = meta.initialized_dynptr.id; 12189 12190 /* we don't need to set BPF_REG_0's ref obj id 12191 * because packet slices are not refcounted (see 12192 * dynptr_type_refcounted) 12193 */ 12194 } else { 12195 verbose(env, "kernel function %s unhandled dynamic return type\n", 12196 meta.func_name); 12197 return -EFAULT; 12198 } 12199 } else if (!__btf_type_is_struct(ptr_type)) { 12200 if (!meta.r0_size) { 12201 __u32 sz; 12202 12203 if (!IS_ERR(btf_resolve_size(desc_btf, ptr_type, &sz))) { 12204 meta.r0_size = sz; 12205 meta.r0_rdonly = true; 12206 } 12207 } 12208 if (!meta.r0_size) { 12209 ptr_type_name = btf_name_by_offset(desc_btf, 12210 ptr_type->name_off); 12211 verbose(env, 12212 "kernel function %s returns pointer type %s %s is not supported\n", 12213 func_name, 12214 btf_type_str(ptr_type), 12215 ptr_type_name); 12216 return -EINVAL; 12217 } 12218 12219 
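/* Non-struct pointer return with a known size: model it as PTR_TO_MEM of
 * meta.r0_size bytes. The size came either from an rdonly_buf_size /
 * rdwr_buf_size scalar argument or from btf_resolve_size() above, e.g.
 * (illustrative) a kfunc returning 'char *' backed by a caller-supplied
 * buffer length.
 */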
mark_reg_known_zero(env, regs, BPF_REG_0); 12220 regs[BPF_REG_0].type = PTR_TO_MEM; 12221 regs[BPF_REG_0].mem_size = meta.r0_size; 12222 12223 if (meta.r0_rdonly) 12224 regs[BPF_REG_0].type |= MEM_RDONLY; 12225 12226 /* Ensures we don't access the memory after a release_reference() */ 12227 if (meta.ref_obj_id) 12228 regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; 12229 } else { 12230 mark_reg_known_zero(env, regs, BPF_REG_0); 12231 regs[BPF_REG_0].btf = desc_btf; 12232 regs[BPF_REG_0].type = PTR_TO_BTF_ID; 12233 regs[BPF_REG_0].btf_id = ptr_type_id; 12234 } 12235 12236 if (is_kfunc_ret_null(&meta)) { 12237 regs[BPF_REG_0].type |= PTR_MAYBE_NULL; 12238 /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */ 12239 regs[BPF_REG_0].id = ++env->id_gen; 12240 } 12241 mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *)); 12242 if (is_kfunc_acquire(&meta)) { 12243 int id = acquire_reference_state(env, insn_idx); 12244 12245 if (id < 0) 12246 return id; 12247 if (is_kfunc_ret_null(&meta)) 12248 regs[BPF_REG_0].id = id; 12249 regs[BPF_REG_0].ref_obj_id = id; 12250 } else if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_first]) { 12251 ref_set_non_owning(env, &regs[BPF_REG_0]); 12252 } 12253 12254 if (reg_may_point_to_spin_lock(&regs[BPF_REG_0]) && !regs[BPF_REG_0].id) 12255 regs[BPF_REG_0].id = ++env->id_gen; 12256 } else if (btf_type_is_void(t)) { 12257 if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) { 12258 if (meta.func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || 12259 meta.func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl]) { 12260 insn_aux->kptr_struct_meta = 12261 btf_find_struct_meta(meta.arg_btf, 12262 meta.arg_btf_id); 12263 } 12264 } 12265 } 12266 12267 nargs = btf_type_vlen(meta.func_proto); 12268 args = (const struct btf_param *)(meta.func_proto + 1); 12269 for (i = 0; i < nargs; i++) { 12270 u32 regno = i + 1; 12271 12272 t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL); 12273 if (btf_type_is_ptr(t)) 12274 mark_btf_func_reg_size(env, regno, sizeof(void *)); 12275 else 12276 /* scalar.
ensured by btf_check_kfunc_arg_match() */ 12277 mark_btf_func_reg_size(env, regno, t->size); 12278 } 12279 12280 if (is_iter_next_kfunc(&meta)) { 12281 err = process_iter_next_call(env, insn_idx, &meta); 12282 if (err) 12283 return err; 12284 } 12285 12286 return 0; 12287 } 12288 12289 static bool signed_add_overflows(s64 a, s64 b) 12290 { 12291 /* Do the add in u64, where overflow is well-defined */ 12292 s64 res = (s64)((u64)a + (u64)b); 12293 12294 if (b < 0) 12295 return res > a; 12296 return res < a; 12297 } 12298 12299 static bool signed_add32_overflows(s32 a, s32 b) 12300 { 12301 /* Do the add in u32, where overflow is well-defined */ 12302 s32 res = (s32)((u32)a + (u32)b); 12303 12304 if (b < 0) 12305 return res > a; 12306 return res < a; 12307 } 12308 12309 static bool signed_sub_overflows(s64 a, s64 b) 12310 { 12311 /* Do the sub in u64, where overflow is well-defined */ 12312 s64 res = (s64)((u64)a - (u64)b); 12313 12314 if (b < 0) 12315 return res < a; 12316 return res > a; 12317 } 12318 12319 static bool signed_sub32_overflows(s32 a, s32 b) 12320 { 12321 /* Do the sub in u32, where overflow is well-defined */ 12322 s32 res = (s32)((u32)a - (u32)b); 12323 12324 if (b < 0) 12325 return res < a; 12326 return res > a; 12327 } 12328 12329 static bool check_reg_sane_offset(struct bpf_verifier_env *env, 12330 const struct bpf_reg_state *reg, 12331 enum bpf_reg_type type) 12332 { 12333 bool known = tnum_is_const(reg->var_off); 12334 s64 val = reg->var_off.value; 12335 s64 smin = reg->smin_value; 12336 12337 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { 12338 verbose(env, "math between %s pointer and %lld is not allowed\n", 12339 reg_type_str(env, type), val); 12340 return false; 12341 } 12342 12343 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { 12344 verbose(env, "%s pointer offset %d is not allowed\n", 12345 reg_type_str(env, type), reg->off); 12346 return false; 12347 } 12348 12349 if (smin == S64_MIN) { 12350 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", 12351 reg_type_str(env, type)); 12352 return false; 12353 } 12354 12355 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { 12356 verbose(env, "value %lld makes %s pointer be out of bounds\n", 12357 smin, reg_type_str(env, type)); 12358 return false; 12359 } 12360 12361 return true; 12362 } 12363 12364 enum { 12365 REASON_BOUNDS = -1, 12366 REASON_TYPE = -2, 12367 REASON_PATHS = -3, 12368 REASON_LIMIT = -4, 12369 REASON_STACK = -5, 12370 }; 12371 12372 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, 12373 u32 *alu_limit, bool mask_to_left) 12374 { 12375 u32 max = 0, ptr_limit = 0; 12376 12377 switch (ptr_reg->type) { 12378 case PTR_TO_STACK: 12379 /* Offset 0 is out-of-bounds, but acceptable start for the 12380 * left direction, see BPF_REG_FP. Also, unknown scalar 12381 * offset where we would need to deal with min/max bounds is 12382 * currently prohibited for unprivileged. 12383 */ 12384 max = MAX_BPF_STACK + mask_to_left; 12385 ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); 12386 break; 12387 case PTR_TO_MAP_VALUE: 12388 max = ptr_reg->map_ptr->value_size; 12389 ptr_limit = (mask_to_left ? 
12390 ptr_reg->smin_value : 12391 ptr_reg->umax_value) + ptr_reg->off; 12392 break; 12393 default: 12394 return REASON_TYPE; 12395 } 12396 12397 if (ptr_limit >= max) 12398 return REASON_LIMIT; 12399 *alu_limit = ptr_limit; 12400 return 0; 12401 } 12402 12403 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, 12404 const struct bpf_insn *insn) 12405 { 12406 return env->bypass_spec_v1 || BPF_SRC(insn->code) == BPF_K; 12407 } 12408 12409 static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, 12410 u32 alu_state, u32 alu_limit) 12411 { 12412 /* If we arrived here from different branches with different 12413 * state or limits to sanitize, then this won't work. 12414 */ 12415 if (aux->alu_state && 12416 (aux->alu_state != alu_state || 12417 aux->alu_limit != alu_limit)) 12418 return REASON_PATHS; 12419 12420 /* Corresponding fixup done in do_misc_fixups(). */ 12421 aux->alu_state = alu_state; 12422 aux->alu_limit = alu_limit; 12423 return 0; 12424 } 12425 12426 static int sanitize_val_alu(struct bpf_verifier_env *env, 12427 struct bpf_insn *insn) 12428 { 12429 struct bpf_insn_aux_data *aux = cur_aux(env); 12430 12431 if (can_skip_alu_sanitation(env, insn)) 12432 return 0; 12433 12434 return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); 12435 } 12436 12437 static bool sanitize_needed(u8 opcode) 12438 { 12439 return opcode == BPF_ADD || opcode == BPF_SUB; 12440 } 12441 12442 struct bpf_sanitize_info { 12443 struct bpf_insn_aux_data aux; 12444 bool mask_to_left; 12445 }; 12446 12447 static struct bpf_verifier_state * 12448 sanitize_speculative_path(struct bpf_verifier_env *env, 12449 const struct bpf_insn *insn, 12450 u32 next_idx, u32 curr_idx) 12451 { 12452 struct bpf_verifier_state *branch; 12453 struct bpf_reg_state *regs; 12454 12455 branch = push_stack(env, next_idx, curr_idx, true); 12456 if (branch && insn) { 12457 regs = branch->frame[branch->curframe]->regs; 12458 if (BPF_SRC(insn->code) == BPF_K) { 12459 mark_reg_unknown(env, regs, insn->dst_reg); 12460 } else if (BPF_SRC(insn->code) == BPF_X) { 12461 mark_reg_unknown(env, regs, insn->dst_reg); 12462 mark_reg_unknown(env, regs, insn->src_reg); 12463 } 12464 } 12465 return branch; 12466 } 12467 12468 static int sanitize_ptr_alu(struct bpf_verifier_env *env, 12469 struct bpf_insn *insn, 12470 const struct bpf_reg_state *ptr_reg, 12471 const struct bpf_reg_state *off_reg, 12472 struct bpf_reg_state *dst_reg, 12473 struct bpf_sanitize_info *info, 12474 const bool commit_window) 12475 { 12476 struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; 12477 struct bpf_verifier_state *vstate = env->cur_state; 12478 bool off_is_imm = tnum_is_const(off_reg->var_off); 12479 bool off_is_neg = off_reg->smin_value < 0; 12480 bool ptr_is_dst_reg = ptr_reg == dst_reg; 12481 u8 opcode = BPF_OP(insn->code); 12482 u32 alu_state, alu_limit; 12483 struct bpf_reg_state tmp; 12484 bool ret; 12485 int err; 12486 12487 if (can_skip_alu_sanitation(env, insn)) 12488 return 0; 12489 12490 /* We already marked aux for masking from non-speculative 12491 * paths, thus we got here in the first place. We only care 12492 * to explore bad access from here. 
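 *
 * That is, alu_state/alu_limit were already recorded by the
 * non-speculative walk (and do_misc_fixups() will emit the corresponding
 * masking sequence), so on this speculative pass we skip the bookkeeping
 * below and go straight to simulating the truncated pointer move.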
12493 */ 12494 if (vstate->speculative) 12495 goto do_sim; 12496 12497 if (!commit_window) { 12498 if (!tnum_is_const(off_reg->var_off) && 12499 (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) 12500 return REASON_BOUNDS; 12501 12502 info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || 12503 (opcode == BPF_SUB && !off_is_neg); 12504 } 12505 12506 err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); 12507 if (err < 0) 12508 return err; 12509 12510 if (commit_window) { 12511 /* In commit phase we narrow the masking window based on 12512 * the observed pointer move after the simulated operation. 12513 */ 12514 alu_state = info->aux.alu_state; 12515 alu_limit = abs(info->aux.alu_limit - alu_limit); 12516 } else { 12517 alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; 12518 alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; 12519 alu_state |= ptr_is_dst_reg ? 12520 BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; 12521 12522 /* Limit pruning on unknown scalars to enable deep search for 12523 * potential masking differences from other program paths. 12524 */ 12525 if (!off_is_imm) 12526 env->explore_alu_limits = true; 12527 } 12528 12529 err = update_alu_sanitation_state(aux, alu_state, alu_limit); 12530 if (err < 0) 12531 return err; 12532 do_sim: 12533 /* If we're in commit phase, we're done here given we already 12534 * pushed the truncated dst_reg into the speculative verification 12535 * stack. 12536 * 12537 * Also, when register is a known constant, we rewrite register-based 12538 * operation to immediate-based, and thus do not need masking (and as 12539 * a consequence, do not need to simulate the zero-truncation either). 12540 */ 12541 if (commit_window || off_is_imm) 12542 return 0; 12543 12544 /* Simulate and find potential out-of-bounds access under 12545 * speculative execution from truncation as a result of 12546 * masking when off was not within expected range. If off 12547 * sits in dst, then we temporarily need to move ptr there 12548 * to simulate dst (== 0) +/-= ptr. Needed, for example, 12549 * for cases where we use K-based arithmetic in one direction 12550 * and truncated reg-based in the other in order to explore 12551 * bad access. 12552 */ 12553 if (!ptr_is_dst_reg) { 12554 tmp = *dst_reg; 12555 copy_register_state(dst_reg, ptr_reg); 12556 } 12557 ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, 12558 env->insn_idx); 12559 if (!ptr_is_dst_reg && ret) 12560 *dst_reg = tmp; 12561 return !ret ? REASON_STACK : 0; 12562 } 12563 12564 static void sanitize_mark_insn_seen(struct bpf_verifier_env *env) 12565 { 12566 struct bpf_verifier_state *vstate = env->cur_state; 12567 12568 /* If we simulate paths under speculation, we don't update the 12569 * insn as 'seen' such that when we verify unreachable paths in 12570 * the non-speculative domain, sanitize_dead_code() can still 12571 * rewrite/sanitize them. 12572 */ 12573 if (!vstate->speculative) 12574 env->insn_aux_data[env->insn_idx].seen = env->pass_cnt; 12575 } 12576 12577 static int sanitize_err(struct bpf_verifier_env *env, 12578 const struct bpf_insn *insn, int reason, 12579 const struct bpf_reg_state *off_reg, 12580 const struct bpf_reg_state *dst_reg) 12581 { 12582 static const char *err = "pointer arithmetic with it prohibited for !root"; 12583 const char *op = BPF_OP(insn->code) == BPF_ADD ? 
"add" : "sub"; 12584 u32 dst = insn->dst_reg, src = insn->src_reg; 12585 12586 switch (reason) { 12587 case REASON_BOUNDS: 12588 verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n", 12589 off_reg == dst_reg ? dst : src, err); 12590 break; 12591 case REASON_TYPE: 12592 verbose(env, "R%d has pointer with unsupported alu operation, %s\n", 12593 off_reg == dst_reg ? src : dst, err); 12594 break; 12595 case REASON_PATHS: 12596 verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n", 12597 dst, op, err); 12598 break; 12599 case REASON_LIMIT: 12600 verbose(env, "R%d tried to %s beyond pointer bounds, %s\n", 12601 dst, op, err); 12602 break; 12603 case REASON_STACK: 12604 verbose(env, "R%d could not be pushed for speculative verification, %s\n", 12605 dst, err); 12606 break; 12607 default: 12608 verbose(env, "verifier internal error: unknown reason (%d)\n", 12609 reason); 12610 break; 12611 } 12612 12613 return -EACCES; 12614 } 12615 12616 /* check that stack access falls within stack limits and that 'reg' doesn't 12617 * have a variable offset. 12618 * 12619 * Variable offset is prohibited for unprivileged mode for simplicity since it 12620 * requires corresponding support in Spectre masking for stack ALU. See also 12621 * retrieve_ptr_limit(). 12622 * 12623 * 12624 * 'off' includes 'reg->off'. 12625 */ 12626 static int check_stack_access_for_ptr_arithmetic( 12627 struct bpf_verifier_env *env, 12628 int regno, 12629 const struct bpf_reg_state *reg, 12630 int off) 12631 { 12632 if (!tnum_is_const(reg->var_off)) { 12633 char tn_buf[48]; 12634 12635 tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); 12636 verbose(env, "R%d variable stack access prohibited for !root, var_off=%s off=%d\n", 12637 regno, tn_buf, off); 12638 return -EACCES; 12639 } 12640 12641 if (off >= 0 || off < -MAX_BPF_STACK) { 12642 verbose(env, "R%d stack pointer arithmetic goes out of range, " 12643 "prohibited for !root; off=%d\n", regno, off); 12644 return -EACCES; 12645 } 12646 12647 return 0; 12648 } 12649 12650 static int sanitize_check_bounds(struct bpf_verifier_env *env, 12651 const struct bpf_insn *insn, 12652 const struct bpf_reg_state *dst_reg) 12653 { 12654 u32 dst = insn->dst_reg; 12655 12656 /* For unprivileged we require that resulting offset must be in bounds 12657 * in order to be able to sanitize access later on. 12658 */ 12659 if (env->bypass_spec_v1) 12660 return 0; 12661 12662 switch (dst_reg->type) { 12663 case PTR_TO_STACK: 12664 if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg, 12665 dst_reg->off + dst_reg->var_off.value)) 12666 return -EACCES; 12667 break; 12668 case PTR_TO_MAP_VALUE: 12669 if (check_map_access(env, dst, dst_reg->off, 1, false, ACCESS_HELPER)) { 12670 verbose(env, "R%d pointer arithmetic of map value goes out of range, " 12671 "prohibited for !root\n", dst); 12672 return -EACCES; 12673 } 12674 break; 12675 default: 12676 break; 12677 } 12678 12679 return 0; 12680 } 12681 12682 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 12683 * Caller should also handle BPF_MOV case separately. 12684 * If we return -EACCES, caller may want to try again treating pointer as a 12685 * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
12686 */ 12687 static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, 12688 struct bpf_insn *insn, 12689 const struct bpf_reg_state *ptr_reg, 12690 const struct bpf_reg_state *off_reg) 12691 { 12692 struct bpf_verifier_state *vstate = env->cur_state; 12693 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 12694 struct bpf_reg_state *regs = state->regs, *dst_reg; 12695 bool known = tnum_is_const(off_reg->var_off); 12696 s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, 12697 smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; 12698 u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, 12699 umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; 12700 struct bpf_sanitize_info info = {}; 12701 u8 opcode = BPF_OP(insn->code); 12702 u32 dst = insn->dst_reg; 12703 int ret; 12704 12705 dst_reg = &regs[dst]; 12706 12707 if ((known && (smin_val != smax_val || umin_val != umax_val)) || 12708 smin_val > smax_val || umin_val > umax_val) { 12709 /* Taint dst register if offset had invalid bounds derived from 12710 * e.g. dead branches. 12711 */ 12712 __mark_reg_unknown(env, dst_reg); 12713 return 0; 12714 } 12715 12716 if (BPF_CLASS(insn->code) != BPF_ALU64) { 12717 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 12718 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 12719 __mark_reg_unknown(env, dst_reg); 12720 return 0; 12721 } 12722 12723 verbose(env, 12724 "R%d 32-bit pointer arithmetic prohibited\n", 12725 dst); 12726 return -EACCES; 12727 } 12728 12729 if (ptr_reg->type & PTR_MAYBE_NULL) { 12730 verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", 12731 dst, reg_type_str(env, ptr_reg->type)); 12732 return -EACCES; 12733 } 12734 12735 switch (base_type(ptr_reg->type)) { 12736 case CONST_PTR_TO_MAP: 12737 /* smin_val represents the known value */ 12738 if (known && smin_val == 0 && opcode == BPF_ADD) 12739 break; 12740 fallthrough; 12741 case PTR_TO_PACKET_END: 12742 case PTR_TO_SOCKET: 12743 case PTR_TO_SOCK_COMMON: 12744 case PTR_TO_TCP_SOCK: 12745 case PTR_TO_XDP_SOCK: 12746 verbose(env, "R%d pointer arithmetic on %s prohibited\n", 12747 dst, reg_type_str(env, ptr_reg->type)); 12748 return -EACCES; 12749 default: 12750 break; 12751 } 12752 12753 /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. 12754 * The id may be overwritten later if we create a new variable offset. 12755 */ 12756 dst_reg->type = ptr_reg->type; 12757 dst_reg->id = ptr_reg->id; 12758 12759 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || 12760 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) 12761 return -EINVAL; 12762 12763 /* pointer types do not carry 32-bit bounds at the moment. */ 12764 __mark_reg32_unbounded(dst_reg); 12765 12766 if (sanitize_needed(opcode)) { 12767 ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, 12768 &info, false); 12769 if (ret < 0) 12770 return sanitize_err(env, insn, ret, off_reg, dst_reg); 12771 } 12772 12773 switch (opcode) { 12774 case BPF_ADD: 12775 /* We can take a fixed offset as long as it doesn't overflow 12776 * the s32 'off' field 12777 */ 12778 if (known && (ptr_reg->off + smin_val == 12779 (s64)(s32)(ptr_reg->off + smin_val))) { 12780 /* pointer += K.
Accumulate it into fixed offset */ 12781 dst_reg->smin_value = smin_ptr; 12782 dst_reg->smax_value = smax_ptr; 12783 dst_reg->umin_value = umin_ptr; 12784 dst_reg->umax_value = umax_ptr; 12785 dst_reg->var_off = ptr_reg->var_off; 12786 dst_reg->off = ptr_reg->off + smin_val; 12787 dst_reg->raw = ptr_reg->raw; 12788 break; 12789 } 12790 /* A new variable offset is created. Note that off_reg->off 12791 * == 0, since it's a scalar. 12792 * dst_reg gets the pointer type and since some positive 12793 * integer value was added to the pointer, give it a new 'id' 12794 * if it's a PTR_TO_PACKET. 12795 * This creates a new 'base' pointer, off_reg (variable) gets 12796 * added into the variable offset, and we copy the fixed offset 12797 * from ptr_reg. 12798 */ 12799 if (signed_add_overflows(smin_ptr, smin_val) || 12800 signed_add_overflows(smax_ptr, smax_val)) { 12801 dst_reg->smin_value = S64_MIN; 12802 dst_reg->smax_value = S64_MAX; 12803 } else { 12804 dst_reg->smin_value = smin_ptr + smin_val; 12805 dst_reg->smax_value = smax_ptr + smax_val; 12806 } 12807 if (umin_ptr + umin_val < umin_ptr || 12808 umax_ptr + umax_val < umax_ptr) { 12809 dst_reg->umin_value = 0; 12810 dst_reg->umax_value = U64_MAX; 12811 } else { 12812 dst_reg->umin_value = umin_ptr + umin_val; 12813 dst_reg->umax_value = umax_ptr + umax_val; 12814 } 12815 dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); 12816 dst_reg->off = ptr_reg->off; 12817 dst_reg->raw = ptr_reg->raw; 12818 if (reg_is_pkt_pointer(ptr_reg)) { 12819 dst_reg->id = ++env->id_gen; 12820 /* something was added to pkt_ptr, set range to zero */ 12821 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 12822 } 12823 break; 12824 case BPF_SUB: 12825 if (dst_reg == off_reg) { 12826 /* scalar -= pointer. Creates an unknown scalar */ 12827 verbose(env, "R%d tried to subtract pointer from scalar\n", 12828 dst); 12829 return -EACCES; 12830 } 12831 /* We don't allow subtraction from FP, because (according to the 12832 * test_verifier.c test "invalid fp arithmetic") JITs might not 12833 * be able to deal with it. 12834 */ 12835 if (ptr_reg->type == PTR_TO_STACK) { 12836 verbose(env, "R%d subtraction from stack pointer prohibited\n", 12837 dst); 12838 return -EACCES; 12839 } 12840 if (known && (ptr_reg->off - smin_val == 12841 (s64)(s32)(ptr_reg->off - smin_val))) { 12842 /* pointer -= K. Subtract it from fixed offset */ 12843 dst_reg->smin_value = smin_ptr; 12844 dst_reg->smax_value = smax_ptr; 12845 dst_reg->umin_value = umin_ptr; 12846 dst_reg->umax_value = umax_ptr; 12847 dst_reg->var_off = ptr_reg->var_off; 12848 dst_reg->id = ptr_reg->id; 12849 dst_reg->off = ptr_reg->off - smin_val; 12850 dst_reg->raw = ptr_reg->raw; 12851 break; 12852 } 12853 /* A new variable offset is created. If the subtrahend is known 12854 * nonnegative, then any reg->range we had before is still good.
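 *
 * E.g. for a packet pointer with a verified range of 8 bytes, "r2 -= r3"
 * with r3 known to be in [0, 4] can only move the pointer backwards (or
 * not at all), so the old range stays valid; the range is only cleared
 * below when smin_val < 0, i.e. when the subtraction may actually have
 * advanced the pointer.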
12855 */ 12856 if (signed_sub_overflows(smin_ptr, smax_val) || 12857 signed_sub_overflows(smax_ptr, smin_val)) { 12858 /* Overflow possible, we know nothing */ 12859 dst_reg->smin_value = S64_MIN; 12860 dst_reg->smax_value = S64_MAX; 12861 } else { 12862 dst_reg->smin_value = smin_ptr - smax_val; 12863 dst_reg->smax_value = smax_ptr - smin_val; 12864 } 12865 if (umin_ptr < umax_val) { 12866 /* Overflow possible, we know nothing */ 12867 dst_reg->umin_value = 0; 12868 dst_reg->umax_value = U64_MAX; 12869 } else { 12870 /* Cannot overflow (as long as bounds are consistent) */ 12871 dst_reg->umin_value = umin_ptr - umax_val; 12872 dst_reg->umax_value = umax_ptr - umin_val; 12873 } 12874 dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); 12875 dst_reg->off = ptr_reg->off; 12876 dst_reg->raw = ptr_reg->raw; 12877 if (reg_is_pkt_pointer(ptr_reg)) { 12878 dst_reg->id = ++env->id_gen; 12879 /* something was added to pkt_ptr, set range to zero */ 12880 if (smin_val < 0) 12881 memset(&dst_reg->raw, 0, sizeof(dst_reg->raw)); 12882 } 12883 break; 12884 case BPF_AND: 12885 case BPF_OR: 12886 case BPF_XOR: 12887 /* bitwise ops on pointers are troublesome, prohibit. */ 12888 verbose(env, "R%d bitwise operator %s on pointer prohibited\n", 12889 dst, bpf_alu_string[opcode >> 4]); 12890 return -EACCES; 12891 default: 12892 /* other operators (e.g. MUL,LSH) produce non-pointer results */ 12893 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 12894 dst, bpf_alu_string[opcode >> 4]); 12895 return -EACCES; 12896 } 12897 12898 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) 12899 return -EINVAL; 12900 reg_bounds_sync(dst_reg); 12901 if (sanitize_check_bounds(env, insn, dst_reg) < 0) 12902 return -EACCES; 12903 if (sanitize_needed(opcode)) { 12904 ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, 12905 &info, true); 12906 if (ret < 0) 12907 return sanitize_err(env, insn, ret, off_reg, dst_reg); 12908 } 12909 12910 return 0; 12911 } 12912 12913 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, 12914 struct bpf_reg_state *src_reg) 12915 { 12916 s32 smin_val = src_reg->s32_min_value; 12917 s32 smax_val = src_reg->s32_max_value; 12918 u32 umin_val = src_reg->u32_min_value; 12919 u32 umax_val = src_reg->u32_max_value; 12920 12921 if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || 12922 signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { 12923 dst_reg->s32_min_value = S32_MIN; 12924 dst_reg->s32_max_value = S32_MAX; 12925 } else { 12926 dst_reg->s32_min_value += smin_val; 12927 dst_reg->s32_max_value += smax_val; 12928 } 12929 if (dst_reg->u32_min_value + umin_val < umin_val || 12930 dst_reg->u32_max_value + umax_val < umax_val) { 12931 dst_reg->u32_min_value = 0; 12932 dst_reg->u32_max_value = U32_MAX; 12933 } else { 12934 dst_reg->u32_min_value += umin_val; 12935 dst_reg->u32_max_value += umax_val; 12936 } 12937 } 12938 12939 static void scalar_min_max_add(struct bpf_reg_state *dst_reg, 12940 struct bpf_reg_state *src_reg) 12941 { 12942 s64 smin_val = src_reg->smin_value; 12943 s64 smax_val = src_reg->smax_value; 12944 u64 umin_val = src_reg->umin_value; 12945 u64 umax_val = src_reg->umax_value; 12946 12947 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 12948 signed_add_overflows(dst_reg->smax_value, smax_val)) { 12949 dst_reg->smin_value = S64_MIN; 12950 dst_reg->smax_value = S64_MAX; 12951 } else { 12952 dst_reg->smin_value += smin_val; 12953 dst_reg->smax_value += smax_val; 12954 } 12955 if (dst_reg->umin_value + 
umin_val < umin_val || 12956 dst_reg->umax_value + umax_val < umax_val) { 12957 dst_reg->umin_value = 0; 12958 dst_reg->umax_value = U64_MAX; 12959 } else { 12960 dst_reg->umin_value += umin_val; 12961 dst_reg->umax_value += umax_val; 12962 } 12963 } 12964 12965 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, 12966 struct bpf_reg_state *src_reg) 12967 { 12968 s32 smin_val = src_reg->s32_min_value; 12969 s32 smax_val = src_reg->s32_max_value; 12970 u32 umin_val = src_reg->u32_min_value; 12971 u32 umax_val = src_reg->u32_max_value; 12972 12973 if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || 12974 signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { 12975 /* Overflow possible, we know nothing */ 12976 dst_reg->s32_min_value = S32_MIN; 12977 dst_reg->s32_max_value = S32_MAX; 12978 } else { 12979 dst_reg->s32_min_value -= smax_val; 12980 dst_reg->s32_max_value -= smin_val; 12981 } 12982 if (dst_reg->u32_min_value < umax_val) { 12983 /* Overflow possible, we know nothing */ 12984 dst_reg->u32_min_value = 0; 12985 dst_reg->u32_max_value = U32_MAX; 12986 } else { 12987 /* Cannot overflow (as long as bounds are consistent) */ 12988 dst_reg->u32_min_value -= umax_val; 12989 dst_reg->u32_max_value -= umin_val; 12990 } 12991 } 12992 12993 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, 12994 struct bpf_reg_state *src_reg) 12995 { 12996 s64 smin_val = src_reg->smin_value; 12997 s64 smax_val = src_reg->smax_value; 12998 u64 umin_val = src_reg->umin_value; 12999 u64 umax_val = src_reg->umax_value; 13000 13001 if (signed_sub_overflows(dst_reg->smin_value, smax_val) || 13002 signed_sub_overflows(dst_reg->smax_value, smin_val)) { 13003 /* Overflow possible, we know nothing */ 13004 dst_reg->smin_value = S64_MIN; 13005 dst_reg->smax_value = S64_MAX; 13006 } else { 13007 dst_reg->smin_value -= smax_val; 13008 dst_reg->smax_value -= smin_val; 13009 } 13010 if (dst_reg->umin_value < umax_val) { 13011 /* Overflow possible, we know nothing */ 13012 dst_reg->umin_value = 0; 13013 dst_reg->umax_value = U64_MAX; 13014 } else { 13015 /* Cannot overflow (as long as bounds are consistent) */ 13016 dst_reg->umin_value -= umax_val; 13017 dst_reg->umax_value -= umin_val; 13018 } 13019 } 13020 13021 static void scalar32_min_max_mul(struct bpf_reg_state *dst_reg, 13022 struct bpf_reg_state *src_reg) 13023 { 13024 s32 smin_val = src_reg->s32_min_value; 13025 u32 umin_val = src_reg->u32_min_value; 13026 u32 umax_val = src_reg->u32_max_value; 13027 13028 if (smin_val < 0 || dst_reg->s32_min_value < 0) { 13029 /* Ain't nobody got time to multiply that sign */ 13030 __mark_reg32_unbounded(dst_reg); 13031 return; 13032 } 13033 /* Both values are positive, so we can work with unsigned and 13034 * copy the result to signed (unless it exceeds S32_MAX). 
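 *
 * E.g. dst in [2, 100] multiplied by src in [3, 10]: both operands are
 * capped at U16_MAX below, so the products cannot wrap u32, and the new
 * u32 bounds become [6, 1000]; since 1000 <= S32_MAX the same values are
 * copied into the s32 bounds.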
13035 */ 13036 if (umax_val > U16_MAX || dst_reg->u32_max_value > U16_MAX) { 13037 /* Potential overflow, we know nothing */ 13038 __mark_reg32_unbounded(dst_reg); 13039 return; 13040 } 13041 dst_reg->u32_min_value *= umin_val; 13042 dst_reg->u32_max_value *= umax_val; 13043 if (dst_reg->u32_max_value > S32_MAX) { 13044 /* Overflow possible, we know nothing */ 13045 dst_reg->s32_min_value = S32_MIN; 13046 dst_reg->s32_max_value = S32_MAX; 13047 } else { 13048 dst_reg->s32_min_value = dst_reg->u32_min_value; 13049 dst_reg->s32_max_value = dst_reg->u32_max_value; 13050 } 13051 } 13052 13053 static void scalar_min_max_mul(struct bpf_reg_state *dst_reg, 13054 struct bpf_reg_state *src_reg) 13055 { 13056 s64 smin_val = src_reg->smin_value; 13057 u64 umin_val = src_reg->umin_value; 13058 u64 umax_val = src_reg->umax_value; 13059 13060 if (smin_val < 0 || dst_reg->smin_value < 0) { 13061 /* Ain't nobody got time to multiply that sign */ 13062 __mark_reg64_unbounded(dst_reg); 13063 return; 13064 } 13065 /* Both values are positive, so we can work with unsigned and 13066 * copy the result to signed (unless it exceeds S64_MAX). 13067 */ 13068 if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { 13069 /* Potential overflow, we know nothing */ 13070 __mark_reg64_unbounded(dst_reg); 13071 return; 13072 } 13073 dst_reg->umin_value *= umin_val; 13074 dst_reg->umax_value *= umax_val; 13075 if (dst_reg->umax_value > S64_MAX) { 13076 /* Overflow possible, we know nothing */ 13077 dst_reg->smin_value = S64_MIN; 13078 dst_reg->smax_value = S64_MAX; 13079 } else { 13080 dst_reg->smin_value = dst_reg->umin_value; 13081 dst_reg->smax_value = dst_reg->umax_value; 13082 } 13083 } 13084 13085 static void scalar32_min_max_and(struct bpf_reg_state *dst_reg, 13086 struct bpf_reg_state *src_reg) 13087 { 13088 bool src_known = tnum_subreg_is_const(src_reg->var_off); 13089 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 13090 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 13091 s32 smin_val = src_reg->s32_min_value; 13092 u32 umax_val = src_reg->u32_max_value; 13093 13094 if (src_known && dst_known) { 13095 __mark_reg32_known(dst_reg, var32_off.value); 13096 return; 13097 } 13098 13099 /* We get our minimum from the var_off, since that's inherently 13100 * bitwise. Our maximum is the minimum of the operands' maxima. 13101 */ 13102 dst_reg->u32_min_value = var32_off.value; 13103 dst_reg->u32_max_value = min(dst_reg->u32_max_value, umax_val); 13104 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 13105 /* Lose signed bounds when ANDing negative numbers, 13106 * ain't nobody got time for that. 13107 */ 13108 dst_reg->s32_min_value = S32_MIN; 13109 dst_reg->s32_max_value = S32_MAX; 13110 } else { 13111 /* ANDing two positives gives a positive, so safe to 13112 * cast result into s64. 13113 */ 13114 dst_reg->s32_min_value = dst_reg->u32_min_value; 13115 dst_reg->s32_max_value = dst_reg->u32_max_value; 13116 } 13117 } 13118 13119 static void scalar_min_max_and(struct bpf_reg_state *dst_reg, 13120 struct bpf_reg_state *src_reg) 13121 { 13122 bool src_known = tnum_is_const(src_reg->var_off); 13123 bool dst_known = tnum_is_const(dst_reg->var_off); 13124 s64 smin_val = src_reg->smin_value; 13125 u64 umax_val = src_reg->umax_value; 13126 13127 if (src_known && dst_known) { 13128 __mark_reg_known(dst_reg, dst_reg->var_off.value); 13129 return; 13130 } 13131 13132 /* We get our minimum from the var_off, since that's inherently 13133 * bitwise. Our maximum is the minimum of the operands' maxima. 
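 *
 * E.g. with dst->umax_value == 0xff and src->umax_value == 0x0f, the
 * result's umax becomes min(0xff, 0x0f) = 0x0f, while umin is whatever
 * bits the already-ANDed var_off proves to be one (0 if none are known).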
13134 */ 13135 dst_reg->umin_value = dst_reg->var_off.value; 13136 dst_reg->umax_value = min(dst_reg->umax_value, umax_val); 13137 if (dst_reg->smin_value < 0 || smin_val < 0) { 13138 /* Lose signed bounds when ANDing negative numbers, 13139 * ain't nobody got time for that. 13140 */ 13141 dst_reg->smin_value = S64_MIN; 13142 dst_reg->smax_value = S64_MAX; 13143 } else { 13144 /* ANDing two positives gives a positive, so safe to 13145 * cast result into s64. 13146 */ 13147 dst_reg->smin_value = dst_reg->umin_value; 13148 dst_reg->smax_value = dst_reg->umax_value; 13149 } 13150 /* We may learn something more from the var_off */ 13151 __update_reg_bounds(dst_reg); 13152 } 13153 13154 static void scalar32_min_max_or(struct bpf_reg_state *dst_reg, 13155 struct bpf_reg_state *src_reg) 13156 { 13157 bool src_known = tnum_subreg_is_const(src_reg->var_off); 13158 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 13159 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 13160 s32 smin_val = src_reg->s32_min_value; 13161 u32 umin_val = src_reg->u32_min_value; 13162 13163 if (src_known && dst_known) { 13164 __mark_reg32_known(dst_reg, var32_off.value); 13165 return; 13166 } 13167 13168 /* We get our maximum from the var_off, and our minimum is the 13169 * maximum of the operands' minima 13170 */ 13171 dst_reg->u32_min_value = max(dst_reg->u32_min_value, umin_val); 13172 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 13173 if (dst_reg->s32_min_value < 0 || smin_val < 0) { 13174 /* Lose signed bounds when ORing negative numbers, 13175 * ain't nobody got time for that. 13176 */ 13177 dst_reg->s32_min_value = S32_MIN; 13178 dst_reg->s32_max_value = S32_MAX; 13179 } else { 13180 /* ORing two positives gives a positive, so safe to 13181 * cast result into s64. 13182 */ 13183 dst_reg->s32_min_value = dst_reg->u32_min_value; 13184 dst_reg->s32_max_value = dst_reg->u32_max_value; 13185 } 13186 } 13187 13188 static void scalar_min_max_or(struct bpf_reg_state *dst_reg, 13189 struct bpf_reg_state *src_reg) 13190 { 13191 bool src_known = tnum_is_const(src_reg->var_off); 13192 bool dst_known = tnum_is_const(dst_reg->var_off); 13193 s64 smin_val = src_reg->smin_value; 13194 u64 umin_val = src_reg->umin_value; 13195 13196 if (src_known && dst_known) { 13197 __mark_reg_known(dst_reg, dst_reg->var_off.value); 13198 return; 13199 } 13200 13201 /* We get our maximum from the var_off, and our minimum is the 13202 * maximum of the operands' minima 13203 */ 13204 dst_reg->umin_value = max(dst_reg->umin_value, umin_val); 13205 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 13206 if (dst_reg->smin_value < 0 || smin_val < 0) { 13207 /* Lose signed bounds when ORing negative numbers, 13208 * ain't nobody got time for that. 13209 */ 13210 dst_reg->smin_value = S64_MIN; 13211 dst_reg->smax_value = S64_MAX; 13212 } else { 13213 /* ORing two positives gives a positive, so safe to 13214 * cast result into s64. 
13215 */ 13216 dst_reg->smin_value = dst_reg->umin_value; 13217 dst_reg->smax_value = dst_reg->umax_value; 13218 } 13219 /* We may learn something more from the var_off */ 13220 __update_reg_bounds(dst_reg); 13221 } 13222 13223 static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg, 13224 struct bpf_reg_state *src_reg) 13225 { 13226 bool src_known = tnum_subreg_is_const(src_reg->var_off); 13227 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 13228 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 13229 s32 smin_val = src_reg->s32_min_value; 13230 13231 if (src_known && dst_known) { 13232 __mark_reg32_known(dst_reg, var32_off.value); 13233 return; 13234 } 13235 13236 /* We get both minimum and maximum from the var32_off. */ 13237 dst_reg->u32_min_value = var32_off.value; 13238 dst_reg->u32_max_value = var32_off.value | var32_off.mask; 13239 13240 if (dst_reg->s32_min_value >= 0 && smin_val >= 0) { 13241 /* XORing two positive sign numbers gives a positive, 13242 * so safe to cast u32 result into s32. 13243 */ 13244 dst_reg->s32_min_value = dst_reg->u32_min_value; 13245 dst_reg->s32_max_value = dst_reg->u32_max_value; 13246 } else { 13247 dst_reg->s32_min_value = S32_MIN; 13248 dst_reg->s32_max_value = S32_MAX; 13249 } 13250 } 13251 13252 static void scalar_min_max_xor(struct bpf_reg_state *dst_reg, 13253 struct bpf_reg_state *src_reg) 13254 { 13255 bool src_known = tnum_is_const(src_reg->var_off); 13256 bool dst_known = tnum_is_const(dst_reg->var_off); 13257 s64 smin_val = src_reg->smin_value; 13258 13259 if (src_known && dst_known) { 13260 /* dst_reg->var_off.value has been updated earlier */ 13261 __mark_reg_known(dst_reg, dst_reg->var_off.value); 13262 return; 13263 } 13264 13265 /* We get both minimum and maximum from the var_off. */ 13266 dst_reg->umin_value = dst_reg->var_off.value; 13267 dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; 13268 13269 if (dst_reg->smin_value >= 0 && smin_val >= 0) { 13270 /* XORing two positive sign numbers gives a positive, 13271 * so safe to cast u64 result into s64. 
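 *
 * (both operands having smin >= 0 means bit 63 is clear in each, so it
 * is also clear in the XOR; e.g. a result var_off of (value=0x1,
 * mask=0x6) yields unsigned bounds [0x1, 0x7] above, which then carry
 * over to the signed bounds unchanged)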
13272 */ 13273 dst_reg->smin_value = dst_reg->umin_value; 13274 dst_reg->smax_value = dst_reg->umax_value; 13275 } else { 13276 dst_reg->smin_value = S64_MIN; 13277 dst_reg->smax_value = S64_MAX; 13278 } 13279 13280 __update_reg_bounds(dst_reg); 13281 } 13282 13283 static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 13284 u64 umin_val, u64 umax_val) 13285 { 13286 /* We lose all sign bit information (except what we can pick 13287 * up from var_off) 13288 */ 13289 dst_reg->s32_min_value = S32_MIN; 13290 dst_reg->s32_max_value = S32_MAX; 13291 /* If we might shift our top bit out, then we know nothing */ 13292 if (umax_val > 31 || dst_reg->u32_max_value > 1ULL << (31 - umax_val)) { 13293 dst_reg->u32_min_value = 0; 13294 dst_reg->u32_max_value = U32_MAX; 13295 } else { 13296 dst_reg->u32_min_value <<= umin_val; 13297 dst_reg->u32_max_value <<= umax_val; 13298 } 13299 } 13300 13301 static void scalar32_min_max_lsh(struct bpf_reg_state *dst_reg, 13302 struct bpf_reg_state *src_reg) 13303 { 13304 u32 umax_val = src_reg->u32_max_value; 13305 u32 umin_val = src_reg->u32_min_value; 13306 /* u32 alu operation will zext upper bits */ 13307 struct tnum subreg = tnum_subreg(dst_reg->var_off); 13308 13309 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 13310 dst_reg->var_off = tnum_subreg(tnum_lshift(subreg, umin_val)); 13311 /* Not required but being careful mark reg64 bounds as unknown so 13312 * that we are forced to pick them up from tnum and zext later and 13313 * if some path skips this step we are still safe. 13314 */ 13315 __mark_reg64_unbounded(dst_reg); 13316 __update_reg32_bounds(dst_reg); 13317 } 13318 13319 static void __scalar64_min_max_lsh(struct bpf_reg_state *dst_reg, 13320 u64 umin_val, u64 umax_val) 13321 { 13322 /* Special case <<32 because it is a common compiler pattern to sign 13323 * extend subreg by doing <<32 s>>32. In this case if 32bit bounds are 13324 * positive we know this shift will also be positive so we can track 13325 * bounds correctly. Otherwise we lose all sign bit information except 13326 * what we can pick up from var_off. Perhaps we can generalize this 13327 * later to shifts of any length. 
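 *
 * E.g. for the sign-extension pattern "r1 <<= 32; r1 s>>= 32" with
 * 32-bit bounds [0, 100], the code below sets the 64-bit signed bounds
 * to [0, 100 << 32], and the following arithmetic right shift by 32
 * recovers [0, 100].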
13328 */ 13329 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_max_value >= 0) 13330 dst_reg->smax_value = (s64)dst_reg->s32_max_value << 32; 13331 else 13332 dst_reg->smax_value = S64_MAX; 13333 13334 if (umin_val == 32 && umax_val == 32 && dst_reg->s32_min_value >= 0) 13335 dst_reg->smin_value = (s64)dst_reg->s32_min_value << 32; 13336 else 13337 dst_reg->smin_value = S64_MIN; 13338 13339 /* If we might shift our top bit out, then we know nothing */ 13340 if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { 13341 dst_reg->umin_value = 0; 13342 dst_reg->umax_value = U64_MAX; 13343 } else { 13344 dst_reg->umin_value <<= umin_val; 13345 dst_reg->umax_value <<= umax_val; 13346 } 13347 } 13348 13349 static void scalar_min_max_lsh(struct bpf_reg_state *dst_reg, 13350 struct bpf_reg_state *src_reg) 13351 { 13352 u64 umax_val = src_reg->umax_value; 13353 u64 umin_val = src_reg->umin_value; 13354 13355 /* scalar64 calc uses 32bit unshifted bounds so must be called first */ 13356 __scalar64_min_max_lsh(dst_reg, umin_val, umax_val); 13357 __scalar32_min_max_lsh(dst_reg, umin_val, umax_val); 13358 13359 dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); 13360 /* We may learn something more from the var_off */ 13361 __update_reg_bounds(dst_reg); 13362 } 13363 13364 static void scalar32_min_max_rsh(struct bpf_reg_state *dst_reg, 13365 struct bpf_reg_state *src_reg) 13366 { 13367 struct tnum subreg = tnum_subreg(dst_reg->var_off); 13368 u32 umax_val = src_reg->u32_max_value; 13369 u32 umin_val = src_reg->u32_min_value; 13370 13371 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 13372 * be negative, then either: 13373 * 1) src_reg might be zero, so the sign bit of the result is 13374 * unknown, so we lose our signed bounds 13375 * 2) it's known negative, thus the unsigned bounds capture the 13376 * signed bounds 13377 * 3) the signed bounds cross zero, so they tell us nothing 13378 * about the result 13379 * If the value in dst_reg is known nonnegative, then again the 13380 * unsigned bounds capture the signed bounds. 13381 * Thus, in all cases it suffices to blow away our signed bounds 13382 * and rely on inferring new ones from the unsigned bounds and 13383 * var_off of the result. 13384 */ 13385 dst_reg->s32_min_value = S32_MIN; 13386 dst_reg->s32_max_value = S32_MAX; 13387 13388 dst_reg->var_off = tnum_rshift(subreg, umin_val); 13389 dst_reg->u32_min_value >>= umax_val; 13390 dst_reg->u32_max_value >>= umin_val; 13391 13392 __mark_reg64_unbounded(dst_reg); 13393 __update_reg32_bounds(dst_reg); 13394 } 13395 13396 static void scalar_min_max_rsh(struct bpf_reg_state *dst_reg, 13397 struct bpf_reg_state *src_reg) 13398 { 13399 u64 umax_val = src_reg->umax_value; 13400 u64 umin_val = src_reg->umin_value; 13401 13402 /* BPF_RSH is an unsigned shift. If the value in dst_reg might 13403 * be negative, then either: 13404 * 1) src_reg might be zero, so the sign bit of the result is 13405 * unknown, so we lose our signed bounds 13406 * 2) it's known negative, thus the unsigned bounds capture the 13407 * signed bounds 13408 * 3) the signed bounds cross zero, so they tell us nothing 13409 * about the result 13410 * If the value in dst_reg is known nonnegative, then again the 13411 * unsigned bounds capture the signed bounds. 13412 * Thus, in all cases it suffices to blow away our signed bounds 13413 * and rely on inferring new ones from the unsigned bounds and 13414 * var_off of the result. 
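 *
 * E.g. dst in [0x100, 0x1ff] shifted right by src in [4, 8]: the new
 * umin is 0x100 >> 8 = 1 and the new umax is 0x1ff >> 4 = 0x1f; fresh
 * signed bounds are later re-derived from these and from the shifted
 * var_off.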
13415 */ 13416 dst_reg->smin_value = S64_MIN; 13417 dst_reg->smax_value = S64_MAX; 13418 dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); 13419 dst_reg->umin_value >>= umax_val; 13420 dst_reg->umax_value >>= umin_val; 13421 13422 /* It's not easy to operate on alu32 bounds here because it depends 13423 * on bits being shifted in. Take the easy way out and mark unbounded 13424 * so we can recalculate later from tnum. 13425 */ 13426 __mark_reg32_unbounded(dst_reg); 13427 __update_reg_bounds(dst_reg); 13428 } 13429 13430 static void scalar32_min_max_arsh(struct bpf_reg_state *dst_reg, 13431 struct bpf_reg_state *src_reg) 13432 { 13433 u64 umin_val = src_reg->u32_min_value; 13434 13435 /* Upon reaching here, src_known is true and 13436 * umax_val is equal to umin_val. 13437 */ 13438 dst_reg->s32_min_value = (u32)(((s32)dst_reg->s32_min_value) >> umin_val); 13439 dst_reg->s32_max_value = (u32)(((s32)dst_reg->s32_max_value) >> umin_val); 13440 13441 dst_reg->var_off = tnum_arshift(tnum_subreg(dst_reg->var_off), umin_val, 32); 13442 13443 /* blow away the dst_reg umin_value/umax_value and rely on 13444 * dst_reg var_off to refine the result. 13445 */ 13446 dst_reg->u32_min_value = 0; 13447 dst_reg->u32_max_value = U32_MAX; 13448 13449 __mark_reg64_unbounded(dst_reg); 13450 __update_reg32_bounds(dst_reg); 13451 } 13452 13453 static void scalar_min_max_arsh(struct bpf_reg_state *dst_reg, 13454 struct bpf_reg_state *src_reg) 13455 { 13456 u64 umin_val = src_reg->umin_value; 13457 13458 /* Upon reaching here, src_known is true and umax_val is equal 13459 * to umin_val. 13460 */ 13461 dst_reg->smin_value >>= umin_val; 13462 dst_reg->smax_value >>= umin_val; 13463 13464 dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, 64); 13465 13466 /* blow away the dst_reg umin_value/umax_value and rely on 13467 * dst_reg var_off to refine the result. 13468 */ 13469 dst_reg->umin_value = 0; 13470 dst_reg->umax_value = U64_MAX; 13471 13472 /* It's not easy to operate on alu32 bounds here because it depends 13473 * on bits being shifted in from upper 32-bits. Take the easy way out 13474 * and mark unbounded so we can recalculate later from tnum. 13475 */ 13476 __mark_reg32_unbounded(dst_reg); 13477 __update_reg_bounds(dst_reg); 13478 } 13479 13480 /* WARNING: This function does calculations on 64-bit values, but the actual 13481 * execution may occur on 32-bit values. Therefore, things like bitshifts 13482 * need extra checks in the 32-bit case. 13483 */ 13484 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 13485 struct bpf_insn *insn, 13486 struct bpf_reg_state *dst_reg, 13487 struct bpf_reg_state src_reg) 13488 { 13489 struct bpf_reg_state *regs = cur_regs(env); 13490 u8 opcode = BPF_OP(insn->code); 13491 bool src_known; 13492 s64 smin_val, smax_val; 13493 u64 umin_val, umax_val; 13494 s32 s32_min_val, s32_max_val; 13495 u32 u32_min_val, u32_max_val; 13496 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ?
64 : 32; 13497 bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); 13498 int ret; 13499 13500 smin_val = src_reg.smin_value; 13501 smax_val = src_reg.smax_value; 13502 umin_val = src_reg.umin_value; 13503 umax_val = src_reg.umax_value; 13504 13505 s32_min_val = src_reg.s32_min_value; 13506 s32_max_val = src_reg.s32_max_value; 13507 u32_min_val = src_reg.u32_min_value; 13508 u32_max_val = src_reg.u32_max_value; 13509 13510 if (alu32) { 13511 src_known = tnum_subreg_is_const(src_reg.var_off); 13512 if ((src_known && 13513 (s32_min_val != s32_max_val || u32_min_val != u32_max_val)) || 13514 s32_min_val > s32_max_val || u32_min_val > u32_max_val) { 13515 /* Taint dst register if offset had invalid bounds 13516 * derived from e.g. dead branches. 13517 */ 13518 __mark_reg_unknown(env, dst_reg); 13519 return 0; 13520 } 13521 } else { 13522 src_known = tnum_is_const(src_reg.var_off); 13523 if ((src_known && 13524 (smin_val != smax_val || umin_val != umax_val)) || 13525 smin_val > smax_val || umin_val > umax_val) { 13526 /* Taint dst register if offset had invalid bounds 13527 * derived from e.g. dead branches. 13528 */ 13529 __mark_reg_unknown(env, dst_reg); 13530 return 0; 13531 } 13532 } 13533 13534 if (!src_known && 13535 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { 13536 __mark_reg_unknown(env, dst_reg); 13537 return 0; 13538 } 13539 13540 if (sanitize_needed(opcode)) { 13541 ret = sanitize_val_alu(env, insn); 13542 if (ret < 0) 13543 return sanitize_err(env, insn, ret, NULL, NULL); 13544 } 13545 13546 /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops. 13547 * There are two classes of instructions: for the first class we track 13548 * both alu32 and alu64 sign/unsigned bounds independently; this provides 13549 * the greatest amount of precision when alu operations are mixed with 13550 * jmp32 operations. These operations are BPF_ADD, BPF_SUB, BPF_MUL, 13551 * BPF_AND, and BPF_OR. This is possible because these ops have fairly 13552 * easy to understand and calculate behavior in both 32-bit and 64-bit 13553 * alu ops. See alu32 verifier tests for examples. The second class of 13554 * operations, BPF_LSH, BPF_RSH, and BPF_ARSH, however, are not so easy 13555 * with regard to tracking sign/unsigned bounds because the bits may 13556 * cross subreg boundaries in the alu64 case. When this happens we mark 13557 * the reg unbounded in the subreg bound space and use the resulting 13558 * tnum to calculate an approximation of the sign/unsigned bounds.
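 *
 * E.g. a 64-bit BPF_RSH can shift bits across the 32-bit subreg
 * boundary, so scalar_min_max_rsh() marks the 32-bit bounds unbounded
 * and lets them be re-derived from the resulting tnum, whereas BPF_ADD
 * updates the 32-bit and 64-bit bounds directly via
 * scalar32_min_max_add() and scalar_min_max_add().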
13559 */ 13560 switch (opcode) { 13561 case BPF_ADD: 13562 scalar32_min_max_add(dst_reg, &src_reg); 13563 scalar_min_max_add(dst_reg, &src_reg); 13564 dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); 13565 break; 13566 case BPF_SUB: 13567 scalar32_min_max_sub(dst_reg, &src_reg); 13568 scalar_min_max_sub(dst_reg, &src_reg); 13569 dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); 13570 break; 13571 case BPF_MUL: 13572 dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); 13573 scalar32_min_max_mul(dst_reg, &src_reg); 13574 scalar_min_max_mul(dst_reg, &src_reg); 13575 break; 13576 case BPF_AND: 13577 dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); 13578 scalar32_min_max_and(dst_reg, &src_reg); 13579 scalar_min_max_and(dst_reg, &src_reg); 13580 break; 13581 case BPF_OR: 13582 dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); 13583 scalar32_min_max_or(dst_reg, &src_reg); 13584 scalar_min_max_or(dst_reg, &src_reg); 13585 break; 13586 case BPF_XOR: 13587 dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off); 13588 scalar32_min_max_xor(dst_reg, &src_reg); 13589 scalar_min_max_xor(dst_reg, &src_reg); 13590 break; 13591 case BPF_LSH: 13592 if (umax_val >= insn_bitness) { 13593 /* Shifts greater than 31 or 63 are undefined. 13594 * This includes shifts by a negative number. 13595 */ 13596 mark_reg_unknown(env, regs, insn->dst_reg); 13597 break; 13598 } 13599 if (alu32) 13600 scalar32_min_max_lsh(dst_reg, &src_reg); 13601 else 13602 scalar_min_max_lsh(dst_reg, &src_reg); 13603 break; 13604 case BPF_RSH: 13605 if (umax_val >= insn_bitness) { 13606 /* Shifts greater than 31 or 63 are undefined. 13607 * This includes shifts by a negative number. 13608 */ 13609 mark_reg_unknown(env, regs, insn->dst_reg); 13610 break; 13611 } 13612 if (alu32) 13613 scalar32_min_max_rsh(dst_reg, &src_reg); 13614 else 13615 scalar_min_max_rsh(dst_reg, &src_reg); 13616 break; 13617 case BPF_ARSH: 13618 if (umax_val >= insn_bitness) { 13619 /* Shifts greater than 31 or 63 are undefined. 13620 * This includes shifts by a negative number. 13621 */ 13622 mark_reg_unknown(env, regs, insn->dst_reg); 13623 break; 13624 } 13625 if (alu32) 13626 scalar32_min_max_arsh(dst_reg, &src_reg); 13627 else 13628 scalar_min_max_arsh(dst_reg, &src_reg); 13629 break; 13630 default: 13631 mark_reg_unknown(env, regs, insn->dst_reg); 13632 break; 13633 } 13634 13635 /* ALU32 ops are zero extended into 64bit register */ 13636 if (alu32) 13637 zext_32_to_64(dst_reg); 13638 reg_bounds_sync(dst_reg); 13639 return 0; 13640 } 13641 13642 /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max 13643 * and var_off. 
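 *
 * Dispatch sketch: pointer OP scalar and scalar += pointer are handled by
 * adjust_ptr_min_max_vals(), scalar OP scalar by
 * adjust_scalar_min_max_vals(), and pointer OP pointer is rejected,
 * except that privileged programs may subtract pointers and obtain an
 * unknown scalar.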
13644 */ 13645 static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, 13646 struct bpf_insn *insn) 13647 { 13648 struct bpf_verifier_state *vstate = env->cur_state; 13649 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 13650 struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; 13651 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; 13652 u8 opcode = BPF_OP(insn->code); 13653 int err; 13654 13655 dst_reg = &regs[insn->dst_reg]; 13656 src_reg = NULL; 13657 if (dst_reg->type != SCALAR_VALUE) 13658 ptr_reg = dst_reg; 13659 else 13660 /* Make sure ID is cleared otherwise dst_reg min/max could be 13661 * incorrectly propagated into other registers by find_equal_scalars() 13662 */ 13663 dst_reg->id = 0; 13664 if (BPF_SRC(insn->code) == BPF_X) { 13665 src_reg = &regs[insn->src_reg]; 13666 if (src_reg->type != SCALAR_VALUE) { 13667 if (dst_reg->type != SCALAR_VALUE) { 13668 /* Combining two pointers by any ALU op yields 13669 * an arbitrary scalar. Disallow all math except 13670 * pointer subtraction 13671 */ 13672 if (opcode == BPF_SUB && env->allow_ptr_leaks) { 13673 mark_reg_unknown(env, regs, insn->dst_reg); 13674 return 0; 13675 } 13676 verbose(env, "R%d pointer %s pointer prohibited\n", 13677 insn->dst_reg, 13678 bpf_alu_string[opcode >> 4]); 13679 return -EACCES; 13680 } else { 13681 /* scalar += pointer 13682 * This is legal, but we have to reverse our 13683 * src/dest handling in computing the range 13684 */ 13685 err = mark_chain_precision(env, insn->dst_reg); 13686 if (err) 13687 return err; 13688 return adjust_ptr_min_max_vals(env, insn, 13689 src_reg, dst_reg); 13690 } 13691 } else if (ptr_reg) { 13692 /* pointer += scalar */ 13693 err = mark_chain_precision(env, insn->src_reg); 13694 if (err) 13695 return err; 13696 return adjust_ptr_min_max_vals(env, insn, 13697 dst_reg, src_reg); 13698 } else if (dst_reg->precise) { 13699 /* if dst_reg is precise, src_reg should be precise as well */ 13700 err = mark_chain_precision(env, insn->src_reg); 13701 if (err) 13702 return err; 13703 } 13704 } else { 13705 /* Pretend the src is a reg with a known value, since we only 13706 * need to be able to read from this state.
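 *
 * E.g. for "r1 += 8" (BPF_K), off_reg below becomes a fake SCALAR_VALUE
 * register known to be exactly 8, so the immediate form can reuse the
 * same pointer/scalar adjustment paths as the register form.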
13707 */ 13708 off_reg.type = SCALAR_VALUE; 13709 __mark_reg_known(&off_reg, insn->imm); 13710 src_reg = &off_reg; 13711 if (ptr_reg) /* pointer += K */ 13712 return adjust_ptr_min_max_vals(env, insn, 13713 ptr_reg, src_reg); 13714 } 13715 13716 /* Got here implies adding two SCALAR_VALUEs */ 13717 if (WARN_ON_ONCE(ptr_reg)) { 13718 print_verifier_state(env, state, true); 13719 verbose(env, "verifier internal error: unexpected ptr_reg\n"); 13720 return -EINVAL; 13721 } 13722 if (WARN_ON(!src_reg)) { 13723 print_verifier_state(env, state, true); 13724 verbose(env, "verifier internal error: no src_reg\n"); 13725 return -EINVAL; 13726 } 13727 return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); 13728 } 13729 13730 /* check validity of 32-bit and 64-bit arithmetic operations */ 13731 static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) 13732 { 13733 struct bpf_reg_state *regs = cur_regs(env); 13734 u8 opcode = BPF_OP(insn->code); 13735 int err; 13736 13737 if (opcode == BPF_END || opcode == BPF_NEG) { 13738 if (opcode == BPF_NEG) { 13739 if (BPF_SRC(insn->code) != BPF_K || 13740 insn->src_reg != BPF_REG_0 || 13741 insn->off != 0 || insn->imm != 0) { 13742 verbose(env, "BPF_NEG uses reserved fields\n"); 13743 return -EINVAL; 13744 } 13745 } else { 13746 if (insn->src_reg != BPF_REG_0 || insn->off != 0 || 13747 (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || 13748 (BPF_CLASS(insn->code) == BPF_ALU64 && 13749 BPF_SRC(insn->code) != BPF_TO_LE)) { 13750 verbose(env, "BPF_END uses reserved fields\n"); 13751 return -EINVAL; 13752 } 13753 } 13754 13755 /* check src operand */ 13756 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 13757 if (err) 13758 return err; 13759 13760 if (is_pointer_value(env, insn->dst_reg)) { 13761 verbose(env, "R%d pointer arithmetic prohibited\n", 13762 insn->dst_reg); 13763 return -EACCES; 13764 } 13765 13766 /* check dest operand */ 13767 err = check_reg_arg(env, insn->dst_reg, DST_OP); 13768 if (err) 13769 return err; 13770 13771 } else if (opcode == BPF_MOV) { 13772 13773 if (BPF_SRC(insn->code) == BPF_X) { 13774 if (insn->imm != 0) { 13775 verbose(env, "BPF_MOV uses reserved fields\n"); 13776 return -EINVAL; 13777 } 13778 13779 if (BPF_CLASS(insn->code) == BPF_ALU) { 13780 if (insn->off != 0 && insn->off != 8 && insn->off != 16) { 13781 verbose(env, "BPF_MOV uses reserved fields\n"); 13782 return -EINVAL; 13783 } 13784 } else { 13785 if (insn->off != 0 && insn->off != 8 && insn->off != 16 && 13786 insn->off != 32) { 13787 verbose(env, "BPF_MOV uses reserved fields\n"); 13788 return -EINVAL; 13789 } 13790 } 13791 13792 /* check src operand */ 13793 err = check_reg_arg(env, insn->src_reg, SRC_OP); 13794 if (err) 13795 return err; 13796 } else { 13797 if (insn->src_reg != BPF_REG_0 || insn->off != 0) { 13798 verbose(env, "BPF_MOV uses reserved fields\n"); 13799 return -EINVAL; 13800 } 13801 } 13802 13803 /* check dest operand, mark as required later */ 13804 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 13805 if (err) 13806 return err; 13807 13808 if (BPF_SRC(insn->code) == BPF_X) { 13809 struct bpf_reg_state *src_reg = regs + insn->src_reg; 13810 struct bpf_reg_state *dst_reg = regs + insn->dst_reg; 13811 bool need_id = src_reg->type == SCALAR_VALUE && !src_reg->id && 13812 !tnum_is_const(src_reg->var_off); 13813 13814 if (BPF_CLASS(insn->code) == BPF_ALU64) { 13815 if (insn->off == 0) { 13816 /* case: R1 = R2 13817 * copy register state to dest reg 13818 */ 13819 if (need_id) 13820 /* Assign src and dst registers 
the same ID 13821 * that will be used by find_equal_scalars() 13822 * to propagate min/max range. 13823 */ 13824 src_reg->id = ++env->id_gen; 13825 copy_register_state(dst_reg, src_reg); 13826 dst_reg->live |= REG_LIVE_WRITTEN; 13827 dst_reg->subreg_def = DEF_NOT_SUBREG; 13828 } else { 13829 /* case: R1 = (s8, s16 s32)R2 */ 13830 if (is_pointer_value(env, insn->src_reg)) { 13831 verbose(env, 13832 "R%d sign-extension part of pointer\n", 13833 insn->src_reg); 13834 return -EACCES; 13835 } else if (src_reg->type == SCALAR_VALUE) { 13836 bool no_sext; 13837 13838 no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); 13839 if (no_sext && need_id) 13840 src_reg->id = ++env->id_gen; 13841 copy_register_state(dst_reg, src_reg); 13842 if (!no_sext) 13843 dst_reg->id = 0; 13844 coerce_reg_to_size_sx(dst_reg, insn->off >> 3); 13845 dst_reg->live |= REG_LIVE_WRITTEN; 13846 dst_reg->subreg_def = DEF_NOT_SUBREG; 13847 } else { 13848 mark_reg_unknown(env, regs, insn->dst_reg); 13849 } 13850 } 13851 } else { 13852 /* R1 = (u32) R2 */ 13853 if (is_pointer_value(env, insn->src_reg)) { 13854 verbose(env, 13855 "R%d partial copy of pointer\n", 13856 insn->src_reg); 13857 return -EACCES; 13858 } else if (src_reg->type == SCALAR_VALUE) { 13859 if (insn->off == 0) { 13860 bool is_src_reg_u32 = src_reg->umax_value <= U32_MAX; 13861 13862 if (is_src_reg_u32 && need_id) 13863 src_reg->id = ++env->id_gen; 13864 copy_register_state(dst_reg, src_reg); 13865 /* Make sure ID is cleared if src_reg is not in u32 13866 * range otherwise dst_reg min/max could be incorrectly 13867 * propagated into src_reg by find_equal_scalars() 13868 */ 13869 if (!is_src_reg_u32) 13870 dst_reg->id = 0; 13871 dst_reg->live |= REG_LIVE_WRITTEN; 13872 dst_reg->subreg_def = env->insn_idx + 1; 13873 } else { 13874 /* case: W1 = (s8, s16)W2 */ 13875 bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1)); 13876 13877 if (no_sext && need_id) 13878 src_reg->id = ++env->id_gen; 13879 copy_register_state(dst_reg, src_reg); 13880 if (!no_sext) 13881 dst_reg->id = 0; 13882 dst_reg->live |= REG_LIVE_WRITTEN; 13883 dst_reg->subreg_def = env->insn_idx + 1; 13884 coerce_subreg_to_size_sx(dst_reg, insn->off >> 3); 13885 } 13886 } else { 13887 mark_reg_unknown(env, regs, 13888 insn->dst_reg); 13889 } 13890 zext_32_to_64(dst_reg); 13891 reg_bounds_sync(dst_reg); 13892 } 13893 } else { 13894 /* case: R = imm 13895 * remember the value we stored into this reg 13896 */ 13897 /* clear any state __mark_reg_known doesn't set */ 13898 mark_reg_unknown(env, regs, insn->dst_reg); 13899 regs[insn->dst_reg].type = SCALAR_VALUE; 13900 if (BPF_CLASS(insn->code) == BPF_ALU64) { 13901 __mark_reg_known(regs + insn->dst_reg, 13902 insn->imm); 13903 } else { 13904 __mark_reg_known(regs + insn->dst_reg, 13905 (u32)insn->imm); 13906 } 13907 } 13908 13909 } else if (opcode > BPF_END) { 13910 verbose(env, "invalid BPF_ALU opcode %x\n", opcode); 13911 return -EINVAL; 13912 13913 } else { /* all other ALU ops: and, sub, xor, add, ... 
*/ 13914 13915 if (BPF_SRC(insn->code) == BPF_X) { 13916 if (insn->imm != 0 || insn->off > 1 || 13917 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { 13918 verbose(env, "BPF_ALU uses reserved fields\n"); 13919 return -EINVAL; 13920 } 13921 /* check src1 operand */ 13922 err = check_reg_arg(env, insn->src_reg, SRC_OP); 13923 if (err) 13924 return err; 13925 } else { 13926 if (insn->src_reg != BPF_REG_0 || insn->off > 1 || 13927 (insn->off == 1 && opcode != BPF_MOD && opcode != BPF_DIV)) { 13928 verbose(env, "BPF_ALU uses reserved fields\n"); 13929 return -EINVAL; 13930 } 13931 } 13932 13933 /* check src2 operand */ 13934 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 13935 if (err) 13936 return err; 13937 13938 if ((opcode == BPF_MOD || opcode == BPF_DIV) && 13939 BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { 13940 verbose(env, "div by zero\n"); 13941 return -EINVAL; 13942 } 13943 13944 if ((opcode == BPF_LSH || opcode == BPF_RSH || 13945 opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { 13946 int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; 13947 13948 if (insn->imm < 0 || insn->imm >= size) { 13949 verbose(env, "invalid shift %d\n", insn->imm); 13950 return -EINVAL; 13951 } 13952 } 13953 13954 /* check dest operand */ 13955 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 13956 if (err) 13957 return err; 13958 13959 return adjust_reg_min_max_vals(env, insn); 13960 } 13961 13962 return 0; 13963 } 13964 13965 static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, 13966 struct bpf_reg_state *dst_reg, 13967 enum bpf_reg_type type, 13968 bool range_right_open) 13969 { 13970 struct bpf_func_state *state; 13971 struct bpf_reg_state *reg; 13972 int new_range; 13973 13974 if (dst_reg->off < 0 || 13975 (dst_reg->off == 0 && range_right_open)) 13976 /* This doesn't give us any range */ 13977 return; 13978 13979 if (dst_reg->umax_value > MAX_PACKET_OFF || 13980 dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) 13981 /* Risk of overflow. For instance, ptr + (1<<63) may be less 13982 * than pkt_end, but that's because it's also less than pkt. 13983 */ 13984 return; 13985 13986 new_range = dst_reg->off; 13987 if (range_right_open) 13988 new_range++; 13989 13990 /* Examples for register markings: 13991 * 13992 * pkt_data in dst register: 13993 * 13994 * r2 = r3; 13995 * r2 += 8; 13996 * if (r2 > pkt_end) goto <handle exception> 13997 * <access okay> 13998 * 13999 * r2 = r3; 14000 * r2 += 8; 14001 * if (r2 < pkt_end) goto <access okay> 14002 * <handle exception> 14003 * 14004 * Where: 14005 * r2 == dst_reg, pkt_end == src_reg 14006 * r2=pkt(id=n,off=8,r=0) 14007 * r3=pkt(id=n,off=0,r=0) 14008 * 14009 * pkt_data in src register: 14010 * 14011 * r2 = r3; 14012 * r2 += 8; 14013 * if (pkt_end >= r2) goto <access okay> 14014 * <handle exception> 14015 * 14016 * r2 = r3; 14017 * r2 += 8; 14018 * if (pkt_end <= r2) goto <handle exception> 14019 * <access okay> 14020 * 14021 * Where: 14022 * pkt_end == dst_reg, r2 == src_reg 14023 * r2=pkt(id=n,off=8,r=0) 14024 * r3=pkt(id=n,off=0,r=0) 14025 * 14026 * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) 14027 * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) 14028 * and [r3, r3 + 8-1) respectively is safe to access depending on 14029 * the check. 14030 */ 14031 14032 /* If our ids match, then we must have the same max_value. And we 14033 * don't care about the other reg's fixed offset, since if it's too big 14034 * the range won't allow anything. 
14035 * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 14036 */ 14037 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ 14038 if (reg->type == type && reg->id == dst_reg->id) 14039 /* keep the maximum range already checked */ 14040 reg->range = max(reg->range, new_range); 14041 })); 14042 } 14043 14044 static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode) 14045 { 14046 struct tnum subreg = tnum_subreg(reg->var_off); 14047 s32 sval = (s32)val; 14048 14049 switch (opcode) { 14050 case BPF_JEQ: 14051 if (tnum_is_const(subreg)) 14052 return !!tnum_equals_const(subreg, val); 14053 else if (val < reg->u32_min_value || val > reg->u32_max_value) 14054 return 0; 14055 else if (sval < reg->s32_min_value || sval > reg->s32_max_value) 14056 return 0; 14057 break; 14058 case BPF_JNE: 14059 if (tnum_is_const(subreg)) 14060 return !tnum_equals_const(subreg, val); 14061 else if (val < reg->u32_min_value || val > reg->u32_max_value) 14062 return 1; 14063 else if (sval < reg->s32_min_value || sval > reg->s32_max_value) 14064 return 1; 14065 break; 14066 case BPF_JSET: 14067 if ((~subreg.mask & subreg.value) & val) 14068 return 1; 14069 if (!((subreg.mask | subreg.value) & val)) 14070 return 0; 14071 break; 14072 case BPF_JGT: 14073 if (reg->u32_min_value > val) 14074 return 1; 14075 else if (reg->u32_max_value <= val) 14076 return 0; 14077 break; 14078 case BPF_JSGT: 14079 if (reg->s32_min_value > sval) 14080 return 1; 14081 else if (reg->s32_max_value <= sval) 14082 return 0; 14083 break; 14084 case BPF_JLT: 14085 if (reg->u32_max_value < val) 14086 return 1; 14087 else if (reg->u32_min_value >= val) 14088 return 0; 14089 break; 14090 case BPF_JSLT: 14091 if (reg->s32_max_value < sval) 14092 return 1; 14093 else if (reg->s32_min_value >= sval) 14094 return 0; 14095 break; 14096 case BPF_JGE: 14097 if (reg->u32_min_value >= val) 14098 return 1; 14099 else if (reg->u32_max_value < val) 14100 return 0; 14101 break; 14102 case BPF_JSGE: 14103 if (reg->s32_min_value >= sval) 14104 return 1; 14105 else if (reg->s32_max_value < sval) 14106 return 0; 14107 break; 14108 case BPF_JLE: 14109 if (reg->u32_max_value <= val) 14110 return 1; 14111 else if (reg->u32_min_value > val) 14112 return 0; 14113 break; 14114 case BPF_JSLE: 14115 if (reg->s32_max_value <= sval) 14116 return 1; 14117 else if (reg->s32_min_value > sval) 14118 return 0; 14119 break; 14120 } 14121 14122 return -1; 14123 } 14124 14125 14126 static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) 14127 { 14128 s64 sval = (s64)val; 14129 14130 switch (opcode) { 14131 case BPF_JEQ: 14132 if (tnum_is_const(reg->var_off)) 14133 return !!tnum_equals_const(reg->var_off, val); 14134 else if (val < reg->umin_value || val > reg->umax_value) 14135 return 0; 14136 else if (sval < reg->smin_value || sval > reg->smax_value) 14137 return 0; 14138 break; 14139 case BPF_JNE: 14140 if (tnum_is_const(reg->var_off)) 14141 return !tnum_equals_const(reg->var_off, val); 14142 else if (val < reg->umin_value || val > reg->umax_value) 14143 return 1; 14144 else if (sval < reg->smin_value || sval > reg->smax_value) 14145 return 1; 14146 break; 14147 case BPF_JSET: 14148 if ((~reg->var_off.mask & reg->var_off.value) & val) 14149 return 1; 14150 if (!((reg->var_off.mask | reg->var_off.value) & val)) 14151 return 0; 14152 break; 14153 case BPF_JGT: 14154 if (reg->umin_value > val) 14155 return 1; 14156 else if (reg->umax_value <= val) 14157 return 0; 14158 break; 14159 case BPF_JSGT: 14160 if (reg->smin_value > sval) 
14161 return 1; 14162 else if (reg->smax_value <= sval) 14163 return 0; 14164 break; 14165 case BPF_JLT: 14166 if (reg->umax_value < val) 14167 return 1; 14168 else if (reg->umin_value >= val) 14169 return 0; 14170 break; 14171 case BPF_JSLT: 14172 if (reg->smax_value < sval) 14173 return 1; 14174 else if (reg->smin_value >= sval) 14175 return 0; 14176 break; 14177 case BPF_JGE: 14178 if (reg->umin_value >= val) 14179 return 1; 14180 else if (reg->umax_value < val) 14181 return 0; 14182 break; 14183 case BPF_JSGE: 14184 if (reg->smin_value >= sval) 14185 return 1; 14186 else if (reg->smax_value < sval) 14187 return 0; 14188 break; 14189 case BPF_JLE: 14190 if (reg->umax_value <= val) 14191 return 1; 14192 else if (reg->umin_value > val) 14193 return 0; 14194 break; 14195 case BPF_JSLE: 14196 if (reg->smax_value <= sval) 14197 return 1; 14198 else if (reg->smin_value > sval) 14199 return 0; 14200 break; 14201 } 14202 14203 return -1; 14204 } 14205 14206 /* compute branch direction of the expression "if (reg opcode val) goto target;" 14207 * and return: 14208 * 1 - branch will be taken and "goto target" will be executed 14209 * 0 - branch will not be taken and fall-through to next insn 14210 * -1 - unknown. Example: "if (reg < 5)" is unknown when register value 14211 * range [0,10] 14212 */ 14213 static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, 14214 bool is_jmp32) 14215 { 14216 if (__is_pointer_value(false, reg)) { 14217 if (!reg_not_null(reg)) 14218 return -1; 14219 14220 /* If pointer is valid tests against zero will fail so we can 14221 * use this to direct branch taken. 14222 */ 14223 if (val != 0) 14224 return -1; 14225 14226 switch (opcode) { 14227 case BPF_JEQ: 14228 return 0; 14229 case BPF_JNE: 14230 return 1; 14231 default: 14232 return -1; 14233 } 14234 } 14235 14236 if (is_jmp32) 14237 return is_branch32_taken(reg, val, opcode); 14238 return is_branch64_taken(reg, val, opcode); 14239 } 14240 14241 static int flip_opcode(u32 opcode) 14242 { 14243 /* How can we transform "a <op> b" into "b <op> a"? 
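*
* For illustration (a hypothetical test): when the constant ends up on
* the left-hand side, e.g. deciding "if (10 < r1)", the same answer is
* obtained by evaluating "if (r1 > 10)", i.e. by mapping BPF_JLT to
* BPF_JGT through this table.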
*/
14244 static const u8 opcode_flip[16] = {
14245 /* these stay the same */
14246 [BPF_JEQ >> 4] = BPF_JEQ,
14247 [BPF_JNE >> 4] = BPF_JNE,
14248 [BPF_JSET >> 4] = BPF_JSET,
14249 /* these swap "lesser" and "greater" (L and G in the opcodes) */
14250 [BPF_JGE >> 4] = BPF_JLE,
14251 [BPF_JGT >> 4] = BPF_JLT,
14252 [BPF_JLE >> 4] = BPF_JGE,
14253 [BPF_JLT >> 4] = BPF_JGT,
14254 [BPF_JSGE >> 4] = BPF_JSLE,
14255 [BPF_JSGT >> 4] = BPF_JSLT,
14256 [BPF_JSLE >> 4] = BPF_JSGE,
14257 [BPF_JSLT >> 4] = BPF_JSGT
14258 };
14259 return opcode_flip[opcode >> 4];
14260 }
14261
14262 static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
14263 struct bpf_reg_state *src_reg,
14264 u8 opcode)
14265 {
14266 struct bpf_reg_state *pkt;
14267
14268 if (src_reg->type == PTR_TO_PACKET_END) {
14269 pkt = dst_reg;
14270 } else if (dst_reg->type == PTR_TO_PACKET_END) {
14271 pkt = src_reg;
14272 opcode = flip_opcode(opcode);
14273 } else {
14274 return -1;
14275 }
14276
14277 if (pkt->range >= 0)
14278 return -1;
14279
14280 switch (opcode) {
14281 case BPF_JLE:
14282 /* pkt <= pkt_end */
14283 fallthrough;
14284 case BPF_JGT:
14285 /* pkt > pkt_end */
14286 if (pkt->range == BEYOND_PKT_END)
14287 /* pkt has at least one extra byte beyond pkt_end */
14288 return opcode == BPF_JGT;
14289 break;
14290 case BPF_JLT:
14291 /* pkt < pkt_end */
14292 fallthrough;
14293 case BPF_JGE:
14294 /* pkt >= pkt_end */
14295 if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
14296 return opcode == BPF_JGE;
14297 break;
14298 }
14299 return -1;
14300 }
14301
14302 /* Adjusts the register min/max values in the case that the dst_reg is the
14303 * variable register that we are working on, and src_reg is a constant or we're
14304 * simply doing a BPF_K check.
14305 * In JEQ/JNE cases we also adjust the var_off values.
14306 */
14307 static void reg_set_min_max(struct bpf_reg_state *true_reg,
14308 struct bpf_reg_state *false_reg,
14309 u64 val, u32 val32,
14310 u8 opcode, bool is_jmp32)
14311 {
14312 struct tnum false_32off = tnum_subreg(false_reg->var_off);
14313 struct tnum false_64off = false_reg->var_off;
14314 struct tnum true_32off = tnum_subreg(true_reg->var_off);
14315 struct tnum true_64off = true_reg->var_off;
14316 s64 sval = (s64)val;
14317 s32 sval32 = (s32)val32;
14318
14319 /* If the dst_reg is a pointer, we can't learn anything about its
14320 * variable offset from the compare (unless src_reg were a pointer into
14321 * the same object, but we don't bother with that).
14322 * Since false_reg and true_reg have the same type by construction, we
14323 * only need to check one of them for pointerness.
14324 */
14325 if (__is_pointer_value(false, false_reg))
14326 return;
14327
14328 switch (opcode) {
14329 /* JEQ/JNE comparison doesn't change the register equivalence.
14330 *
14331 * r1 = r2;
14332 * if (r1 == 42) goto label;
14333 * ...
14334 * label: // here both r1 and r2 are known to be 42.
14335 *
14336 * Hence when marking register as known preserve its ID.
14337 */ 14338 case BPF_JEQ: 14339 if (is_jmp32) { 14340 __mark_reg32_known(true_reg, val32); 14341 true_32off = tnum_subreg(true_reg->var_off); 14342 } else { 14343 ___mark_reg_known(true_reg, val); 14344 true_64off = true_reg->var_off; 14345 } 14346 break; 14347 case BPF_JNE: 14348 if (is_jmp32) { 14349 __mark_reg32_known(false_reg, val32); 14350 false_32off = tnum_subreg(false_reg->var_off); 14351 } else { 14352 ___mark_reg_known(false_reg, val); 14353 false_64off = false_reg->var_off; 14354 } 14355 break; 14356 case BPF_JSET: 14357 if (is_jmp32) { 14358 false_32off = tnum_and(false_32off, tnum_const(~val32)); 14359 if (is_power_of_2(val32)) 14360 true_32off = tnum_or(true_32off, 14361 tnum_const(val32)); 14362 } else { 14363 false_64off = tnum_and(false_64off, tnum_const(~val)); 14364 if (is_power_of_2(val)) 14365 true_64off = tnum_or(true_64off, 14366 tnum_const(val)); 14367 } 14368 break; 14369 case BPF_JGE: 14370 case BPF_JGT: 14371 { 14372 if (is_jmp32) { 14373 u32 false_umax = opcode == BPF_JGT ? val32 : val32 - 1; 14374 u32 true_umin = opcode == BPF_JGT ? val32 + 1 : val32; 14375 14376 false_reg->u32_max_value = min(false_reg->u32_max_value, 14377 false_umax); 14378 true_reg->u32_min_value = max(true_reg->u32_min_value, 14379 true_umin); 14380 } else { 14381 u64 false_umax = opcode == BPF_JGT ? val : val - 1; 14382 u64 true_umin = opcode == BPF_JGT ? val + 1 : val; 14383 14384 false_reg->umax_value = min(false_reg->umax_value, false_umax); 14385 true_reg->umin_value = max(true_reg->umin_value, true_umin); 14386 } 14387 break; 14388 } 14389 case BPF_JSGE: 14390 case BPF_JSGT: 14391 { 14392 if (is_jmp32) { 14393 s32 false_smax = opcode == BPF_JSGT ? sval32 : sval32 - 1; 14394 s32 true_smin = opcode == BPF_JSGT ? sval32 + 1 : sval32; 14395 14396 false_reg->s32_max_value = min(false_reg->s32_max_value, false_smax); 14397 true_reg->s32_min_value = max(true_reg->s32_min_value, true_smin); 14398 } else { 14399 s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; 14400 s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; 14401 14402 false_reg->smax_value = min(false_reg->smax_value, false_smax); 14403 true_reg->smin_value = max(true_reg->smin_value, true_smin); 14404 } 14405 break; 14406 } 14407 case BPF_JLE: 14408 case BPF_JLT: 14409 { 14410 if (is_jmp32) { 14411 u32 false_umin = opcode == BPF_JLT ? val32 : val32 + 1; 14412 u32 true_umax = opcode == BPF_JLT ? val32 - 1 : val32; 14413 14414 false_reg->u32_min_value = max(false_reg->u32_min_value, 14415 false_umin); 14416 true_reg->u32_max_value = min(true_reg->u32_max_value, 14417 true_umax); 14418 } else { 14419 u64 false_umin = opcode == BPF_JLT ? val : val + 1; 14420 u64 true_umax = opcode == BPF_JLT ? val - 1 : val; 14421 14422 false_reg->umin_value = max(false_reg->umin_value, false_umin); 14423 true_reg->umax_value = min(true_reg->umax_value, true_umax); 14424 } 14425 break; 14426 } 14427 case BPF_JSLE: 14428 case BPF_JSLT: 14429 { 14430 if (is_jmp32) { 14431 s32 false_smin = opcode == BPF_JSLT ? sval32 : sval32 + 1; 14432 s32 true_smax = opcode == BPF_JSLT ? sval32 - 1 : sval32; 14433 14434 false_reg->s32_min_value = max(false_reg->s32_min_value, false_smin); 14435 true_reg->s32_max_value = min(true_reg->s32_max_value, true_smax); 14436 } else { 14437 s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; 14438 s64 true_smax = opcode == BPF_JSLT ? 
sval - 1 : sval; 14439 14440 false_reg->smin_value = max(false_reg->smin_value, false_smin); 14441 true_reg->smax_value = min(true_reg->smax_value, true_smax); 14442 } 14443 break; 14444 } 14445 default: 14446 return; 14447 } 14448 14449 if (is_jmp32) { 14450 false_reg->var_off = tnum_or(tnum_clear_subreg(false_64off), 14451 tnum_subreg(false_32off)); 14452 true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off), 14453 tnum_subreg(true_32off)); 14454 __reg_combine_32_into_64(false_reg); 14455 __reg_combine_32_into_64(true_reg); 14456 } else { 14457 false_reg->var_off = false_64off; 14458 true_reg->var_off = true_64off; 14459 __reg_combine_64_into_32(false_reg); 14460 __reg_combine_64_into_32(true_reg); 14461 } 14462 } 14463 14464 /* Same as above, but for the case that dst_reg holds a constant and src_reg is 14465 * the variable reg. 14466 */ 14467 static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, 14468 struct bpf_reg_state *false_reg, 14469 u64 val, u32 val32, 14470 u8 opcode, bool is_jmp32) 14471 { 14472 opcode = flip_opcode(opcode); 14473 /* This uses zero as "not present in table"; luckily the zero opcode, 14474 * BPF_JA, can't get here. 14475 */ 14476 if (opcode) 14477 reg_set_min_max(true_reg, false_reg, val, val32, opcode, is_jmp32); 14478 } 14479 14480 /* Regs are known to be equal, so intersect their min/max/var_off */ 14481 static void __reg_combine_min_max(struct bpf_reg_state *src_reg, 14482 struct bpf_reg_state *dst_reg) 14483 { 14484 src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, 14485 dst_reg->umin_value); 14486 src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, 14487 dst_reg->umax_value); 14488 src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, 14489 dst_reg->smin_value); 14490 src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, 14491 dst_reg->smax_value); 14492 src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, 14493 dst_reg->var_off); 14494 reg_bounds_sync(src_reg); 14495 reg_bounds_sync(dst_reg); 14496 } 14497 14498 static void reg_combine_min_max(struct bpf_reg_state *true_src, 14499 struct bpf_reg_state *true_dst, 14500 struct bpf_reg_state *false_src, 14501 struct bpf_reg_state *false_dst, 14502 u8 opcode) 14503 { 14504 switch (opcode) { 14505 case BPF_JEQ: 14506 __reg_combine_min_max(true_src, true_dst); 14507 break; 14508 case BPF_JNE: 14509 __reg_combine_min_max(false_src, false_dst); 14510 break; 14511 } 14512 } 14513 14514 static void mark_ptr_or_null_reg(struct bpf_func_state *state, 14515 struct bpf_reg_state *reg, u32 id, 14516 bool is_null) 14517 { 14518 if (type_may_be_null(reg->type) && reg->id == id && 14519 (is_rcu_reg(reg) || !WARN_ON_ONCE(!reg->id))) { 14520 /* Old offset (both fixed and variable parts) should have been 14521 * known-zero, because we don't allow pointer arithmetic on 14522 * pointers that might be NULL. If we see this happening, don't 14523 * convert the register. 14524 * 14525 * But in some cases, some helpers that return local kptrs 14526 * advance offset for the returned pointer. In those cases, it 14527 * is fine to expect to see reg->off. 
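*
* For illustration (a hypothetical helper): a helper returning a
* pointer a few bytes into a locally allocated object would leave such
* a non-zero fixed reg->off here; the type_is_ptr_alloc_obj() /
* type_is_non_owning_ref() test below is what exempts those types from
* the WARN_ON_ONCE(reg->off) check.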
14528 */ 14529 if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0))) 14530 return; 14531 if (!(type_is_ptr_alloc_obj(reg->type) || type_is_non_owning_ref(reg->type)) && 14532 WARN_ON_ONCE(reg->off)) 14533 return; 14534 14535 if (is_null) { 14536 reg->type = SCALAR_VALUE; 14537 /* We don't need id and ref_obj_id from this point 14538 * onwards anymore, thus we should better reset it, 14539 * so that state pruning has chances to take effect. 14540 */ 14541 reg->id = 0; 14542 reg->ref_obj_id = 0; 14543 14544 return; 14545 } 14546 14547 mark_ptr_not_null_reg(reg); 14548 14549 if (!reg_may_point_to_spin_lock(reg)) { 14550 /* For not-NULL ptr, reg->ref_obj_id will be reset 14551 * in release_reference(). 14552 * 14553 * reg->id is still used by spin_lock ptr. Other 14554 * than spin_lock ptr type, reg->id can be reset. 14555 */ 14556 reg->id = 0; 14557 } 14558 } 14559 } 14560 14561 /* The logic is similar to find_good_pkt_pointers(), both could eventually 14562 * be folded together at some point. 14563 */ 14564 static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, 14565 bool is_null) 14566 { 14567 struct bpf_func_state *state = vstate->frame[vstate->curframe]; 14568 struct bpf_reg_state *regs = state->regs, *reg; 14569 u32 ref_obj_id = regs[regno].ref_obj_id; 14570 u32 id = regs[regno].id; 14571 14572 if (ref_obj_id && ref_obj_id == id && is_null) 14573 /* regs[regno] is in the " == NULL" branch. 14574 * No one could have freed the reference state before 14575 * doing the NULL check. 14576 */ 14577 WARN_ON_ONCE(release_reference_state(state, id)); 14578 14579 bpf_for_each_reg_in_vstate(vstate, state, reg, ({ 14580 mark_ptr_or_null_reg(state, reg, id, is_null); 14581 })); 14582 } 14583 14584 static bool try_match_pkt_pointers(const struct bpf_insn *insn, 14585 struct bpf_reg_state *dst_reg, 14586 struct bpf_reg_state *src_reg, 14587 struct bpf_verifier_state *this_branch, 14588 struct bpf_verifier_state *other_branch) 14589 { 14590 if (BPF_SRC(insn->code) != BPF_X) 14591 return false; 14592 14593 /* Pointers are always 64-bit. 
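* A BPF_JMP32 test only compares the low 32 bits of the registers,
* which says nothing about the ordering of the full 64-bit packet
* pointers, so no packet range can be learned from such a comparison
* and it is not treated as a packet bounds check below.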
*/
14594 if (BPF_CLASS(insn->code) == BPF_JMP32)
14595 return false;
14596
14597 switch (BPF_OP(insn->code)) {
14598 case BPF_JGT:
14599 if ((dst_reg->type == PTR_TO_PACKET &&
14600 src_reg->type == PTR_TO_PACKET_END) ||
14601 (dst_reg->type == PTR_TO_PACKET_META &&
14602 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
14603 /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
14604 find_good_pkt_pointers(this_branch, dst_reg,
14605 dst_reg->type, false);
14606 mark_pkt_end(other_branch, insn->dst_reg, true);
14607 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
14608 src_reg->type == PTR_TO_PACKET) ||
14609 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
14610 src_reg->type == PTR_TO_PACKET_META)) {
14611 /* pkt_end > pkt_data', pkt_data > pkt_meta' */
14612 find_good_pkt_pointers(other_branch, src_reg,
14613 src_reg->type, true);
14614 mark_pkt_end(this_branch, insn->src_reg, false);
14615 } else {
14616 return false;
14617 }
14618 break;
14619 case BPF_JLT:
14620 if ((dst_reg->type == PTR_TO_PACKET &&
14621 src_reg->type == PTR_TO_PACKET_END) ||
14622 (dst_reg->type == PTR_TO_PACKET_META &&
14623 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
14624 /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
14625 find_good_pkt_pointers(other_branch, dst_reg,
14626 dst_reg->type, true);
14627 mark_pkt_end(this_branch, insn->dst_reg, false);
14628 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
14629 src_reg->type == PTR_TO_PACKET) ||
14630 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
14631 src_reg->type == PTR_TO_PACKET_META)) {
14632 /* pkt_end < pkt_data', pkt_data < pkt_meta' */
14633 find_good_pkt_pointers(this_branch, src_reg,
14634 src_reg->type, false);
14635 mark_pkt_end(other_branch, insn->src_reg, true);
14636 } else {
14637 return false;
14638 }
14639 break;
14640 case BPF_JGE:
14641 if ((dst_reg->type == PTR_TO_PACKET &&
14642 src_reg->type == PTR_TO_PACKET_END) ||
14643 (dst_reg->type == PTR_TO_PACKET_META &&
14644 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
14645 /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
14646 find_good_pkt_pointers(this_branch, dst_reg,
14647 dst_reg->type, true);
14648 mark_pkt_end(other_branch, insn->dst_reg, false);
14649 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
14650 src_reg->type == PTR_TO_PACKET) ||
14651 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
14652 src_reg->type == PTR_TO_PACKET_META)) {
14653 /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
14654 find_good_pkt_pointers(other_branch, src_reg,
14655 src_reg->type, false);
14656 mark_pkt_end(this_branch, insn->src_reg, true);
14657 } else {
14658 return false;
14659 }
14660 break;
14661 case BPF_JLE:
14662 if ((dst_reg->type == PTR_TO_PACKET &&
14663 src_reg->type == PTR_TO_PACKET_END) ||
14664 (dst_reg->type == PTR_TO_PACKET_META &&
14665 reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) {
14666 /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
14667 find_good_pkt_pointers(other_branch, dst_reg,
14668 dst_reg->type, false);
14669 mark_pkt_end(this_branch, insn->dst_reg, true);
14670 } else if ((dst_reg->type == PTR_TO_PACKET_END &&
14671 src_reg->type == PTR_TO_PACKET) ||
14672 (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
14673 src_reg->type == PTR_TO_PACKET_META)) {
14674 /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
14675 find_good_pkt_pointers(this_branch, src_reg,
14676 src_reg->type, true);
14677 mark_pkt_end(other_branch, insn->src_reg, false);
14678 } else {
14679 return false;
14680 }
14681 break;
14682 default:
14683 return false;
14684 }
14685
14686 return true;
14687 }
14688
14689 static void find_equal_scalars(struct bpf_verifier_state *vstate,
14690 struct bpf_reg_state *known_reg)
14691 {
14692 struct bpf_func_state *state;
14693 struct bpf_reg_state *reg;
14694
14695 bpf_for_each_reg_in_vstate(vstate, state, reg, ({
14696 if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
14697 copy_register_state(reg, known_reg);
14698 }));
14699 }
14700
14701 static int check_cond_jmp_op(struct bpf_verifier_env *env,
14702 struct bpf_insn *insn, int *insn_idx)
14703 {
14704 struct bpf_verifier_state *this_branch = env->cur_state;
14705 struct bpf_verifier_state *other_branch;
14706 struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
14707 struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
14708 struct bpf_reg_state *eq_branch_regs;
14709 u8 opcode = BPF_OP(insn->code);
14710 bool is_jmp32;
14711 int pred = -1;
14712 int err;
14713
14714 /* Only conditional jumps are expected to reach here. */
14715 if (opcode == BPF_JA || opcode > BPF_JSLE) {
14716 verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode);
14717 return -EINVAL;
14718 }
14719
14720 /* check src2 operand */
14721 err = check_reg_arg(env, insn->dst_reg, SRC_OP);
14722 if (err)
14723 return err;
14724
14725 dst_reg = &regs[insn->dst_reg];
14726 if (BPF_SRC(insn->code) == BPF_X) {
14727 if (insn->imm != 0) {
14728 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
14729 return -EINVAL;
14730 }
14731
14732 /* check src1 operand */
14733 err = check_reg_arg(env, insn->src_reg, SRC_OP);
14734 if (err)
14735 return err;
14736
14737 src_reg = &regs[insn->src_reg];
14738 if (!(reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg)) &&
14739 is_pointer_value(env, insn->src_reg)) {
14740 verbose(env, "R%d pointer comparison prohibited\n",
14741 insn->src_reg);
14742 return -EACCES;
14743 }
14744 } else {
14745 if (insn->src_reg != BPF_REG_0) {
14746 verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
14747 return -EINVAL;
14748 }
14749 }
14750
14751 is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
14752
14753 if (BPF_SRC(insn->code) == BPF_K) {
14754 pred = is_branch_taken(dst_reg, insn->imm, opcode, is_jmp32);
14755 } else if (src_reg->type == SCALAR_VALUE &&
14756 is_jmp32 && tnum_is_const(tnum_subreg(src_reg->var_off))) {
14757 pred = is_branch_taken(dst_reg,
14758 tnum_subreg(src_reg->var_off).value,
14759 opcode,
14760 is_jmp32);
14761 } else if (src_reg->type == SCALAR_VALUE &&
14762 !is_jmp32 && tnum_is_const(src_reg->var_off)) {
14763 pred = is_branch_taken(dst_reg,
14764 src_reg->var_off.value,
14765 opcode,
14766 is_jmp32);
14767 } else if (dst_reg->type == SCALAR_VALUE &&
14768 is_jmp32 && tnum_is_const(tnum_subreg(dst_reg->var_off))) {
14769 pred = is_branch_taken(src_reg,
14770 tnum_subreg(dst_reg->var_off).value,
14771 flip_opcode(opcode),
14772 is_jmp32);
14773 } else if (dst_reg->type == SCALAR_VALUE &&
14774 !is_jmp32 && tnum_is_const(dst_reg->var_off)) {
14775 pred = is_branch_taken(src_reg,
14776 dst_reg->var_off.value,
14777 flip_opcode(opcode),
14778 is_jmp32);
14779 } else if (reg_is_pkt_pointer_any(dst_reg) &&
14780 reg_is_pkt_pointer_any(src_reg) &&
14781 !is_jmp32) {
14782 pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
14783 }
14784
14785 if (pred >= 0) {
14786 /* If we get here with a dst_reg pointer type it is because
14787 * above is_branch_taken() special cased the 0 comparison.
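*
* For illustration (a hypothetical state): if dst_reg is a
* PTR_TO_SOCKET known to be non-NULL, then "if rX != 0" is always
* taken, is_branch_taken() returned 1, and we reach this point with a
* pointer; precision marking is then only requested for the scalar
* operand(s).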
14788 */ 14789 if (!__is_pointer_value(false, dst_reg)) 14790 err = mark_chain_precision(env, insn->dst_reg); 14791 if (BPF_SRC(insn->code) == BPF_X && !err && 14792 !__is_pointer_value(false, src_reg)) 14793 err = mark_chain_precision(env, insn->src_reg); 14794 if (err) 14795 return err; 14796 } 14797 14798 if (pred == 1) { 14799 /* Only follow the goto, ignore fall-through. If needed, push 14800 * the fall-through branch for simulation under speculative 14801 * execution. 14802 */ 14803 if (!env->bypass_spec_v1 && 14804 !sanitize_speculative_path(env, insn, *insn_idx + 1, 14805 *insn_idx)) 14806 return -EFAULT; 14807 if (env->log.level & BPF_LOG_LEVEL) 14808 print_insn_state(env, this_branch->frame[this_branch->curframe]); 14809 *insn_idx += insn->off; 14810 return 0; 14811 } else if (pred == 0) { 14812 /* Only follow the fall-through branch, since that's where the 14813 * program will go. If needed, push the goto branch for 14814 * simulation under speculative execution. 14815 */ 14816 if (!env->bypass_spec_v1 && 14817 !sanitize_speculative_path(env, insn, 14818 *insn_idx + insn->off + 1, 14819 *insn_idx)) 14820 return -EFAULT; 14821 if (env->log.level & BPF_LOG_LEVEL) 14822 print_insn_state(env, this_branch->frame[this_branch->curframe]); 14823 return 0; 14824 } 14825 14826 other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, 14827 false); 14828 if (!other_branch) 14829 return -EFAULT; 14830 other_branch_regs = other_branch->frame[other_branch->curframe]->regs; 14831 14832 /* detect if we are comparing against a constant value so we can adjust 14833 * our min/max values for our dst register. 14834 * this is only legit if both are scalars (or pointers to the same 14835 * object, I suppose, see the PTR_MAYBE_NULL related if block below), 14836 * because otherwise the different base pointers mean the offsets aren't 14837 * comparable. 
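*
* For illustration (a hypothetical insn): given
*   BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 7, <off>)
* with R1 an unknown scalar, reg_set_min_max() gives the taken branch
* R1.umin_value >= 8 while the fall-through branch gets
* R1.umax_value <= 7.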
14838 */
14839 if (BPF_SRC(insn->code) == BPF_X) {
14840 struct bpf_reg_state *src_reg = &regs[insn->src_reg];
14841
14842 if (dst_reg->type == SCALAR_VALUE &&
14843 src_reg->type == SCALAR_VALUE) {
14844 if (tnum_is_const(src_reg->var_off) ||
14845 (is_jmp32 &&
14846 tnum_is_const(tnum_subreg(src_reg->var_off))))
14847 reg_set_min_max(&other_branch_regs[insn->dst_reg],
14848 dst_reg,
14849 src_reg->var_off.value,
14850 tnum_subreg(src_reg->var_off).value,
14851 opcode, is_jmp32);
14852 else if (tnum_is_const(dst_reg->var_off) ||
14853 (is_jmp32 &&
14854 tnum_is_const(tnum_subreg(dst_reg->var_off))))
14855 reg_set_min_max_inv(&other_branch_regs[insn->src_reg],
14856 src_reg,
14857 dst_reg->var_off.value,
14858 tnum_subreg(dst_reg->var_off).value,
14859 opcode, is_jmp32);
14860 else if (!is_jmp32 &&
14861 (opcode == BPF_JEQ || opcode == BPF_JNE))
14862 /* Comparing for equality, we can combine knowledge */
14863 reg_combine_min_max(&other_branch_regs[insn->src_reg],
14864 &other_branch_regs[insn->dst_reg],
14865 src_reg, dst_reg, opcode);
14866 if (src_reg->id &&
14867 !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
14868 find_equal_scalars(this_branch, src_reg);
14869 find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
14870 }
14871
14872 }
14873 } else if (dst_reg->type == SCALAR_VALUE) {
14874 reg_set_min_max(&other_branch_regs[insn->dst_reg],
14875 dst_reg, insn->imm, (u32)insn->imm,
14876 opcode, is_jmp32);
14877 }
14878
14879 if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
14880 !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
14881 find_equal_scalars(this_branch, dst_reg);
14882 find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
14883 }
14884
14885 /* if one pointer register is compared to another pointer
14886 * register check if PTR_MAYBE_NULL could be lifted.
14887 * E.g. register A - maybe null
14888 * register B - not null
14889 * for JNE A, B, ... - A is not null in the false branch;
14890 * for JEQ A, B, ... - A is not null in the true branch.
14891 *
14892 * PTR_TO_BTF_ID points to a kernel struct that does
14893 * not need to be null checked by the BPF program, i.e.,
14894 * it could be null even without PTR_MAYBE_NULL marking, so
14895 * only propagate nullness when neither reg is that type.
14896 */
14897 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X &&
14898 __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) &&
14899 type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) &&
14900 base_type(src_reg->type) != PTR_TO_BTF_ID &&
14901 base_type(dst_reg->type) != PTR_TO_BTF_ID) {
14902 eq_branch_regs = NULL;
14903 switch (opcode) {
14904 case BPF_JEQ:
14905 eq_branch_regs = other_branch_regs;
14906 break;
14907 case BPF_JNE:
14908 eq_branch_regs = regs;
14909 break;
14910 default:
14911 /* do nothing */
14912 break;
14913 }
14914 if (eq_branch_regs) {
14915 if (type_may_be_null(src_reg->type))
14916 mark_ptr_not_null_reg(&eq_branch_regs[insn->src_reg]);
14917 else
14918 mark_ptr_not_null_reg(&eq_branch_regs[insn->dst_reg]);
14919 }
14920 }
14921
14922 /* detect if R == 0 where R is returned from bpf_map_lookup_elem().
14923 * NOTE: these optimizations below are related to pointer comparisons,
14924 * which will never be JMP32.
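*
* For illustration (a hypothetical program): after
*   r0 = bpf_map_lookup_elem(...); r6 = r0;
* both r0 and r6 carry the same reg->id, so a later "if r6 == 0" lets
* mark_ptr_or_null_regs() below convert every register sharing that id
* in each branch, not just r6 itself.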
14925 */
14926 if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K &&
14927 insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) &&
14928 type_may_be_null(dst_reg->type)) {
14929 /* Mark all identical registers in each branch as either
14930 * safe or unknown depending on the R == 0 or R != 0 condition.
14931 */
14932 mark_ptr_or_null_regs(this_branch, insn->dst_reg,
14933 opcode == BPF_JNE);
14934 mark_ptr_or_null_regs(other_branch, insn->dst_reg,
14935 opcode == BPF_JEQ);
14936 } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg],
14937 this_branch, other_branch) &&
14938 is_pointer_value(env, insn->dst_reg)) {
14939 verbose(env, "R%d pointer comparison prohibited\n",
14940 insn->dst_reg);
14941 return -EACCES;
14942 }
14943 if (env->log.level & BPF_LOG_LEVEL)
14944 print_insn_state(env, this_branch->frame[this_branch->curframe]);
14945 return 0;
14946 }
14947
14948 /* verify BPF_LD_IMM64 instruction */
14949 static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
14950 {
14951 struct bpf_insn_aux_data *aux = cur_aux(env);
14952 struct bpf_reg_state *regs = cur_regs(env);
14953 struct bpf_reg_state *dst_reg;
14954 struct bpf_map *map;
14955 int err;
14956
14957 if (BPF_SIZE(insn->code) != BPF_DW) {
14958 verbose(env, "invalid BPF_LD_IMM insn\n");
14959 return -EINVAL;
14960 }
14961 if (insn->off != 0) {
14962 verbose(env, "BPF_LD_IMM64 uses reserved fields\n");
14963 return -EINVAL;
14964 }
14965
14966 err = check_reg_arg(env, insn->dst_reg, DST_OP);
14967 if (err)
14968 return err;
14969
14970 dst_reg = &regs[insn->dst_reg];
14971 if (insn->src_reg == 0) {
14972 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
14973
14974 dst_reg->type = SCALAR_VALUE;
14975 __mark_reg_known(&regs[insn->dst_reg], imm);
14976 return 0;
14977 }
14978
14979 /* All special src_reg cases are listed below. From this point onwards
14980 * we either succeed and assign a corresponding dst_reg->type after
14981 * zeroing the offset, or fail and reject the program.
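*
* For illustration (one of the cases handled below): a BPF_LD_IMM64
* whose src_reg is BPF_PSEUDO_MAP_VALUE yields a PTR_TO_MAP_VALUE into
* a single-entry map, with the fixed offset taken from aux->map_off.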
14982 */ 14983 mark_reg_known_zero(env, regs, insn->dst_reg); 14984 14985 if (insn->src_reg == BPF_PSEUDO_BTF_ID) { 14986 dst_reg->type = aux->btf_var.reg_type; 14987 switch (base_type(dst_reg->type)) { 14988 case PTR_TO_MEM: 14989 dst_reg->mem_size = aux->btf_var.mem_size; 14990 break; 14991 case PTR_TO_BTF_ID: 14992 dst_reg->btf = aux->btf_var.btf; 14993 dst_reg->btf_id = aux->btf_var.btf_id; 14994 break; 14995 default: 14996 verbose(env, "bpf verifier is misconfigured\n"); 14997 return -EFAULT; 14998 } 14999 return 0; 15000 } 15001 15002 if (insn->src_reg == BPF_PSEUDO_FUNC) { 15003 struct bpf_prog_aux *aux = env->prog->aux; 15004 u32 subprogno = find_subprog(env, 15005 env->insn_idx + insn->imm + 1); 15006 15007 if (!aux->func_info) { 15008 verbose(env, "missing btf func_info\n"); 15009 return -EINVAL; 15010 } 15011 if (aux->func_info_aux[subprogno].linkage != BTF_FUNC_STATIC) { 15012 verbose(env, "callback function not static\n"); 15013 return -EINVAL; 15014 } 15015 15016 dst_reg->type = PTR_TO_FUNC; 15017 dst_reg->subprogno = subprogno; 15018 return 0; 15019 } 15020 15021 map = env->used_maps[aux->map_index]; 15022 dst_reg->map_ptr = map; 15023 15024 if (insn->src_reg == BPF_PSEUDO_MAP_VALUE || 15025 insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) { 15026 dst_reg->type = PTR_TO_MAP_VALUE; 15027 dst_reg->off = aux->map_off; 15028 WARN_ON_ONCE(map->max_entries != 1); 15029 /* We want reg->id to be same (0) as map_value is not distinct */ 15030 } else if (insn->src_reg == BPF_PSEUDO_MAP_FD || 15031 insn->src_reg == BPF_PSEUDO_MAP_IDX) { 15032 dst_reg->type = CONST_PTR_TO_MAP; 15033 } else { 15034 verbose(env, "bpf verifier is misconfigured\n"); 15035 return -EINVAL; 15036 } 15037 15038 return 0; 15039 } 15040 15041 static bool may_access_skb(enum bpf_prog_type type) 15042 { 15043 switch (type) { 15044 case BPF_PROG_TYPE_SOCKET_FILTER: 15045 case BPF_PROG_TYPE_SCHED_CLS: 15046 case BPF_PROG_TYPE_SCHED_ACT: 15047 return true; 15048 default: 15049 return false; 15050 } 15051 } 15052 15053 /* verify safety of LD_ABS|LD_IND instructions: 15054 * - they can only appear in the programs where ctx == skb 15055 * - since they are wrappers of function calls, they scratch R1-R5 registers, 15056 * preserve R6-R9, and store return value into R0 15057 * 15058 * Implicit input: 15059 * ctx == skb == R6 == CTX 15060 * 15061 * Explicit input: 15062 * SRC == any register 15063 * IMM == 32-bit immediate 15064 * 15065 * Output: 15066 * R0 - 8/16/32-bit skb data converted to cpu endianness 15067 */ 15068 static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) 15069 { 15070 struct bpf_reg_state *regs = cur_regs(env); 15071 static const int ctx_reg = BPF_REG_6; 15072 u8 mode = BPF_MODE(insn->code); 15073 int i, err; 15074 15075 if (!may_access_skb(resolve_prog_type(env->prog))) { 15076 verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); 15077 return -EINVAL; 15078 } 15079 15080 if (!env->ops->gen_ld_abs) { 15081 verbose(env, "bpf verifier is misconfigured\n"); 15082 return -EINVAL; 15083 } 15084 15085 if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || 15086 BPF_SIZE(insn->code) == BPF_DW || 15087 (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { 15088 verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); 15089 return -EINVAL; 15090 } 15091 15092 /* check whether implicit source operand (register R6) is readable */ 15093 err = check_reg_arg(env, ctx_reg, SRC_OP); 15094 if (err) 15095 return err; 15096 15097 /* Disallow usage of BPF_LD_[ABS|IND] with reference 
tracking, as
15098 * gen_ld_abs() may terminate the program at runtime, leading to
15099 * reference leak.
15100 */
15101 err = check_reference_leak(env, false);
15102 if (err) {
15103 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
15104 return err;
15105 }
15106
15107 if (env->cur_state->active_lock.ptr) {
15108 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n");
15109 return -EINVAL;
15110 }
15111
15112 if (env->cur_state->active_rcu_lock) {
15113 verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_rcu_read_lock-ed region\n");
15114 return -EINVAL;
15115 }
15116
15117 if (regs[ctx_reg].type != PTR_TO_CTX) {
15118 verbose(env,
15119 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
15120 return -EINVAL;
15121 }
15122
15123 if (mode == BPF_IND) {
15124 /* check explicit source operand */
15125 err = check_reg_arg(env, insn->src_reg, SRC_OP);
15126 if (err)
15127 return err;
15128 }
15129
15130 err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
15131 if (err < 0)
15132 return err;
15133
15134 /* reset caller saved regs to unreadable */
15135 for (i = 0; i < CALLER_SAVED_REGS; i++) {
15136 mark_reg_not_init(env, regs, caller_saved[i]);
15137 check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
15138 }
15139
15140 /* mark destination R0 register as readable, since it contains
15141 * the value fetched from the packet.
15142 * Already marked as written above.
15143 */
15144 mark_reg_unknown(env, regs, BPF_REG_0);
15145 /* ld_abs loads up to 32-bit skb data. */
15146 regs[BPF_REG_0].subreg_def = env->insn_idx + 1;
15147 return 0;
15148 }
15149
15150 static int check_return_code(struct bpf_verifier_env *env, int regno)
15151 {
15152 struct tnum enforce_attach_type_range = tnum_unknown;
15153 const struct bpf_prog *prog = env->prog;
15154 struct bpf_reg_state *reg;
15155 struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0);
15156 enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
15157 int err;
15158 struct bpf_func_state *frame = env->cur_state->frame[0];
15159 const bool is_subprog = frame->subprogno;
15160
15161 /* LSM and struct_ops func-ptr's return type could be "void" */
15162 if (!is_subprog || frame->in_exception_callback_fn) {
15163 switch (prog_type) {
15164 case BPF_PROG_TYPE_LSM:
15165 if (prog->expected_attach_type == BPF_LSM_CGROUP)
15166 /* See below, can be 0 or 0-1 depending on hook. */
15167 break;
15168 fallthrough;
15169 case BPF_PROG_TYPE_STRUCT_OPS:
15170 if (!prog->aux->attach_func_proto->type)
15171 return 0;
15172 break;
15173 default:
15174 break;
15175 }
15176 }
15177
15178 /* eBPF calling convention is such that R0 is used
15179 * to return the value from eBPF program.
15180 * Make sure that it's readable at this time 15181 * of bpf_exit, which means that program wrote 15182 * something into it earlier 15183 */ 15184 err = check_reg_arg(env, regno, SRC_OP); 15185 if (err) 15186 return err; 15187 15188 if (is_pointer_value(env, regno)) { 15189 verbose(env, "R%d leaks addr as return value\n", regno); 15190 return -EACCES; 15191 } 15192 15193 reg = cur_regs(env) + regno; 15194 15195 if (frame->in_async_callback_fn) { 15196 /* enforce return zero from async callbacks like timer */ 15197 if (reg->type != SCALAR_VALUE) { 15198 verbose(env, "In async callback the register R%d is not a known value (%s)\n", 15199 regno, reg_type_str(env, reg->type)); 15200 return -EINVAL; 15201 } 15202 15203 if (!tnum_in(const_0, reg->var_off)) { 15204 verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0"); 15205 return -EINVAL; 15206 } 15207 return 0; 15208 } 15209 15210 if (is_subprog && !frame->in_exception_callback_fn) { 15211 if (reg->type != SCALAR_VALUE) { 15212 verbose(env, "At subprogram exit the register R%d is not a scalar value (%s)\n", 15213 regno, reg_type_str(env, reg->type)); 15214 return -EINVAL; 15215 } 15216 return 0; 15217 } 15218 15219 switch (prog_type) { 15220 case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: 15221 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || 15222 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG || 15223 env->prog->expected_attach_type == BPF_CGROUP_UNIX_RECVMSG || 15224 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETPEERNAME || 15225 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETPEERNAME || 15226 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETPEERNAME || 15227 env->prog->expected_attach_type == BPF_CGROUP_INET4_GETSOCKNAME || 15228 env->prog->expected_attach_type == BPF_CGROUP_INET6_GETSOCKNAME || 15229 env->prog->expected_attach_type == BPF_CGROUP_UNIX_GETSOCKNAME) 15230 range = tnum_range(1, 1); 15231 if (env->prog->expected_attach_type == BPF_CGROUP_INET4_BIND || 15232 env->prog->expected_attach_type == BPF_CGROUP_INET6_BIND) 15233 range = tnum_range(0, 3); 15234 break; 15235 case BPF_PROG_TYPE_CGROUP_SKB: 15236 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { 15237 range = tnum_range(0, 3); 15238 enforce_attach_type_range = tnum_range(2, 3); 15239 } 15240 break; 15241 case BPF_PROG_TYPE_CGROUP_SOCK: 15242 case BPF_PROG_TYPE_SOCK_OPS: 15243 case BPF_PROG_TYPE_CGROUP_DEVICE: 15244 case BPF_PROG_TYPE_CGROUP_SYSCTL: 15245 case BPF_PROG_TYPE_CGROUP_SOCKOPT: 15246 break; 15247 case BPF_PROG_TYPE_RAW_TRACEPOINT: 15248 if (!env->prog->aux->attach_btf_id) 15249 return 0; 15250 range = tnum_const(0); 15251 break; 15252 case BPF_PROG_TYPE_TRACING: 15253 switch (env->prog->expected_attach_type) { 15254 case BPF_TRACE_FENTRY: 15255 case BPF_TRACE_FEXIT: 15256 range = tnum_const(0); 15257 break; 15258 case BPF_TRACE_RAW_TP: 15259 case BPF_MODIFY_RETURN: 15260 return 0; 15261 case BPF_TRACE_ITER: 15262 break; 15263 default: 15264 return -ENOTSUPP; 15265 } 15266 break; 15267 case BPF_PROG_TYPE_SK_LOOKUP: 15268 range = tnum_range(SK_DROP, SK_PASS); 15269 break; 15270 15271 case BPF_PROG_TYPE_LSM: 15272 if (env->prog->expected_attach_type != BPF_LSM_CGROUP) { 15273 /* Regular BPF_PROG_TYPE_LSM programs can return 15274 * any value. 15275 */ 15276 return 0; 15277 } 15278 if (!env->prog->aux->attach_func_proto->type) { 15279 /* Make sure programs that attach to void 15280 * hooks don't try to modify return value. 
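* For such hooks only a return value of 1 is accepted (the
* tnum_range(1, 1) set just below); e.g. a program whose R0 is the
* constant 0 at exit then fails the tnum_in() check further down and
* is rejected.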
15281 */ 15282 range = tnum_range(1, 1); 15283 } 15284 break; 15285 15286 case BPF_PROG_TYPE_NETFILTER: 15287 range = tnum_range(NF_DROP, NF_ACCEPT); 15288 break; 15289 case BPF_PROG_TYPE_EXT: 15290 /* freplace program can return anything as its return value 15291 * depends on the to-be-replaced kernel func or bpf program. 15292 */ 15293 default: 15294 return 0; 15295 } 15296 15297 if (reg->type != SCALAR_VALUE) { 15298 verbose(env, "At program exit the register R%d is not a known value (%s)\n", 15299 regno, reg_type_str(env, reg->type)); 15300 return -EINVAL; 15301 } 15302 15303 if (!tnum_in(range, reg->var_off)) { 15304 verbose_invalid_scalar(env, reg, &range, "program exit", "R0"); 15305 if (prog->expected_attach_type == BPF_LSM_CGROUP && 15306 prog_type == BPF_PROG_TYPE_LSM && 15307 !prog->aux->attach_func_proto->type) 15308 verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n"); 15309 return -EINVAL; 15310 } 15311 15312 if (!tnum_is_unknown(enforce_attach_type_range) && 15313 tnum_in(enforce_attach_type_range, reg->var_off)) 15314 env->prog->enforce_expected_attach_type = 1; 15315 return 0; 15316 } 15317 15318 /* non-recursive DFS pseudo code 15319 * 1 procedure DFS-iterative(G,v): 15320 * 2 label v as discovered 15321 * 3 let S be a stack 15322 * 4 S.push(v) 15323 * 5 while S is not empty 15324 * 6 t <- S.peek() 15325 * 7 if t is what we're looking for: 15326 * 8 return t 15327 * 9 for all edges e in G.adjacentEdges(t) do 15328 * 10 if edge e is already labelled 15329 * 11 continue with the next edge 15330 * 12 w <- G.adjacentVertex(t,e) 15331 * 13 if vertex w is not discovered and not explored 15332 * 14 label e as tree-edge 15333 * 15 label w as discovered 15334 * 16 S.push(w) 15335 * 17 continue at 5 15336 * 18 else if vertex w is discovered 15337 * 19 label e as back-edge 15338 * 20 else 15339 * 21 // vertex w is explored 15340 * 22 label e as forward- or cross-edge 15341 * 23 label t as explored 15342 * 24 S.pop() 15343 * 15344 * convention: 15345 * 0x10 - discovered 15346 * 0x11 - discovered and fall-through edge labelled 15347 * 0x12 - discovered and fall-through and branch edges labelled 15348 * 0x20 - explored 15349 */ 15350 15351 enum { 15352 DISCOVERED = 0x10, 15353 EXPLORED = 0x20, 15354 FALLTHROUGH = 1, 15355 BRANCH = 2, 15356 }; 15357 15358 static void mark_prune_point(struct bpf_verifier_env *env, int idx) 15359 { 15360 env->insn_aux_data[idx].prune_point = true; 15361 } 15362 15363 static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx) 15364 { 15365 return env->insn_aux_data[insn_idx].prune_point; 15366 } 15367 15368 static void mark_force_checkpoint(struct bpf_verifier_env *env, int idx) 15369 { 15370 env->insn_aux_data[idx].force_checkpoint = true; 15371 } 15372 15373 static bool is_force_checkpoint(struct bpf_verifier_env *env, int insn_idx) 15374 { 15375 return env->insn_aux_data[insn_idx].force_checkpoint; 15376 } 15377 15378 15379 enum { 15380 DONE_EXPLORING = 0, 15381 KEEP_EXPLORING = 1, 15382 }; 15383 15384 /* t, w, e - match pseudo-code above: 15385 * t - index of current instruction 15386 * w - next instruction 15387 * e - edge 15388 */ 15389 static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, 15390 bool loop_ok) 15391 { 15392 int *insn_stack = env->cfg.insn_stack; 15393 int *insn_state = env->cfg.insn_state; 15394 15395 if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) 15396 return DONE_EXPLORING; 15397 15398 if (e == BRANCH && insn_state[t] >= (DISCOVERED | 
BRANCH)) 15399 return DONE_EXPLORING; 15400 15401 if (w < 0 || w >= env->prog->len) { 15402 verbose_linfo(env, t, "%d: ", t); 15403 verbose(env, "jump out of range from insn %d to %d\n", t, w); 15404 return -EINVAL; 15405 } 15406 15407 if (e == BRANCH) { 15408 /* mark branch target for state pruning */ 15409 mark_prune_point(env, w); 15410 mark_jmp_point(env, w); 15411 } 15412 15413 if (insn_state[w] == 0) { 15414 /* tree-edge */ 15415 insn_state[t] = DISCOVERED | e; 15416 insn_state[w] = DISCOVERED; 15417 if (env->cfg.cur_stack >= env->prog->len) 15418 return -E2BIG; 15419 insn_stack[env->cfg.cur_stack++] = w; 15420 return KEEP_EXPLORING; 15421 } else if ((insn_state[w] & 0xF0) == DISCOVERED) { 15422 if (loop_ok && env->bpf_capable) 15423 return DONE_EXPLORING; 15424 verbose_linfo(env, t, "%d: ", t); 15425 verbose_linfo(env, w, "%d: ", w); 15426 verbose(env, "back-edge from insn %d to %d\n", t, w); 15427 return -EINVAL; 15428 } else if (insn_state[w] == EXPLORED) { 15429 /* forward- or cross-edge */ 15430 insn_state[t] = DISCOVERED | e; 15431 } else { 15432 verbose(env, "insn state internal bug\n"); 15433 return -EFAULT; 15434 } 15435 return DONE_EXPLORING; 15436 } 15437 15438 static int visit_func_call_insn(int t, struct bpf_insn *insns, 15439 struct bpf_verifier_env *env, 15440 bool visit_callee) 15441 { 15442 int ret, insn_sz; 15443 15444 insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1; 15445 ret = push_insn(t, t + insn_sz, FALLTHROUGH, env, false); 15446 if (ret) 15447 return ret; 15448 15449 mark_prune_point(env, t + insn_sz); 15450 /* when we exit from subprog, we need to record non-linear history */ 15451 mark_jmp_point(env, t + insn_sz); 15452 15453 if (visit_callee) { 15454 mark_prune_point(env, t); 15455 ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env, 15456 /* It's ok to allow recursion from CFG point of 15457 * view. __check_func_call() will do the actual 15458 * check. 15459 */ 15460 bpf_pseudo_func(insns + t)); 15461 } 15462 return ret; 15463 } 15464 15465 /* Visits the instruction at index t and returns one of the following: 15466 * < 0 - an error occurred 15467 * DONE_EXPLORING - the instruction was fully explored 15468 * KEEP_EXPLORING - there is still work to be done before it is fully explored 15469 */ 15470 static int visit_insn(int t, struct bpf_verifier_env *env) 15471 { 15472 struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t]; 15473 int ret, off, insn_sz; 15474 15475 if (bpf_pseudo_func(insn)) 15476 return visit_func_call_insn(t, insns, env, true); 15477 15478 /* All non-branch instructions have a single fall-through edge. */ 15479 if (BPF_CLASS(insn->code) != BPF_JMP && 15480 BPF_CLASS(insn->code) != BPF_JMP32) { 15481 insn_sz = bpf_is_ldimm64(insn) ? 2 : 1; 15482 return push_insn(t, t + insn_sz, FALLTHROUGH, env, false); 15483 } 15484 15485 switch (BPF_OP(insn->code)) { 15486 case BPF_EXIT: 15487 return DONE_EXPLORING; 15488 15489 case BPF_CALL: 15490 if (insn->src_reg == 0 && insn->imm == BPF_FUNC_timer_set_callback) 15491 /* Mark this call insn as a prune point to trigger 15492 * is_state_visited() check before call itself is 15493 * processed by __check_func_call(). Otherwise new 15494 * async state will be pushed for further exploration. 
15495 */ 15496 mark_prune_point(env, t); 15497 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 15498 struct bpf_kfunc_call_arg_meta meta; 15499 15500 ret = fetch_kfunc_meta(env, insn, &meta, NULL); 15501 if (ret == 0 && is_iter_next_kfunc(&meta)) { 15502 mark_prune_point(env, t); 15503 /* Checking and saving state checkpoints at iter_next() call 15504 * is crucial for fast convergence of open-coded iterator loop 15505 * logic, so we need to force it. If we don't do that, 15506 * is_state_visited() might skip saving a checkpoint, causing 15507 * unnecessarily long sequence of not checkpointed 15508 * instructions and jumps, leading to exhaustion of jump 15509 * history buffer, and potentially other undesired outcomes. 15510 * It is expected that with correct open-coded iterators 15511 * convergence will happen quickly, so we don't run a risk of 15512 * exhausting memory. 15513 */ 15514 mark_force_checkpoint(env, t); 15515 } 15516 } 15517 return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL); 15518 15519 case BPF_JA: 15520 if (BPF_SRC(insn->code) != BPF_K) 15521 return -EINVAL; 15522 15523 if (BPF_CLASS(insn->code) == BPF_JMP) 15524 off = insn->off; 15525 else 15526 off = insn->imm; 15527 15528 /* unconditional jump with single edge */ 15529 ret = push_insn(t, t + off + 1, FALLTHROUGH, env, 15530 true); 15531 if (ret) 15532 return ret; 15533 15534 mark_prune_point(env, t + off + 1); 15535 mark_jmp_point(env, t + off + 1); 15536 15537 return ret; 15538 15539 default: 15540 /* conditional jump with two edges */ 15541 mark_prune_point(env, t); 15542 15543 ret = push_insn(t, t + 1, FALLTHROUGH, env, true); 15544 if (ret) 15545 return ret; 15546 15547 return push_insn(t, t + insn->off + 1, BRANCH, env, true); 15548 } 15549 } 15550 15551 /* non-recursive depth-first-search to detect loops in BPF program 15552 * loop == back-edge in directed graph 15553 */ 15554 static int check_cfg(struct bpf_verifier_env *env) 15555 { 15556 int insn_cnt = env->prog->len; 15557 int *insn_stack, *insn_state; 15558 int ex_insn_beg, i, ret = 0; 15559 bool ex_done = false; 15560 15561 insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 15562 if (!insn_state) 15563 return -ENOMEM; 15564 15565 insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL); 15566 if (!insn_stack) { 15567 kvfree(insn_state); 15568 return -ENOMEM; 15569 } 15570 15571 insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ 15572 insn_stack[0] = 0; /* 0 is the first instruction */ 15573 env->cfg.cur_stack = 1; 15574 15575 walk_cfg: 15576 while (env->cfg.cur_stack > 0) { 15577 int t = insn_stack[env->cfg.cur_stack - 1]; 15578 15579 ret = visit_insn(t, env); 15580 switch (ret) { 15581 case DONE_EXPLORING: 15582 insn_state[t] = EXPLORED; 15583 env->cfg.cur_stack--; 15584 break; 15585 case KEEP_EXPLORING: 15586 break; 15587 default: 15588 if (ret > 0) { 15589 verbose(env, "visit_insn internal bug\n"); 15590 ret = -EFAULT; 15591 } 15592 goto err_free; 15593 } 15594 } 15595 15596 if (env->cfg.cur_stack < 0) { 15597 verbose(env, "pop stack internal bug\n"); 15598 ret = -EFAULT; 15599 goto err_free; 15600 } 15601 15602 if (env->exception_callback_subprog && !ex_done) { 15603 ex_insn_beg = env->subprog_info[env->exception_callback_subprog].start; 15604 15605 insn_state[ex_insn_beg] = DISCOVERED; 15606 insn_stack[0] = ex_insn_beg; 15607 env->cfg.cur_stack = 1; 15608 ex_done = true; 15609 goto walk_cfg; 15610 } 15611 15612 for (i = 0; i < insn_cnt; i++) { 15613 struct bpf_insn *insn 
= &env->prog->insnsi[i]; 15614 15615 if (insn_state[i] != EXPLORED) { 15616 verbose(env, "unreachable insn %d\n", i); 15617 ret = -EINVAL; 15618 goto err_free; 15619 } 15620 if (bpf_is_ldimm64(insn)) { 15621 if (insn_state[i + 1] != 0) { 15622 verbose(env, "jump into the middle of ldimm64 insn %d\n", i); 15623 ret = -EINVAL; 15624 goto err_free; 15625 } 15626 i++; /* skip second half of ldimm64 */ 15627 } 15628 } 15629 ret = 0; /* cfg looks good */ 15630 15631 err_free: 15632 kvfree(insn_state); 15633 kvfree(insn_stack); 15634 env->cfg.insn_state = env->cfg.insn_stack = NULL; 15635 return ret; 15636 } 15637 15638 static int check_abnormal_return(struct bpf_verifier_env *env) 15639 { 15640 int i; 15641 15642 for (i = 1; i < env->subprog_cnt; i++) { 15643 if (env->subprog_info[i].has_ld_abs) { 15644 verbose(env, "LD_ABS is not allowed in subprogs without BTF\n"); 15645 return -EINVAL; 15646 } 15647 if (env->subprog_info[i].has_tail_call) { 15648 verbose(env, "tail_call is not allowed in subprogs without BTF\n"); 15649 return -EINVAL; 15650 } 15651 } 15652 return 0; 15653 } 15654 15655 /* The minimum supported BTF func info size */ 15656 #define MIN_BPF_FUNCINFO_SIZE 8 15657 #define MAX_FUNCINFO_REC_SIZE 252 15658 15659 static int check_btf_func_early(struct bpf_verifier_env *env, 15660 const union bpf_attr *attr, 15661 bpfptr_t uattr) 15662 { 15663 u32 krec_size = sizeof(struct bpf_func_info); 15664 const struct btf_type *type, *func_proto; 15665 u32 i, nfuncs, urec_size, min_size; 15666 struct bpf_func_info *krecord; 15667 struct bpf_prog *prog; 15668 const struct btf *btf; 15669 u32 prev_offset = 0; 15670 bpfptr_t urecord; 15671 int ret = -ENOMEM; 15672 15673 nfuncs = attr->func_info_cnt; 15674 if (!nfuncs) { 15675 if (check_abnormal_return(env)) 15676 return -EINVAL; 15677 return 0; 15678 } 15679 15680 urec_size = attr->func_info_rec_size; 15681 if (urec_size < MIN_BPF_FUNCINFO_SIZE || 15682 urec_size > MAX_FUNCINFO_REC_SIZE || 15683 urec_size % sizeof(u32)) { 15684 verbose(env, "invalid func info rec size %u\n", urec_size); 15685 return -EINVAL; 15686 } 15687 15688 prog = env->prog; 15689 btf = prog->aux->btf; 15690 15691 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); 15692 min_size = min_t(u32, krec_size, urec_size); 15693 15694 krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); 15695 if (!krecord) 15696 return -ENOMEM; 15697 15698 for (i = 0; i < nfuncs; i++) { 15699 ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); 15700 if (ret) { 15701 if (ret == -E2BIG) { 15702 verbose(env, "nonzero tailing record in func info"); 15703 /* set the size kernel expects so loader can zero 15704 * out the rest of the record. 
15705 */ 15706 if (copy_to_bpfptr_offset(uattr, 15707 offsetof(union bpf_attr, func_info_rec_size), 15708 &min_size, sizeof(min_size))) 15709 ret = -EFAULT; 15710 } 15711 goto err_free; 15712 } 15713 15714 if (copy_from_bpfptr(&krecord[i], urecord, min_size)) { 15715 ret = -EFAULT; 15716 goto err_free; 15717 } 15718 15719 /* check insn_off */ 15720 ret = -EINVAL; 15721 if (i == 0) { 15722 if (krecord[i].insn_off) { 15723 verbose(env, 15724 "nonzero insn_off %u for the first func info record", 15725 krecord[i].insn_off); 15726 goto err_free; 15727 } 15728 } else if (krecord[i].insn_off <= prev_offset) { 15729 verbose(env, 15730 "same or smaller insn offset (%u) than previous func info record (%u)", 15731 krecord[i].insn_off, prev_offset); 15732 goto err_free; 15733 } 15734 15735 /* check type_id */ 15736 type = btf_type_by_id(btf, krecord[i].type_id); 15737 if (!type || !btf_type_is_func(type)) { 15738 verbose(env, "invalid type id %d in func info", 15739 krecord[i].type_id); 15740 goto err_free; 15741 } 15742 15743 func_proto = btf_type_by_id(btf, type->type); 15744 if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto))) 15745 /* btf_func_check() already verified it during BTF load */ 15746 goto err_free; 15747 15748 prev_offset = krecord[i].insn_off; 15749 bpfptr_add(&urecord, urec_size); 15750 } 15751 15752 prog->aux->func_info = krecord; 15753 prog->aux->func_info_cnt = nfuncs; 15754 return 0; 15755 15756 err_free: 15757 kvfree(krecord); 15758 return ret; 15759 } 15760 15761 static int check_btf_func(struct bpf_verifier_env *env, 15762 const union bpf_attr *attr, 15763 bpfptr_t uattr) 15764 { 15765 const struct btf_type *type, *func_proto, *ret_type; 15766 u32 i, nfuncs, urec_size; 15767 struct bpf_func_info *krecord; 15768 struct bpf_func_info_aux *info_aux = NULL; 15769 struct bpf_prog *prog; 15770 const struct btf *btf; 15771 bpfptr_t urecord; 15772 bool scalar_return; 15773 int ret = -ENOMEM; 15774 15775 nfuncs = attr->func_info_cnt; 15776 if (!nfuncs) { 15777 if (check_abnormal_return(env)) 15778 return -EINVAL; 15779 return 0; 15780 } 15781 if (nfuncs != env->subprog_cnt) { 15782 verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); 15783 return -EINVAL; 15784 } 15785 15786 urec_size = attr->func_info_rec_size; 15787 15788 prog = env->prog; 15789 btf = prog->aux->btf; 15790 15791 urecord = make_bpfptr(attr->func_info, uattr.is_kernel); 15792 15793 krecord = prog->aux->func_info; 15794 info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN); 15795 if (!info_aux) 15796 return -ENOMEM; 15797 15798 for (i = 0; i < nfuncs; i++) { 15799 /* check insn_off */ 15800 ret = -EINVAL; 15801 15802 if (env->subprog_info[i].start != krecord[i].insn_off) { 15803 verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); 15804 goto err_free; 15805 } 15806 15807 /* Already checked type_id */ 15808 type = btf_type_by_id(btf, krecord[i].type_id); 15809 info_aux[i].linkage = BTF_INFO_VLEN(type->info); 15810 /* Already checked func_proto */ 15811 func_proto = btf_type_by_id(btf, type->type); 15812 15813 ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL); 15814 scalar_return = 15815 btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type); 15816 if (i && !scalar_return && env->subprog_info[i].has_ld_abs) { 15817 verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n"); 15818 goto err_free; 15819 } 15820 if (i && !scalar_return && env->subprog_info[i].has_tail_call) { 15821 verbose(env, 
"tail_call is only allowed in functions that return 'int'.\n"); 15822 goto err_free; 15823 } 15824 15825 bpfptr_add(&urecord, urec_size); 15826 } 15827 15828 prog->aux->func_info_aux = info_aux; 15829 return 0; 15830 15831 err_free: 15832 kfree(info_aux); 15833 return ret; 15834 } 15835 15836 static void adjust_btf_func(struct bpf_verifier_env *env) 15837 { 15838 struct bpf_prog_aux *aux = env->prog->aux; 15839 int i; 15840 15841 if (!aux->func_info) 15842 return; 15843 15844 /* func_info is not available for hidden subprogs */ 15845 for (i = 0; i < env->subprog_cnt - env->hidden_subprog_cnt; i++) 15846 aux->func_info[i].insn_off = env->subprog_info[i].start; 15847 } 15848 15849 #define MIN_BPF_LINEINFO_SIZE offsetofend(struct bpf_line_info, line_col) 15850 #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE 15851 15852 static int check_btf_line(struct bpf_verifier_env *env, 15853 const union bpf_attr *attr, 15854 bpfptr_t uattr) 15855 { 15856 u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; 15857 struct bpf_subprog_info *sub; 15858 struct bpf_line_info *linfo; 15859 struct bpf_prog *prog; 15860 const struct btf *btf; 15861 bpfptr_t ulinfo; 15862 int err; 15863 15864 nr_linfo = attr->line_info_cnt; 15865 if (!nr_linfo) 15866 return 0; 15867 if (nr_linfo > INT_MAX / sizeof(struct bpf_line_info)) 15868 return -EINVAL; 15869 15870 rec_size = attr->line_info_rec_size; 15871 if (rec_size < MIN_BPF_LINEINFO_SIZE || 15872 rec_size > MAX_LINEINFO_REC_SIZE || 15873 rec_size & (sizeof(u32) - 1)) 15874 return -EINVAL; 15875 15876 /* Need to zero it in case the userspace may 15877 * pass in a smaller bpf_line_info object. 15878 */ 15879 linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info), 15880 GFP_KERNEL | __GFP_NOWARN); 15881 if (!linfo) 15882 return -ENOMEM; 15883 15884 prog = env->prog; 15885 btf = prog->aux->btf; 15886 15887 s = 0; 15888 sub = env->subprog_info; 15889 ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel); 15890 expected_size = sizeof(struct bpf_line_info); 15891 ncopy = min_t(u32, expected_size, rec_size); 15892 for (i = 0; i < nr_linfo; i++) { 15893 err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); 15894 if (err) { 15895 if (err == -E2BIG) { 15896 verbose(env, "nonzero tailing record in line_info"); 15897 if (copy_to_bpfptr_offset(uattr, 15898 offsetof(union bpf_attr, line_info_rec_size), 15899 &expected_size, sizeof(expected_size))) 15900 err = -EFAULT; 15901 } 15902 goto err_free; 15903 } 15904 15905 if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) { 15906 err = -EFAULT; 15907 goto err_free; 15908 } 15909 15910 /* 15911 * Check insn_off to ensure 15912 * 1) strictly increasing AND 15913 * 2) bounded by prog->len 15914 * 15915 * The linfo[0].insn_off == 0 check logically falls into 15916 * the later "missing bpf_line_info for func..." case 15917 * because the first linfo[0].insn_off must be the 15918 * first sub also and the first sub must have 15919 * subprog_info[0].start == 0. 
15920 */ 15921 if ((i && linfo[i].insn_off <= prev_offset) || 15922 linfo[i].insn_off >= prog->len) { 15923 verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", 15924 i, linfo[i].insn_off, prev_offset, 15925 prog->len); 15926 err = -EINVAL; 15927 goto err_free; 15928 } 15929 15930 if (!prog->insnsi[linfo[i].insn_off].code) { 15931 verbose(env, 15932 "Invalid insn code at line_info[%u].insn_off\n", 15933 i); 15934 err = -EINVAL; 15935 goto err_free; 15936 } 15937 15938 if (!btf_name_by_offset(btf, linfo[i].line_off) || 15939 !btf_name_by_offset(btf, linfo[i].file_name_off)) { 15940 verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); 15941 err = -EINVAL; 15942 goto err_free; 15943 } 15944 15945 if (s != env->subprog_cnt) { 15946 if (linfo[i].insn_off == sub[s].start) { 15947 sub[s].linfo_idx = i; 15948 s++; 15949 } else if (sub[s].start < linfo[i].insn_off) { 15950 verbose(env, "missing bpf_line_info for func#%u\n", s); 15951 err = -EINVAL; 15952 goto err_free; 15953 } 15954 } 15955 15956 prev_offset = linfo[i].insn_off; 15957 bpfptr_add(&ulinfo, rec_size); 15958 } 15959 15960 if (s != env->subprog_cnt) { 15961 verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", 15962 env->subprog_cnt - s, s); 15963 err = -EINVAL; 15964 goto err_free; 15965 } 15966 15967 prog->aux->linfo = linfo; 15968 prog->aux->nr_linfo = nr_linfo; 15969 15970 return 0; 15971 15972 err_free: 15973 kvfree(linfo); 15974 return err; 15975 } 15976 15977 #define MIN_CORE_RELO_SIZE sizeof(struct bpf_core_relo) 15978 #define MAX_CORE_RELO_SIZE MAX_FUNCINFO_REC_SIZE 15979 15980 static int check_core_relo(struct bpf_verifier_env *env, 15981 const union bpf_attr *attr, 15982 bpfptr_t uattr) 15983 { 15984 u32 i, nr_core_relo, ncopy, expected_size, rec_size; 15985 struct bpf_core_relo core_relo = {}; 15986 struct bpf_prog *prog = env->prog; 15987 const struct btf *btf = prog->aux->btf; 15988 struct bpf_core_ctx ctx = { 15989 .log = &env->log, 15990 .btf = btf, 15991 }; 15992 bpfptr_t u_core_relo; 15993 int err; 15994 15995 nr_core_relo = attr->core_relo_cnt; 15996 if (!nr_core_relo) 15997 return 0; 15998 if (nr_core_relo > INT_MAX / sizeof(struct bpf_core_relo)) 15999 return -EINVAL; 16000 16001 rec_size = attr->core_relo_rec_size; 16002 if (rec_size < MIN_CORE_RELO_SIZE || 16003 rec_size > MAX_CORE_RELO_SIZE || 16004 rec_size % sizeof(u32)) 16005 return -EINVAL; 16006 16007 u_core_relo = make_bpfptr(attr->core_relos, uattr.is_kernel); 16008 expected_size = sizeof(struct bpf_core_relo); 16009 ncopy = min_t(u32, expected_size, rec_size); 16010 16011 /* Unlike func_info and line_info, copy and apply each CO-RE 16012 * relocation record one at a time. 
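 *
 * Note that bpf_core_relo.insn_off is a byte offset, unlike the
 * instruction indices used by func_info/line_info: e.g. a relocation
 * aimed at instruction #5 (a hypothetical value) carries
 * insn_off == 40, i.e. 5 * sizeof(struct bpf_insn), which is why the
 * loop below checks insn_off % 8 and divides by 8 before indexing
 * insnsi[].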
16013 */ 16014 for (i = 0; i < nr_core_relo; i++) { 16015 /* future proofing when sizeof(bpf_core_relo) changes */ 16016 err = bpf_check_uarg_tail_zero(u_core_relo, expected_size, rec_size); 16017 if (err) { 16018 if (err == -E2BIG) { 16019 verbose(env, "nonzero tailing record in core_relo"); 16020 if (copy_to_bpfptr_offset(uattr, 16021 offsetof(union bpf_attr, core_relo_rec_size), 16022 &expected_size, sizeof(expected_size))) 16023 err = -EFAULT; 16024 } 16025 break; 16026 } 16027 16028 if (copy_from_bpfptr(&core_relo, u_core_relo, ncopy)) { 16029 err = -EFAULT; 16030 break; 16031 } 16032 16033 if (core_relo.insn_off % 8 || core_relo.insn_off / 8 >= prog->len) { 16034 verbose(env, "Invalid core_relo[%u].insn_off:%u prog->len:%u\n", 16035 i, core_relo.insn_off, prog->len); 16036 err = -EINVAL; 16037 break; 16038 } 16039 16040 err = bpf_core_apply(&ctx, &core_relo, i, 16041 &prog->insnsi[core_relo.insn_off / 8]); 16042 if (err) 16043 break; 16044 bpfptr_add(&u_core_relo, rec_size); 16045 } 16046 return err; 16047 } 16048 16049 static int check_btf_info_early(struct bpf_verifier_env *env, 16050 const union bpf_attr *attr, 16051 bpfptr_t uattr) 16052 { 16053 struct btf *btf; 16054 int err; 16055 16056 if (!attr->func_info_cnt && !attr->line_info_cnt) { 16057 if (check_abnormal_return(env)) 16058 return -EINVAL; 16059 return 0; 16060 } 16061 16062 btf = btf_get_by_fd(attr->prog_btf_fd); 16063 if (IS_ERR(btf)) 16064 return PTR_ERR(btf); 16065 if (btf_is_kernel(btf)) { 16066 btf_put(btf); 16067 return -EACCES; 16068 } 16069 env->prog->aux->btf = btf; 16070 16071 err = check_btf_func_early(env, attr, uattr); 16072 if (err) 16073 return err; 16074 return 0; 16075 } 16076 16077 static int check_btf_info(struct bpf_verifier_env *env, 16078 const union bpf_attr *attr, 16079 bpfptr_t uattr) 16080 { 16081 int err; 16082 16083 if (!attr->func_info_cnt && !attr->line_info_cnt) { 16084 if (check_abnormal_return(env)) 16085 return -EINVAL; 16086 return 0; 16087 } 16088 16089 err = check_btf_func(env, attr, uattr); 16090 if (err) 16091 return err; 16092 16093 err = check_btf_line(env, attr, uattr); 16094 if (err) 16095 return err; 16096 16097 err = check_core_relo(env, attr, uattr); 16098 if (err) 16099 return err; 16100 16101 return 0; 16102 } 16103 16104 /* check %cur's range satisfies %old's */ 16105 static bool range_within(struct bpf_reg_state *old, 16106 struct bpf_reg_state *cur) 16107 { 16108 return old->umin_value <= cur->umin_value && 16109 old->umax_value >= cur->umax_value && 16110 old->smin_value <= cur->smin_value && 16111 old->smax_value >= cur->smax_value && 16112 old->u32_min_value <= cur->u32_min_value && 16113 old->u32_max_value >= cur->u32_max_value && 16114 old->s32_min_value <= cur->s32_min_value && 16115 old->s32_max_value >= cur->s32_max_value; 16116 } 16117 16118 /* If in the old state two registers had the same id, then they need to have 16119 * the same id in the new state as well. But that id could be different from 16120 * the old state, so we need to track the mapping from old to new ids. 16121 * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent 16122 * regs with old id 5 must also have new id 9 for the new state to be safe. But 16123 * regs with a different old id could still have new id 9, we don't care about 16124 * that. 16125 * So we look through our idmap to see if this old id has been seen before. If 16126 * so, we require the new id to match; otherwise, we add the id pair to the map. 
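 *
 * A small hypothetical example: if a register with old id 5 was first
 * seen with new id 9, a later register with old id 5 but new id 7
 * makes the states differ.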
16127 */ 16128 static bool check_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap) 16129 { 16130 struct bpf_id_pair *map = idmap->map; 16131 unsigned int i; 16132 16133 /* either both IDs should be set or both should be zero */ 16134 if (!!old_id != !!cur_id) 16135 return false; 16136 16137 if (old_id == 0) /* cur_id == 0 as well */ 16138 return true; 16139 16140 for (i = 0; i < BPF_ID_MAP_SIZE; i++) { 16141 if (!map[i].old) { 16142 /* Reached an empty slot; haven't seen this id before */ 16143 map[i].old = old_id; 16144 map[i].cur = cur_id; 16145 return true; 16146 } 16147 if (map[i].old == old_id) 16148 return map[i].cur == cur_id; 16149 if (map[i].cur == cur_id) 16150 return false; 16151 } 16152 /* We ran out of idmap slots, which should be impossible */ 16153 WARN_ON_ONCE(1); 16154 return false; 16155 } 16156 16157 /* Similar to check_ids(), but allocate a unique temporary ID 16158 * for 'old_id' or 'cur_id' of zero. 16159 * This makes pairs like '0 vs unique ID', 'unique ID vs 0' valid. 16160 */ 16161 static bool check_scalar_ids(u32 old_id, u32 cur_id, struct bpf_idmap *idmap) 16162 { 16163 old_id = old_id ? old_id : ++idmap->tmp_id_gen; 16164 cur_id = cur_id ? cur_id : ++idmap->tmp_id_gen; 16165 16166 return check_ids(old_id, cur_id, idmap); 16167 } 16168 16169 static void clean_func_state(struct bpf_verifier_env *env, 16170 struct bpf_func_state *st) 16171 { 16172 enum bpf_reg_liveness live; 16173 int i, j; 16174 16175 for (i = 0; i < BPF_REG_FP; i++) { 16176 live = st->regs[i].live; 16177 /* liveness must not touch this register anymore */ 16178 st->regs[i].live |= REG_LIVE_DONE; 16179 if (!(live & REG_LIVE_READ)) 16180 /* since the register is unused, clear its state 16181 * to make further comparison simpler 16182 */ 16183 __mark_reg_not_init(env, &st->regs[i]); 16184 } 16185 16186 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { 16187 live = st->stack[i].spilled_ptr.live; 16188 /* liveness must not touch this stack slot anymore */ 16189 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; 16190 if (!(live & REG_LIVE_READ)) { 16191 __mark_reg_not_init(env, &st->stack[i].spilled_ptr); 16192 for (j = 0; j < BPF_REG_SIZE; j++) 16193 st->stack[i].slot_type[j] = STACK_INVALID; 16194 } 16195 } 16196 } 16197 16198 static void clean_verifier_state(struct bpf_verifier_env *env, 16199 struct bpf_verifier_state *st) 16200 { 16201 int i; 16202 16203 if (st->frame[0]->regs[0].live & REG_LIVE_DONE) 16204 /* all regs in this state in all frames were already marked */ 16205 return; 16206 16207 for (i = 0; i <= st->curframe; i++) 16208 clean_func_state(env, st->frame[i]); 16209 } 16210 16211 /* the parentage chains form a tree. 16212 * the verifier states are added to state lists at a given insn and 16213 * pushed into state stack for future exploration. 16214 * when the verifier reaches bpf_exit insn some of the verifier states 16215 * stored in the state lists have their final liveness state already, 16216 * but a lot of states will get revised from liveness point of view when 16217 * the verifier explores other branches. 16218 * Example: 16219 * 1: r0 = 1 16220 * 2: if r1 == 100 goto pc+1 16221 * 3: r0 = 2 16222 * 4: exit 16223 * when the verifier reaches exit insn the register r0 in the state list of 16224 * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch 16225 * of insn 2 and goes exploring further. At insn 4 it will walk the 16226 * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
16227 * 16228 * Since the verifier pushes the branch states as it sees them while exploring 16229 * the program, the condition of walking the branch instruction for the second 16230 * time means that all states below this branch were already explored and 16231 * their final liveness marks are already propagated. 16232 * Hence when the verifier completes the search of the state list in is_state_visited() 16233 * we can call this clean_live_states() function to mark all liveness states 16234 * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state' 16235 * will not be used. 16236 * This function also clears the registers and stack for states that !READ 16237 * to simplify state merging. 16238 * 16239 * An important note here is that walking the same branch instruction in the callee 16240 * doesn't mean that the states are DONE. The verifier has to compare 16241 * the callsites 16242 */ 16243 static void clean_live_states(struct bpf_verifier_env *env, int insn, 16244 struct bpf_verifier_state *cur) 16245 { 16246 struct bpf_verifier_state_list *sl; 16247 16248 sl = *explored_state(env, insn); 16249 while (sl) { 16250 if (sl->state.branches) 16251 goto next; 16252 if (sl->state.insn_idx != insn || 16253 !same_callsites(&sl->state, cur)) 16254 goto next; 16255 clean_verifier_state(env, &sl->state); 16256 next: 16257 sl = sl->next; 16258 } 16259 } 16260 16261 static bool regs_exact(const struct bpf_reg_state *rold, 16262 const struct bpf_reg_state *rcur, 16263 struct bpf_idmap *idmap) 16264 { 16265 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 16266 check_ids(rold->id, rcur->id, idmap) && 16267 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); 16268 } 16269 16270 /* Returns true if (rold safe implies rcur safe) */ 16271 static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, 16272 struct bpf_reg_state *rcur, struct bpf_idmap *idmap, bool exact) 16273 { 16274 if (exact) 16275 return regs_exact(rold, rcur, idmap); 16276 16277 if (!(rold->live & REG_LIVE_READ)) 16278 /* explored state didn't use this */ 16279 return true; 16280 if (rold->type == NOT_INIT) 16281 /* explored state can't have used this */ 16282 return true; 16283 if (rcur->type == NOT_INIT) 16284 return false; 16285 16286 /* Enforce that register types have to match exactly, including their 16287 * modifiers (like PTR_MAYBE_NULL, MEM_RDONLY, etc), as a general 16288 * rule. 16289 * 16290 * One can make a point that using a pointer register as unbounded 16291 * SCALAR would be technically acceptable, but this could lead to 16292 * pointer leaks because scalars are allowed to leak while pointers 16293 * are not. We could make this safe in special cases if root is 16294 * calling us, but it's probably not worth the hassle. 16295 * 16296 * Also, register types that are *not* MAYBE_NULL could technically be 16297 * safe to use as their MAYBE_NULL variants (e.g., PTR_TO_MAP_VALUE 16298 * is safe to be used as PTR_TO_MAP_VALUE_OR_NULL, provided both point 16299 * to the same map). 16300 * However, if the old MAYBE_NULL register then got NULL checked, 16301 * doing so could have affected others with the same id, and we can't 16302 * check for that because we lost the id when we converted to 16303 * a non-MAYBE_NULL variant. 16304 * So, as a general rule we don't allow mixing MAYBE_NULL and 16305 * non-MAYBE_NULL registers as well.
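 *
 * A simplified sketch of that last case (hypothetical registers): in
 * the old state r1 and r2 are both PTR_TO_MAP_VALUE_OR_NULL with id=3,
 * and the already-verified continuation does
 * "if r1 == NULL goto out; *r2 = 0;", which is safe only because the
 * NULL check marks both r1 and r2 as non-NULL via the shared id. If a
 * current state shows up with r1 already PTR_TO_MAP_VALUE (its id
 * cleared) but r2 still _OR_NULL, pruning against the old state would
 * skip the only reasoning that proves r2 non-NULL.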
16306 */ 16307 if (rold->type != rcur->type) 16308 return false; 16309 16310 switch (base_type(rold->type)) { 16311 case SCALAR_VALUE: 16312 if (env->explore_alu_limits) { 16313 /* explore_alu_limits disables tnum_in() and range_within() 16314 * logic and requires everything to be strict 16315 */ 16316 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 16317 check_scalar_ids(rold->id, rcur->id, idmap); 16318 } 16319 if (!rold->precise) 16320 return true; 16321 /* Why check_ids() for scalar registers? 16322 * 16323 * Consider the following BPF code: 16324 * 1: r6 = ... unbound scalar, ID=a ... 16325 * 2: r7 = ... unbound scalar, ID=b ... 16326 * 3: if (r6 > r7) goto +1 16327 * 4: r6 = r7 16328 * 5: if (r6 > X) goto ... 16329 * 6: ... memory operation using r7 ... 16330 * 16331 * First verification path is [1-6]: 16332 * - at (4) same bpf_reg_state::id (b) would be assigned to r6 and r7; 16333 * - at (5) r6 would be marked <= X, find_equal_scalars() would also mark 16334 * r7 <= X, because r6 and r7 share same id. 16335 * Next verification path is [1-4, 6]. 16336 * 16337 * Instruction (6) would be reached in two states: 16338 * I. r6{.id=b}, r7{.id=b} via path 1-6; 16339 * II. r6{.id=a}, r7{.id=b} via path 1-4, 6. 16340 * 16341 * Use check_ids() to distinguish these states. 16342 * --- 16343 * Also verify that new value satisfies old value range knowledge. 16344 */ 16345 return range_within(rold, rcur) && 16346 tnum_in(rold->var_off, rcur->var_off) && 16347 check_scalar_ids(rold->id, rcur->id, idmap); 16348 case PTR_TO_MAP_KEY: 16349 case PTR_TO_MAP_VALUE: 16350 case PTR_TO_MEM: 16351 case PTR_TO_BUF: 16352 case PTR_TO_TP_BUFFER: 16353 /* If the new min/max/var_off satisfy the old ones and 16354 * everything else matches, we are OK. 16355 */ 16356 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, var_off)) == 0 && 16357 range_within(rold, rcur) && 16358 tnum_in(rold->var_off, rcur->var_off) && 16359 check_ids(rold->id, rcur->id, idmap) && 16360 check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); 16361 case PTR_TO_PACKET_META: 16362 case PTR_TO_PACKET: 16363 /* We must have at least as much range as the old ptr 16364 * did, so that any accesses which were safe before are 16365 * still safe. This is true even if old range < old off, 16366 * since someone could have accessed through (ptr - k), or 16367 * even done ptr -= k in a register, to get a safe access. 16368 */ 16369 if (rold->range > rcur->range) 16370 return false; 16371 /* If the offsets don't match, we can't trust our alignment; 16372 * nor can we be sure that we won't fall out of range. 
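 *
 * E.g. (hypothetical numbers): the old pointer had off=0 and range=8,
 * so a 4-byte load at pkt+4 further down was proven in-bounds; a
 * current pointer with a smaller range, or with a different off, gives
 * no such guarantee, so the states cannot be treated as equivalent.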
16373 */ 16374 if (rold->off != rcur->off) 16375 return false; 16376 /* id relations must be preserved */ 16377 if (!check_ids(rold->id, rcur->id, idmap)) 16378 return false; 16379 /* new val must satisfy old val knowledge */ 16380 return range_within(rold, rcur) && 16381 tnum_in(rold->var_off, rcur->var_off); 16382 case PTR_TO_STACK: 16383 /* two stack pointers are equal only if they're pointing to 16384 * the same stack frame, since fp-8 in foo != fp-8 in bar 16385 */ 16386 return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno; 16387 default: 16388 return regs_exact(rold, rcur, idmap); 16389 } 16390 } 16391 16392 static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, 16393 struct bpf_func_state *cur, struct bpf_idmap *idmap, bool exact) 16394 { 16395 int i, spi; 16396 16397 /* walk slots of the explored stack and ignore any additional 16398 * slots in the current stack, since explored(safe) state 16399 * didn't use them 16400 */ 16401 for (i = 0; i < old->allocated_stack; i++) { 16402 struct bpf_reg_state *old_reg, *cur_reg; 16403 16404 spi = i / BPF_REG_SIZE; 16405 16406 if (exact && 16407 old->stack[spi].slot_type[i % BPF_REG_SIZE] != 16408 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 16409 return false; 16410 16411 if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ) && !exact) { 16412 i += BPF_REG_SIZE - 1; 16413 /* explored state didn't use this */ 16414 continue; 16415 } 16416 16417 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) 16418 continue; 16419 16420 if (env->allow_uninit_stack && 16421 old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC) 16422 continue; 16423 16424 /* explored stack has more populated slots than current stack 16425 * and these slots were used 16426 */ 16427 if (i >= cur->allocated_stack) 16428 return false; 16429 16430 /* if old state was safe with misc data in the stack 16431 * it will be safe with zero-initialized stack. 16432 * The opposite is not true 16433 */ 16434 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && 16435 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) 16436 continue; 16437 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != 16438 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) 16439 /* Ex: old explored (safe) state has STACK_SPILL in 16440 * this stack slot, but current has STACK_MISC -> 16441 * these verifier states are not equivalent, 16442 * return false to continue verification of this path 16443 */ 16444 return false; 16445 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) 16446 continue; 16447 /* Both old and cur have the same slot_type */ 16448 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { 16449 case STACK_SPILL: 16450 /* when explored and current stack slot are both storing 16451 * spilled registers, check that the stored pointer types 16452 * are the same as well. 16453 * Ex: explored safe path could have stored 16454 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} 16455 * but current path has stored: 16456 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} 16457 * such verifier states are not equivalent.
16458 * return false to continue verification of this path 16459 */ 16460 if (!regsafe(env, &old->stack[spi].spilled_ptr, 16461 &cur->stack[spi].spilled_ptr, idmap, exact)) 16462 return false; 16463 break; 16464 case STACK_DYNPTR: 16465 old_reg = &old->stack[spi].spilled_ptr; 16466 cur_reg = &cur->stack[spi].spilled_ptr; 16467 if (old_reg->dynptr.type != cur_reg->dynptr.type || 16468 old_reg->dynptr.first_slot != cur_reg->dynptr.first_slot || 16469 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) 16470 return false; 16471 break; 16472 case STACK_ITER: 16473 old_reg = &old->stack[spi].spilled_ptr; 16474 cur_reg = &cur->stack[spi].spilled_ptr; 16475 /* iter.depth is not compared between states as it 16476 * doesn't matter for correctness and would otherwise 16477 * prevent convergence; we maintain it only to prevent 16478 * infinite loop check triggering, see 16479 * iter_active_depths_differ() 16480 */ 16481 if (old_reg->iter.btf != cur_reg->iter.btf || 16482 old_reg->iter.btf_id != cur_reg->iter.btf_id || 16483 old_reg->iter.state != cur_reg->iter.state || 16484 /* ignore {old_reg,cur_reg}->iter.depth, see above */ 16485 !check_ids(old_reg->ref_obj_id, cur_reg->ref_obj_id, idmap)) 16486 return false; 16487 break; 16488 case STACK_MISC: 16489 case STACK_ZERO: 16490 case STACK_INVALID: 16491 continue; 16492 /* Ensure that new unhandled slot types return false by default */ 16493 default: 16494 return false; 16495 } 16496 } 16497 return true; 16498 } 16499 16500 static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur, 16501 struct bpf_idmap *idmap) 16502 { 16503 int i; 16504 16505 if (old->acquired_refs != cur->acquired_refs) 16506 return false; 16507 16508 for (i = 0; i < old->acquired_refs; i++) { 16509 if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap)) 16510 return false; 16511 } 16512 16513 return true; 16514 } 16515 16516 /* compare two verifier states 16517 * 16518 * all states stored in state_list are known to be valid, since 16519 * verifier reached 'bpf_exit' instruction through them 16520 * 16521 * this function is called when verifier exploring different branches of 16522 * execution popped from the state stack. If it sees an old state that has 16523 * more strict register state and more strict stack state then this execution 16524 * branch doesn't need to be explored further, since verifier already 16525 * concluded that more strict state leads to valid finish. 16526 * 16527 * Therefore two states are equivalent if register state is more conservative 16528 * and explored stack state is more conservative than the current one. 16529 * Example: 16530 * explored current 16531 * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) 16532 * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) 16533 * 16534 * In other words if current stack state (one being explored) has more 16535 * valid slots than old one that already passed validation, it means 16536 * the verifier can stop exploring and conclude that current state is valid too 16537 * 16538 * Similarly with registers. 
If explored state has register type as invalid 16539 * whereas register type in current state is meaningful, it means that 16540 * the current state will reach 'bpf_exit' instruction safely 16541 */ 16542 static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, 16543 struct bpf_func_state *cur, bool exact) 16544 { 16545 int i; 16546 16547 for (i = 0; i < MAX_BPF_REG; i++) 16548 if (!regsafe(env, &old->regs[i], &cur->regs[i], 16549 &env->idmap_scratch, exact)) 16550 return false; 16551 16552 if (!stacksafe(env, old, cur, &env->idmap_scratch, exact)) 16553 return false; 16554 16555 if (!refsafe(old, cur, &env->idmap_scratch)) 16556 return false; 16557 16558 return true; 16559 } 16560 16561 static void reset_idmap_scratch(struct bpf_verifier_env *env) 16562 { 16563 env->idmap_scratch.tmp_id_gen = env->id_gen; 16564 memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map)); 16565 } 16566 16567 static bool states_equal(struct bpf_verifier_env *env, 16568 struct bpf_verifier_state *old, 16569 struct bpf_verifier_state *cur, 16570 bool exact) 16571 { 16572 int i; 16573 16574 if (old->curframe != cur->curframe) 16575 return false; 16576 16577 reset_idmap_scratch(env); 16578 16579 /* Verification state from speculative execution simulation 16580 * must never prune a non-speculative execution one. 16581 */ 16582 if (old->speculative && !cur->speculative) 16583 return false; 16584 16585 if (old->active_lock.ptr != cur->active_lock.ptr) 16586 return false; 16587 16588 /* Old and cur active_lock's have to be either both present 16589 * or both absent. 16590 */ 16591 if (!!old->active_lock.id != !!cur->active_lock.id) 16592 return false; 16593 16594 if (old->active_lock.id && 16595 !check_ids(old->active_lock.id, cur->active_lock.id, &env->idmap_scratch)) 16596 return false; 16597 16598 if (old->active_rcu_lock != cur->active_rcu_lock) 16599 return false; 16600 16601 /* for states to be equal callsites have to be the same 16602 * and all frame states need to be equivalent 16603 */ 16604 for (i = 0; i <= old->curframe; i++) { 16605 if (old->frame[i]->callsite != cur->frame[i]->callsite) 16606 return false; 16607 if (!func_states_equal(env, old->frame[i], cur->frame[i], exact)) 16608 return false; 16609 } 16610 return true; 16611 } 16612 16613 /* Return 0 if no propagation happened. Return negative error code if error 16614 * happened. Otherwise, return the propagated bit. 16615 */ 16616 static int propagate_liveness_reg(struct bpf_verifier_env *env, 16617 struct bpf_reg_state *reg, 16618 struct bpf_reg_state *parent_reg) 16619 { 16620 u8 parent_flag = parent_reg->live & REG_LIVE_READ; 16621 u8 flag = reg->live & REG_LIVE_READ; 16622 int err; 16623 16624 /* When comes here, read flags of PARENT_REG or REG could be any of 16625 * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need 16626 * of propagation if PARENT_REG has strongest REG_LIVE_READ64. 16627 */ 16628 if (parent_flag == REG_LIVE_READ64 || 16629 /* Or if there is no read flag from REG. */ 16630 !flag || 16631 /* Or if the read flag from REG is the same as PARENT_REG. */ 16632 parent_flag == flag) 16633 return 0; 16634 16635 err = mark_reg_read(env, reg, parent_reg, flag); 16636 if (err) 16637 return err; 16638 16639 return flag; 16640 } 16641 16642 /* A write screens off any subsequent reads; but write marks come from the 16643 * straight-line code between a state and its parent. 
When we arrive at an 16644 * equivalent state (jump target or such) we didn't arrive by the straight-line 16645 * code, so read marks in the state must propagate to the parent regardless 16646 * of the state's write marks. That's what 'parent == state->parent' comparison 16647 * in mark_reg_read() is for. 16648 */ 16649 static int propagate_liveness(struct bpf_verifier_env *env, 16650 const struct bpf_verifier_state *vstate, 16651 struct bpf_verifier_state *vparent) 16652 { 16653 struct bpf_reg_state *state_reg, *parent_reg; 16654 struct bpf_func_state *state, *parent; 16655 int i, frame, err = 0; 16656 16657 if (vparent->curframe != vstate->curframe) { 16658 WARN(1, "propagate_live: parent frame %d current frame %d\n", 16659 vparent->curframe, vstate->curframe); 16660 return -EFAULT; 16661 } 16662 /* Propagate read liveness of registers... */ 16663 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 16664 for (frame = 0; frame <= vstate->curframe; frame++) { 16665 parent = vparent->frame[frame]; 16666 state = vstate->frame[frame]; 16667 parent_reg = parent->regs; 16668 state_reg = state->regs; 16669 /* We don't need to worry about FP liveness, it's read-only */ 16670 for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { 16671 err = propagate_liveness_reg(env, &state_reg[i], 16672 &parent_reg[i]); 16673 if (err < 0) 16674 return err; 16675 if (err == REG_LIVE_READ64) 16676 mark_insn_zext(env, &parent_reg[i]); 16677 } 16678 16679 /* Propagate stack slots. */ 16680 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && 16681 i < parent->allocated_stack / BPF_REG_SIZE; i++) { 16682 parent_reg = &parent->stack[i].spilled_ptr; 16683 state_reg = &state->stack[i].spilled_ptr; 16684 err = propagate_liveness_reg(env, state_reg, 16685 parent_reg); 16686 if (err < 0) 16687 return err; 16688 } 16689 } 16690 return 0; 16691 } 16692 16693 /* find precise scalars in the previous equivalent state and 16694 * propagate them into the current state 16695 */ 16696 static int propagate_precision(struct bpf_verifier_env *env, 16697 const struct bpf_verifier_state *old) 16698 { 16699 struct bpf_reg_state *state_reg; 16700 struct bpf_func_state *state; 16701 int i, err = 0, fr; 16702 bool first; 16703 16704 for (fr = old->curframe; fr >= 0; fr--) { 16705 state = old->frame[fr]; 16706 state_reg = state->regs; 16707 first = true; 16708 for (i = 0; i < BPF_REG_FP; i++, state_reg++) { 16709 if (state_reg->type != SCALAR_VALUE || 16710 !state_reg->precise || 16711 !(state_reg->live & REG_LIVE_READ)) 16712 continue; 16713 if (env->log.level & BPF_LOG_LEVEL2) { 16714 if (first) 16715 verbose(env, "frame %d: propagating r%d", fr, i); 16716 else 16717 verbose(env, ",r%d", i); 16718 } 16719 bt_set_frame_reg(&env->bt, fr, i); 16720 first = false; 16721 } 16722 16723 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 16724 if (!is_spilled_reg(&state->stack[i])) 16725 continue; 16726 state_reg = &state->stack[i].spilled_ptr; 16727 if (state_reg->type != SCALAR_VALUE || 16728 !state_reg->precise || 16729 !(state_reg->live & REG_LIVE_READ)) 16730 continue; 16731 if (env->log.level & BPF_LOG_LEVEL2) { 16732 if (first) 16733 verbose(env, "frame %d: propagating fp%d", 16734 fr, (-i - 1) * BPF_REG_SIZE); 16735 else 16736 verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE); 16737 } 16738 bt_set_frame_slot(&env->bt, fr, i); 16739 first = false; 16740 } 16741 if (!first) 16742 verbose(env, "\n"); 16743 } 16744 16745 err = mark_chain_precision_batch(env); 16746 if (err < 0) 16747 return err; 16748 16749 return 0; 
16750 } 16751 16752 static bool states_maybe_looping(struct bpf_verifier_state *old, 16753 struct bpf_verifier_state *cur) 16754 { 16755 struct bpf_func_state *fold, *fcur; 16756 int i, fr = cur->curframe; 16757 16758 if (old->curframe != fr) 16759 return false; 16760 16761 fold = old->frame[fr]; 16762 fcur = cur->frame[fr]; 16763 for (i = 0; i < MAX_BPF_REG; i++) 16764 if (memcmp(&fold->regs[i], &fcur->regs[i], 16765 offsetof(struct bpf_reg_state, parent))) 16766 return false; 16767 return true; 16768 } 16769 16770 static bool is_iter_next_insn(struct bpf_verifier_env *env, int insn_idx) 16771 { 16772 return env->insn_aux_data[insn_idx].is_iter_next; 16773 } 16774 16775 /* is_state_visited() handles iter_next() (see process_iter_next_call() for 16776 * terminology) calls specially: as opposed to bounded BPF loops, it *expects* 16777 * states to match, which otherwise would look like an infinite loop. So while 16778 * iter_next() calls are taken care of, we still need to be careful and 16779 * prevent an erroneous and too eager declaration of "infinite loop" when 16780 * iterators are involved. 16781 * 16782 * Here's a situation in pseudo-BPF assembly form: 16783 * 16784 * 0: again: ; set up iter_next() call args 16785 * 1: r1 = &it ; <CHECKPOINT HERE> 16786 * 2: call bpf_iter_num_next ; this is iter_next() call 16787 * 3: if r0 == 0 goto done 16788 * 4: ... something useful here ... 16789 * 5: goto again ; another iteration 16790 * 6: done: 16791 * 7: r1 = &it 16792 * 8: call bpf_iter_num_destroy ; clean up iter state 16793 * 9: exit 16794 * 16795 * This is a typical loop. Let's assume that we have a prune point at 1:, 16796 * before we get to `call bpf_iter_num_next` (e.g., because of that `goto 16797 * again`, assuming other heuristics don't get in the way). 16798 * 16799 * When we first come to 1:, let's say we have some state X. We proceed 16800 * to 2:, fork states, enqueue ACTIVE, validate NULL case successfully, exit. 16801 * Now we come back to validate that forked ACTIVE state. We proceed through 16802 * 3-5, come to goto, jump to 1:. Let's assume our state didn't change, so we 16803 * are converging. But the problem is that we don't know that yet, as this 16804 * convergence has to happen at iter_next() call site only. So if nothing is 16805 * done, at 1: the verifier will use bounded loop logic and declare infinite 16806 * looping (and would be *technically* correct, if not for iterator's 16807 * "eventual sticky NULL" contract, see process_iter_next_call()). But we 16808 * don't want that. So what we do in process_iter_next_call() is: when we go on 16809 * another ACTIVE iteration, we bump slot->iter.depth to mark that it's 16810 * a different iteration. So when we suspect an infinite loop, we additionally 16811 * check if any of the *ACTIVE* iterator states' depths differ. If yes, we 16812 * pretend we are not looping and wait for the next iter_next() call. 16813 * 16814 * This only applies to the ACTIVE state. In DRAINED state we don't expect to 16815 * loop, because that would actually mean an infinite loop, as DRAINED state is 16816 * "sticky", and so we'll keep returning into the same instruction with the 16817 * same state (at least in one of the possible code paths). 16818 * 16819 * This approach allows us to keep the infinite loop heuristic even in the face of 16820 * an active iterator.
E.g., the C snippet below is and will be detected as 16821 * infinitely looping: 16822 * 16823 * struct bpf_iter_num it; 16824 * int *p, x; 16825 * 16826 * bpf_iter_num_new(&it, 0, 10); 16827 * while ((p = bpf_iter_num_next(&it))) { 16828 * x = *p; 16829 * while (x--) {} // <<-- infinite loop here 16830 * } 16831 * 16832 */ 16833 static bool iter_active_depths_differ(struct bpf_verifier_state *old, struct bpf_verifier_state *cur) 16834 { 16835 struct bpf_reg_state *slot, *cur_slot; 16836 struct bpf_func_state *state; 16837 int i, fr; 16838 16839 for (fr = old->curframe; fr >= 0; fr--) { 16840 state = old->frame[fr]; 16841 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { 16842 if (state->stack[i].slot_type[0] != STACK_ITER) 16843 continue; 16844 16845 slot = &state->stack[i].spilled_ptr; 16846 if (slot->iter.state != BPF_ITER_STATE_ACTIVE) 16847 continue; 16848 16849 cur_slot = &cur->frame[fr]->stack[i].spilled_ptr; 16850 if (cur_slot->iter.depth != slot->iter.depth) 16851 return true; 16852 } 16853 } 16854 return false; 16855 } 16856 16857 static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) 16858 { 16859 struct bpf_verifier_state_list *new_sl; 16860 struct bpf_verifier_state_list *sl, **pprev; 16861 struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry; 16862 int i, j, n, err, states_cnt = 0; 16863 bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx); 16864 bool add_new_state = force_new_state; 16865 bool force_exact; 16866 16867 /* bpf progs typically have a pruning point every 4 instructions 16868 * http://vger.kernel.org/bpfconf2019.html#session-1 16869 * Do not add new state for future pruning if the verifier hasn't seen 16870 * at least 2 jumps and at least 8 instructions. 16871 * This heuristic helps decrease 'total_states' and 'peak_states' metric. 16872 * In tests that amounts to up to 50% reduction in total verifier 16873 * memory consumption and 20% verifier time speedup. 16874 */ 16875 if (env->jmps_processed - env->prev_jmps_processed >= 2 && 16876 env->insn_processed - env->prev_insn_processed >= 8) 16877 add_new_state = true; 16878 16879 pprev = explored_state(env, insn_idx); 16880 sl = *pprev; 16881 16882 clean_live_states(env, insn_idx, cur); 16883 16884 while (sl) { 16885 states_cnt++; 16886 if (sl->state.insn_idx != insn_idx) 16887 goto next; 16888 16889 if (sl->state.branches) { 16890 struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; 16891 16892 if (frame->in_async_callback_fn && 16893 frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) { 16894 /* Different async_entry_cnt means that the verifier is 16895 * processing another entry into async callback. 16896 * Seeing the same state is not an indication of infinite 16897 * loop or infinite recursion. 16898 * But finding the same state doesn't mean that it's safe 16899 * to stop processing the current state. The previous state 16900 * hasn't yet reached bpf_exit, since state.branches > 0. 16901 * Checking in_async_callback_fn alone is not enough either, 16902 * since the verifier still needs to catch infinite loops 16903 * inside async callbacks. 16904 */ 16905 goto skip_inf_loop_check; 16906 } 16907 /* BPF open-coded iterators loop detection is special. 16908 * states_maybe_looping() logic is too simplistic in detecting 16909 * states that *might* be equivalent, because it doesn't know 16910 * about ID remapping, so don't even perform it.
16911 * See process_iter_next_call() and iter_active_depths_differ() 16912 * for an overview of the logic. When the current and one of the parent 16913 * states are detected as equivalent, it's a good thing: we prove 16914 * convergence and can stop simulating further iterations. 16915 * It's safe to assume that the iterator loop will finish, taking into 16916 * account iter_next()'s contract of eventually returning a 16917 * sticky NULL result. 16918 * 16919 * Note that states have to be compared exactly in this case because 16920 * read and precision marks might not be finalized inside the loop. 16921 * E.g. as in the program below: 16922 * 16923 * 1. r7 = -16 16924 * 2. r6 = bpf_get_prandom_u32() 16925 * 3. while (bpf_iter_num_next(&fp[-8])) { 16926 * 4. if (r6 != 42) { 16927 * 5. r7 = -32 16928 * 6. r6 = bpf_get_prandom_u32() 16929 * 7. continue 16930 * 8. } 16931 * 9. r0 = r10 16932 * 10. r0 += r7 16933 * 11. r8 = *(u64 *)(r0 + 0) 16934 * 12. r6 = bpf_get_prandom_u32() 16935 * 13. } 16936 * 16937 * Here the verifier would first visit path 1-3, create a checkpoint at 3 16938 * with r7=-16, continue to 4-7,3. Existing checkpoint at 3 does 16939 * not have read or precision mark for r7 yet, thus an inexact states 16940 * comparison would discard the current state with r7=-32 16941 * => unsafe memory access at 11 would not be caught. 16942 */ 16943 if (is_iter_next_insn(env, insn_idx)) { 16944 if (states_equal(env, &sl->state, cur, true)) { 16945 struct bpf_func_state *cur_frame; 16946 struct bpf_reg_state *iter_state, *iter_reg; 16947 int spi; 16948 16949 cur_frame = cur->frame[cur->curframe]; 16950 /* btf_check_iter_kfuncs() enforces that 16951 * iter state pointer is always the first arg 16952 */ 16953 iter_reg = &cur_frame->regs[BPF_REG_1]; 16954 /* current state is valid due to states_equal(), 16955 * so we can assume valid iter and reg state, 16956 * no need for extra (re-)validations 16957 */ 16958 spi = __get_spi(iter_reg->off + iter_reg->var_off.value); 16959 iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr; 16960 if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) { 16961 update_loop_entry(cur, &sl->state); 16962 goto hit; 16963 } 16964 } 16965 goto skip_inf_loop_check; 16966 } 16967 /* attempt to detect infinite loop to avoid unnecessary doomed work */ 16968 if (states_maybe_looping(&sl->state, cur) && 16969 states_equal(env, &sl->state, cur, false) && 16970 !iter_active_depths_differ(&sl->state, cur)) { 16971 verbose_linfo(env, insn_idx, "; "); 16972 verbose(env, "infinite loop detected at insn %d\n", insn_idx); 16973 verbose(env, "cur state:"); 16974 print_verifier_state(env, cur->frame[cur->curframe], true); 16975 verbose(env, "old state:"); 16976 print_verifier_state(env, sl->state.frame[cur->curframe], true); 16977 return -EINVAL; 16978 } 16979 /* if the verifier is processing a loop, avoid adding new state 16980 * too often, since different loop iterations have distinct 16981 * states and may not help future pruning. 16982 * This threshold shouldn't be too low, to make sure that 16983 * a loop with a large bound will be rejected quickly. 16984 * The most abusive loop will be: 16985 * r1 += 1 16986 * if r1 < 1000000 goto pc-2 16987 * 1M insn_processed limit / 100 == 10k peak states. 16988 * This threshold shouldn't be too high either, since states 16989 * at the end of the loop are likely to be useful in pruning.
16990 */ 16991 skip_inf_loop_check: 16992 if (!force_new_state && 16993 env->jmps_processed - env->prev_jmps_processed < 20 && 16994 env->insn_processed - env->prev_insn_processed < 100) 16995 add_new_state = false; 16996 goto miss; 16997 } 16998 /* If sl->state is a part of a loop and this loop's entry is a part of 16999 * the current verification path, then states have to be compared exactly. 17000 * 'force_exact' is needed to catch the following case: 17001 * 17002 * initial Here state 'succ' was processed first, 17003 * | it was eventually tracked to produce a 17004 * V state identical to 'hdr'. 17005 * .---------> hdr All branches from 'succ' had been explored 17006 * | | and thus 'succ' has its .branches == 0. 17007 * | V 17008 * | .------... Suppose states 'cur' and 'succ' correspond 17009 * | | | to the same instruction + callsites. 17010 * | V V In such case it is necessary to check 17011 * | ... ... if 'succ' and 'cur' are states_equal(). 17012 * | | | If 'succ' and 'cur' are a part of the 17013 * | V V same loop exact flag has to be set. 17014 * | succ <- cur To check if that is the case, verify 17015 * | | if loop entry of 'succ' is in current 17016 * | V DFS path. 17017 * | ... 17018 * | | 17019 * '----' 17020 * 17021 * Additional details are in the comment before get_loop_entry(). 17022 */ 17023 loop_entry = get_loop_entry(&sl->state); 17024 force_exact = loop_entry && loop_entry->branches > 0; 17025 if (states_equal(env, &sl->state, cur, force_exact)) { 17026 if (force_exact) 17027 update_loop_entry(cur, loop_entry); 17028 hit: 17029 sl->hit_cnt++; 17030 /* reached equivalent register/stack state, 17031 * prune the search. 17032 * Registers read by the continuation are read by us. 17033 * If we have any write marks in env->cur_state, they 17034 * will prevent corresponding reads in the continuation 17035 * from reaching our parent (an explored_state). Our 17036 * own state will get the read marks recorded, but 17037 * they'll be immediately forgotten as we're pruning 17038 * this state and will pop a new one. 17039 */ 17040 err = propagate_liveness(env, &sl->state, cur); 17041 17042 /* if the previous state reached the exit with precision and 17043 * the current state is equivalent to it (except precision marks) 17044 * the precision needs to be propagated back into 17045 * the current state. 17046 */ 17047 err = err ? : push_jmp_history(env, cur); 17048 err = err ? : propagate_precision(env, &sl->state); 17049 if (err) 17050 return err; 17051 return 1; 17052 } 17053 miss: 17054 /* when a new state is not going to be added, do not increase the miss count. 17055 * Otherwise several loop iterations will remove the state 17056 * recorded earlier. The goal of these heuristics is to have 17057 * states from some iterations of the loop (some in the beginning 17058 * and some at the end) to help pruning. 17059 */ 17060 if (add_new_state) 17061 sl->miss_cnt++; 17062 /* heuristic to determine whether this state is beneficial 17063 * to keep checking from a state equivalence point of view. 17064 * Higher numbers increase max_states_per_insn and verification time, 17065 * but do not meaningfully decrease insn_processed. 17066 * 'n' controls how many times a state could miss before eviction. 17067 * Use bigger 'n' for checkpoints because evicting checkpoint states 17068 * too early would hinder iterator convergence. 17069 */ 17070 n = is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3; 17071 if (sl->miss_cnt > sl->hit_cnt * n + n) { 17072 /* the state is unlikely to be useful.
Remove it to 17073 * speed up verification 17074 */ 17075 *pprev = sl->next; 17076 if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE && 17077 !sl->state.used_as_loop_entry) { 17078 u32 br = sl->state.branches; 17079 17080 WARN_ONCE(br, 17081 "BUG live_done but branches_to_explore %d\n", 17082 br); 17083 free_verifier_state(&sl->state, false); 17084 kfree(sl); 17085 env->peak_states--; 17086 } else { 17087 /* cannot free this state, since the parentage chain may 17088 * walk it later. Add it to the free_list instead, to 17089 * be freed at the end of verification 17090 */ 17091 sl->next = env->free_list; 17092 env->free_list = sl; 17093 } 17094 sl = *pprev; 17095 continue; 17096 } 17097 next: 17098 pprev = &sl->next; 17099 sl = *pprev; 17100 } 17101 17102 if (env->max_states_per_insn < states_cnt) 17103 env->max_states_per_insn = states_cnt; 17104 17105 if (!env->bpf_capable && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) 17106 return 0; 17107 17108 if (!add_new_state) 17109 return 0; 17110 17111 /* There were no equivalent states, remember the current one. 17112 * Technically the current state is not proven to be safe yet, 17113 * but it will either reach the outermost bpf_exit (which means it's safe) 17114 * or it will be rejected. When there are no loops the verifier won't be 17115 * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) 17116 * again on the way to bpf_exit. 17117 * When looping the sl->state.branches will be > 0 and this state 17118 * will not be considered for equivalence until branches == 0. 17119 */ 17120 new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); 17121 if (!new_sl) 17122 return -ENOMEM; 17123 env->total_states++; 17124 env->peak_states++; 17125 env->prev_jmps_processed = env->jmps_processed; 17126 env->prev_insn_processed = env->insn_processed; 17127 17128 /* forget precise markings we inherited, see __mark_chain_precision */ 17129 if (env->bpf_capable) 17130 mark_all_scalars_imprecise(env, cur); 17131 17132 /* add new state to the head of linked list */ 17133 new = &new_sl->state; 17134 err = copy_verifier_state(new, cur); 17135 if (err) { 17136 free_verifier_state(new, false); 17137 kfree(new_sl); 17138 return err; 17139 } 17140 new->insn_idx = insn_idx; 17141 WARN_ONCE(new->branches != 1, 17142 "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); 17143 17144 cur->parent = new; 17145 cur->first_insn_idx = insn_idx; 17146 cur->dfs_depth = new->dfs_depth + 1; 17147 clear_jmp_history(cur); 17148 new_sl->next = *explored_state(env, insn_idx); 17149 *explored_state(env, insn_idx) = new_sl; 17150 /* connect new state to parentage chain. Current frame needs all 17151 * registers connected. Only r6 - r9 of the callers are alive (pushed 17152 * to the stack implicitly by JITs) so in callers' frames connect just 17153 * r6 - r9 as an optimization. Callers will have r1 - r5 connected to 17154 * the state of the call instruction (with WRITTEN set), and r0 comes 17155 * from callee with its full parentage chain, anyway. 17156 */ 17157 /* clear write marks in current state: the writes we did are not writes 17158 * our child did, so they don't screen off its reads from us. 17159 * (There are no read marks in current state, because reads always mark 17160 * their parent and current state never has children yet. Only 17161 * explored_states can get read marks.) 17162 */ 17163 for (j = 0; j <= cur->curframe; j++) { 17164 for (i = j < cur->curframe ?
BPF_REG_6 : 0; i < BPF_REG_FP; i++) 17165 cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; 17166 for (i = 0; i < BPF_REG_FP; i++) 17167 cur->frame[j]->regs[i].live = REG_LIVE_NONE; 17168 } 17169 17170 /* all stack frames are accessible from callee, clear them all */ 17171 for (j = 0; j <= cur->curframe; j++) { 17172 struct bpf_func_state *frame = cur->frame[j]; 17173 struct bpf_func_state *newframe = new->frame[j]; 17174 17175 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { 17176 frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; 17177 frame->stack[i].spilled_ptr.parent = 17178 &newframe->stack[i].spilled_ptr; 17179 } 17180 } 17181 return 0; 17182 } 17183 17184 /* Return true if it's OK to have the same insn return a different type. */ 17185 static bool reg_type_mismatch_ok(enum bpf_reg_type type) 17186 { 17187 switch (base_type(type)) { 17188 case PTR_TO_CTX: 17189 case PTR_TO_SOCKET: 17190 case PTR_TO_SOCK_COMMON: 17191 case PTR_TO_TCP_SOCK: 17192 case PTR_TO_XDP_SOCK: 17193 case PTR_TO_BTF_ID: 17194 return false; 17195 default: 17196 return true; 17197 } 17198 } 17199 17200 /* If an instruction was previously used with particular pointer types, then we 17201 * need to be careful to avoid cases such as the below, where it may be ok 17202 * for one branch accessing the pointer, but not ok for the other branch: 17203 * 17204 * R1 = sock_ptr 17205 * goto X; 17206 * ... 17207 * R1 = some_other_valid_ptr; 17208 * goto X; 17209 * ... 17210 * R2 = *(u32 *)(R1 + 0); 17211 */ 17212 static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) 17213 { 17214 return src != prev && (!reg_type_mismatch_ok(src) || 17215 !reg_type_mismatch_ok(prev)); 17216 } 17217 17218 static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type, 17219 bool allow_trust_missmatch) 17220 { 17221 enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type; 17222 17223 if (*prev_type == NOT_INIT) { 17224 /* Saw a valid insn 17225 * dst_reg = *(u32 *)(src_reg + off) 17226 * save type to validate intersecting paths 17227 */ 17228 *prev_type = type; 17229 } else if (reg_type_mismatch(type, *prev_type)) { 17230 /* Abuser program is trying to use the same insn 17231 * dst_reg = *(u32*) (src_reg + off) 17232 * with different pointer types: 17233 * src_reg == ctx in one branch and 17234 * src_reg == stack|map in some other branch. 17235 * Reject it. 17236 */ 17237 if (allow_trust_missmatch && 17238 base_type(type) == PTR_TO_BTF_ID && 17239 base_type(*prev_type) == PTR_TO_BTF_ID) { 17240 /* 17241 * Have to support a use case when one path through 17242 * the program yields TRUSTED pointer while another 17243 * is UNTRUSTED. Fallback to UNTRUSTED to generate 17244 * BPF_PROBE_MEM/BPF_PROBE_MEMSX. 
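 *
 * A hypothetical sketch: one branch loads r1 with a trusted
 * PTR_TO_BTF_ID (e.g. a tracing program's function argument), another
 * with an untrusted one obtained by walking kernel structs, and both
 * reach the same "r2 = *(u64 *)(r1 + 0)". Recording the untrusted
 * type makes the load be emitted as the fault-handling BPF_PROBE_MEM
 * variant, which is correct for either path.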
17245 */ 17246 *prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED; 17247 } else { 17248 verbose(env, "same insn cannot be used with different pointers\n"); 17249 return -EINVAL; 17250 } 17251 } 17252 17253 return 0; 17254 } 17255 17256 static int do_check(struct bpf_verifier_env *env) 17257 { 17258 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 17259 struct bpf_verifier_state *state = env->cur_state; 17260 struct bpf_insn *insns = env->prog->insnsi; 17261 struct bpf_reg_state *regs; 17262 int insn_cnt = env->prog->len; 17263 bool do_print_state = false; 17264 int prev_insn_idx = -1; 17265 17266 for (;;) { 17267 bool exception_exit = false; 17268 struct bpf_insn *insn; 17269 u8 class; 17270 int err; 17271 17272 env->prev_insn_idx = prev_insn_idx; 17273 if (env->insn_idx >= insn_cnt) { 17274 verbose(env, "invalid insn idx %d insn_cnt %d\n", 17275 env->insn_idx, insn_cnt); 17276 return -EFAULT; 17277 } 17278 17279 insn = &insns[env->insn_idx]; 17280 class = BPF_CLASS(insn->code); 17281 17282 if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { 17283 verbose(env, 17284 "BPF program is too large. Processed %d insn\n", 17285 env->insn_processed); 17286 return -E2BIG; 17287 } 17288 17289 state->last_insn_idx = env->prev_insn_idx; 17290 17291 if (is_prune_point(env, env->insn_idx)) { 17292 err = is_state_visited(env, env->insn_idx); 17293 if (err < 0) 17294 return err; 17295 if (err == 1) { 17296 /* found equivalent state, can prune the search */ 17297 if (env->log.level & BPF_LOG_LEVEL) { 17298 if (do_print_state) 17299 verbose(env, "\nfrom %d to %d%s: safe\n", 17300 env->prev_insn_idx, env->insn_idx, 17301 env->cur_state->speculative ? 17302 " (speculative execution)" : ""); 17303 else 17304 verbose(env, "%d: safe\n", env->insn_idx); 17305 } 17306 goto process_bpf_exit; 17307 } 17308 } 17309 17310 if (is_jmp_point(env, env->insn_idx)) { 17311 err = push_jmp_history(env, state); 17312 if (err) 17313 return err; 17314 } 17315 17316 if (signal_pending(current)) 17317 return -EAGAIN; 17318 17319 if (need_resched()) 17320 cond_resched(); 17321 17322 if (env->log.level & BPF_LOG_LEVEL2 && do_print_state) { 17323 verbose(env, "\nfrom %d to %d%s:", 17324 env->prev_insn_idx, env->insn_idx, 17325 env->cur_state->speculative ? 
17326 " (speculative execution)" : ""); 17327 print_verifier_state(env, state->frame[state->curframe], true); 17328 do_print_state = false; 17329 } 17330 17331 if (env->log.level & BPF_LOG_LEVEL) { 17332 const struct bpf_insn_cbs cbs = { 17333 .cb_call = disasm_kfunc_name, 17334 .cb_print = verbose, 17335 .private_data = env, 17336 }; 17337 17338 if (verifier_state_scratched(env)) 17339 print_insn_state(env, state->frame[state->curframe]); 17340 17341 verbose_linfo(env, env->insn_idx, "; "); 17342 env->prev_log_pos = env->log.end_pos; 17343 verbose(env, "%d: ", env->insn_idx); 17344 print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); 17345 env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos; 17346 env->prev_log_pos = env->log.end_pos; 17347 } 17348 17349 if (bpf_prog_is_offloaded(env->prog->aux)) { 17350 err = bpf_prog_offload_verify_insn(env, env->insn_idx, 17351 env->prev_insn_idx); 17352 if (err) 17353 return err; 17354 } 17355 17356 regs = cur_regs(env); 17357 sanitize_mark_insn_seen(env); 17358 prev_insn_idx = env->insn_idx; 17359 17360 if (class == BPF_ALU || class == BPF_ALU64) { 17361 err = check_alu_op(env, insn); 17362 if (err) 17363 return err; 17364 17365 } else if (class == BPF_LDX) { 17366 enum bpf_reg_type src_reg_type; 17367 17368 /* check for reserved fields is already done */ 17369 17370 /* check src operand */ 17371 err = check_reg_arg(env, insn->src_reg, SRC_OP); 17372 if (err) 17373 return err; 17374 17375 err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); 17376 if (err) 17377 return err; 17378 17379 src_reg_type = regs[insn->src_reg].type; 17380 17381 /* check that memory (src_reg + off) is readable, 17382 * the state of dst_reg will be updated by this func 17383 */ 17384 err = check_mem_access(env, env->insn_idx, insn->src_reg, 17385 insn->off, BPF_SIZE(insn->code), 17386 BPF_READ, insn->dst_reg, false, 17387 BPF_MODE(insn->code) == BPF_MEMSX); 17388 if (err) 17389 return err; 17390 17391 err = save_aux_ptr_type(env, src_reg_type, true); 17392 if (err) 17393 return err; 17394 } else if (class == BPF_STX) { 17395 enum bpf_reg_type dst_reg_type; 17396 17397 if (BPF_MODE(insn->code) == BPF_ATOMIC) { 17398 err = check_atomic(env, env->insn_idx, insn); 17399 if (err) 17400 return err; 17401 env->insn_idx++; 17402 continue; 17403 } 17404 17405 if (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0) { 17406 verbose(env, "BPF_STX uses reserved fields\n"); 17407 return -EINVAL; 17408 } 17409 17410 /* check src1 operand */ 17411 err = check_reg_arg(env, insn->src_reg, SRC_OP); 17412 if (err) 17413 return err; 17414 /* check src2 operand */ 17415 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 17416 if (err) 17417 return err; 17418 17419 dst_reg_type = regs[insn->dst_reg].type; 17420 17421 /* check that memory (dst_reg + off) is writeable */ 17422 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 17423 insn->off, BPF_SIZE(insn->code), 17424 BPF_WRITE, insn->src_reg, false, false); 17425 if (err) 17426 return err; 17427 17428 err = save_aux_ptr_type(env, dst_reg_type, false); 17429 if (err) 17430 return err; 17431 } else if (class == BPF_ST) { 17432 enum bpf_reg_type dst_reg_type; 17433 17434 if (BPF_MODE(insn->code) != BPF_MEM || 17435 insn->src_reg != BPF_REG_0) { 17436 verbose(env, "BPF_ST uses reserved fields\n"); 17437 return -EINVAL; 17438 } 17439 /* check src operand */ 17440 err = check_reg_arg(env, insn->dst_reg, SRC_OP); 17441 if (err) 17442 return err; 17443 17444 dst_reg_type = regs[insn->dst_reg].type; 17445 17446 /* check that memory 
(dst_reg + off) is writeable */ 17447 err = check_mem_access(env, env->insn_idx, insn->dst_reg, 17448 insn->off, BPF_SIZE(insn->code), 17449 BPF_WRITE, -1, false, false); 17450 if (err) 17451 return err; 17452 17453 err = save_aux_ptr_type(env, dst_reg_type, false); 17454 if (err) 17455 return err; 17456 } else if (class == BPF_JMP || class == BPF_JMP32) { 17457 u8 opcode = BPF_OP(insn->code); 17458 17459 env->jmps_processed++; 17460 if (opcode == BPF_CALL) { 17461 if (BPF_SRC(insn->code) != BPF_K || 17462 (insn->src_reg != BPF_PSEUDO_KFUNC_CALL 17463 && insn->off != 0) || 17464 (insn->src_reg != BPF_REG_0 && 17465 insn->src_reg != BPF_PSEUDO_CALL && 17466 insn->src_reg != BPF_PSEUDO_KFUNC_CALL) || 17467 insn->dst_reg != BPF_REG_0 || 17468 class == BPF_JMP32) { 17469 verbose(env, "BPF_CALL uses reserved fields\n"); 17470 return -EINVAL; 17471 } 17472 17473 if (env->cur_state->active_lock.ptr) { 17474 if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) || 17475 (insn->src_reg == BPF_PSEUDO_CALL) || 17476 (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && 17477 (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) { 17478 verbose(env, "function calls are not allowed while holding a lock\n"); 17479 return -EINVAL; 17480 } 17481 } 17482 if (insn->src_reg == BPF_PSEUDO_CALL) { 17483 err = check_func_call(env, insn, &env->insn_idx); 17484 } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 17485 err = check_kfunc_call(env, insn, &env->insn_idx); 17486 if (!err && is_bpf_throw_kfunc(insn)) { 17487 exception_exit = true; 17488 goto process_bpf_exit_full; 17489 } 17490 } else { 17491 err = check_helper_call(env, insn, &env->insn_idx); 17492 } 17493 if (err) 17494 return err; 17495 17496 mark_reg_scratched(env, BPF_REG_0); 17497 } else if (opcode == BPF_JA) { 17498 if (BPF_SRC(insn->code) != BPF_K || 17499 insn->src_reg != BPF_REG_0 || 17500 insn->dst_reg != BPF_REG_0 || 17501 (class == BPF_JMP && insn->imm != 0) || 17502 (class == BPF_JMP32 && insn->off != 0)) { 17503 verbose(env, "BPF_JA uses reserved fields\n"); 17504 return -EINVAL; 17505 } 17506 17507 if (class == BPF_JMP) 17508 env->insn_idx += insn->off + 1; 17509 else 17510 env->insn_idx += insn->imm + 1; 17511 continue; 17512 17513 } else if (opcode == BPF_EXIT) { 17514 if (BPF_SRC(insn->code) != BPF_K || 17515 insn->imm != 0 || 17516 insn->src_reg != BPF_REG_0 || 17517 insn->dst_reg != BPF_REG_0 || 17518 class == BPF_JMP32) { 17519 verbose(env, "BPF_EXIT uses reserved fields\n"); 17520 return -EINVAL; 17521 } 17522 process_bpf_exit_full: 17523 if (env->cur_state->active_lock.ptr && 17524 !in_rbtree_lock_required_cb(env)) { 17525 verbose(env, "bpf_spin_unlock is missing\n"); 17526 return -EINVAL; 17527 } 17528 17529 if (env->cur_state->active_rcu_lock && 17530 !in_rbtree_lock_required_cb(env)) { 17531 verbose(env, "bpf_rcu_read_unlock is missing\n"); 17532 return -EINVAL; 17533 } 17534 17535 /* We must do check_reference_leak here before 17536 * prepare_func_exit to handle the case when 17537 * state->curframe > 0, it may be a callback 17538 * function, for which reference_state must 17539 * match caller reference state when it exits. 17540 */ 17541 err = check_reference_leak(env, exception_exit); 17542 if (err) 17543 return err; 17544 17545 /* The side effect of the prepare_func_exit 17546 * which is being skipped is that it frees 17547 * bpf_func_state. Typically, process_bpf_exit 17548 * will only be hit with outermost exit. 
17549 * copy_verifier_state in pop_stack will handle 17550 * freeing of any extra bpf_func_state left over 17551 * from not processing all nested function 17552 * exits. We also skip return code checks as 17553 * they are not needed for exceptional exits. 17554 */ 17555 if (exception_exit) 17556 goto process_bpf_exit; 17557 17558 if (state->curframe) { 17559 /* exit from nested function */ 17560 err = prepare_func_exit(env, &env->insn_idx); 17561 if (err) 17562 return err; 17563 do_print_state = true; 17564 continue; 17565 } 17566 17567 err = check_return_code(env, BPF_REG_0); 17568 if (err) 17569 return err; 17570 process_bpf_exit: 17571 mark_verifier_state_scratched(env); 17572 update_branch_counts(env, env->cur_state); 17573 err = pop_stack(env, &prev_insn_idx, 17574 &env->insn_idx, pop_log); 17575 if (err < 0) { 17576 if (err != -ENOENT) 17577 return err; 17578 break; 17579 } else { 17580 do_print_state = true; 17581 continue; 17582 } 17583 } else { 17584 err = check_cond_jmp_op(env, insn, &env->insn_idx); 17585 if (err) 17586 return err; 17587 } 17588 } else if (class == BPF_LD) { 17589 u8 mode = BPF_MODE(insn->code); 17590 17591 if (mode == BPF_ABS || mode == BPF_IND) { 17592 err = check_ld_abs(env, insn); 17593 if (err) 17594 return err; 17595 17596 } else if (mode == BPF_IMM) { 17597 err = check_ld_imm(env, insn); 17598 if (err) 17599 return err; 17600 17601 env->insn_idx++; 17602 sanitize_mark_insn_seen(env); 17603 } else { 17604 verbose(env, "invalid BPF_LD mode\n"); 17605 return -EINVAL; 17606 } 17607 } else { 17608 verbose(env, "unknown insn class %d\n", class); 17609 return -EINVAL; 17610 } 17611 17612 env->insn_idx++; 17613 } 17614 17615 return 0; 17616 } 17617 17618 static int find_btf_percpu_datasec(struct btf *btf) 17619 { 17620 const struct btf_type *t; 17621 const char *tname; 17622 int i, n; 17623 17624 /* 17625 * Both vmlinux and module each have their own ".data..percpu" 17626 * DATASECs in BTF. So for module's case, we need to skip vmlinux BTF 17627 * types to look at only module's own BTF types. 
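 *
 * Module (split) BTF numbers its own types after the vmlinux base BTF,
 * so starting the scan at btf_nr_types(btf_vmlinux) skips every shared
 * base type, roughly:
 *
 *   [1, btf_nr_types(btf_vmlinux))   vmlinux base types - skipped
 *   [btf_nr_types(btf_vmlinux), n)   module's own types - scanned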
17628 */ 17629 n = btf_nr_types(btf); 17630 if (btf_is_module(btf)) 17631 i = btf_nr_types(btf_vmlinux); 17632 else 17633 i = 1; 17634 17635 for(; i < n; i++) { 17636 t = btf_type_by_id(btf, i); 17637 if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC) 17638 continue; 17639 17640 tname = btf_name_by_offset(btf, t->name_off); 17641 if (!strcmp(tname, ".data..percpu")) 17642 return i; 17643 } 17644 17645 return -ENOENT; 17646 } 17647 17648 /* replace pseudo btf_id with kernel symbol address */ 17649 static int check_pseudo_btf_id(struct bpf_verifier_env *env, 17650 struct bpf_insn *insn, 17651 struct bpf_insn_aux_data *aux) 17652 { 17653 const struct btf_var_secinfo *vsi; 17654 const struct btf_type *datasec; 17655 struct btf_mod_pair *btf_mod; 17656 const struct btf_type *t; 17657 const char *sym_name; 17658 bool percpu = false; 17659 u32 type, id = insn->imm; 17660 struct btf *btf; 17661 s32 datasec_id; 17662 u64 addr; 17663 int i, btf_fd, err; 17664 17665 btf_fd = insn[1].imm; 17666 if (btf_fd) { 17667 btf = btf_get_by_fd(btf_fd); 17668 if (IS_ERR(btf)) { 17669 verbose(env, "invalid module BTF object FD specified.\n"); 17670 return -EINVAL; 17671 } 17672 } else { 17673 if (!btf_vmlinux) { 17674 verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n"); 17675 return -EINVAL; 17676 } 17677 btf = btf_vmlinux; 17678 btf_get(btf); 17679 } 17680 17681 t = btf_type_by_id(btf, id); 17682 if (!t) { 17683 verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id); 17684 err = -ENOENT; 17685 goto err_put; 17686 } 17687 17688 if (!btf_type_is_var(t) && !btf_type_is_func(t)) { 17689 verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR or KIND_FUNC\n", id); 17690 err = -EINVAL; 17691 goto err_put; 17692 } 17693 17694 sym_name = btf_name_by_offset(btf, t->name_off); 17695 addr = kallsyms_lookup_name(sym_name); 17696 if (!addr) { 17697 verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n", 17698 sym_name); 17699 err = -ENOENT; 17700 goto err_put; 17701 } 17702 insn[0].imm = (u32)addr; 17703 insn[1].imm = addr >> 32; 17704 17705 if (btf_type_is_func(t)) { 17706 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; 17707 aux->btf_var.mem_size = 0; 17708 goto check_btf; 17709 } 17710 17711 datasec_id = find_btf_percpu_datasec(btf); 17712 if (datasec_id > 0) { 17713 datasec = btf_type_by_id(btf, datasec_id); 17714 for_each_vsi(i, datasec, vsi) { 17715 if (vsi->type == id) { 17716 percpu = true; 17717 break; 17718 } 17719 } 17720 } 17721 17722 type = t->type; 17723 t = btf_type_skip_modifiers(btf, type, NULL); 17724 if (percpu) { 17725 aux->btf_var.reg_type = PTR_TO_BTF_ID | MEM_PERCPU; 17726 aux->btf_var.btf = btf; 17727 aux->btf_var.btf_id = type; 17728 } else if (!btf_type_is_struct(t)) { 17729 const struct btf_type *ret; 17730 const char *tname; 17731 u32 tsize; 17732 17733 /* resolve the type size of ksym. 
*/ 17734 ret = btf_resolve_size(btf, t, &tsize); 17735 if (IS_ERR(ret)) { 17736 tname = btf_name_by_offset(btf, t->name_off); 17737 verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n", 17738 tname, PTR_ERR(ret)); 17739 err = -EINVAL; 17740 goto err_put; 17741 } 17742 aux->btf_var.reg_type = PTR_TO_MEM | MEM_RDONLY; 17743 aux->btf_var.mem_size = tsize; 17744 } else { 17745 aux->btf_var.reg_type = PTR_TO_BTF_ID; 17746 aux->btf_var.btf = btf; 17747 aux->btf_var.btf_id = type; 17748 } 17749 check_btf: 17750 /* check whether we recorded this BTF (and maybe module) already */ 17751 for (i = 0; i < env->used_btf_cnt; i++) { 17752 if (env->used_btfs[i].btf == btf) { 17753 btf_put(btf); 17754 return 0; 17755 } 17756 } 17757 17758 if (env->used_btf_cnt >= MAX_USED_BTFS) { 17759 err = -E2BIG; 17760 goto err_put; 17761 } 17762 17763 btf_mod = &env->used_btfs[env->used_btf_cnt]; 17764 btf_mod->btf = btf; 17765 btf_mod->module = NULL; 17766 17767 /* if we reference variables from kernel module, bump its refcount */ 17768 if (btf_is_module(btf)) { 17769 btf_mod->module = btf_try_get_module(btf); 17770 if (!btf_mod->module) { 17771 err = -ENXIO; 17772 goto err_put; 17773 } 17774 } 17775 17776 env->used_btf_cnt++; 17777 17778 return 0; 17779 err_put: 17780 btf_put(btf); 17781 return err; 17782 } 17783 17784 static bool is_tracing_prog_type(enum bpf_prog_type type) 17785 { 17786 switch (type) { 17787 case BPF_PROG_TYPE_KPROBE: 17788 case BPF_PROG_TYPE_TRACEPOINT: 17789 case BPF_PROG_TYPE_PERF_EVENT: 17790 case BPF_PROG_TYPE_RAW_TRACEPOINT: 17791 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE: 17792 return true; 17793 default: 17794 return false; 17795 } 17796 } 17797 17798 static int check_map_prog_compatibility(struct bpf_verifier_env *env, 17799 struct bpf_map *map, 17800 struct bpf_prog *prog) 17801 17802 { 17803 enum bpf_prog_type prog_type = resolve_prog_type(prog); 17804 17805 if (btf_record_has_field(map->record, BPF_LIST_HEAD) || 17806 btf_record_has_field(map->record, BPF_RB_ROOT)) { 17807 if (is_tracing_prog_type(prog_type)) { 17808 verbose(env, "tracing progs cannot use bpf_{list_head,rb_root} yet\n"); 17809 return -EINVAL; 17810 } 17811 } 17812 17813 if (btf_record_has_field(map->record, BPF_SPIN_LOCK)) { 17814 if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) { 17815 verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n"); 17816 return -EINVAL; 17817 } 17818 17819 if (is_tracing_prog_type(prog_type)) { 17820 verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); 17821 return -EINVAL; 17822 } 17823 } 17824 17825 if (btf_record_has_field(map->record, BPF_TIMER)) { 17826 if (is_tracing_prog_type(prog_type)) { 17827 verbose(env, "tracing progs cannot use bpf_timer yet\n"); 17828 return -EINVAL; 17829 } 17830 } 17831 17832 if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) && 17833 !bpf_offload_prog_map_match(prog, map)) { 17834 verbose(env, "offload device mismatch between prog and map\n"); 17835 return -EINVAL; 17836 } 17837 17838 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { 17839 verbose(env, "bpf_struct_ops map cannot be used in prog\n"); 17840 return -EINVAL; 17841 } 17842 17843 if (prog->aux->sleepable) 17844 switch (map->map_type) { 17845 case BPF_MAP_TYPE_HASH: 17846 case BPF_MAP_TYPE_LRU_HASH: 17847 case BPF_MAP_TYPE_ARRAY: 17848 case BPF_MAP_TYPE_PERCPU_HASH: 17849 case BPF_MAP_TYPE_PERCPU_ARRAY: 17850 case BPF_MAP_TYPE_LRU_PERCPU_HASH: 17851 case BPF_MAP_TYPE_ARRAY_OF_MAPS: 17852 case BPF_MAP_TYPE_HASH_OF_MAPS: 17853 case 
BPF_MAP_TYPE_RINGBUF: 17854 case BPF_MAP_TYPE_USER_RINGBUF: 17855 case BPF_MAP_TYPE_INODE_STORAGE: 17856 case BPF_MAP_TYPE_SK_STORAGE: 17857 case BPF_MAP_TYPE_TASK_STORAGE: 17858 case BPF_MAP_TYPE_CGRP_STORAGE: 17859 break; 17860 default: 17861 verbose(env, 17862 "Sleepable programs can only use array, hash, ringbuf and local storage maps\n"); 17863 return -EINVAL; 17864 } 17865 17866 return 0; 17867 } 17868 17869 static bool bpf_map_is_cgroup_storage(struct bpf_map *map) 17870 { 17871 return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || 17872 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); 17873 } 17874 17875 /* find and rewrite pseudo imm in ld_imm64 instructions: 17876 * 17877 * 1. if it accesses map FD, replace it with actual map pointer. 17878 * 2. if it accesses btf_id of a VAR, replace it with pointer to the var. 17879 * 17880 * NOTE: btf_vmlinux is required for converting pseudo btf_id. 17881 */ 17882 static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) 17883 { 17884 struct bpf_insn *insn = env->prog->insnsi; 17885 int insn_cnt = env->prog->len; 17886 int i, j, err; 17887 17888 err = bpf_prog_calc_tag(env->prog); 17889 if (err) 17890 return err; 17891 17892 for (i = 0; i < insn_cnt; i++, insn++) { 17893 if (BPF_CLASS(insn->code) == BPF_LDX && 17894 ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_MEMSX) || 17895 insn->imm != 0)) { 17896 verbose(env, "BPF_LDX uses reserved fields\n"); 17897 return -EINVAL; 17898 } 17899 17900 if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { 17901 struct bpf_insn_aux_data *aux; 17902 struct bpf_map *map; 17903 struct fd f; 17904 u64 addr; 17905 u32 fd; 17906 17907 if (i == insn_cnt - 1 || insn[1].code != 0 || 17908 insn[1].dst_reg != 0 || insn[1].src_reg != 0 || 17909 insn[1].off != 0) { 17910 verbose(env, "invalid bpf_ld_imm64 insn\n"); 17911 return -EINVAL; 17912 } 17913 17914 if (insn[0].src_reg == 0) 17915 /* valid generic load 64-bit imm */ 17916 goto next_insn; 17917 17918 if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) { 17919 aux = &env->insn_aux_data[i]; 17920 err = check_pseudo_btf_id(env, insn, aux); 17921 if (err) 17922 return err; 17923 goto next_insn; 17924 } 17925 17926 if (insn[0].src_reg == BPF_PSEUDO_FUNC) { 17927 aux = &env->insn_aux_data[i]; 17928 aux->ptr_type = PTR_TO_FUNC; 17929 goto next_insn; 17930 } 17931 17932 /* In final convert_pseudo_ld_imm64() step, this is 17933 * converted into regular 64-bit imm load insn. 
17934 */ 17935 switch (insn[0].src_reg) { 17936 case BPF_PSEUDO_MAP_VALUE: 17937 case BPF_PSEUDO_MAP_IDX_VALUE: 17938 break; 17939 case BPF_PSEUDO_MAP_FD: 17940 case BPF_PSEUDO_MAP_IDX: 17941 if (insn[1].imm == 0) 17942 break; 17943 fallthrough; 17944 default: 17945 verbose(env, "unrecognized bpf_ld_imm64 insn\n"); 17946 return -EINVAL; 17947 } 17948 17949 switch (insn[0].src_reg) { 17950 case BPF_PSEUDO_MAP_IDX_VALUE: 17951 case BPF_PSEUDO_MAP_IDX: 17952 if (bpfptr_is_null(env->fd_array)) { 17953 verbose(env, "fd_idx without fd_array is invalid\n"); 17954 return -EPROTO; 17955 } 17956 if (copy_from_bpfptr_offset(&fd, env->fd_array, 17957 insn[0].imm * sizeof(fd), 17958 sizeof(fd))) 17959 return -EFAULT; 17960 break; 17961 default: 17962 fd = insn[0].imm; 17963 break; 17964 } 17965 17966 f = fdget(fd); 17967 map = __bpf_map_get(f); 17968 if (IS_ERR(map)) { 17969 verbose(env, "fd %d is not pointing to valid bpf_map\n", 17970 insn[0].imm); 17971 return PTR_ERR(map); 17972 } 17973 17974 err = check_map_prog_compatibility(env, map, env->prog); 17975 if (err) { 17976 fdput(f); 17977 return err; 17978 } 17979 17980 aux = &env->insn_aux_data[i]; 17981 if (insn[0].src_reg == BPF_PSEUDO_MAP_FD || 17982 insn[0].src_reg == BPF_PSEUDO_MAP_IDX) { 17983 addr = (unsigned long)map; 17984 } else { 17985 u32 off = insn[1].imm; 17986 17987 if (off >= BPF_MAX_VAR_OFF) { 17988 verbose(env, "direct value offset of %u is not allowed\n", off); 17989 fdput(f); 17990 return -EINVAL; 17991 } 17992 17993 if (!map->ops->map_direct_value_addr) { 17994 verbose(env, "no direct value access support for this map type\n"); 17995 fdput(f); 17996 return -EINVAL; 17997 } 17998 17999 err = map->ops->map_direct_value_addr(map, &addr, off); 18000 if (err) { 18001 verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", 18002 map->value_size, off); 18003 fdput(f); 18004 return err; 18005 } 18006 18007 aux->map_off = off; 18008 addr += off; 18009 } 18010 18011 insn[0].imm = (u32)addr; 18012 insn[1].imm = addr >> 32; 18013 18014 /* check whether we recorded this map already */ 18015 for (j = 0; j < env->used_map_cnt; j++) { 18016 if (env->used_maps[j] == map) { 18017 aux->map_index = j; 18018 fdput(f); 18019 goto next_insn; 18020 } 18021 } 18022 18023 if (env->used_map_cnt >= MAX_USED_MAPS) { 18024 fdput(f); 18025 return -E2BIG; 18026 } 18027 18028 /* hold the map. If the program is rejected by verifier, 18029 * the map will be released by release_maps() or it 18030 * will be used by the valid program until it's unloaded 18031 * and all maps are released in free_used_maps() 18032 */ 18033 bpf_map_inc(map); 18034 18035 aux->map_index = env->used_map_cnt; 18036 env->used_maps[env->used_map_cnt++] = map; 18037 18038 if (bpf_map_is_cgroup_storage(map) && 18039 bpf_cgroup_storage_assign(env->prog->aux, map)) { 18040 verbose(env, "only one cgroup storage of each type is allowed\n"); 18041 fdput(f); 18042 return -EBUSY; 18043 } 18044 18045 fdput(f); 18046 next_insn: 18047 insn++; 18048 i++; 18049 continue; 18050 } 18051 18052 /* Basic sanity check before we invest more work here. */ 18053 if (!bpf_opcode_in_insntable(insn->code)) { 18054 verbose(env, "unknown opcode %02x\n", insn->code); 18055 return -EINVAL; 18056 } 18057 } 18058 18059 /* now all pseudo BPF_LD_IMM64 instructions load valid 18060 * 'struct bpf_map *' into a register instead of user map_fd. 18061 * These pointers will be used later by verifier to validate map access. 
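 *
 * Schematically, the resolved kernel address (the map pointer, plus the
 * direct value offset in the BPF_PSEUDO_MAP_VALUE/IDX_VALUE case) ends up
 * split across the instruction pair:
 *
 *   insn[0].imm = lower 32 bits of the address
 *   insn[1].imm = upper 32 bits of the address
 *
 * src_reg still carries the BPF_PSEUDO_MAP_* marker at this point; it is
 * cleared later by convert_pseudo_ld_imm64() so JITs see a plain ld_imm64.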
18062 */ 18063 return 0; 18064 } 18065 18066 /* drop refcnt of maps used by the rejected program */ 18067 static void release_maps(struct bpf_verifier_env *env) 18068 { 18069 __bpf_free_used_maps(env->prog->aux, env->used_maps, 18070 env->used_map_cnt); 18071 } 18072 18073 /* drop refcnt of maps used by the rejected program */ 18074 static void release_btfs(struct bpf_verifier_env *env) 18075 { 18076 __bpf_free_used_btfs(env->prog->aux, env->used_btfs, 18077 env->used_btf_cnt); 18078 } 18079 18080 /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ 18081 static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) 18082 { 18083 struct bpf_insn *insn = env->prog->insnsi; 18084 int insn_cnt = env->prog->len; 18085 int i; 18086 18087 for (i = 0; i < insn_cnt; i++, insn++) { 18088 if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) 18089 continue; 18090 if (insn->src_reg == BPF_PSEUDO_FUNC) 18091 continue; 18092 insn->src_reg = 0; 18093 } 18094 } 18095 18096 /* single env->prog->insni[off] instruction was replaced with the range 18097 * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying 18098 * [0, off) and [off, end) to new locations, so the patched range stays zero 18099 */ 18100 static void adjust_insn_aux_data(struct bpf_verifier_env *env, 18101 struct bpf_insn_aux_data *new_data, 18102 struct bpf_prog *new_prog, u32 off, u32 cnt) 18103 { 18104 struct bpf_insn_aux_data *old_data = env->insn_aux_data; 18105 struct bpf_insn *insn = new_prog->insnsi; 18106 u32 old_seen = old_data[off].seen; 18107 u32 prog_len; 18108 int i; 18109 18110 /* aux info at OFF always needs adjustment, no matter fast path 18111 * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the 18112 * original insn at old prog. 18113 */ 18114 old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); 18115 18116 if (cnt == 1) 18117 return; 18118 prog_len = new_prog->len; 18119 18120 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 18121 memcpy(new_data + off + cnt - 1, old_data + off, 18122 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 18123 for (i = off; i < off + cnt - 1; i++) { 18124 /* Expand insni[off]'s seen count to the patched range. */ 18125 new_data[i].seen = old_seen; 18126 new_data[i].zext_dst = insn_has_def32(env, insn + i); 18127 } 18128 env->insn_aux_data = new_data; 18129 vfree(old_data); 18130 } 18131 18132 static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) 18133 { 18134 int i; 18135 18136 if (len == 1) 18137 return; 18138 /* NOTE: fake 'exit' subprog should be updated as well. 
*/ 18139 for (i = 0; i <= env->subprog_cnt; i++) { 18140 if (env->subprog_info[i].start <= off) 18141 continue; 18142 env->subprog_info[i].start += len - 1; 18143 } 18144 } 18145 18146 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len) 18147 { 18148 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; 18149 int i, sz = prog->aux->size_poke_tab; 18150 struct bpf_jit_poke_descriptor *desc; 18151 18152 for (i = 0; i < sz; i++) { 18153 desc = &tab[i]; 18154 if (desc->insn_idx <= off) 18155 continue; 18156 desc->insn_idx += len - 1; 18157 } 18158 } 18159 18160 static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, 18161 const struct bpf_insn *patch, u32 len) 18162 { 18163 struct bpf_prog *new_prog; 18164 struct bpf_insn_aux_data *new_data = NULL; 18165 18166 if (len > 1) { 18167 new_data = vzalloc(array_size(env->prog->len + len - 1, 18168 sizeof(struct bpf_insn_aux_data))); 18169 if (!new_data) 18170 return NULL; 18171 } 18172 18173 new_prog = bpf_patch_insn_single(env->prog, off, patch, len); 18174 if (IS_ERR(new_prog)) { 18175 if (PTR_ERR(new_prog) == -ERANGE) 18176 verbose(env, 18177 "insn %d cannot be patched due to 16-bit range\n", 18178 env->insn_aux_data[off].orig_idx); 18179 vfree(new_data); 18180 return NULL; 18181 } 18182 adjust_insn_aux_data(env, new_data, new_prog, off, len); 18183 adjust_subprog_starts(env, off, len); 18184 adjust_poke_descs(new_prog, off, len); 18185 return new_prog; 18186 } 18187 18188 static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, 18189 u32 off, u32 cnt) 18190 { 18191 int i, j; 18192 18193 /* find first prog starting at or after off (first to remove) */ 18194 for (i = 0; i < env->subprog_cnt; i++) 18195 if (env->subprog_info[i].start >= off) 18196 break; 18197 /* find first prog starting at or after off + cnt (first to stay) */ 18198 for (j = i; j < env->subprog_cnt; j++) 18199 if (env->subprog_info[j].start >= off + cnt) 18200 break; 18201 /* if j doesn't start exactly at off + cnt, we are just removing 18202 * the front of previous prog 18203 */ 18204 if (env->subprog_info[j].start != off + cnt) 18205 j--; 18206 18207 if (j > i) { 18208 struct bpf_prog_aux *aux = env->prog->aux; 18209 int move; 18210 18211 /* move fake 'exit' subprog as well */ 18212 move = env->subprog_cnt + 1 - j; 18213 18214 memmove(env->subprog_info + i, 18215 env->subprog_info + j, 18216 sizeof(*env->subprog_info) * move); 18217 env->subprog_cnt -= j - i; 18218 18219 /* remove func_info */ 18220 if (aux->func_info) { 18221 move = aux->func_info_cnt - j; 18222 18223 memmove(aux->func_info + i, 18224 aux->func_info + j, 18225 sizeof(*aux->func_info) * move); 18226 aux->func_info_cnt -= j - i; 18227 /* func_info->insn_off is set after all code rewrites, 18228 * in adjust_btf_func() - no need to adjust 18229 */ 18230 } 18231 } else { 18232 /* convert i from "first prog to remove" to "first to adjust" */ 18233 if (env->subprog_info[i].start == off) 18234 i++; 18235 } 18236 18237 /* update fake 'exit' subprog as well */ 18238 for (; i <= env->subprog_cnt; i++) 18239 env->subprog_info[i].start -= cnt; 18240 18241 return 0; 18242 } 18243 18244 static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, 18245 u32 cnt) 18246 { 18247 struct bpf_prog *prog = env->prog; 18248 u32 i, l_off, l_cnt, nr_linfo; 18249 struct bpf_line_info *linfo; 18250 18251 nr_linfo = prog->aux->nr_linfo; 18252 if (!nr_linfo) 18253 return 0; 18254 18255 linfo = prog->aux->linfo; 18256 18257 /* find first line info to 
remove, count lines to be removed */ 18258 for (i = 0; i < nr_linfo; i++) 18259 if (linfo[i].insn_off >= off) 18260 break; 18261 18262 l_off = i; 18263 l_cnt = 0; 18264 for (; i < nr_linfo; i++) 18265 if (linfo[i].insn_off < off + cnt) 18266 l_cnt++; 18267 else 18268 break; 18269 18270 /* First live insn doesn't match first live linfo, it needs to "inherit" 18271 * last removed linfo. prog is already modified, so prog->len == off 18272 * means no live instructions after (tail of the program was removed). 18273 */ 18274 if (prog->len != off && l_cnt && 18275 (i == nr_linfo || linfo[i].insn_off != off + cnt)) { 18276 l_cnt--; 18277 linfo[--i].insn_off = off + cnt; 18278 } 18279 18280 /* remove the line info which refer to the removed instructions */ 18281 if (l_cnt) { 18282 memmove(linfo + l_off, linfo + i, 18283 sizeof(*linfo) * (nr_linfo - i)); 18284 18285 prog->aux->nr_linfo -= l_cnt; 18286 nr_linfo = prog->aux->nr_linfo; 18287 } 18288 18289 /* pull all linfo[i].insn_off >= off + cnt in by cnt */ 18290 for (i = l_off; i < nr_linfo; i++) 18291 linfo[i].insn_off -= cnt; 18292 18293 /* fix up all subprogs (incl. 'exit') which start >= off */ 18294 for (i = 0; i <= env->subprog_cnt; i++) 18295 if (env->subprog_info[i].linfo_idx > l_off) { 18296 /* program may have started in the removed region but 18297 * may not be fully removed 18298 */ 18299 if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) 18300 env->subprog_info[i].linfo_idx -= l_cnt; 18301 else 18302 env->subprog_info[i].linfo_idx = l_off; 18303 } 18304 18305 return 0; 18306 } 18307 18308 static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) 18309 { 18310 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 18311 unsigned int orig_prog_len = env->prog->len; 18312 int err; 18313 18314 if (bpf_prog_is_offloaded(env->prog->aux)) 18315 bpf_prog_offload_remove_insns(env, off, cnt); 18316 18317 err = bpf_remove_insns(env->prog, off, cnt); 18318 if (err) 18319 return err; 18320 18321 err = adjust_subprog_starts_after_remove(env, off, cnt); 18322 if (err) 18323 return err; 18324 18325 err = bpf_adj_linfo_after_remove(env, off, cnt); 18326 if (err) 18327 return err; 18328 18329 memmove(aux_data + off, aux_data + off + cnt, 18330 sizeof(*aux_data) * (orig_prog_len - off - cnt)); 18331 18332 return 0; 18333 } 18334 18335 /* The verifier does more data flow analysis than llvm and will not 18336 * explore branches that are dead at run time. Malicious programs can 18337 * have dead code too. Therefore replace all dead at-run-time code 18338 * with 'ja -1'. 18339 * 18340 * Just nops are not optimal, e.g. if they would sit at the end of the 18341 * program and through another bug we would manage to jump there, then 18342 * we'd execute beyond program memory otherwise. Returning exception 18343 * code also wouldn't work since we can have subprogs where the dead 18344 * code could be located. 
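 *
 * 'ja -1' (BPF_JMP_IMM(BPF_JA, 0, 0, -1)) branches to pc + off + 1, i.e.
 * back to itself, so should a bug ever steer execution into the dead
 * region it would spin in place instead of running past the program.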
18345 */ 18346 static void sanitize_dead_code(struct bpf_verifier_env *env) 18347 { 18348 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 18349 struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); 18350 struct bpf_insn *insn = env->prog->insnsi; 18351 const int insn_cnt = env->prog->len; 18352 int i; 18353 18354 for (i = 0; i < insn_cnt; i++) { 18355 if (aux_data[i].seen) 18356 continue; 18357 memcpy(insn + i, &trap, sizeof(trap)); 18358 aux_data[i].zext_dst = false; 18359 } 18360 } 18361 18362 static bool insn_is_cond_jump(u8 code) 18363 { 18364 u8 op; 18365 18366 op = BPF_OP(code); 18367 if (BPF_CLASS(code) == BPF_JMP32) 18368 return op != BPF_JA; 18369 18370 if (BPF_CLASS(code) != BPF_JMP) 18371 return false; 18372 18373 return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; 18374 } 18375 18376 static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) 18377 { 18378 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 18379 struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 18380 struct bpf_insn *insn = env->prog->insnsi; 18381 const int insn_cnt = env->prog->len; 18382 int i; 18383 18384 for (i = 0; i < insn_cnt; i++, insn++) { 18385 if (!insn_is_cond_jump(insn->code)) 18386 continue; 18387 18388 if (!aux_data[i + 1].seen) 18389 ja.off = insn->off; 18390 else if (!aux_data[i + 1 + insn->off].seen) 18391 ja.off = 0; 18392 else 18393 continue; 18394 18395 if (bpf_prog_is_offloaded(env->prog->aux)) 18396 bpf_prog_offload_replace_insn(env, i, &ja); 18397 18398 memcpy(insn, &ja, sizeof(ja)); 18399 } 18400 } 18401 18402 static int opt_remove_dead_code(struct bpf_verifier_env *env) 18403 { 18404 struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 18405 int insn_cnt = env->prog->len; 18406 int i, err; 18407 18408 for (i = 0; i < insn_cnt; i++) { 18409 int j; 18410 18411 j = 0; 18412 while (i + j < insn_cnt && !aux_data[i + j].seen) 18413 j++; 18414 if (!j) 18415 continue; 18416 18417 err = verifier_remove_insns(env, i, j); 18418 if (err) 18419 return err; 18420 insn_cnt = env->prog->len; 18421 } 18422 18423 return 0; 18424 } 18425 18426 static int opt_remove_nops(struct bpf_verifier_env *env) 18427 { 18428 const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); 18429 struct bpf_insn *insn = env->prog->insnsi; 18430 int insn_cnt = env->prog->len; 18431 int i, err; 18432 18433 for (i = 0; i < insn_cnt; i++) { 18434 if (memcmp(&insn[i], &ja, sizeof(ja))) 18435 continue; 18436 18437 err = verifier_remove_insns(env, i, 1); 18438 if (err) 18439 return err; 18440 insn_cnt--; 18441 i--; 18442 } 18443 18444 return 0; 18445 } 18446 18447 static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, 18448 const union bpf_attr *attr) 18449 { 18450 struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; 18451 struct bpf_insn_aux_data *aux = env->insn_aux_data; 18452 int i, patch_len, delta = 0, len = env->prog->len; 18453 struct bpf_insn *insns = env->prog->insnsi; 18454 struct bpf_prog *new_prog; 18455 bool rnd_hi32; 18456 18457 rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; 18458 zext_patch[1] = BPF_ZEXT_REG(0); 18459 rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); 18460 rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); 18461 rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); 18462 for (i = 0; i < len; i++) { 18463 int adj_idx = i + delta; 18464 struct bpf_insn insn; 18465 int load_reg; 18466 18467 insn = insns[adj_idx]; 18468 load_reg = insn_def_regno(&insn); 18469 if (!aux[adj_idx].zext_dst) { 18470 u8 code, class; 18471 
u32 imm_rnd; 18472 18473 if (!rnd_hi32) 18474 continue; 18475 18476 code = insn.code; 18477 class = BPF_CLASS(code); 18478 if (load_reg == -1) 18479 continue; 18480 18481 /* NOTE: arg "reg" (the fourth one) is only used for 18482 * BPF_STX + SRC_OP, so it is safe to pass NULL 18483 * here. 18484 */ 18485 if (is_reg64(env, &insn, load_reg, NULL, DST_OP)) { 18486 if (class == BPF_LD && 18487 BPF_MODE(code) == BPF_IMM) 18488 i++; 18489 continue; 18490 } 18491 18492 /* ctx load could be transformed into wider load. */ 18493 if (class == BPF_LDX && 18494 aux[adj_idx].ptr_type == PTR_TO_CTX) 18495 continue; 18496 18497 imm_rnd = get_random_u32(); 18498 rnd_hi32_patch[0] = insn; 18499 rnd_hi32_patch[1].imm = imm_rnd; 18500 rnd_hi32_patch[3].dst_reg = load_reg; 18501 patch = rnd_hi32_patch; 18502 patch_len = 4; 18503 goto apply_patch_buffer; 18504 } 18505 18506 /* Add in an zero-extend instruction if a) the JIT has requested 18507 * it or b) it's a CMPXCHG. 18508 * 18509 * The latter is because: BPF_CMPXCHG always loads a value into 18510 * R0, therefore always zero-extends. However some archs' 18511 * equivalent instruction only does this load when the 18512 * comparison is successful. This detail of CMPXCHG is 18513 * orthogonal to the general zero-extension behaviour of the 18514 * CPU, so it's treated independently of bpf_jit_needs_zext. 18515 */ 18516 if (!bpf_jit_needs_zext() && !is_cmpxchg_insn(&insn)) 18517 continue; 18518 18519 /* Zero-extension is done by the caller. */ 18520 if (bpf_pseudo_kfunc_call(&insn)) 18521 continue; 18522 18523 if (WARN_ON(load_reg == -1)) { 18524 verbose(env, "verifier bug. zext_dst is set, but no reg is defined\n"); 18525 return -EFAULT; 18526 } 18527 18528 zext_patch[0] = insn; 18529 zext_patch[1].dst_reg = load_reg; 18530 zext_patch[1].src_reg = load_reg; 18531 patch = zext_patch; 18532 patch_len = 2; 18533 apply_patch_buffer: 18534 new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); 18535 if (!new_prog) 18536 return -ENOMEM; 18537 env->prog = new_prog; 18538 insns = new_prog->insnsi; 18539 aux = env->insn_aux_data; 18540 delta += patch_len - 1; 18541 } 18542 18543 return 0; 18544 } 18545 18546 /* convert load instructions that access fields of a context type into a 18547 * sequence of instructions that access fields of the underlying structure: 18548 * struct __sk_buff -> struct sk_buff 18549 * struct bpf_sock_ops -> struct sock 18550 */ 18551 static int convert_ctx_accesses(struct bpf_verifier_env *env) 18552 { 18553 const struct bpf_verifier_ops *ops = env->ops; 18554 int i, cnt, size, ctx_field_size, delta = 0; 18555 const int insn_cnt = env->prog->len; 18556 struct bpf_insn insn_buf[16], *insn; 18557 u32 target_size, size_default, off; 18558 struct bpf_prog *new_prog; 18559 enum bpf_access_type type; 18560 bool is_narrower_load; 18561 18562 if (ops->gen_prologue || env->seen_direct_write) { 18563 if (!ops->gen_prologue) { 18564 verbose(env, "bpf verifier is misconfigured\n"); 18565 return -EINVAL; 18566 } 18567 cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, 18568 env->prog); 18569 if (cnt >= ARRAY_SIZE(insn_buf)) { 18570 verbose(env, "bpf verifier is misconfigured\n"); 18571 return -EINVAL; 18572 } else if (cnt) { 18573 new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); 18574 if (!new_prog) 18575 return -ENOMEM; 18576 18577 env->prog = new_prog; 18578 delta += cnt - 1; 18579 } 18580 } 18581 18582 if (bpf_prog_is_offloaded(env->prog->aux)) 18583 return 0; 18584 18585 insn = env->prog->insnsi + delta; 18586 18587 for (i = 0; i < 
insn_cnt; i++, insn++) { 18588 bpf_convert_ctx_access_t convert_ctx_access; 18589 u8 mode; 18590 18591 if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || 18592 insn->code == (BPF_LDX | BPF_MEM | BPF_H) || 18593 insn->code == (BPF_LDX | BPF_MEM | BPF_W) || 18594 insn->code == (BPF_LDX | BPF_MEM | BPF_DW) || 18595 insn->code == (BPF_LDX | BPF_MEMSX | BPF_B) || 18596 insn->code == (BPF_LDX | BPF_MEMSX | BPF_H) || 18597 insn->code == (BPF_LDX | BPF_MEMSX | BPF_W)) { 18598 type = BPF_READ; 18599 } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || 18600 insn->code == (BPF_STX | BPF_MEM | BPF_H) || 18601 insn->code == (BPF_STX | BPF_MEM | BPF_W) || 18602 insn->code == (BPF_STX | BPF_MEM | BPF_DW) || 18603 insn->code == (BPF_ST | BPF_MEM | BPF_B) || 18604 insn->code == (BPF_ST | BPF_MEM | BPF_H) || 18605 insn->code == (BPF_ST | BPF_MEM | BPF_W) || 18606 insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { 18607 type = BPF_WRITE; 18608 } else { 18609 continue; 18610 } 18611 18612 if (type == BPF_WRITE && 18613 env->insn_aux_data[i + delta].sanitize_stack_spill) { 18614 struct bpf_insn patch[] = { 18615 *insn, 18616 BPF_ST_NOSPEC(), 18617 }; 18618 18619 cnt = ARRAY_SIZE(patch); 18620 new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); 18621 if (!new_prog) 18622 return -ENOMEM; 18623 18624 delta += cnt - 1; 18625 env->prog = new_prog; 18626 insn = new_prog->insnsi + i + delta; 18627 continue; 18628 } 18629 18630 switch ((int)env->insn_aux_data[i + delta].ptr_type) { 18631 case PTR_TO_CTX: 18632 if (!ops->convert_ctx_access) 18633 continue; 18634 convert_ctx_access = ops->convert_ctx_access; 18635 break; 18636 case PTR_TO_SOCKET: 18637 case PTR_TO_SOCK_COMMON: 18638 convert_ctx_access = bpf_sock_convert_ctx_access; 18639 break; 18640 case PTR_TO_TCP_SOCK: 18641 convert_ctx_access = bpf_tcp_sock_convert_ctx_access; 18642 break; 18643 case PTR_TO_XDP_SOCK: 18644 convert_ctx_access = bpf_xdp_sock_convert_ctx_access; 18645 break; 18646 case PTR_TO_BTF_ID: 18647 case PTR_TO_BTF_ID | PTR_UNTRUSTED: 18648 /* PTR_TO_BTF_ID | MEM_ALLOC always has a valid lifetime, unlike 18649 * PTR_TO_BTF_ID, and an active ref_obj_id, but the same cannot 18650 * be said once it is marked PTR_UNTRUSTED, hence we must handle 18651 * any faults for loads into such types. BPF_WRITE is disallowed 18652 * for this case. 18653 */ 18654 case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED: 18655 if (type == BPF_READ) { 18656 if (BPF_MODE(insn->code) == BPF_MEM) 18657 insn->code = BPF_LDX | BPF_PROBE_MEM | 18658 BPF_SIZE((insn)->code); 18659 else 18660 insn->code = BPF_LDX | BPF_PROBE_MEMSX | 18661 BPF_SIZE((insn)->code); 18662 env->prog->aux->num_exentries++; 18663 } 18664 continue; 18665 default: 18666 continue; 18667 } 18668 18669 ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; 18670 size = BPF_LDST_BYTES(insn); 18671 mode = BPF_MODE(insn->code); 18672 18673 /* If the read access is a narrower load of the field, 18674 * convert to a 4/8-byte load, to minimum program type specific 18675 * convert_ctx_access changes. If conversion is successful, 18676 * we will apply proper mask to the result. 
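 *
 * Rough little-endian example: a 1-byte read at offset 14 of a 4-byte
 * ctx field starting at offset 12 is widened and then fixed up as
 *
 *   rX = *(u32 *)(ctx + 12);  // full-width load of the field
 *   rX >>= 16;                // shift the addressed byte down
 *   rX &= 0xff;               // mask back to the 1-byte access size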
18677 */ 18678 is_narrower_load = size < ctx_field_size; 18679 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); 18680 off = insn->off; 18681 if (is_narrower_load) { 18682 u8 size_code; 18683 18684 if (type == BPF_WRITE) { 18685 verbose(env, "bpf verifier narrow ctx access misconfigured\n"); 18686 return -EINVAL; 18687 } 18688 18689 size_code = BPF_H; 18690 if (ctx_field_size == 4) 18691 size_code = BPF_W; 18692 else if (ctx_field_size == 8) 18693 size_code = BPF_DW; 18694 18695 insn->off = off & ~(size_default - 1); 18696 insn->code = BPF_LDX | BPF_MEM | size_code; 18697 } 18698 18699 target_size = 0; 18700 cnt = convert_ctx_access(type, insn, insn_buf, env->prog, 18701 &target_size); 18702 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || 18703 (ctx_field_size && !target_size)) { 18704 verbose(env, "bpf verifier is misconfigured\n"); 18705 return -EINVAL; 18706 } 18707 18708 if (is_narrower_load && size < target_size) { 18709 u8 shift = bpf_ctx_narrow_access_offset( 18710 off, size, size_default) * 8; 18711 if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) { 18712 verbose(env, "bpf verifier narrow ctx load misconfigured\n"); 18713 return -EINVAL; 18714 } 18715 if (ctx_field_size <= 4) { 18716 if (shift) 18717 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, 18718 insn->dst_reg, 18719 shift); 18720 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 18721 (1 << size * 8) - 1); 18722 } else { 18723 if (shift) 18724 insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, 18725 insn->dst_reg, 18726 shift); 18727 insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, 18728 (1ULL << size * 8) - 1); 18729 } 18730 } 18731 if (mode == BPF_MEMSX) 18732 insn_buf[cnt++] = BPF_RAW_INSN(BPF_ALU64 | BPF_MOV | BPF_X, 18733 insn->dst_reg, insn->dst_reg, 18734 size * 8, 0); 18735 18736 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 18737 if (!new_prog) 18738 return -ENOMEM; 18739 18740 delta += cnt - 1; 18741 18742 /* keep walking new program and skip insns we just inserted */ 18743 env->prog = new_prog; 18744 insn = new_prog->insnsi + i + delta; 18745 } 18746 18747 return 0; 18748 } 18749 18750 static int jit_subprogs(struct bpf_verifier_env *env) 18751 { 18752 struct bpf_prog *prog = env->prog, **func, *tmp; 18753 int i, j, subprog_start, subprog_end = 0, len, subprog; 18754 struct bpf_map *map_ptr; 18755 struct bpf_insn *insn; 18756 void *old_bpf_func; 18757 int err, num_exentries; 18758 18759 if (env->subprog_cnt <= 1) 18760 return 0; 18761 18762 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 18763 if (!bpf_pseudo_func(insn) && !bpf_pseudo_call(insn)) 18764 continue; 18765 18766 /* Upon error here we cannot fall back to interpreter but 18767 * need a hard reject of the program. Thus -EFAULT is 18768 * propagated in any case. 18769 */ 18770 subprog = find_subprog(env, i + insn->imm + 1); 18771 if (subprog < 0) { 18772 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", 18773 i + insn->imm + 1); 18774 return -EFAULT; 18775 } 18776 /* temporarily remember subprog id inside insn instead of 18777 * aux_data, since next loop will split up all insns into funcs 18778 */ 18779 insn->off = subprog; 18780 /* remember original imm in case JIT fails and fallback 18781 * to interpreter will be needed 18782 */ 18783 env->insn_aux_data[i].call_imm = insn->imm; 18784 /* point imm to __bpf_call_base+1 from JITs point of view */ 18785 insn->imm = 1; 18786 if (bpf_pseudo_func(insn)) 18787 /* jit (e.g. x86_64) may emit fewer instructions 18788 * if it learns a u32 imm is the same as a u64 imm. 
18789 * Force a non zero here. 18790 */ 18791 insn[1].imm = 1; 18792 } 18793 18794 err = bpf_prog_alloc_jited_linfo(prog); 18795 if (err) 18796 goto out_undo_insn; 18797 18798 err = -ENOMEM; 18799 func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); 18800 if (!func) 18801 goto out_undo_insn; 18802 18803 for (i = 0; i < env->subprog_cnt; i++) { 18804 subprog_start = subprog_end; 18805 subprog_end = env->subprog_info[i + 1].start; 18806 18807 len = subprog_end - subprog_start; 18808 /* bpf_prog_run() doesn't call subprogs directly, 18809 * hence main prog stats include the runtime of subprogs. 18810 * subprogs don't have IDs and not reachable via prog_get_next_id 18811 * func[i]->stats will never be accessed and stays NULL 18812 */ 18813 func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER); 18814 if (!func[i]) 18815 goto out_free; 18816 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], 18817 len * sizeof(struct bpf_insn)); 18818 func[i]->type = prog->type; 18819 func[i]->len = len; 18820 if (bpf_prog_calc_tag(func[i])) 18821 goto out_free; 18822 func[i]->is_func = 1; 18823 func[i]->aux->func_idx = i; 18824 /* Below members will be freed only at prog->aux */ 18825 func[i]->aux->btf = prog->aux->btf; 18826 func[i]->aux->func_info = prog->aux->func_info; 18827 func[i]->aux->func_info_cnt = prog->aux->func_info_cnt; 18828 func[i]->aux->poke_tab = prog->aux->poke_tab; 18829 func[i]->aux->size_poke_tab = prog->aux->size_poke_tab; 18830 18831 for (j = 0; j < prog->aux->size_poke_tab; j++) { 18832 struct bpf_jit_poke_descriptor *poke; 18833 18834 poke = &prog->aux->poke_tab[j]; 18835 if (poke->insn_idx < subprog_end && 18836 poke->insn_idx >= subprog_start) 18837 poke->aux = func[i]->aux; 18838 } 18839 18840 func[i]->aux->name[0] = 'F'; 18841 func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; 18842 func[i]->jit_requested = 1; 18843 func[i]->blinding_requested = prog->blinding_requested; 18844 func[i]->aux->kfunc_tab = prog->aux->kfunc_tab; 18845 func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab; 18846 func[i]->aux->linfo = prog->aux->linfo; 18847 func[i]->aux->nr_linfo = prog->aux->nr_linfo; 18848 func[i]->aux->jited_linfo = prog->aux->jited_linfo; 18849 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; 18850 num_exentries = 0; 18851 insn = func[i]->insnsi; 18852 for (j = 0; j < func[i]->len; j++, insn++) { 18853 if (BPF_CLASS(insn->code) == BPF_LDX && 18854 (BPF_MODE(insn->code) == BPF_PROBE_MEM || 18855 BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) 18856 num_exentries++; 18857 } 18858 func[i]->aux->num_exentries = num_exentries; 18859 func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; 18860 func[i]->aux->exception_cb = env->subprog_info[i].is_exception_cb; 18861 if (!i) 18862 func[i]->aux->exception_boundary = env->seen_exception; 18863 func[i] = bpf_int_jit_compile(func[i]); 18864 if (!func[i]->jited) { 18865 err = -ENOTSUPP; 18866 goto out_free; 18867 } 18868 cond_resched(); 18869 } 18870 18871 /* at this point all bpf functions were successfully JITed 18872 * now populate all bpf_calls with correct addresses and 18873 * run last pass of JIT 18874 */ 18875 for (i = 0; i < env->subprog_cnt; i++) { 18876 insn = func[i]->insnsi; 18877 for (j = 0; j < func[i]->len; j++, insn++) { 18878 if (bpf_pseudo_func(insn)) { 18879 subprog = insn->off; 18880 insn[0].imm = (u32)(long)func[subprog]->bpf_func; 18881 insn[1].imm = ((u64)(long)func[subprog]->bpf_func) >> 32; 18882 continue; 18883 } 18884 if (!bpf_pseudo_call(insn)) 18885 continue; 
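			/* insn->off was stashed with the callee's subprog index in the
			 * first pass above; rewrite imm to point at the callee's JITed
			 * image, expressed relative to __bpf_call_base (BPF_CALL_IMM()).
			 */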
18886 subprog = insn->off; 18887 insn->imm = BPF_CALL_IMM(func[subprog]->bpf_func); 18888 } 18889 18890 /* we use the aux data to keep a list of the start addresses 18891 * of the JITed images for each function in the program 18892 * 18893 * for some architectures, such as powerpc64, the imm field 18894 * might not be large enough to hold the offset of the start 18895 * address of the callee's JITed image from __bpf_call_base 18896 * 18897 * in such cases, we can lookup the start address of a callee 18898 * by using its subprog id, available from the off field of 18899 * the call instruction, as an index for this list 18900 */ 18901 func[i]->aux->func = func; 18902 func[i]->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; 18903 func[i]->aux->real_func_cnt = env->subprog_cnt; 18904 } 18905 for (i = 0; i < env->subprog_cnt; i++) { 18906 old_bpf_func = func[i]->bpf_func; 18907 tmp = bpf_int_jit_compile(func[i]); 18908 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { 18909 verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); 18910 err = -ENOTSUPP; 18911 goto out_free; 18912 } 18913 cond_resched(); 18914 } 18915 18916 /* finally lock prog and jit images for all functions and 18917 * populate kallsysm. Begin at the first subprogram, since 18918 * bpf_prog_load will add the kallsyms for the main program. 18919 */ 18920 for (i = 1; i < env->subprog_cnt; i++) { 18921 bpf_prog_lock_ro(func[i]); 18922 bpf_prog_kallsyms_add(func[i]); 18923 } 18924 18925 /* Last step: make now unused interpreter insns from main 18926 * prog consistent for later dump requests, so they can 18927 * later look the same as if they were interpreted only. 18928 */ 18929 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 18930 if (bpf_pseudo_func(insn)) { 18931 insn[0].imm = env->insn_aux_data[i].call_imm; 18932 insn[1].imm = insn->off; 18933 insn->off = 0; 18934 continue; 18935 } 18936 if (!bpf_pseudo_call(insn)) 18937 continue; 18938 insn->off = env->insn_aux_data[i].call_imm; 18939 subprog = find_subprog(env, i + insn->off + 1); 18940 insn->imm = subprog; 18941 } 18942 18943 prog->jited = 1; 18944 prog->bpf_func = func[0]->bpf_func; 18945 prog->jited_len = func[0]->jited_len; 18946 prog->aux->extable = func[0]->aux->extable; 18947 prog->aux->num_exentries = func[0]->aux->num_exentries; 18948 prog->aux->func = func; 18949 prog->aux->func_cnt = env->subprog_cnt - env->hidden_subprog_cnt; 18950 prog->aux->real_func_cnt = env->subprog_cnt; 18951 prog->aux->bpf_exception_cb = (void *)func[env->exception_callback_subprog]->bpf_func; 18952 prog->aux->exception_boundary = func[0]->aux->exception_boundary; 18953 bpf_prog_jit_attempt_done(prog); 18954 return 0; 18955 out_free: 18956 /* We failed JIT'ing, so at this point we need to unregister poke 18957 * descriptors from subprogs, so that kernel is not attempting to 18958 * patch it anymore as we're freeing the subprog JIT memory. 18959 */ 18960 for (i = 0; i < prog->aux->size_poke_tab; i++) { 18961 map_ptr = prog->aux->poke_tab[i].tail_call.map; 18962 map_ptr->ops->map_poke_untrack(map_ptr, prog->aux); 18963 } 18964 /* At this point we're guaranteed that poke descriptors are not 18965 * live anymore. We can just unlink its descriptor table as it's 18966 * released with the main prog. 
18967 */ 18968 for (i = 0; i < env->subprog_cnt; i++) { 18969 if (!func[i]) 18970 continue; 18971 func[i]->aux->poke_tab = NULL; 18972 bpf_jit_free(func[i]); 18973 } 18974 kfree(func); 18975 out_undo_insn: 18976 /* cleanup main prog to be interpreted */ 18977 prog->jit_requested = 0; 18978 prog->blinding_requested = 0; 18979 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { 18980 if (!bpf_pseudo_call(insn)) 18981 continue; 18982 insn->off = 0; 18983 insn->imm = env->insn_aux_data[i].call_imm; 18984 } 18985 bpf_prog_jit_attempt_done(prog); 18986 return err; 18987 } 18988 18989 static int fixup_call_args(struct bpf_verifier_env *env) 18990 { 18991 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 18992 struct bpf_prog *prog = env->prog; 18993 struct bpf_insn *insn = prog->insnsi; 18994 bool has_kfunc_call = bpf_prog_has_kfunc_call(prog); 18995 int i, depth; 18996 #endif 18997 int err = 0; 18998 18999 if (env->prog->jit_requested && 19000 !bpf_prog_is_offloaded(env->prog->aux)) { 19001 err = jit_subprogs(env); 19002 if (err == 0) 19003 return 0; 19004 if (err == -EFAULT) 19005 return err; 19006 } 19007 #ifndef CONFIG_BPF_JIT_ALWAYS_ON 19008 if (has_kfunc_call) { 19009 verbose(env, "calling kernel functions are not allowed in non-JITed programs\n"); 19010 return -EINVAL; 19011 } 19012 if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) { 19013 /* When JIT fails the progs with bpf2bpf calls and tail_calls 19014 * have to be rejected, since interpreter doesn't support them yet. 19015 */ 19016 verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n"); 19017 return -EINVAL; 19018 } 19019 for (i = 0; i < prog->len; i++, insn++) { 19020 if (bpf_pseudo_func(insn)) { 19021 /* When JIT fails the progs with callback calls 19022 * have to be rejected, since interpreter doesn't support them yet. 
19023 */ 19024 verbose(env, "callbacks are not allowed in non-JITed programs\n"); 19025 return -EINVAL; 19026 } 19027 19028 if (!bpf_pseudo_call(insn)) 19029 continue; 19030 depth = get_callee_stack_depth(env, insn, i); 19031 if (depth < 0) 19032 return depth; 19033 bpf_patch_call_args(insn, depth); 19034 } 19035 err = 0; 19036 #endif 19037 return err; 19038 } 19039 19040 /* replace a generic kfunc with a specialized version if necessary */ 19041 static void specialize_kfunc(struct bpf_verifier_env *env, 19042 u32 func_id, u16 offset, unsigned long *addr) 19043 { 19044 struct bpf_prog *prog = env->prog; 19045 bool seen_direct_write; 19046 void *xdp_kfunc; 19047 bool is_rdonly; 19048 19049 if (bpf_dev_bound_kfunc_id(func_id)) { 19050 xdp_kfunc = bpf_dev_bound_resolve_kfunc(prog, func_id); 19051 if (xdp_kfunc) { 19052 *addr = (unsigned long)xdp_kfunc; 19053 return; 19054 } 19055 /* fallback to default kfunc when not supported by netdev */ 19056 } 19057 19058 if (offset) 19059 return; 19060 19061 if (func_id == special_kfunc_list[KF_bpf_dynptr_from_skb]) { 19062 seen_direct_write = env->seen_direct_write; 19063 is_rdonly = !may_access_direct_pkt_data(env, NULL, BPF_WRITE); 19064 19065 if (is_rdonly) 19066 *addr = (unsigned long)bpf_dynptr_from_skb_rdonly; 19067 19068 /* restore env->seen_direct_write to its original value, since 19069 * may_access_direct_pkt_data mutates it 19070 */ 19071 env->seen_direct_write = seen_direct_write; 19072 } 19073 } 19074 19075 static void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux, 19076 u16 struct_meta_reg, 19077 u16 node_offset_reg, 19078 struct bpf_insn *insn, 19079 struct bpf_insn *insn_buf, 19080 int *cnt) 19081 { 19082 struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta; 19083 struct bpf_insn addr[2] = { BPF_LD_IMM64(struct_meta_reg, (long)kptr_struct_meta) }; 19084 19085 insn_buf[0] = addr[0]; 19086 insn_buf[1] = addr[1]; 19087 insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off); 19088 insn_buf[3] = *insn; 19089 *cnt = 4; 19090 } 19091 19092 static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 19093 struct bpf_insn *insn_buf, int insn_idx, int *cnt) 19094 { 19095 const struct bpf_kfunc_desc *desc; 19096 19097 if (!insn->imm) { 19098 verbose(env, "invalid kernel function call not eliminated in verifier pass\n"); 19099 return -EINVAL; 19100 } 19101 19102 *cnt = 0; 19103 19104 /* insn->imm has the btf func_id. Replace it with an offset relative to 19105 * __bpf_call_base, unless the JIT needs to call functions that are 19106 * further than 32 bits away (bpf_jit_supports_far_kfunc_call()). 
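 *
 * Sketch of the near-call case:
 *
 *   insn->imm = BPF_CALL_IMM(desc->addr)
 *             = (long)desc->addr - (long)__bpf_call_base
 *
 * When the JIT supports far kfunc calls, imm is left holding the BTF
 * func_id so the JIT can resolve the full kernel address itself from
 * the kfunc descriptors.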
19107 */ 19108 desc = find_kfunc_desc(env->prog, insn->imm, insn->off); 19109 if (!desc) { 19110 verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n", 19111 insn->imm); 19112 return -EFAULT; 19113 } 19114 19115 if (!bpf_jit_supports_far_kfunc_call()) 19116 insn->imm = BPF_CALL_IMM(desc->addr); 19117 if (insn->off) 19118 return 0; 19119 if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || 19120 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { 19121 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; 19122 struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; 19123 u64 obj_new_size = env->insn_aux_data[insn_idx].obj_new_size; 19124 19125 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl] && kptr_struct_meta) { 19126 verbose(env, "verifier internal error: NULL kptr_struct_meta expected at insn_idx %d\n", 19127 insn_idx); 19128 return -EFAULT; 19129 } 19130 19131 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_1, obj_new_size); 19132 insn_buf[1] = addr[0]; 19133 insn_buf[2] = addr[1]; 19134 insn_buf[3] = *insn; 19135 *cnt = 4; 19136 } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || 19137 desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] || 19138 desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { 19139 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; 19140 struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; 19141 19142 if (desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_drop_impl] && kptr_struct_meta) { 19143 verbose(env, "verifier internal error: NULL kptr_struct_meta expected at insn_idx %d\n", 19144 insn_idx); 19145 return -EFAULT; 19146 } 19147 19148 if (desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl] && 19149 !kptr_struct_meta) { 19150 verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n", 19151 insn_idx); 19152 return -EFAULT; 19153 } 19154 19155 insn_buf[0] = addr[0]; 19156 insn_buf[1] = addr[1]; 19157 insn_buf[2] = *insn; 19158 *cnt = 3; 19159 } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || 19160 desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || 19161 desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { 19162 struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; 19163 int struct_meta_reg = BPF_REG_3; 19164 int node_offset_reg = BPF_REG_4; 19165 19166 /* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */ 19167 if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { 19168 struct_meta_reg = BPF_REG_4; 19169 node_offset_reg = BPF_REG_5; 19170 } 19171 19172 if (!kptr_struct_meta) { 19173 verbose(env, "verifier internal error: kptr_struct_meta expected at insn_idx %d\n", 19174 insn_idx); 19175 return -EFAULT; 19176 } 19177 19178 __fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg, 19179 node_offset_reg, insn, insn_buf, cnt); 19180 } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || 19181 desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { 19182 insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1); 19183 *cnt = 1; 19184 } 19185 return 0; 19186 } 19187 19188 /* The function requires that first instruction in 'patch' is insnsi[prog->len - 1] */ 19189 static int add_hidden_subprog(struct bpf_verifier_env *env, 
struct bpf_insn *patch, int len) 19190 { 19191 struct bpf_subprog_info *info = env->subprog_info; 19192 int cnt = env->subprog_cnt; 19193 struct bpf_prog *prog; 19194 19195 /* We only reserve one slot for hidden subprogs in subprog_info. */ 19196 if (env->hidden_subprog_cnt) { 19197 verbose(env, "verifier internal error: only one hidden subprog supported\n"); 19198 return -EFAULT; 19199 } 19200 /* We're not patching any existing instruction, just appending the new 19201 * ones for the hidden subprog. Hence all of the adjustment operations 19202 * in bpf_patch_insn_data are no-ops. 19203 */ 19204 prog = bpf_patch_insn_data(env, env->prog->len - 1, patch, len); 19205 if (!prog) 19206 return -ENOMEM; 19207 env->prog = prog; 19208 info[cnt + 1].start = info[cnt].start; 19209 info[cnt].start = prog->len - len + 1; 19210 env->subprog_cnt++; 19211 env->hidden_subprog_cnt++; 19212 return 0; 19213 } 19214 19215 /* Do various post-verification rewrites in a single program pass. 19216 * These rewrites simplify JIT and interpreter implementations. 19217 */ 19218 static int do_misc_fixups(struct bpf_verifier_env *env) 19219 { 19220 struct bpf_prog *prog = env->prog; 19221 enum bpf_attach_type eatype = prog->expected_attach_type; 19222 enum bpf_prog_type prog_type = resolve_prog_type(prog); 19223 struct bpf_insn *insn = prog->insnsi; 19224 const struct bpf_func_proto *fn; 19225 const int insn_cnt = prog->len; 19226 const struct bpf_map_ops *ops; 19227 struct bpf_insn_aux_data *aux; 19228 struct bpf_insn insn_buf[16]; 19229 struct bpf_prog *new_prog; 19230 struct bpf_map *map_ptr; 19231 int i, ret, cnt, delta = 0; 19232 19233 if (env->seen_exception && !env->exception_callback_subprog) { 19234 struct bpf_insn patch[] = { 19235 env->prog->insnsi[insn_cnt - 1], 19236 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), 19237 BPF_EXIT_INSN(), 19238 }; 19239 19240 ret = add_hidden_subprog(env, patch, ARRAY_SIZE(patch)); 19241 if (ret < 0) 19242 return ret; 19243 prog = env->prog; 19244 insn = prog->insnsi; 19245 19246 env->exception_callback_subprog = env->subprog_cnt - 1; 19247 /* Don't update insn_cnt, as add_hidden_subprog always appends insns */ 19248 env->subprog_info[env->exception_callback_subprog].is_cb = true; 19249 env->subprog_info[env->exception_callback_subprog].is_async_cb = true; 19250 env->subprog_info[env->exception_callback_subprog].is_exception_cb = true; 19251 } 19252 19253 for (i = 0; i < insn_cnt; i++, insn++) { 19254 /* Make divide-by-zero exceptions impossible. */ 19255 if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || 19256 insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || 19257 insn->code == (BPF_ALU | BPF_MOD | BPF_X) || 19258 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { 19259 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 19260 bool isdiv = BPF_OP(insn->code) == BPF_DIV; 19261 struct bpf_insn *patchlet; 19262 struct bpf_insn chk_and_div[] = { 19263 /* [R,W]x div 0 -> 0 */ 19264 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | 19265 BPF_JNE | BPF_K, insn->src_reg, 19266 0, 2, 0), 19267 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), 19268 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 19269 *insn, 19270 }; 19271 struct bpf_insn chk_and_mod[] = { 19272 /* [R,W]x mod 0 -> [R,W]x */ 19273 BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | 19274 BPF_JEQ | BPF_K, insn->src_reg, 19275 0, 1 + (is64 ? 0 : 1), 0), 19276 *insn, 19277 BPF_JMP_IMM(BPF_JA, 0, 0, 1), 19278 BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), 19279 }; 19280 19281 patchlet = isdiv ? chk_and_div : chk_and_mod; 19282 cnt = isdiv ? 
ARRAY_SIZE(chk_and_div) : 19283 ARRAY_SIZE(chk_and_mod) - (is64 ? 2 : 0); 19284 19285 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); 19286 if (!new_prog) 19287 return -ENOMEM; 19288 19289 delta += cnt - 1; 19290 env->prog = prog = new_prog; 19291 insn = new_prog->insnsi + i + delta; 19292 continue; 19293 } 19294 19295 /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */ 19296 if (BPF_CLASS(insn->code) == BPF_LD && 19297 (BPF_MODE(insn->code) == BPF_ABS || 19298 BPF_MODE(insn->code) == BPF_IND)) { 19299 cnt = env->ops->gen_ld_abs(insn, insn_buf); 19300 if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { 19301 verbose(env, "bpf verifier is misconfigured\n"); 19302 return -EINVAL; 19303 } 19304 19305 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19306 if (!new_prog) 19307 return -ENOMEM; 19308 19309 delta += cnt - 1; 19310 env->prog = prog = new_prog; 19311 insn = new_prog->insnsi + i + delta; 19312 continue; 19313 } 19314 19315 /* Rewrite pointer arithmetic to mitigate speculation attacks. */ 19316 if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || 19317 insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { 19318 const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; 19319 const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; 19320 struct bpf_insn *patch = &insn_buf[0]; 19321 bool issrc, isneg, isimm; 19322 u32 off_reg; 19323 19324 aux = &env->insn_aux_data[i + delta]; 19325 if (!aux->alu_state || 19326 aux->alu_state == BPF_ALU_NON_POINTER) 19327 continue; 19328 19329 isneg = aux->alu_state & BPF_ALU_NEG_VALUE; 19330 issrc = (aux->alu_state & BPF_ALU_SANITIZE) == 19331 BPF_ALU_SANITIZE_SRC; 19332 isimm = aux->alu_state & BPF_ALU_IMMEDIATE; 19333 19334 off_reg = issrc ? insn->src_reg : insn->dst_reg; 19335 if (isimm) { 19336 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 19337 } else { 19338 if (isneg) 19339 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 19340 *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); 19341 *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); 19342 *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); 19343 *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); 19344 *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); 19345 *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); 19346 } 19347 if (!issrc) 19348 *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); 19349 insn->src_reg = BPF_REG_AX; 19350 if (isneg) 19351 insn->code = insn->code == code_add ? 
19352 code_sub : code_add; 19353 *patch++ = *insn; 19354 if (issrc && isneg && !isimm) 19355 *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); 19356 cnt = patch - insn_buf; 19357 19358 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19359 if (!new_prog) 19360 return -ENOMEM; 19361 19362 delta += cnt - 1; 19363 env->prog = prog = new_prog; 19364 insn = new_prog->insnsi + i + delta; 19365 continue; 19366 } 19367 19368 if (insn->code != (BPF_JMP | BPF_CALL)) 19369 continue; 19370 if (insn->src_reg == BPF_PSEUDO_CALL) 19371 continue; 19372 if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) { 19373 ret = fixup_kfunc_call(env, insn, insn_buf, i + delta, &cnt); 19374 if (ret) 19375 return ret; 19376 if (cnt == 0) 19377 continue; 19378 19379 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19380 if (!new_prog) 19381 return -ENOMEM; 19382 19383 delta += cnt - 1; 19384 env->prog = prog = new_prog; 19385 insn = new_prog->insnsi + i + delta; 19386 continue; 19387 } 19388 19389 if (insn->imm == BPF_FUNC_get_route_realm) 19390 prog->dst_needed = 1; 19391 if (insn->imm == BPF_FUNC_get_prandom_u32) 19392 bpf_user_rnd_init_once(); 19393 if (insn->imm == BPF_FUNC_override_return) 19394 prog->kprobe_override = 1; 19395 if (insn->imm == BPF_FUNC_tail_call) { 19396 /* If we tail call into other programs, we 19397 * cannot make any assumptions since they can 19398 * be replaced dynamically during runtime in 19399 * the program array. 19400 */ 19401 prog->cb_access = 1; 19402 if (!allow_tail_call_in_subprogs(env)) 19403 prog->aux->stack_depth = MAX_BPF_STACK; 19404 prog->aux->max_pkt_offset = MAX_PACKET_OFF; 19405 19406 /* mark bpf_tail_call as different opcode to avoid 19407 * conditional branch in the interpreter for every normal 19408 * call and to prevent accidental JITing by JIT compiler 19409 * that doesn't support bpf_tail_call yet 19410 */ 19411 insn->imm = 0; 19412 insn->code = BPF_JMP | BPF_TAIL_CALL; 19413 19414 aux = &env->insn_aux_data[i + delta]; 19415 if (env->bpf_capable && !prog->blinding_requested && 19416 prog->jit_requested && 19417 !bpf_map_key_poisoned(aux) && 19418 !bpf_map_ptr_poisoned(aux) && 19419 !bpf_map_ptr_unpriv(aux)) { 19420 struct bpf_jit_poke_descriptor desc = { 19421 .reason = BPF_POKE_REASON_TAIL_CALL, 19422 .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state), 19423 .tail_call.key = bpf_map_key_immediate(aux), 19424 .insn_idx = i + delta, 19425 }; 19426 19427 ret = bpf_jit_add_poke_descriptor(prog, &desc); 19428 if (ret < 0) { 19429 verbose(env, "adding tail call poke descriptor failed\n"); 19430 return ret; 19431 } 19432 19433 insn->imm = ret + 1; 19434 continue; 19435 } 19436 19437 if (!bpf_map_ptr_unpriv(aux)) 19438 continue; 19439 19440 /* instead of changing every JIT dealing with tail_call 19441 * emit two extra insns: 19442 * if (index >= max_entries) goto out; 19443 * index &= array->index_mask; 19444 * to avoid out-of-bounds cpu speculation 19445 */ 19446 if (bpf_map_ptr_poisoned(aux)) { 19447 verbose(env, "tail_call abusing map_ptr\n"); 19448 return -EINVAL; 19449 } 19450 19451 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 19452 insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, 19453 map_ptr->max_entries, 2); 19454 insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, 19455 container_of(map_ptr, 19456 struct bpf_array, 19457 map)->index_mask); 19458 insn_buf[2] = *insn; 19459 cnt = 3; 19460 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19461 if (!new_prog) 19462 return -ENOMEM; 19463 19464 delta += cnt - 1; 19465 env->prog = prog = new_prog; 
19466 insn = new_prog->insnsi + i + delta; 19467 continue; 19468 } 19469 19470 if (insn->imm == BPF_FUNC_timer_set_callback) { 19471 /* The verifier will process callback_fn as many times as necessary 19472 * with different maps and the register states prepared by 19473 * set_timer_callback_state will be accurate. 19474 * 19475 * The following use case is valid: 19476 * map1 is shared by prog1, prog2, prog3. 19477 * prog1 calls bpf_timer_init for some map1 elements 19478 * prog2 calls bpf_timer_set_callback for some map1 elements. 19479 * Those that were not bpf_timer_init-ed will return -EINVAL. 19480 * prog3 calls bpf_timer_start for some map1 elements. 19481 * Those that were not both bpf_timer_init-ed and 19482 * bpf_timer_set_callback-ed will return -EINVAL. 19483 */ 19484 struct bpf_insn ld_addrs[2] = { 19485 BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), 19486 }; 19487 19488 insn_buf[0] = ld_addrs[0]; 19489 insn_buf[1] = ld_addrs[1]; 19490 insn_buf[2] = *insn; 19491 cnt = 3; 19492 19493 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19494 if (!new_prog) 19495 return -ENOMEM; 19496 19497 delta += cnt - 1; 19498 env->prog = prog = new_prog; 19499 insn = new_prog->insnsi + i + delta; 19500 goto patch_call_imm; 19501 } 19502 19503 if (is_storage_get_function(insn->imm)) { 19504 if (!env->prog->aux->sleepable || 19505 env->insn_aux_data[i + delta].storage_get_func_atomic) 19506 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC); 19507 else 19508 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_KERNEL); 19509 insn_buf[1] = *insn; 19510 cnt = 2; 19511 19512 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19513 if (!new_prog) 19514 return -ENOMEM; 19515 19516 delta += cnt - 1; 19517 env->prog = prog = new_prog; 19518 insn = new_prog->insnsi + i + delta; 19519 goto patch_call_imm; 19520 } 19521 19522 /* bpf_per_cpu_ptr() and bpf_this_cpu_ptr() */ 19523 if (env->insn_aux_data[i + delta].call_with_percpu_alloc_ptr) { 19524 /* patch with 'r1 = *(u64 *)(r1 + 0)' since for percpu data, 19525 * bpf_mem_alloc() returns a ptr to the percpu data ptr. 19526 */ 19527 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0); 19528 insn_buf[1] = *insn; 19529 cnt = 2; 19530 19531 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19532 if (!new_prog) 19533 return -ENOMEM; 19534 19535 delta += cnt - 1; 19536 env->prog = prog = new_prog; 19537 insn = new_prog->insnsi + i + delta; 19538 goto patch_call_imm; 19539 } 19540 19541 /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup 19542 * and other inlining handlers are currently limited to 64 bit 19543 * only. 
19544 */ 19545 if (prog->jit_requested && BITS_PER_LONG == 64 && 19546 (insn->imm == BPF_FUNC_map_lookup_elem || 19547 insn->imm == BPF_FUNC_map_update_elem || 19548 insn->imm == BPF_FUNC_map_delete_elem || 19549 insn->imm == BPF_FUNC_map_push_elem || 19550 insn->imm == BPF_FUNC_map_pop_elem || 19551 insn->imm == BPF_FUNC_map_peek_elem || 19552 insn->imm == BPF_FUNC_redirect_map || 19553 insn->imm == BPF_FUNC_for_each_map_elem || 19554 insn->imm == BPF_FUNC_map_lookup_percpu_elem)) { 19555 aux = &env->insn_aux_data[i + delta]; 19556 if (bpf_map_ptr_poisoned(aux)) 19557 goto patch_call_imm; 19558 19559 map_ptr = BPF_MAP_PTR(aux->map_ptr_state); 19560 ops = map_ptr->ops; 19561 if (insn->imm == BPF_FUNC_map_lookup_elem && 19562 ops->map_gen_lookup) { 19563 cnt = ops->map_gen_lookup(map_ptr, insn_buf); 19564 if (cnt == -EOPNOTSUPP) 19565 goto patch_map_ops_generic; 19566 if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) { 19567 verbose(env, "bpf verifier is misconfigured\n"); 19568 return -EINVAL; 19569 } 19570 19571 new_prog = bpf_patch_insn_data(env, i + delta, 19572 insn_buf, cnt); 19573 if (!new_prog) 19574 return -ENOMEM; 19575 19576 delta += cnt - 1; 19577 env->prog = prog = new_prog; 19578 insn = new_prog->insnsi + i + delta; 19579 continue; 19580 } 19581 19582 BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, 19583 (void *(*)(struct bpf_map *map, void *key))NULL)); 19584 BUILD_BUG_ON(!__same_type(ops->map_delete_elem, 19585 (long (*)(struct bpf_map *map, void *key))NULL)); 19586 BUILD_BUG_ON(!__same_type(ops->map_update_elem, 19587 (long (*)(struct bpf_map *map, void *key, void *value, 19588 u64 flags))NULL)); 19589 BUILD_BUG_ON(!__same_type(ops->map_push_elem, 19590 (long (*)(struct bpf_map *map, void *value, 19591 u64 flags))NULL)); 19592 BUILD_BUG_ON(!__same_type(ops->map_pop_elem, 19593 (long (*)(struct bpf_map *map, void *value))NULL)); 19594 BUILD_BUG_ON(!__same_type(ops->map_peek_elem, 19595 (long (*)(struct bpf_map *map, void *value))NULL)); 19596 BUILD_BUG_ON(!__same_type(ops->map_redirect, 19597 (long (*)(struct bpf_map *map, u64 index, u64 flags))NULL)); 19598 BUILD_BUG_ON(!__same_type(ops->map_for_each_callback, 19599 (long (*)(struct bpf_map *map, 19600 bpf_callback_t callback_fn, 19601 void *callback_ctx, 19602 u64 flags))NULL)); 19603 BUILD_BUG_ON(!__same_type(ops->map_lookup_percpu_elem, 19604 (void *(*)(struct bpf_map *map, void *key, u32 cpu))NULL)); 19605 19606 patch_map_ops_generic: 19607 switch (insn->imm) { 19608 case BPF_FUNC_map_lookup_elem: 19609 insn->imm = BPF_CALL_IMM(ops->map_lookup_elem); 19610 continue; 19611 case BPF_FUNC_map_update_elem: 19612 insn->imm = BPF_CALL_IMM(ops->map_update_elem); 19613 continue; 19614 case BPF_FUNC_map_delete_elem: 19615 insn->imm = BPF_CALL_IMM(ops->map_delete_elem); 19616 continue; 19617 case BPF_FUNC_map_push_elem: 19618 insn->imm = BPF_CALL_IMM(ops->map_push_elem); 19619 continue; 19620 case BPF_FUNC_map_pop_elem: 19621 insn->imm = BPF_CALL_IMM(ops->map_pop_elem); 19622 continue; 19623 case BPF_FUNC_map_peek_elem: 19624 insn->imm = BPF_CALL_IMM(ops->map_peek_elem); 19625 continue; 19626 case BPF_FUNC_redirect_map: 19627 insn->imm = BPF_CALL_IMM(ops->map_redirect); 19628 continue; 19629 case BPF_FUNC_for_each_map_elem: 19630 insn->imm = BPF_CALL_IMM(ops->map_for_each_callback); 19631 continue; 19632 case BPF_FUNC_map_lookup_percpu_elem: 19633 insn->imm = BPF_CALL_IMM(ops->map_lookup_percpu_elem); 19634 continue; 19635 } 19636 19637 goto patch_call_imm; 19638 } 19639 19640 /* Implement bpf_jiffies64 inline. 
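 *
 * As a sketch (not an additional patch, just what the insn_buf sequence
 * below produces): the helper call is replaced by a direct read of the
 * jiffies counter, roughly
 *
 *	r0 = (u64)&jiffies;		// BPF_LD_IMM64, takes two insns
 *	r0 = *(u64 *)(r0 + 0);		// BPF_LDX_MEM(BPF_DW, ...)
 *
 * so no helper call remains at run time.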
*/ 19641 if (prog->jit_requested && BITS_PER_LONG == 64 && 19642 insn->imm == BPF_FUNC_jiffies64) { 19643 struct bpf_insn ld_jiffies_addr[2] = { 19644 BPF_LD_IMM64(BPF_REG_0, 19645 (unsigned long)&jiffies), 19646 }; 19647 19648 insn_buf[0] = ld_jiffies_addr[0]; 19649 insn_buf[1] = ld_jiffies_addr[1]; 19650 insn_buf[2] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, 19651 BPF_REG_0, 0); 19652 cnt = 3; 19653 19654 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 19655 cnt); 19656 if (!new_prog) 19657 return -ENOMEM; 19658 19659 delta += cnt - 1; 19660 env->prog = prog = new_prog; 19661 insn = new_prog->insnsi + i + delta; 19662 continue; 19663 } 19664 19665 /* Implement bpf_get_func_arg inline. */ 19666 if (prog_type == BPF_PROG_TYPE_TRACING && 19667 insn->imm == BPF_FUNC_get_func_arg) { 19668 /* Load nr_args from ctx - 8 */ 19669 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 19670 insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6); 19671 insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3); 19672 insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1); 19673 insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0); 19674 insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0); 19675 insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0); 19676 insn_buf[7] = BPF_JMP_A(1); 19677 insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL); 19678 cnt = 9; 19679 19680 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19681 if (!new_prog) 19682 return -ENOMEM; 19683 19684 delta += cnt - 1; 19685 env->prog = prog = new_prog; 19686 insn = new_prog->insnsi + i + delta; 19687 continue; 19688 } 19689 19690 /* Implement bpf_get_func_ret inline. */ 19691 if (prog_type == BPF_PROG_TYPE_TRACING && 19692 insn->imm == BPF_FUNC_get_func_ret) { 19693 if (eatype == BPF_TRACE_FEXIT || 19694 eatype == BPF_MODIFY_RETURN) { 19695 /* Load nr_args from ctx - 8 */ 19696 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 19697 insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3); 19698 insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1); 19699 insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0); 19700 insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0); 19701 insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0); 19702 cnt = 6; 19703 } else { 19704 insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP); 19705 cnt = 1; 19706 } 19707 19708 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); 19709 if (!new_prog) 19710 return -ENOMEM; 19711 19712 delta += cnt - 1; 19713 env->prog = prog = new_prog; 19714 insn = new_prog->insnsi + i + delta; 19715 continue; 19716 } 19717 19718 /* Implement get_func_arg_cnt inline. */ 19719 if (prog_type == BPF_PROG_TYPE_TRACING && 19720 insn->imm == BPF_FUNC_get_func_arg_cnt) { 19721 /* Load nr_args from ctx - 8 */ 19722 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); 19723 19724 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); 19725 if (!new_prog) 19726 return -ENOMEM; 19727 19728 env->prog = prog = new_prog; 19729 insn = new_prog->insnsi + i + delta; 19730 continue; 19731 } 19732 19733 /* Implement bpf_get_func_ip inline. 
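 *
 * Sketch only: for tracing programs the trampoline saves the traced
 * function's IP at ctx - 16, so the helper call can be rewritten into
 * a single load, roughly
 *
 *	r0 = *(u64 *)(r1 - 16);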
*/ 19734 if (prog_type == BPF_PROG_TYPE_TRACING && 19735 insn->imm == BPF_FUNC_get_func_ip) { 19736 /* Load IP address from ctx - 16 */ 19737 insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16); 19738 19739 new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); 19740 if (!new_prog) 19741 return -ENOMEM; 19742 19743 env->prog = prog = new_prog; 19744 insn = new_prog->insnsi + i + delta; 19745 continue; 19746 } 19747 19748 patch_call_imm: 19749 fn = env->ops->get_func_proto(insn->imm, env->prog); 19750 /* all functions that have prototype and verifier allowed 19751 * programs to call them, must be real in-kernel functions 19752 */ 19753 if (!fn->func) { 19754 verbose(env, 19755 "kernel subsystem misconfigured func %s#%d\n", 19756 func_id_name(insn->imm), insn->imm); 19757 return -EFAULT; 19758 } 19759 insn->imm = fn->func - __bpf_call_base; 19760 } 19761 19762 /* Since poke tab is now finalized, publish aux to tracker. */ 19763 for (i = 0; i < prog->aux->size_poke_tab; i++) { 19764 map_ptr = prog->aux->poke_tab[i].tail_call.map; 19765 if (!map_ptr->ops->map_poke_track || 19766 !map_ptr->ops->map_poke_untrack || 19767 !map_ptr->ops->map_poke_run) { 19768 verbose(env, "bpf verifier is misconfigured\n"); 19769 return -EINVAL; 19770 } 19771 19772 ret = map_ptr->ops->map_poke_track(map_ptr, prog->aux); 19773 if (ret < 0) { 19774 verbose(env, "tracking tail call prog failed\n"); 19775 return ret; 19776 } 19777 } 19778 19779 sort_kfunc_descs_by_imm_off(env->prog); 19780 19781 return 0; 19782 } 19783 19784 static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env, 19785 int position, 19786 s32 stack_base, 19787 u32 callback_subprogno, 19788 u32 *cnt) 19789 { 19790 s32 r6_offset = stack_base + 0 * BPF_REG_SIZE; 19791 s32 r7_offset = stack_base + 1 * BPF_REG_SIZE; 19792 s32 r8_offset = stack_base + 2 * BPF_REG_SIZE; 19793 int reg_loop_max = BPF_REG_6; 19794 int reg_loop_cnt = BPF_REG_7; 19795 int reg_loop_ctx = BPF_REG_8; 19796 19797 struct bpf_prog *new_prog; 19798 u32 callback_start; 19799 u32 call_insn_offset; 19800 s32 callback_offset; 19801 19802 /* This represents an inlined version of bpf_iter.c:bpf_loop, 19803 * be careful to modify this code in sync. 19804 */ 19805 struct bpf_insn insn_buf[] = { 19806 /* Return error and jump to the end of the patch if 19807 * expected number of iterations is too big. 
19808 */ 19809 BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2), 19810 BPF_MOV32_IMM(BPF_REG_0, -E2BIG), 19811 BPF_JMP_IMM(BPF_JA, 0, 0, 16), 19812 /* spill R6, R7, R8 to use these as loop vars */ 19813 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset), 19814 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset), 19815 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset), 19816 /* initialize loop vars */ 19817 BPF_MOV64_REG(reg_loop_max, BPF_REG_1), 19818 BPF_MOV32_IMM(reg_loop_cnt, 0), 19819 BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3), 19820 /* loop header, 19821 * if reg_loop_cnt >= reg_loop_max skip the loop body 19822 */ 19823 BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5), 19824 /* callback call, 19825 * correct callback offset would be set after patching 19826 */ 19827 BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt), 19828 BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx), 19829 BPF_CALL_REL(0), 19830 /* increment loop counter */ 19831 BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1), 19832 /* jump to loop header if callback returned 0 */ 19833 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6), 19834 /* return value of bpf_loop, 19835 * set R0 to the number of iterations 19836 */ 19837 BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt), 19838 /* restore original values of R6, R7, R8 */ 19839 BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset), 19840 BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset), 19841 BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset), 19842 }; 19843 19844 *cnt = ARRAY_SIZE(insn_buf); 19845 new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt); 19846 if (!new_prog) 19847 return new_prog; 19848 19849 /* callback start is known only after patching */ 19850 callback_start = env->subprog_info[callback_subprogno].start; 19851 /* Note: insn_buf[12] is an offset of BPF_CALL_REL instruction */ 19852 call_insn_offset = position + 12; 19853 callback_offset = callback_start - call_insn_offset - 1; 19854 new_prog->insnsi[call_insn_offset].imm = callback_offset; 19855 19856 return new_prog; 19857 } 19858 19859 static bool is_bpf_loop_call(struct bpf_insn *insn) 19860 { 19861 return insn->code == (BPF_JMP | BPF_CALL) && 19862 insn->src_reg == 0 && 19863 insn->imm == BPF_FUNC_loop; 19864 } 19865 19866 /* For all sub-programs in the program (including main) check 19867 * insn_aux_data to see if there are bpf_loop calls that require 19868 * inlining. If such calls are found the calls are replaced with a 19869 * sequence of instructions produced by `inline_bpf_loop` function and 19870 * subprog stack_depth is increased by the size of 3 registers. 19871 * This stack space is used to spill values of the R6, R7, R8. These 19872 * registers are used to store the loop bound, counter and context 19873 * variables. 
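 *
 * As an illustration (BPF C level, names made up): a call such as
 *
 *	bpf_loop(nr, callback_fn, &cb_ctx, 0);
 *
 * that was marked fit_for_inline is replaced by an open-coded counting
 * loop that invokes callback_fn directly via BPF_CALL_REL on each
 * iteration, so the bpf_loop helper itself is never called at run time.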
19874 */ 19875 static int optimize_bpf_loop(struct bpf_verifier_env *env) 19876 { 19877 struct bpf_subprog_info *subprogs = env->subprog_info; 19878 int i, cur_subprog = 0, cnt, delta = 0; 19879 struct bpf_insn *insn = env->prog->insnsi; 19880 int insn_cnt = env->prog->len; 19881 u16 stack_depth = subprogs[cur_subprog].stack_depth; 19882 u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; 19883 u16 stack_depth_extra = 0; 19884 19885 for (i = 0; i < insn_cnt; i++, insn++) { 19886 struct bpf_loop_inline_state *inline_state = 19887 &env->insn_aux_data[i + delta].loop_inline_state; 19888 19889 if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) { 19890 struct bpf_prog *new_prog; 19891 19892 stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup; 19893 new_prog = inline_bpf_loop(env, 19894 i + delta, 19895 -(stack_depth + stack_depth_extra), 19896 inline_state->callback_subprogno, 19897 &cnt); 19898 if (!new_prog) 19899 return -ENOMEM; 19900 19901 delta += cnt - 1; 19902 env->prog = new_prog; 19903 insn = new_prog->insnsi + i + delta; 19904 } 19905 19906 if (subprogs[cur_subprog + 1].start == i + delta + 1) { 19907 subprogs[cur_subprog].stack_depth += stack_depth_extra; 19908 cur_subprog++; 19909 stack_depth = subprogs[cur_subprog].stack_depth; 19910 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth; 19911 stack_depth_extra = 0; 19912 } 19913 } 19914 19915 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 19916 19917 return 0; 19918 } 19919 19920 static void free_states(struct bpf_verifier_env *env) 19921 { 19922 struct bpf_verifier_state_list *sl, *sln; 19923 int i; 19924 19925 sl = env->free_list; 19926 while (sl) { 19927 sln = sl->next; 19928 free_verifier_state(&sl->state, false); 19929 kfree(sl); 19930 sl = sln; 19931 } 19932 env->free_list = NULL; 19933 19934 if (!env->explored_states) 19935 return; 19936 19937 for (i = 0; i < state_htab_size(env); i++) { 19938 sl = env->explored_states[i]; 19939 19940 while (sl) { 19941 sln = sl->next; 19942 free_verifier_state(&sl->state, false); 19943 kfree(sl); 19944 sl = sln; 19945 } 19946 env->explored_states[i] = NULL; 19947 } 19948 } 19949 19950 static int do_check_common(struct bpf_verifier_env *env, int subprog, bool is_ex_cb) 19951 { 19952 bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); 19953 struct bpf_verifier_state *state; 19954 struct bpf_reg_state *regs; 19955 int ret, i; 19956 19957 env->prev_linfo = NULL; 19958 env->pass_cnt++; 19959 19960 state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); 19961 if (!state) 19962 return -ENOMEM; 19963 state->curframe = 0; 19964 state->speculative = false; 19965 state->branches = 1; 19966 state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); 19967 if (!state->frame[0]) { 19968 kfree(state); 19969 return -ENOMEM; 19970 } 19971 env->cur_state = state; 19972 init_func_state(env, state->frame[0], 19973 BPF_MAIN_FUNC /* callsite */, 19974 0 /* frameno */, 19975 subprog); 19976 state->first_insn_idx = env->subprog_info[subprog].start; 19977 state->last_insn_idx = -1; 19978 19979 regs = state->frame[state->curframe]->regs; 19980 if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { 19981 ret = btf_prepare_func_args(env, subprog, regs, is_ex_cb); 19982 if (ret) 19983 goto out; 19984 for (i = BPF_REG_1; i <= BPF_REG_5; i++) { 19985 if (regs[i].type == PTR_TO_CTX) 19986 mark_reg_known_zero(env, regs, i); 19987 else if (regs[i].type == SCALAR_VALUE) 19988 mark_reg_unknown(env, regs, i); 19989 else if (base_type(regs[i].type) == 
PTR_TO_MEM) { 19990 const u32 mem_size = regs[i].mem_size; 19991 19992 mark_reg_known_zero(env, regs, i); 19993 regs[i].mem_size = mem_size; 19994 regs[i].id = ++env->id_gen; 19995 } 19996 } 19997 if (is_ex_cb) { 19998 state->frame[0]->in_exception_callback_fn = true; 19999 env->subprog_info[subprog].is_cb = true; 20000 env->subprog_info[subprog].is_async_cb = true; 20001 env->subprog_info[subprog].is_exception_cb = true; 20002 } 20003 } else { 20004 /* 1st arg to a function */ 20005 regs[BPF_REG_1].type = PTR_TO_CTX; 20006 mark_reg_known_zero(env, regs, BPF_REG_1); 20007 ret = btf_check_subprog_arg_match(env, subprog, regs); 20008 if (ret == -EFAULT) 20009 /* unlikely verifier bug. abort. 20010 * ret == 0 and ret < 0 are sadly acceptable for 20011 * main() function due to backward compatibility. 20012 * Like socket filter program may be written as: 20013 * int bpf_prog(struct pt_regs *ctx) 20014 * and never dereference that ctx in the program. 20015 * 'struct pt_regs' is a type mismatch for socket 20016 * filter that should be using 'struct __sk_buff'. 20017 */ 20018 goto out; 20019 } 20020 20021 ret = do_check(env); 20022 out: 20023 /* check for NULL is necessary, since cur_state can be freed inside 20024 * do_check() under memory pressure. 20025 */ 20026 if (env->cur_state) { 20027 free_verifier_state(env->cur_state, true); 20028 env->cur_state = NULL; 20029 } 20030 while (!pop_stack(env, NULL, NULL, false)); 20031 if (!ret && pop_log) 20032 bpf_vlog_reset(&env->log, 0); 20033 free_states(env); 20034 return ret; 20035 } 20036 20037 /* Verify all global functions in a BPF program one by one based on their BTF. 20038 * All global functions must pass verification. Otherwise the whole program is rejected. 20039 * Consider: 20040 * int bar(int); 20041 * int foo(int f) 20042 * { 20043 * return bar(f); 20044 * } 20045 * int bar(int b) 20046 * { 20047 * ... 20048 * } 20049 * foo() will be verified first for R1=any_scalar_value. During verification it 20050 * will be assumed that bar() already verified successfully and call to bar() 20051 * from foo() will be checked for type match only. Later bar() will be verified 20052 * independently to check that it's safe for R1=any_scalar_value. 
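 *
 * In BPF C source the same shape looks roughly like this (illustration
 * only; the section name and function bodies are made up):
 *
 *	__noinline int bar(int b)
 *	{
 *		return b + 1;
 *	}
 *
 *	SEC("tc")
 *	int foo(struct __sk_buff *skb)
 *	{
 *		return bar(skb->len);
 *	}
 *
 * Non-static bar() gets BTF_FUNC_GLOBAL linkage in func_info, so it is
 * verified on its own for any scalar argument, while the call site in
 * foo() is only checked against bar()'s prototype.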
20053 */ 20054 static int do_check_subprogs(struct bpf_verifier_env *env) 20055 { 20056 struct bpf_prog_aux *aux = env->prog->aux; 20057 int i, ret; 20058 20059 if (!aux->func_info) 20060 return 0; 20061 20062 for (i = 1; i < env->subprog_cnt; i++) { 20063 if (aux->func_info_aux[i].linkage != BTF_FUNC_GLOBAL) 20064 continue; 20065 env->insn_idx = env->subprog_info[i].start; 20066 WARN_ON_ONCE(env->insn_idx == 0); 20067 ret = do_check_common(env, i, env->exception_callback_subprog == i); 20068 if (ret) { 20069 return ret; 20070 } else if (env->log.level & BPF_LOG_LEVEL) { 20071 verbose(env, 20072 "Func#%d is safe for any args that match its prototype\n", 20073 i); 20074 } 20075 } 20076 return 0; 20077 } 20078 20079 static int do_check_main(struct bpf_verifier_env *env) 20080 { 20081 int ret; 20082 20083 env->insn_idx = 0; 20084 ret = do_check_common(env, 0, false); 20085 if (!ret) 20086 env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; 20087 return ret; 20088 } 20089 20090 20091 static void print_verification_stats(struct bpf_verifier_env *env) 20092 { 20093 int i; 20094 20095 if (env->log.level & BPF_LOG_STATS) { 20096 verbose(env, "verification time %lld usec\n", 20097 div_u64(env->verification_time, 1000)); 20098 verbose(env, "stack depth "); 20099 for (i = 0; i < env->subprog_cnt; i++) { 20100 u32 depth = env->subprog_info[i].stack_depth; 20101 20102 verbose(env, "%d", depth); 20103 if (i + 1 < env->subprog_cnt) 20104 verbose(env, "+"); 20105 } 20106 verbose(env, "\n"); 20107 } 20108 verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " 20109 "total_states %d peak_states %d mark_read %d\n", 20110 env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, 20111 env->max_states_per_insn, env->total_states, 20112 env->peak_states, env->longest_mark_read_walk); 20113 } 20114 20115 static int check_struct_ops_btf_id(struct bpf_verifier_env *env) 20116 { 20117 const struct btf_type *t, *func_proto; 20118 const struct bpf_struct_ops *st_ops; 20119 const struct btf_member *member; 20120 struct bpf_prog *prog = env->prog; 20121 u32 btf_id, member_idx; 20122 const char *mname; 20123 20124 if (!prog->gpl_compatible) { 20125 verbose(env, "struct ops programs must have a GPL compatible license\n"); 20126 return -EINVAL; 20127 } 20128 20129 btf_id = prog->aux->attach_btf_id; 20130 st_ops = bpf_struct_ops_find(btf_id); 20131 if (!st_ops) { 20132 verbose(env, "attach_btf_id %u is not a supported struct\n", 20133 btf_id); 20134 return -ENOTSUPP; 20135 } 20136 20137 t = st_ops->type; 20138 member_idx = prog->expected_attach_type; 20139 if (member_idx >= btf_type_vlen(t)) { 20140 verbose(env, "attach to invalid member idx %u of struct %s\n", 20141 member_idx, st_ops->name); 20142 return -EINVAL; 20143 } 20144 20145 member = &btf_type_member(t)[member_idx]; 20146 mname = btf_name_by_offset(btf_vmlinux, member->name_off); 20147 func_proto = btf_type_resolve_func_ptr(btf_vmlinux, member->type, 20148 NULL); 20149 if (!func_proto) { 20150 verbose(env, "attach to invalid member %s(@idx %u) of struct %s\n", 20151 mname, member_idx, st_ops->name); 20152 return -EINVAL; 20153 } 20154 20155 if (st_ops->check_member) { 20156 int err = st_ops->check_member(t, member, prog); 20157 20158 if (err) { 20159 verbose(env, "attach to unsupported member %s of struct %s\n", 20160 mname, st_ops->name); 20161 return err; 20162 } 20163 } 20164 20165 prog->aux->attach_func_proto = func_proto; 20166 prog->aux->attach_func_name = mname; 20167 env->ops = st_ops->verifier_ops; 20168 20169 return 0; 20170 } 20171 
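/* Illustration for check_struct_ops_btf_id() above (not kernel code): a
 * minimal libbpf-style struct_ops object of the kind it validates,
 * modelled on the tcp_congestion_ops selftests; names are examples only.
 *
 *	char _license[] SEC("license") = "GPL";	// gpl_compatible check
 *
 *	SEC("struct_ops/sample_init")
 *	void BPF_PROG(sample_init, struct sock *sk)
 *	{
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops sample_ops = {
 *		.init = (void *)sample_init,
 *		.name = "bpf_sample",
 *	};
 *
 * Each member program's expected_attach_type carries the index of the
 * member it implements, which is what the member_idx lookup above
 * resolves against the struct's BTF.
 */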
#define SECURITY_PREFIX "security_" 20172 20173 static int check_attach_modify_return(unsigned long addr, const char *func_name) 20174 { 20175 if (within_error_injection_list(addr) || 20176 !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1)) 20177 return 0; 20178 20179 return -EINVAL; 20180 } 20181 20182 /* list of non-sleepable functions that are otherwise on 20183 * ALLOW_ERROR_INJECTION list 20184 */ 20185 BTF_SET_START(btf_non_sleepable_error_inject) 20186 /* Three functions below can be called from sleepable and non-sleepable context. 20187 * Assume non-sleepable from bpf safety point of view. 20188 */ 20189 BTF_ID(func, __filemap_add_folio) 20190 BTF_ID(func, should_fail_alloc_page) 20191 BTF_ID(func, should_failslab) 20192 BTF_SET_END(btf_non_sleepable_error_inject) 20193 20194 static int check_non_sleepable_error_inject(u32 btf_id) 20195 { 20196 return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id); 20197 } 20198 20199 int bpf_check_attach_target(struct bpf_verifier_log *log, 20200 const struct bpf_prog *prog, 20201 const struct bpf_prog *tgt_prog, 20202 u32 btf_id, 20203 struct bpf_attach_target_info *tgt_info) 20204 { 20205 bool prog_extension = prog->type == BPF_PROG_TYPE_EXT; 20206 const char prefix[] = "btf_trace_"; 20207 int ret = 0, subprog = -1, i; 20208 const struct btf_type *t; 20209 bool conservative = true; 20210 const char *tname; 20211 struct btf *btf; 20212 long addr = 0; 20213 struct module *mod = NULL; 20214 20215 if (!btf_id) { 20216 bpf_log(log, "Tracing programs must provide btf_id\n"); 20217 return -EINVAL; 20218 } 20219 btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf; 20220 if (!btf) { 20221 bpf_log(log, 20222 "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n"); 20223 return -EINVAL; 20224 } 20225 t = btf_type_by_id(btf, btf_id); 20226 if (!t) { 20227 bpf_log(log, "attach_btf_id %u is invalid\n", btf_id); 20228 return -EINVAL; 20229 } 20230 tname = btf_name_by_offset(btf, t->name_off); 20231 if (!tname) { 20232 bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id); 20233 return -EINVAL; 20234 } 20235 if (tgt_prog) { 20236 struct bpf_prog_aux *aux = tgt_prog->aux; 20237 20238 if (bpf_prog_is_dev_bound(prog->aux) && 20239 !bpf_prog_dev_bound_match(prog, tgt_prog)) { 20240 bpf_log(log, "Target program bound device mismatch"); 20241 return -EINVAL; 20242 } 20243 20244 for (i = 0; i < aux->func_info_cnt; i++) 20245 if (aux->func_info[i].type_id == btf_id) { 20246 subprog = i; 20247 break; 20248 } 20249 if (subprog == -1) { 20250 bpf_log(log, "Subprog %s doesn't exist\n", tname); 20251 return -EINVAL; 20252 } 20253 if (aux->func && aux->func[subprog]->aux->exception_cb) { 20254 bpf_log(log, 20255 "%s programs cannot attach to exception callback\n", 20256 prog_extension ? "Extension" : "FENTRY/FEXIT"); 20257 return -EINVAL; 20258 } 20259 conservative = aux->func_info_aux[subprog].unreliable; 20260 if (prog_extension) { 20261 if (conservative) { 20262 bpf_log(log, 20263 "Cannot replace static functions\n"); 20264 return -EINVAL; 20265 } 20266 if (!prog->jit_requested) { 20267 bpf_log(log, 20268 "Extension programs should be JITed\n"); 20269 return -EINVAL; 20270 } 20271 } 20272 if (!tgt_prog->jited) { 20273 bpf_log(log, "Can attach to only JITed progs\n"); 20274 return -EINVAL; 20275 } 20276 if (tgt_prog->type == prog->type) { 20277 /* Cannot fentry/fexit another fentry/fexit program. 20278 * Cannot attach program extension to another extension. 
20279 * It's ok to attach fentry/fexit to extension program. 20280 */ 20281 bpf_log(log, "Cannot recursively attach\n"); 20282 return -EINVAL; 20283 } 20284 if (tgt_prog->type == BPF_PROG_TYPE_TRACING && 20285 prog_extension && 20286 (tgt_prog->expected_attach_type == BPF_TRACE_FENTRY || 20287 tgt_prog->expected_attach_type == BPF_TRACE_FEXIT)) { 20288 /* Program extensions can extend all program types 20289 * except fentry/fexit. The reason is the following. 20290 * The fentry/fexit programs are used for performance 20291 * analysis, stats and can be attached to any program 20292 * type except themselves. When extension program is 20293 * replacing XDP function it is necessary to allow 20294 * performance analysis of all functions. Both original 20295 * XDP program and its program extension. Hence 20296 * attaching fentry/fexit to BPF_PROG_TYPE_EXT is 20297 * allowed. If extending of fentry/fexit was allowed it 20298 * would be possible to create long call chain 20299 * fentry->extension->fentry->extension beyond 20300 * reasonable stack size. Hence extending fentry is not 20301 * allowed. 20302 */ 20303 bpf_log(log, "Cannot extend fentry/fexit\n"); 20304 return -EINVAL; 20305 } 20306 } else { 20307 if (prog_extension) { 20308 bpf_log(log, "Cannot replace kernel functions\n"); 20309 return -EINVAL; 20310 } 20311 } 20312 20313 switch (prog->expected_attach_type) { 20314 case BPF_TRACE_RAW_TP: 20315 if (tgt_prog) { 20316 bpf_log(log, 20317 "Only FENTRY/FEXIT progs are attachable to another BPF prog\n"); 20318 return -EINVAL; 20319 } 20320 if (!btf_type_is_typedef(t)) { 20321 bpf_log(log, "attach_btf_id %u is not a typedef\n", 20322 btf_id); 20323 return -EINVAL; 20324 } 20325 if (strncmp(prefix, tname, sizeof(prefix) - 1)) { 20326 bpf_log(log, "attach_btf_id %u points to wrong type name %s\n", 20327 btf_id, tname); 20328 return -EINVAL; 20329 } 20330 tname += sizeof(prefix) - 1; 20331 t = btf_type_by_id(btf, t->type); 20332 if (!btf_type_is_ptr(t)) 20333 /* should never happen in valid vmlinux build */ 20334 return -EINVAL; 20335 t = btf_type_by_id(btf, t->type); 20336 if (!btf_type_is_func_proto(t)) 20337 /* should never happen in valid vmlinux build */ 20338 return -EINVAL; 20339 20340 break; 20341 case BPF_TRACE_ITER: 20342 if (!btf_type_is_func(t)) { 20343 bpf_log(log, "attach_btf_id %u is not a function\n", 20344 btf_id); 20345 return -EINVAL; 20346 } 20347 t = btf_type_by_id(btf, t->type); 20348 if (!btf_type_is_func_proto(t)) 20349 return -EINVAL; 20350 ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel); 20351 if (ret) 20352 return ret; 20353 break; 20354 default: 20355 if (!prog_extension) 20356 return -EINVAL; 20357 fallthrough; 20358 case BPF_MODIFY_RETURN: 20359 case BPF_LSM_MAC: 20360 case BPF_LSM_CGROUP: 20361 case BPF_TRACE_FENTRY: 20362 case BPF_TRACE_FEXIT: 20363 if (!btf_type_is_func(t)) { 20364 bpf_log(log, "attach_btf_id %u is not a function\n", 20365 btf_id); 20366 return -EINVAL; 20367 } 20368 if (prog_extension && 20369 btf_check_type_match(log, prog, btf, t)) 20370 return -EINVAL; 20371 t = btf_type_by_id(btf, t->type); 20372 if (!btf_type_is_func_proto(t)) 20373 return -EINVAL; 20374 20375 if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) && 20376 (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type || 20377 prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type)) 20378 return -EINVAL; 20379 20380 if (tgt_prog && conservative) 20381 t = NULL; 20382 20383 ret = btf_distill_func_proto(log, btf, t, tname, 
&tgt_info->fmodel); 20384 if (ret < 0) 20385 return ret; 20386 20387 if (tgt_prog) { 20388 if (subprog == 0) 20389 addr = (long) tgt_prog->bpf_func; 20390 else 20391 addr = (long) tgt_prog->aux->func[subprog]->bpf_func; 20392 } else { 20393 if (btf_is_module(btf)) { 20394 mod = btf_try_get_module(btf); 20395 if (mod) 20396 addr = find_kallsyms_symbol_value(mod, tname); 20397 else 20398 addr = 0; 20399 } else { 20400 addr = kallsyms_lookup_name(tname); 20401 } 20402 if (!addr) { 20403 module_put(mod); 20404 bpf_log(log, 20405 "The address of function %s cannot be found\n", 20406 tname); 20407 return -ENOENT; 20408 } 20409 } 20410 20411 if (prog->aux->sleepable) { 20412 ret = -EINVAL; 20413 switch (prog->type) { 20414 case BPF_PROG_TYPE_TRACING: 20415 20416 /* fentry/fexit/fmod_ret progs can be sleepable if they are 20417 * attached to ALLOW_ERROR_INJECTION and are not in denylist. 20418 */ 20419 if (!check_non_sleepable_error_inject(btf_id) && 20420 within_error_injection_list(addr)) 20421 ret = 0; 20422 /* fentry/fexit/fmod_ret progs can also be sleepable if they are 20423 * in the fmodret id set with the KF_SLEEPABLE flag. 20424 */ 20425 else { 20426 u32 *flags = btf_kfunc_is_modify_return(btf, btf_id, 20427 prog); 20428 20429 if (flags && (*flags & KF_SLEEPABLE)) 20430 ret = 0; 20431 } 20432 break; 20433 case BPF_PROG_TYPE_LSM: 20434 /* LSM progs check that they are attached to bpf_lsm_*() funcs. 20435 * Only some of them are sleepable. 20436 */ 20437 if (bpf_lsm_is_sleepable_hook(btf_id)) 20438 ret = 0; 20439 break; 20440 default: 20441 break; 20442 } 20443 if (ret) { 20444 module_put(mod); 20445 bpf_log(log, "%s is not sleepable\n", tname); 20446 return ret; 20447 } 20448 } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) { 20449 if (tgt_prog) { 20450 module_put(mod); 20451 bpf_log(log, "can't modify return codes of BPF programs\n"); 20452 return -EINVAL; 20453 } 20454 ret = -EINVAL; 20455 if (btf_kfunc_is_modify_return(btf, btf_id, prog) || 20456 !check_attach_modify_return(addr, tname)) 20457 ret = 0; 20458 if (ret) { 20459 module_put(mod); 20460 bpf_log(log, "%s() is not modifiable\n", tname); 20461 return ret; 20462 } 20463 } 20464 20465 break; 20466 } 20467 tgt_info->tgt_addr = addr; 20468 tgt_info->tgt_name = tname; 20469 tgt_info->tgt_type = t; 20470 tgt_info->tgt_mod = mod; 20471 return 0; 20472 } 20473 20474 BTF_SET_START(btf_id_deny) 20475 BTF_ID_UNUSED 20476 #ifdef CONFIG_SMP 20477 BTF_ID(func, migrate_disable) 20478 BTF_ID(func, migrate_enable) 20479 #endif 20480 #if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU 20481 BTF_ID(func, rcu_read_unlock_strict) 20482 #endif 20483 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE) 20484 BTF_ID(func, preempt_count_add) 20485 BTF_ID(func, preempt_count_sub) 20486 #endif 20487 #ifdef CONFIG_PREEMPT_RCU 20488 BTF_ID(func, __rcu_read_lock) 20489 BTF_ID(func, __rcu_read_unlock) 20490 #endif 20491 BTF_SET_END(btf_id_deny) 20492 20493 static bool can_be_sleepable(struct bpf_prog *prog) 20494 { 20495 if (prog->type == BPF_PROG_TYPE_TRACING) { 20496 switch (prog->expected_attach_type) { 20497 case BPF_TRACE_FENTRY: 20498 case BPF_TRACE_FEXIT: 20499 case BPF_MODIFY_RETURN: 20500 case BPF_TRACE_ITER: 20501 return true; 20502 default: 20503 return false; 20504 } 20505 } 20506 return prog->type == BPF_PROG_TYPE_LSM || 20507 prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ || 20508 prog->type == BPF_PROG_TYPE_STRUCT_OPS; 20509 } 20510 20511 static int check_attach_btf_id(struct bpf_verifier_env 
*env) 20512 { 20513 struct bpf_prog *prog = env->prog; 20514 struct bpf_prog *tgt_prog = prog->aux->dst_prog; 20515 struct bpf_attach_target_info tgt_info = {}; 20516 u32 btf_id = prog->aux->attach_btf_id; 20517 struct bpf_trampoline *tr; 20518 int ret; 20519 u64 key; 20520 20521 if (prog->type == BPF_PROG_TYPE_SYSCALL) { 20522 if (prog->aux->sleepable) 20523 /* attach_btf_id checked to be zero already */ 20524 return 0; 20525 verbose(env, "Syscall programs can only be sleepable\n"); 20526 return -EINVAL; 20527 } 20528 20529 if (prog->aux->sleepable && !can_be_sleepable(prog)) { 20530 verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n"); 20531 return -EINVAL; 20532 } 20533 20534 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) 20535 return check_struct_ops_btf_id(env); 20536 20537 if (prog->type != BPF_PROG_TYPE_TRACING && 20538 prog->type != BPF_PROG_TYPE_LSM && 20539 prog->type != BPF_PROG_TYPE_EXT) 20540 return 0; 20541 20542 ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info); 20543 if (ret) 20544 return ret; 20545 20546 if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) { 20547 /* to make freplace equivalent to their targets, they need to 20548 * inherit env->ops and expected_attach_type for the rest of the 20549 * verification 20550 */ 20551 env->ops = bpf_verifier_ops[tgt_prog->type]; 20552 prog->expected_attach_type = tgt_prog->expected_attach_type; 20553 } 20554 20555 /* store info about the attachment target that will be used later */ 20556 prog->aux->attach_func_proto = tgt_info.tgt_type; 20557 prog->aux->attach_func_name = tgt_info.tgt_name; 20558 prog->aux->mod = tgt_info.tgt_mod; 20559 20560 if (tgt_prog) { 20561 prog->aux->saved_dst_prog_type = tgt_prog->type; 20562 prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type; 20563 } 20564 20565 if (prog->expected_attach_type == BPF_TRACE_RAW_TP) { 20566 prog->aux->attach_btf_trace = true; 20567 return 0; 20568 } else if (prog->expected_attach_type == BPF_TRACE_ITER) { 20569 if (!bpf_iter_prog_supported(prog)) 20570 return -EINVAL; 20571 return 0; 20572 } 20573 20574 if (prog->type == BPF_PROG_TYPE_LSM) { 20575 ret = bpf_lsm_verify_prog(&env->log, prog); 20576 if (ret < 0) 20577 return ret; 20578 } else if (prog->type == BPF_PROG_TYPE_TRACING && 20579 btf_id_set_contains(&btf_id_deny, btf_id)) { 20580 return -EINVAL; 20581 } 20582 20583 key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id); 20584 tr = bpf_trampoline_get(key, &tgt_info); 20585 if (!tr) 20586 return -ENOMEM; 20587 20588 if (tgt_prog && tgt_prog->aux->tail_call_reachable) 20589 tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX; 20590 20591 prog->aux->dst_trampoline = tr; 20592 return 0; 20593 } 20594 20595 struct btf *bpf_get_btf_vmlinux(void) 20596 { 20597 if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) { 20598 mutex_lock(&bpf_verifier_lock); 20599 if (!btf_vmlinux) 20600 btf_vmlinux = btf_parse_vmlinux(); 20601 mutex_unlock(&bpf_verifier_lock); 20602 } 20603 return btf_vmlinux; 20604 } 20605 20606 int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size) 20607 { 20608 u64 start_time = ktime_get_ns(); 20609 struct bpf_verifier_env *env; 20610 int i, len, ret = -EINVAL, err; 20611 u32 log_true_size; 20612 bool is_priv; 20613 20614 /* no program is valid */ 20615 if (ARRAY_SIZE(bpf_verifier_ops) == 0) 20616 return -EINVAL; 20617 20618 /* 'struct bpf_verifier_env' can be global, but since it's not small, 20619 * allocate/free it every 
time bpf_check() is called 20620 */ 20621 env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); 20622 if (!env) 20623 return -ENOMEM; 20624 20625 env->bt.env = env; 20626 20627 len = (*prog)->len; 20628 env->insn_aux_data = 20629 vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len)); 20630 ret = -ENOMEM; 20631 if (!env->insn_aux_data) 20632 goto err_free_env; 20633 for (i = 0; i < len; i++) 20634 env->insn_aux_data[i].orig_idx = i; 20635 env->prog = *prog; 20636 env->ops = bpf_verifier_ops[env->prog->type]; 20637 env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel); 20638 is_priv = bpf_capable(); 20639 20640 bpf_get_btf_vmlinux(); 20641 20642 /* grab the mutex to protect few globals used by verifier */ 20643 if (!is_priv) 20644 mutex_lock(&bpf_verifier_lock); 20645 20646 /* user could have requested verbose verifier output 20647 * and supplied buffer to store the verification trace 20648 */ 20649 ret = bpf_vlog_init(&env->log, attr->log_level, 20650 (char __user *) (unsigned long) attr->log_buf, 20651 attr->log_size); 20652 if (ret) 20653 goto err_unlock; 20654 20655 mark_verifier_state_clean(env); 20656 20657 if (IS_ERR(btf_vmlinux)) { 20658 /* Either gcc or pahole or kernel are broken. */ 20659 verbose(env, "in-kernel BTF is malformed\n"); 20660 ret = PTR_ERR(btf_vmlinux); 20661 goto skip_full_check; 20662 } 20663 20664 env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); 20665 if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) 20666 env->strict_alignment = true; 20667 if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) 20668 env->strict_alignment = false; 20669 20670 env->allow_ptr_leaks = bpf_allow_ptr_leaks(); 20671 env->allow_uninit_stack = bpf_allow_uninit_stack(); 20672 env->bypass_spec_v1 = bpf_bypass_spec_v1(); 20673 env->bypass_spec_v4 = bpf_bypass_spec_v4(); 20674 env->bpf_capable = bpf_capable(); 20675 20676 if (is_priv) 20677 env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; 20678 20679 env->explored_states = kvcalloc(state_htab_size(env), 20680 sizeof(struct bpf_verifier_state_list *), 20681 GFP_USER); 20682 ret = -ENOMEM; 20683 if (!env->explored_states) 20684 goto skip_full_check; 20685 20686 ret = check_btf_info_early(env, attr, uattr); 20687 if (ret < 0) 20688 goto skip_full_check; 20689 20690 ret = add_subprog_and_kfunc(env); 20691 if (ret < 0) 20692 goto skip_full_check; 20693 20694 ret = check_subprogs(env); 20695 if (ret < 0) 20696 goto skip_full_check; 20697 20698 ret = check_btf_info(env, attr, uattr); 20699 if (ret < 0) 20700 goto skip_full_check; 20701 20702 ret = check_attach_btf_id(env); 20703 if (ret) 20704 goto skip_full_check; 20705 20706 ret = resolve_pseudo_ldimm64(env); 20707 if (ret < 0) 20708 goto skip_full_check; 20709 20710 if (bpf_prog_is_offloaded(env->prog->aux)) { 20711 ret = bpf_prog_offload_verifier_prep(env->prog); 20712 if (ret) 20713 goto skip_full_check; 20714 } 20715 20716 ret = check_cfg(env); 20717 if (ret < 0) 20718 goto skip_full_check; 20719 20720 ret = do_check_subprogs(env); 20721 ret = ret ?: do_check_main(env); 20722 20723 if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux)) 20724 ret = bpf_prog_offload_finalize(env); 20725 20726 skip_full_check: 20727 kvfree(env->explored_states); 20728 20729 if (ret == 0) 20730 ret = check_max_stack_depth(env); 20731 20732 /* instruction rewrites happen after this point */ 20733 if (ret == 0) 20734 ret = optimize_bpf_loop(env); 20735 20736 if (is_priv) { 20737 if (ret == 0) 20738 opt_hard_wire_dead_code_branches(env); 20739 if (ret == 0) 20740 
ret = opt_remove_dead_code(env); 20741 if (ret == 0) 20742 ret = opt_remove_nops(env); 20743 } else { 20744 if (ret == 0) 20745 sanitize_dead_code(env); 20746 } 20747 20748 if (ret == 0) 20749 /* program is valid, convert *(u32*)(ctx + off) accesses */ 20750 ret = convert_ctx_accesses(env); 20751 20752 if (ret == 0) 20753 ret = do_misc_fixups(env); 20754 20755 /* do 32-bit optimization after insn patching has done so those patched 20756 * insns could be handled correctly. 20757 */ 20758 if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) { 20759 ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); 20760 env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret 20761 : false; 20762 } 20763 20764 if (ret == 0) 20765 ret = fixup_call_args(env); 20766 20767 env->verification_time = ktime_get_ns() - start_time; 20768 print_verification_stats(env); 20769 env->prog->aux->verified_insns = env->insn_processed; 20770 20771 /* preserve original error even if log finalization is successful */ 20772 err = bpf_vlog_finalize(&env->log, &log_true_size); 20773 if (err) 20774 ret = err; 20775 20776 if (uattr_size >= offsetofend(union bpf_attr, log_true_size) && 20777 copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, log_true_size), 20778 &log_true_size, sizeof(log_true_size))) { 20779 ret = -EFAULT; 20780 goto err_release_maps; 20781 } 20782 20783 if (ret) 20784 goto err_release_maps; 20785 20786 if (env->used_map_cnt) { 20787 /* if program passed verifier, update used_maps in bpf_prog_info */ 20788 env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, 20789 sizeof(env->used_maps[0]), 20790 GFP_KERNEL); 20791 20792 if (!env->prog->aux->used_maps) { 20793 ret = -ENOMEM; 20794 goto err_release_maps; 20795 } 20796 20797 memcpy(env->prog->aux->used_maps, env->used_maps, 20798 sizeof(env->used_maps[0]) * env->used_map_cnt); 20799 env->prog->aux->used_map_cnt = env->used_map_cnt; 20800 } 20801 if (env->used_btf_cnt) { 20802 /* if program passed verifier, update used_btfs in bpf_prog_aux */ 20803 env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt, 20804 sizeof(env->used_btfs[0]), 20805 GFP_KERNEL); 20806 if (!env->prog->aux->used_btfs) { 20807 ret = -ENOMEM; 20808 goto err_release_maps; 20809 } 20810 20811 memcpy(env->prog->aux->used_btfs, env->used_btfs, 20812 sizeof(env->used_btfs[0]) * env->used_btf_cnt); 20813 env->prog->aux->used_btf_cnt = env->used_btf_cnt; 20814 } 20815 if (env->used_map_cnt || env->used_btf_cnt) { 20816 /* program is valid. Convert pseudo bpf_ld_imm64 into generic 20817 * bpf_ld_imm64 instructions 20818 */ 20819 convert_pseudo_ld_imm64(env); 20820 } 20821 20822 adjust_btf_func(env); 20823 20824 err_release_maps: 20825 if (!env->prog->aux->used_maps) 20826 /* if we didn't copy map pointers into bpf_prog_info, release 20827 * them now. Otherwise free_used_maps() will release them. 20828 */ 20829 release_maps(env); 20830 if (!env->prog->aux->used_btfs) 20831 release_btfs(env); 20832 20833 /* extension progs temporarily inherit the attach_type of their targets 20834 for verification purposes, so set it back to zero before returning 20835 */ 20836 if (env->prog->type == BPF_PROG_TYPE_EXT) 20837 env->prog->expected_attach_type = 0; 20838 20839 *prog = env->prog; 20840 err_unlock: 20841 if (!is_priv) 20842 mutex_unlock(&bpf_verifier_lock); 20843 vfree(env->insn_aux_data); 20844 err_free_env: 20845 kfree(env); 20846 return ret; 20847 } 20848
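/* Illustration (userspace, not part of the verifier): how the log_level,
 * log_buf, log_size and log_true_size attributes consumed by bpf_check()
 * above are typically supplied through BPF_PROG_LOAD. Error handling is
 * omitted and the instruction buffer is assumed to be built by the caller;
 * this is a sketch, not a complete loader.
 *
 *	#include <linux/bpf.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static char vlog[1 << 20];
 *
 *	int load_prog(const struct bpf_insn *insns, __u32 insn_cnt)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *		attr.insns     = (__u64)(unsigned long)insns;
 *		attr.insn_cnt  = insn_cnt;
 *		attr.license   = (__u64)(unsigned long)"GPL";
 *		attr.log_buf   = (__u64)(unsigned long)vlog;
 *		attr.log_size  = sizeof(vlog);
 *		attr.log_level = 1;	// makes bpf_vlog_init() record the trace
 *
 *		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *	}
 *
 * On failure the verifier's reasoning is left in vlog (truncated to
 * log_size); kernels that support it also report, via log_true_size as
 * copied back above, the size the complete log would have needed.
 */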