/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent.  See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};

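/* A simplified illustration of how the marks interact (instruction
 * indices are made up; see mark_reg_read() for the exact parent walk):
 *
 *   5: r1 = r2		// current state S: r1 gets REG_LIVE_WRITTEN
 *   6: r0 = r1		// r1 gets REG_LIVE_READ64 in S
 *
 * Because S wrote r1 before reading it, the read mark is not
 * propagated to S's parent: whatever value r1 held when S was entered
 * cannot be observed on this path, so it does not constrain pruning.
 */
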
struct bpf_reg_state {
	/* Ordering of fields matters.  See states_equal() */
	enum bpf_reg_type type;
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;
	};
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 */
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, the "fullsock" and "tp"
	 * pointers must be invalidated as well.  To make that possible,
	 * the regs holding "fullsock" and "sk" remember the original
	 * refcounted ptr id (i.e. sk_reg->id) in ref_obj_id, so that the
	 * verifier can reset all regs whose ref_obj_id matches
	 * sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is then used for NULL-marking purposes only.
	 * Once NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id,
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier's perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type.  In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function's
	 * stack while the other points to the caller's stack.  To
	 * differentiate them 'frameno' is used: it is an index into the
	 * bpf_verifier_state->frame[] array pointing to bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition.  The stored value is the insn_idx of the
	 * writing insn.  This is safe because subreg_def is used before any
	 * insn patching, which only happens after main verification finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};

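/* A worked example of how var_off and the ranges above cooperate for a
 * SCALAR_VALUE (numbers illustrative; see adjust_scalar_min_max_vals()
 * in kernel/bpf/verifier.c for the real transfer functions):
 *
 *   r1 = *(u32 *)(r2 + 0)	// unknown: umin_value=0, umax_value=U32_MAX
 *   r1 &= 0xff			// var_off = (value 0x0, mask 0xff),
 *				// hence umin_value=0, umax_value=255
 *
 * With umax_value == 255 the verifier can then prove that, e.g., a
 * 1-byte load at (value_ptr + r1) from a 256-byte map value stays in
 * bounds.
 */
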
enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};

struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (e.g., via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred.  This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;

	/* The following fields should be last.  See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
	u32 prev_idx;
	u32 idx;
};

#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit.
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 *     One is the fallthrough branch with branches==1 and the other
	 *     state is pushed onto the stack (to be explored later), also
	 *     with branches==1.  The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack() do_check() will resume at the second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop.  If such a state is exactly equal to the current
	 * state it's an infinite loop.  Note that states_equal() checks for
	 * state equivalence, so two states being 'states_equal' does not
	 * mean an infinite loop.  The exact comparison is provided by
	 * states_maybe_looping().  It is a stronger pre-check and much
	 * faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops, or the
	 * loop iteration count may be too high.
	 * In such cases the BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;
	u32 active_spin_lock;
	bool speculative;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* jmp history recorded from first to last.
	 * backtracking uses it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops it can go up to ~40.
	 */
	struct bpf_idx_pair *jmp_history;
	u32 jmp_history_cnt;
};

#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))

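/* A usage sketch for the iterator above (illustrative only, not a
 * fragment of verifier.c): invalidate every spilled register in one
 * frame, e.g. after the object they point to has been released.
 *
 *	struct bpf_reg_state *reg;
 *	int i;
 *
 *	bpf_for_each_spilled_reg(i, frame, reg) {
 *		if (!reg)
 *			continue;	// slot holds no spilled register
 *		__mark_reg_unknown(env, reg);
 *	}
 *
 * 'reg' is NULL for STACK_INVALID/STACK_MISC/STACK_ZERO slots, so the
 * NULL check inside the loop body is mandatory.
 */
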
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC		1U
#define BPF_ALU_SANITIZE_DST		2U
#define BPF_ALU_NEG_VALUE		(1U << 2)
#define BPF_ALU_NON_POINTER		(1U << 3)
#define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC |	\
					 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_ptr_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
	};
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	int sanitize_stack_off; /* stack slot to be cleared */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool zext_dst; /* this insn zero extends dst reg */
	u8 alu_state; /* used in combination with alu_limit */

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool prune_point;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	u32 level;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
	char __user *ubuf;
	u32 len_used;
	u32 len_total;
};

static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
	return log->len_used >= log->len_total - 1;
}

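/* Sketch of the intended logging flow (a summary, not a normative
 * description; see bpf_verifier_vlog() in kernel/bpf/verifier.c):
 * output is first formatted into the kernel-side scratch buffer kbuf,
 * then copied out to the user-supplied buffer ubuf at offset len_used.
 * One byte of ubuf is reserved for the terminating NUL, which is why
 * bpf_verifier_log_full() above compares against len_total - 1 rather
 * than len_total.
 */
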
#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log &&
		((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
		 log->level == BPF_LOG_KERNEL);
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* the idx into the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	bool has_tail_call;
	bool tail_call_reachable;
	bool has_ld_abs;
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool allow_ptr_leaks;
	bool allow_uninit_stack;
	bool allow_ptr_to_map_access;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is the peak number of states.  This number dominates kernel
	 * memory consumption during verification.
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ctx_reg(struct bpf_verifier_env *env,
		  const struct bpf_reg_state *reg, int regno);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
		  u32 regno, u32 mem_size);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

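/* A worked example of the key layout above (the numbers are made up):
 * with a target prog whose aux->id is 7 and btf_id 0x1234, the key is
 *
 *	(7ULL << 32) | 0x1234			= 0x0000000700001234
 *
 * whereas attaching to btf_id 0x1234 of a BTF object with id 1 gives
 *
 *	(1ULL << 32) | 0x80000000 | 0x1234	= 0x0000000180001234
 *
 * Bit 31 tells the two cases apart, so (assuming btf_id stays below
 * 1U << 31) keys for prog-targeted and BTF-targeted attachments cannot
 * collide.
 */
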
int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);

#endif /* _LINUX_BPF_VERIFIER_H */