/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent. See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
        REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
        REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
        REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
        REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
        REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
        REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
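
/* Illustration (a simplified sketch, not lifted from the verifier itself) of
 * how the marks above interact along one branch:
 *
 *   r1 = 7;        // r1 gets REG_LIVE_WRITTEN in the current state
 *   r2 = r1;       // this read of r1 is screened off by the local write
 *                  // mark and does not propagate to parent states
 *   r0 = r6;       // r6 was never written in this state, so
 *                  // REG_LIVE_READ64 propagates up the parentage chain
 *   w0 += w7;      // a 32-bit (subreg) read of r7 propagates only
 *                  // REG_LIVE_READ32
 */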

struct bpf_reg_state {
        /* Ordering of fields matters. See states_equal() */
        enum bpf_reg_type type;
        union {
                /* valid when type == PTR_TO_PACKET */
                u16 range;

                /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
                 *   PTR_TO_MAP_VALUE_OR_NULL
                 */
                struct bpf_map *map_ptr;

                u32 btf_id; /* for PTR_TO_BTF_ID */

                /* Max size from any of the above. */
                unsigned long raw;
        };
        /* Fixed part of pointer offset, pointer types only */
        s32 off;
        /* For PTR_TO_PACKET, used to find other pointers with the same variable
         * offset, so they can share range knowledge.
         * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
         * came from, when one is tested for != NULL.
         * For PTR_TO_SOCKET this is used to share which pointers retain the
         * same reference to the socket, to determine proper reference freeing.
         */
        u32 id;
        /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
         * from a pointer-cast helper, bpf_sk_fullsock() and
         * bpf_tcp_sock().
         *
         * Consider the following where "sk" is a reference counted
         * pointer returned from "sk = bpf_sk_lookup_tcp();":
         *
         * 1: sk = bpf_sk_lookup_tcp();
         * 2: if (!sk) { return 0; }
         * 3: fullsock = bpf_sk_fullsock(sk);
         * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
         * 5: tp = bpf_tcp_sock(fullsock);
         * 6: if (!tp) { bpf_sk_release(sk); return 0; }
         * 7: bpf_sk_release(sk);
         * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain
         *
         * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
         * the "tp" ptr must be invalidated as well. In order to do that,
         * the regs holding "fullsock" and "sk" need to remember
         * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
         * such that the verifier can reset all regs which have
         * ref_obj_id matching the sk_reg->id.
         *
         * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
         * sk_reg->id is then kept for NULL-marking purposes only.
         * After NULL-marking is done, sk_reg->id can be reset to 0.
         *
         * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
         * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
         *
         * After "tp = bpf_tcp_sock(fullsock);" at line 5,
         * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
         * which is the same as sk_reg->ref_obj_id.
         *
         * From the verifier's perspective, if sk, fullsock and tp
         * are not NULL, they are the same ptr with different
         * reg->type. In particular, bpf_sk_release(tp) is also
         * allowed and has the same effect as bpf_sk_release(sk).
         */
        u32 ref_obj_id;
        /* For scalar types (SCALAR_VALUE), this represents our knowledge of
         * the actual value.
         * For pointer types, this represents the variable part of the offset
         * from the pointed-to object, and is shared with all bpf_reg_states
         * with the same id as us.
         */
        struct tnum var_off;
        /* Used to determine if any memory access using this register will
         * result in a bad access.
         * These refer to the same value as var_off, not necessarily the actual
         * contents of the register.
         */
        s64 smin_value; /* minimum possible (s64)value */
        s64 smax_value; /* maximum possible (s64)value */
        u64 umin_value; /* minimum possible (u64)value */
        u64 umax_value; /* maximum possible (u64)value */
        s32 s32_min_value; /* minimum possible (s32)value */
        s32 s32_max_value; /* maximum possible (s32)value */
        u32 u32_min_value; /* minimum possible (u32)value */
        u32 u32_max_value; /* maximum possible (u32)value */
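        /* Example (a sketch, not output copied from any kernel version):
         * after "r0 &= 0xff" on a fully unknown scalar, the tracked state is
         *
         *   var_off    = (value = 0x0, mask = 0xff)  // low 8 bits unknown
         *   umin_value = 0,   umax_value = 255
         *   smin_value = 0,   smax_value = 255
         *   and the u32/s32 bounds are likewise [0, 255],
         *
         * i.e. the bounds are derived from var_off and describe the same
         * abstract value, as the comment above notes.
         */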
        /* parentage chain for liveness checking */
        struct bpf_reg_state *parent;
        /* Inside the callee two registers can be both PTR_TO_STACK like
         * R1=fp-8 and R2=fp-8, but one of them points to this function stack
         * while another to the caller's stack. To differentiate them 'frameno'
         * is used which is an index in bpf_verifier_state->frame[] array
         * pointing to bpf_func_state.
         */
        u32 frameno;
        /* Tracks subreg definition. The stored value is the insn_idx of the
         * writing insn. This is safe because subreg_def is used before any insn
         * patching which only happens after main verification finished.
         */
        s32 subreg_def;
        enum bpf_reg_liveness live;
        /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
        bool precise;
};

enum bpf_stack_slot_type {
        STACK_INVALID,    /* nothing was stored in this stack slot */
        STACK_SPILL,      /* register spilled into stack */
        STACK_MISC,       /* BPF program wrote some data into this slot */
        STACK_ZERO,       /* BPF program wrote constant zero */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

struct bpf_stack_state {
        struct bpf_reg_state spilled_ptr;
        u8 slot_type[BPF_REG_SIZE];
};
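
/* Example (a sketch of the usual encoding; see check_stack_write() in
 * kernel/bpf/verifier.c for the authoritative logic): spilling a pointer
 * register with
 *
 *   *(u64 *)(r10 - 8) = r1
 *
 * copies r1's bpf_reg_state into spilled_ptr and sets all eight
 * slot_type[] bytes of that slot to STACK_SPILL. A partial store of
 * unknown data, e.g. "*(u32 *)(r10 - 16) = r2", marks only the four
 * written bytes STACK_MISC, while storing a register known to be zero
 * marks the written bytes STACK_ZERO so a later fill can still be
 * recognized as a constant zero.
 */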

struct bpf_reference_state {
        /* Track each reference created with a unique id, even if the same
         * instruction creates the reference multiple times (e.g., via CALL).
         */
        int id;
        /* Instruction where the allocation of this reference occurred. This
         * is used purely to inform the user of a reference leak.
         */
        int insn_idx;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
        struct bpf_reg_state regs[MAX_BPF_REG];
        /* index of call instruction that called into this func */
        int callsite;
        /* stack frame number of this function state from pov of
         * enclosing bpf_verifier_state.
         * 0 = main function, 1 = first callee.
         */
        u32 frameno;
        /* subprog number == index within subprog_stack_depth
         * zero == main subprog
         */
        u32 subprogno;

        /* The following fields should be last. See copy_func_state() */
        int acquired_refs;
        struct bpf_reference_state *refs;
        int allocated_stack;
        struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
        u32 prev_idx;
        u32 idx;
};

#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
        /* call stack tracking */
        struct bpf_func_state *frame[MAX_CALL_FRAMES];
        struct bpf_verifier_state *parent;
        /*
         * 'branches' field is the number of branches left to explore:
         * 0 - all possible paths from this state reached bpf_exit or
         *     were safely pruned
         * 1 - at least one path is being explored.
         *     This state hasn't reached bpf_exit
         * 2 - at least two paths are being explored.
         *     This state is an immediate parent of two children.
         *     One is fallthrough branch with branches==1 and another
         *     state is pushed into stack (to be explored later) also with
         *     branches==1. The parent of this state has branches==1.
         * The verifier state tree connected via 'parent' pointer looks like:
         * 1
         * 1
         * 2 -> 1 (first 'if' pushed into stack)
         * 1
         * 2 -> 1 (second 'if' pushed into stack)
         * 1
         * 1
         * 1 bpf_exit.
         *
         * Once do_check() reaches bpf_exit, it calls update_branch_counts()
         * and the verifier state tree will look like:
         * 1
         * 1
         * 2 -> 1 (first 'if' pushed into stack)
         * 1
         * 1 -> 1 (second 'if' pushed into stack)
         * 0
         * 0
         * 0 bpf_exit.
         * After pop_stack() the do_check() will resume at second 'if'.
         *
         * If is_state_visited() sees a state with branches > 0 it means
         * there is a loop. If such state is exactly equal to the current state
         * it's an infinite loop. Note states_equal() checks for states
         * equivalency, so two states being 'states_equal' does not mean
         * infinite loop. The exact comparison is provided by
         * states_maybe_looping() function. It's a stronger pre-check and
         * much faster than states_equal().
         *
         * This algorithm may not find all possible infinite loops or
         * loop iteration count may be too high.
         * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
         */
        u32 branches;
        u32 insn_idx;
        u32 curframe;
        u32 active_spin_lock;
        bool speculative;

        /* first and last insn idx of this verifier state */
        u32 first_insn_idx;
        u32 last_insn_idx;
        /* jmp history recorded from first to last.
         * backtracking is using it to go from last to first.
         * For most states jmp_history_cnt is [0-3].
         * For loops it can go up to ~40.
         */
        struct bpf_idx_pair *jmp_history;
        u32 jmp_history_cnt;
};

#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))
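
/* Example use of the iterator above (a sketch modeled on the invalidation
 * loops in kernel/bpf/verifier.c; "invalidate_reg" is a hypothetical
 * callback, not a kernel function): reset every spilled register whose
 * ref_obj_id matches a released reference.
 *
 *	struct bpf_reg_state *reg;
 *	int i;
 *
 *	bpf_for_each_spilled_reg(i, state, reg) {
 *		if (!reg)
 *			continue;	// slot i holds no spilled register
 *		if (reg->ref_obj_id == ref_obj_id)
 *			invalidate_reg(reg);
 *	}
 */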

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
        struct bpf_verifier_state state;
        struct bpf_verifier_state_list *next;
        int miss_cnt, hit_cnt;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC	1U
#define BPF_ALU_SANITIZE_DST	2U
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | \
				 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
        union {
                enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
                unsigned long map_ptr_state;	/* pointer/poison value for maps */
                s32 call_imm;			/* saved imm field of call insn */
                u32 alu_limit;			/* limit for add/sub register with pointer */
                struct {
                        u32 map_index;		/* index into used_maps[] */
                        u32 map_off;		/* offset from value base address */
                };
        };
        u64 map_key_state; /* constant (32 bit) key tracking for maps */
        int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
        int sanitize_stack_off; /* stack slot to be cleared */
        u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
        bool zext_dst; /* this insn zero extends dst reg */
        u8 alu_state; /* used in combination with alu_limit */

        /* below fields are initialized once */
        unsigned int orig_idx; /* original instruction index */
        bool prune_point;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
        u32 level;
        char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
        char __user *ubuf;
        u32 len_used;
        u32 len_total;
};

static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
        return log->len_used >= log->len_total - 1;
}

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
        return (log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
                log->level == BPF_LOG_KERNEL;
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
        /* 'start' has to be the first field otherwise find_subprog() won't work */
        u32 start; /* insn idx of function entry point */
        u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
        u16 stack_depth; /* max. stack depth used by this function */
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
        u32 insn_idx;
        u32 prev_insn_idx;
        struct bpf_prog *prog;		/* eBPF program being verified */
        const struct bpf_verifier_ops *ops;
        struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
        int stack_size;			/* number of states to be processed */
        bool strict_alignment;		/* perform strict pointer alignment checks */
        bool test_state_freq;		/* test verifier with different pruning frequency */
        struct bpf_verifier_state *cur_state; /* current verifier state */
        struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
        struct bpf_verifier_state_list *free_list;
        struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
        u32 used_map_cnt;		/* number of used maps */
        u32 id_gen;			/* used to generate unique reg IDs */
        bool allow_ptr_leaks;
        bool seen_direct_write;
        struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
        const struct bpf_line_info *prev_linfo;
        struct bpf_verifier_log log;
        struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
        struct {
                int *insn_state;
                int *insn_stack;
                int cur_stack;
        } cfg;
        u32 pass_cnt; /* number of times do_check() was called */
        u32 subprog_cnt;
        /* number of instructions analyzed by the verifier */
        u32 prev_insn_processed, insn_processed;
        /* number of jmps, calls, exits analyzed so far */
        u32 prev_jmps_processed, jmps_processed;
        /* total verification time */
        u64 verification_time;
        /* maximum number of verifier states kept in 'branching' instructions */
        u32 max_states_per_insn;
        /* total number of allocated verifier states */
        u32 total_states;
        /* some states are freed during program analysis.
         * this is peak number of states. this number dominates kernel
         * memory consumption during verification
         */
        u32 peak_states;
        /* longest register parentage chain walked for liveness marking */
        u32 longest_mark_read_walk;
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
        struct bpf_verifier_state *cur = env->cur_state;

        return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
        return cur_func(env)->regs;
}
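
/* Example use of the accessors above (a sketch; "env" is the environment of
 * the current bpf_check() call): inspect R1 of the innermost call frame.
 *
 *	struct bpf_reg_state *regs = cur_regs(env);
 *
 *	if (regs[BPF_REG_1].type == PTR_TO_CTX)
 *		...	// R1 in the current frame holds the context pointer
 */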

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ctx_reg(struct bpf_verifier_env *env,
		  const struct bpf_reg_state *reg, int regno);

#endif /* _LINUX_BPF_VERIFIER_H */