125763b3cSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
258e2af8bSJakub Kicinski /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
358e2af8bSJakub Kicinski */
458e2af8bSJakub Kicinski #ifndef _LINUX_BPF_VERIFIER_H
558e2af8bSJakub Kicinski #define _LINUX_BPF_VERIFIER_H 1
658e2af8bSJakub Kicinski
758e2af8bSJakub Kicinski #include <linux/bpf.h> /* for enum bpf_reg_type */
822dc4a0fSAndrii Nakryiko #include <linux/btf.h> /* for struct btf and btf_id() */
958e2af8bSJakub Kicinski #include <linux/filter.h> /* for MAX_BPF_STACK */
10f1174f77SEdward Cree #include <linux/tnum.h>
1158e2af8bSJakub Kicinski
12b03c9f9fSEdward Cree /* Maximum variable offset umax_value permitted when resolving memory accesses.
13b03c9f9fSEdward Cree * In practice this is far bigger than any realistic pointer offset; this limit
14b03c9f9fSEdward Cree * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
1548461135SJosef Bacik */
16bb7f0f98SAlexei Starovoitov #define BPF_MAX_VAR_OFF (1 << 29)
17b03c9f9fSEdward Cree /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
18b03c9f9fSEdward Cree * that converting umax_value to int cannot overflow.
19b03c9f9fSEdward Cree */
20bb7f0f98SAlexei Starovoitov #define BPF_MAX_VAR_SIZ (1 << 29)
21d9439c21SAndrii Nakryiko /* size of tmp_str_buf in bpf_verifier.
22d9439c21SAndrii Nakryiko * we need at least 306 bytes to fit full stack mask representation
23d9439c21SAndrii Nakryiko * (in the "-8,-16,...,-512" form)
24d9439c21SAndrii Nakryiko */
25d9439c21SAndrii Nakryiko #define TMP_STR_BUF_LEN 320
266f606ffdSMartin KaFai Lau /* Patch buffer size */
27940ce73bSMartin KaFai Lau #define INSN_BUF_SIZE 32
2848461135SJosef Bacik
298e9cd9ceSEdward Cree /* Liveness marks, used for registers and spilled-regs (in stack slots).
308e9cd9ceSEdward Cree * Read marks propagate upwards until they find a write mark; they record that
318e9cd9ceSEdward Cree * "one of this state's descendants read this reg" (and therefore the reg is
328e9cd9ceSEdward Cree * relevant for states_equal() checks).
338e9cd9ceSEdward Cree * Write marks collect downwards and do not propagate; they record that "the
348e9cd9ceSEdward Cree * straight-line code that reached this state (from its parent) wrote this reg"
358e9cd9ceSEdward Cree * (and therefore that reads propagated from this state or its descendants
368e9cd9ceSEdward Cree * should not propagate to its parent).
378e9cd9ceSEdward Cree * A state with a write mark can receive read marks; it just won't propagate
388e9cd9ceSEdward Cree * them to its parent, since the write mark is a property, not of the state,
398e9cd9ceSEdward Cree * but of the link between it and its parent. See mark_reg_read() and
408e9cd9ceSEdward Cree * mark_stack_slot_read() in kernel/bpf/verifier.c.
418e9cd9ceSEdward Cree */
42dc503a8aSEdward Cree enum bpf_reg_liveness {
43dc503a8aSEdward Cree REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
445327ed3dSJiong Wang REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
455327ed3dSJiong Wang REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
465327ed3dSJiong Wang REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
475327ed3dSJiong Wang REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
485327ed3dSJiong Wang REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
49dc503a8aSEdward Cree };
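/* A short illustration of the 32/64-bit read split above (a sketch, not text
 * from the original sources): for BPF code like
 *
 *	w2 = w6;	// only the low 32 bits of r6 matter -> REG_LIVE_READ32
 *	r3 = r6;	// the full 64-bit value matters      -> REG_LIVE_READ64
 *
 * only the second use forces a REG_LIVE_READ64 mark to propagate up the
 * parentage chain. This distinction is what later lets the verifier reason
 * about whether the upper 32 bits of a register are actually observed
 * (see subreg_def and zext_dst below).
 */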
50dc503a8aSEdward Cree
51215bf496SAndrii Nakryiko #define ITER_PREFIX "bpf_iter_"
52215bf496SAndrii Nakryiko
5306accc87SAndrii Nakryiko enum bpf_iter_state {
5406accc87SAndrii Nakryiko BPF_ITER_STATE_INVALID, /* for non-first slot */
5506accc87SAndrii Nakryiko BPF_ITER_STATE_ACTIVE,
5606accc87SAndrii Nakryiko BPF_ITER_STATE_DRAINED,
5706accc87SAndrii Nakryiko };
5806accc87SAndrii Nakryiko
5958e2af8bSJakub Kicinski struct bpf_reg_state {
60679c782dSEdward Cree /* Ordering of fields matters. See states_equal() */
6158e2af8bSJakub Kicinski enum bpf_reg_type type;
6298d7ca37SAlexei Starovoitov /*
6398d7ca37SAlexei Starovoitov * Fixed part of pointer offset, pointer types only.
6498d7ca37SAlexei Starovoitov * Or constant delta between "linked" scalars with the same ID.
6598d7ca37SAlexei Starovoitov */
6622dc4a0fSAndrii Nakryiko s32 off;
6758e2af8bSJakub Kicinski union {
68f1174f77SEdward Cree /* valid when type == PTR_TO_PACKET */
696d94e741SAlexei Starovoitov int range;
7058e2af8bSJakub Kicinski
7158e2af8bSJakub Kicinski /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
7258e2af8bSJakub Kicinski * PTR_TO_MAP_VALUE_OR_NULL
7358e2af8bSJakub Kicinski */
743e8ce298SAlexei Starovoitov struct {
7558e2af8bSJakub Kicinski struct bpf_map *map_ptr;
763e8ce298SAlexei Starovoitov /* To distinguish map lookups from an outer map,
773e8ce298SAlexei Starovoitov * the map_uid is non-zero for registers
783e8ce298SAlexei Starovoitov * pointing to inner maps.
793e8ce298SAlexei Starovoitov */
803e8ce298SAlexei Starovoitov u32 map_uid;
813e8ce298SAlexei Starovoitov };
820962590eSDaniel Borkmann
8322dc4a0fSAndrii Nakryiko /* for PTR_TO_BTF_ID */
8422dc4a0fSAndrii Nakryiko struct {
8522dc4a0fSAndrii Nakryiko struct btf *btf;
8622dc4a0fSAndrii Nakryiko u32 btf_id;
8722dc4a0fSAndrii Nakryiko };
889e15db66SAlexei Starovoitov
89f8064ab9SKumar Kartikeya Dwivedi struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
90f8064ab9SKumar Kartikeya Dwivedi u32 mem_size;
91f8064ab9SKumar Kartikeya Dwivedi u32 dynptr_id; /* for dynptr slices */
92f8064ab9SKumar Kartikeya Dwivedi };
93457f4436SAndrii Nakryiko
9497e03f52SJoanne Koong /* For dynptr stack slots */
9597e03f52SJoanne Koong struct {
9697e03f52SJoanne Koong enum bpf_dynptr_type type;
9797e03f52SJoanne Koong /* A dynptr is 16 bytes so it takes up 2 stack slots.
9897e03f52SJoanne Koong * We need to track which slot is the first slot
9997e03f52SJoanne Koong * to protect against cases where the user may try to
10097e03f52SJoanne Koong * pass in an address starting at the second slot of the
10197e03f52SJoanne Koong * dynptr.
10297e03f52SJoanne Koong */
10397e03f52SJoanne Koong bool first_slot;
10497e03f52SJoanne Koong } dynptr;
10597e03f52SJoanne Koong
10606accc87SAndrii Nakryiko /* For bpf_iter stack slots */
10706accc87SAndrii Nakryiko struct {
10806accc87SAndrii Nakryiko /* BTF container and BTF type ID describing
10906accc87SAndrii Nakryiko * struct bpf_iter_<type> of an iterator state
11006accc87SAndrii Nakryiko */
11106accc87SAndrii Nakryiko struct btf *btf;
11206accc87SAndrii Nakryiko u32 btf_id;
11306accc87SAndrii Nakryiko /* packing following two fields to fit iter state into 16 bytes */
11406accc87SAndrii Nakryiko enum bpf_iter_state state:2;
11506accc87SAndrii Nakryiko int depth:30;
11606accc87SAndrii Nakryiko } iter;
11706accc87SAndrii Nakryiko
1180de20461SKumar Kartikeya Dwivedi /* For irq stack slots */
1190de20461SKumar Kartikeya Dwivedi struct {
1200de20461SKumar Kartikeya Dwivedi enum {
1210de20461SKumar Kartikeya Dwivedi IRQ_NATIVE_KFUNC,
1220de20461SKumar Kartikeya Dwivedi IRQ_LOCK_KFUNC,
1230de20461SKumar Kartikeya Dwivedi } kfunc_class;
1240de20461SKumar Kartikeya Dwivedi } irq;
1250de20461SKumar Kartikeya Dwivedi
1260962590eSDaniel Borkmann /* Max size from any of the above. */
12722dc4a0fSAndrii Nakryiko struct {
12822dc4a0fSAndrii Nakryiko unsigned long raw1;
12922dc4a0fSAndrii Nakryiko unsigned long raw2;
13022dc4a0fSAndrii Nakryiko } raw;
13169c087baSYonghong Song
13269c087baSYonghong Song u32 subprogno; /* for PTR_TO_FUNC */
13358e2af8bSJakub Kicinski };
134a73bf9f2SAndrii Nakryiko /* For scalar types (SCALAR_VALUE), this represents our knowledge of
135a73bf9f2SAndrii Nakryiko * the actual value.
136a73bf9f2SAndrii Nakryiko * For pointer types, this represents the variable part of the offset
137a73bf9f2SAndrii Nakryiko * from the pointed-to object, and is shared with all bpf_reg_states
138a73bf9f2SAndrii Nakryiko * with the same id as us.
139a73bf9f2SAndrii Nakryiko */
140a73bf9f2SAndrii Nakryiko struct tnum var_off;
141a73bf9f2SAndrii Nakryiko /* Used to determine if any memory access using this register will
142a73bf9f2SAndrii Nakryiko * result in a bad access.
143a73bf9f2SAndrii Nakryiko * These refer to the same value as var_off, not necessarily the actual
144a73bf9f2SAndrii Nakryiko * contents of the register.
145a73bf9f2SAndrii Nakryiko */
146a73bf9f2SAndrii Nakryiko s64 smin_value; /* minimum possible (s64)value */
147a73bf9f2SAndrii Nakryiko s64 smax_value; /* maximum possible (s64)value */
148a73bf9f2SAndrii Nakryiko u64 umin_value; /* minimum possible (u64)value */
149a73bf9f2SAndrii Nakryiko u64 umax_value; /* maximum possible (u64)value */
150a73bf9f2SAndrii Nakryiko s32 s32_min_value; /* minimum possible (s32)value */
151a73bf9f2SAndrii Nakryiko s32 s32_max_value; /* maximum possible (s32)value */
152a73bf9f2SAndrii Nakryiko u32 u32_min_value; /* minimum possible (u32)value */
153a73bf9f2SAndrii Nakryiko u32 u32_max_value; /* maximum possible (u32)value */
154f1174f77SEdward Cree /* For PTR_TO_PACKET, used to find other pointers with the same variable
155f1174f77SEdward Cree * offset, so they can share range knowledge.
156f1174f77SEdward Cree * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
157f1174f77SEdward Cree * came from, when one is tested for != NULL.
158457f4436SAndrii Nakryiko * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
159457f4436SAndrii Nakryiko * for the purpose of tracking that it's freed.
160c64b7983SJoe Stringer * For PTR_TO_SOCKET this is used to share which pointers retain the
161c64b7983SJoe Stringer * same reference to the socket, to determine proper reference freeing.
162bc34dee6SJoanne Koong * For stack slots that are dynptrs, this is used to track references to
163bc34dee6SJoanne Koong * the dynptr to determine proper reference freeing.
16406accc87SAndrii Nakryiko * Similarly to dynptrs, we use ID to track "belonging" of a reference
16506accc87SAndrii Nakryiko * to a specific instance of bpf_iter.
166f1174f77SEdward Cree */
16798d7ca37SAlexei Starovoitov /*
16898d7ca37SAlexei Starovoitov * Upper bit of ID is used to remember relationship between "linked"
16998d7ca37SAlexei Starovoitov * registers. Example:
17098d7ca37SAlexei Starovoitov * r1 = r2; both will have r1->id == r2->id == N
17198d7ca37SAlexei Starovoitov * r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10
17298d7ca37SAlexei Starovoitov */
17398d7ca37SAlexei Starovoitov #define BPF_ADD_CONST (1U << 31)
174d2a4dd37SAlexei Starovoitov u32 id;
1751b986589SMartin KaFai Lau /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
1761b986589SMartin KaFai Lau * from a pointer-cast helper, bpf_sk_fullsock() and
1771b986589SMartin KaFai Lau * bpf_tcp_sock().
1781b986589SMartin KaFai Lau *
1791b986589SMartin KaFai Lau * Consider the following where "sk" is a reference counted
1801b986589SMartin KaFai Lau * pointer returned from "sk = bpf_sk_lookup_tcp();":
1811b986589SMartin KaFai Lau *
1821b986589SMartin KaFai Lau * 1: sk = bpf_sk_lookup_tcp();
1831b986589SMartin KaFai Lau * 2: if (!sk) { return 0; }
1841b986589SMartin KaFai Lau * 3: fullsock = bpf_sk_fullsock(sk);
1851b986589SMartin KaFai Lau * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
1861b986589SMartin KaFai Lau * 5: tp = bpf_tcp_sock(fullsock);
1871b986589SMartin KaFai Lau * 6: if (!tp) { bpf_sk_release(sk); return 0; }
1881b986589SMartin KaFai Lau * 7: bpf_sk_release(sk);
1891b986589SMartin KaFai Lau * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain
1901b986589SMartin KaFai Lau *
1911b986589SMartin KaFai Lau * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
1921b986589SMartin KaFai Lau * "tp" ptr should be invalidated also. In order to do that,
1931b986589SMartin KaFai Lau * the regs holding "fullsock" and "sk" need to remember
1941b986589SMartin KaFai Lau * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
1951b986589SMartin KaFai Lau * such that the verifier can reset all regs which have
1961b986589SMartin KaFai Lau * ref_obj_id matching the sk_reg->id.
1971b986589SMartin KaFai Lau *
1981b986589SMartin KaFai Lau * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
1991b986589SMartin KaFai Lau * sk_reg->id is kept for NULL-marking purposes only.
2001b986589SMartin KaFai Lau * After NULL-marking is done, sk_reg->id can be reset to 0.
2011b986589SMartin KaFai Lau *
2021b986589SMartin KaFai Lau * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
2031b986589SMartin KaFai Lau * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
2041b986589SMartin KaFai Lau *
2051b986589SMartin KaFai Lau * After "tp = bpf_tcp_sock(fullsock);" at line 5,
2061b986589SMartin KaFai Lau * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
2071b986589SMartin KaFai Lau * which is the same as sk_reg->ref_obj_id.
2081b986589SMartin KaFai Lau *
2091b986589SMartin KaFai Lau * From the verifier perspective, if sk, fullsock and tp
2101b986589SMartin KaFai Lau * are not NULL, they are the same ptr with different
2111b986589SMartin KaFai Lau * reg->type. In particular, bpf_sk_release(tp) is also
2121b986589SMartin KaFai Lau * allowed and has the same effect as bpf_sk_release(sk).
2131b986589SMartin KaFai Lau */
2141b986589SMartin KaFai Lau u32 ref_obj_id;
215679c782dSEdward Cree /* parentage chain for liveness checking */
216679c782dSEdward Cree struct bpf_reg_state *parent;
217f4d7e40aSAlexei Starovoitov /* Inside the callee two registers can be both PTR_TO_STACK like
218f4d7e40aSAlexei Starovoitov * R1=fp-8 and R2=fp-8, but one of them points to this function stack
219f4d7e40aSAlexei Starovoitov * while another to the caller's stack. To differentiate them 'frameno'
220f4d7e40aSAlexei Starovoitov * is used which is an index in bpf_verifier_state->frame[] array
221f4d7e40aSAlexei Starovoitov * pointing to bpf_func_state.
222f4d7e40aSAlexei Starovoitov */
223f4d7e40aSAlexei Starovoitov u32 frameno;
2245327ed3dSJiong Wang /* Tracks subreg definition. The stored value is the insn_idx of the
2255327ed3dSJiong Wang * writing insn. This is safe because subreg_def is used before any insn
2265327ed3dSJiong Wang * patching which only happens after main verification finished.
2275327ed3dSJiong Wang */
2285327ed3dSJiong Wang s32 subreg_def;
229dc503a8aSEdward Cree enum bpf_reg_liveness live;
230b5dc0163SAlexei Starovoitov /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
231b5dc0163SAlexei Starovoitov bool precise;
23258e2af8bSJakub Kicinski };
23358e2af8bSJakub Kicinski
23458e2af8bSJakub Kicinski enum bpf_stack_slot_type {
23558e2af8bSJakub Kicinski STACK_INVALID, /* nothing was stored in this stack slot */
23658e2af8bSJakub Kicinski STACK_SPILL, /* register spilled into stack */
237cc2b14d5SAlexei Starovoitov STACK_MISC, /* BPF program wrote some data into this slot */
238cc2b14d5SAlexei Starovoitov STACK_ZERO, /* BPF program wrote constant zero */
23997e03f52SJoanne Koong /* A dynptr is stored in this stack slot. The type of dynptr
24097e03f52SJoanne Koong * is stored in bpf_stack_state->spilled_ptr.dynptr.type
24197e03f52SJoanne Koong */
24297e03f52SJoanne Koong STACK_DYNPTR,
24306accc87SAndrii Nakryiko STACK_ITER,
244c8e2ee1fSKumar Kartikeya Dwivedi STACK_IRQ_FLAG,
24558e2af8bSJakub Kicinski };
24658e2af8bSJakub Kicinski
24758e2af8bSJakub Kicinski #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
24806accc87SAndrii Nakryiko
249407958a0SAndrii Nakryiko #define BPF_REGMASK_ARGS ((1 << BPF_REG_1) | (1 << BPF_REG_2) | \
250407958a0SAndrii Nakryiko (1 << BPF_REG_3) | (1 << BPF_REG_4) | \
251407958a0SAndrii Nakryiko (1 << BPF_REG_5))
252407958a0SAndrii Nakryiko
25397e03f52SJoanne Koong #define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
25497e03f52SJoanne Koong #define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
25558e2af8bSJakub Kicinski
256638f5b90SAlexei Starovoitov struct bpf_stack_state {
257638f5b90SAlexei Starovoitov struct bpf_reg_state spilled_ptr;
258638f5b90SAlexei Starovoitov u8 slot_type[BPF_REG_SIZE];
259638f5b90SAlexei Starovoitov };
260638f5b90SAlexei Starovoitov
261fd978bf7SJoe Stringer struct bpf_reference_state {
262f6b9a69aSKumar Kartikeya Dwivedi /* Each reference object has a type. Ensure REF_TYPE_PTR is zero to
263f6b9a69aSKumar Kartikeya Dwivedi * default to pointer reference on zero initialization of a state.
264f6b9a69aSKumar Kartikeya Dwivedi */
265f6b9a69aSKumar Kartikeya Dwivedi enum ref_state_type {
2660de20461SKumar Kartikeya Dwivedi REF_TYPE_PTR = (1 << 1),
2670de20461SKumar Kartikeya Dwivedi REF_TYPE_IRQ = (1 << 2),
2680de20461SKumar Kartikeya Dwivedi REF_TYPE_LOCK = (1 << 3),
2690de20461SKumar Kartikeya Dwivedi REF_TYPE_RES_LOCK = (1 << 4),
2700de20461SKumar Kartikeya Dwivedi REF_TYPE_RES_LOCK_IRQ = (1 << 5),
271*ea21771cSKumar Kartikeya Dwivedi REF_TYPE_LOCK_MASK = REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
272f6b9a69aSKumar Kartikeya Dwivedi } type;
273fd978bf7SJoe Stringer /* Track each reference created with a unique id, even if the same
274fd978bf7SJoe Stringer * instruction creates the reference multiple times (eg, via CALL).
275fd978bf7SJoe Stringer */
276fd978bf7SJoe Stringer int id;
277fd978bf7SJoe Stringer /* Instruction where the allocation of this reference occurred. This
278fd978bf7SJoe Stringer * is used purely to inform the user of a reference leak.
279fd978bf7SJoe Stringer */
280fd978bf7SJoe Stringer int insn_idx;
281f6b9a69aSKumar Kartikeya Dwivedi /* Used to keep track of the source object of a lock, to ensure
282f6b9a69aSKumar Kartikeya Dwivedi * it matches on unlock.
283f6b9a69aSKumar Kartikeya Dwivedi */
284f6b9a69aSKumar Kartikeya Dwivedi void *ptr;
285f6b9a69aSKumar Kartikeya Dwivedi };
286fd978bf7SJoe Stringer
2878fa4ecd4SAndrii Nakryiko struct bpf_retval_range {
2888fa4ecd4SAndrii Nakryiko s32 minval;
2898fa4ecd4SAndrii Nakryiko s32 maxval;
2908fa4ecd4SAndrii Nakryiko };
2918fa4ecd4SAndrii Nakryiko
29258e2af8bSJakub Kicinski /* state of the program:
29358e2af8bSJakub Kicinski * type of all registers and stack info
29458e2af8bSJakub Kicinski */
295f4d7e40aSAlexei Starovoitov struct bpf_func_state {
29658e2af8bSJakub Kicinski struct bpf_reg_state regs[MAX_BPF_REG];
297f4d7e40aSAlexei Starovoitov /* index of call instruction that called into this func */
298f4d7e40aSAlexei Starovoitov int callsite;
299f4d7e40aSAlexei Starovoitov /* stack frame number of this function state from pov of
300f4d7e40aSAlexei Starovoitov * enclosing bpf_verifier_state.
301f4d7e40aSAlexei Starovoitov * 0 = main function, 1 = first callee.
302f4d7e40aSAlexei Starovoitov */
303f4d7e40aSAlexei Starovoitov u32 frameno;
30401f810acSAndrei Matei /* subprog number == index within subprog_info
305f4d7e40aSAlexei Starovoitov * zero == main subprog
306f4d7e40aSAlexei Starovoitov */
307f4d7e40aSAlexei Starovoitov u32 subprogno;
308bfc6bb74SAlexei Starovoitov /* Every bpf_timer_start will increment async_entry_cnt.
309bfc6bb74SAlexei Starovoitov * It's used to distinguish:
310bfc6bb74SAlexei Starovoitov * void foo(void) { for(;;); }
311bfc6bb74SAlexei Starovoitov * void foo(void) { bpf_timer_set_callback(,foo); }
312bfc6bb74SAlexei Starovoitov */
313bfc6bb74SAlexei Starovoitov u32 async_entry_cnt;
3148fa4ecd4SAndrii Nakryiko struct bpf_retval_range callback_ret_range;
31545b5623fSAndrii Nakryiko bool in_callback_fn;
316bfc6bb74SAlexei Starovoitov bool in_async_callback_fn;
317b9ae0c9dSKumar Kartikeya Dwivedi bool in_exception_callback_fn;
318bb124da6SEduard Zingerman /* For callback calling functions that limit number of possible
319bb124da6SEduard Zingerman * callback executions (e.g. bpf_loop) keeps track of current
320bb124da6SEduard Zingerman * simulated iteration number.
321bb124da6SEduard Zingerman * Value in frame N refers to number of times callback with frame
322bb124da6SEduard Zingerman * N+1 was simulated, e.g. for the following call:
323bb124da6SEduard Zingerman *
324bb124da6SEduard Zingerman * bpf_loop(..., fn, ...); | suppose current frame is N
325bb124da6SEduard Zingerman * | fn would be simulated in frame N+1
326bb124da6SEduard Zingerman * | number of simulations is tracked in frame N
327bb124da6SEduard Zingerman */
328bb124da6SEduard Zingerman u32 callback_depth;
329f4d7e40aSAlexei Starovoitov
330fd978bf7SJoe Stringer /* The following fields should be last. See copy_func_state() */
33192e1567eSAndrei Matei /* The state of the stack. Each element of the array describes BPF_REG_SIZE
33292e1567eSAndrei Matei * (i.e. 8) bytes worth of stack memory.
33392e1567eSAndrei Matei * stack[0] represents bytes [*(r10-8)..*(r10-1)]
33492e1567eSAndrei Matei * stack[1] represents bytes [*(r10-16)..*(r10-9)]
33592e1567eSAndrei Matei * ...
33692e1567eSAndrei Matei * stack[allocated_stack/8 - 1] represents [*(r10-allocated_stack)..*(r10-allocated_stack+7)]
33792e1567eSAndrei Matei */
338638f5b90SAlexei Starovoitov struct bpf_stack_state *stack;
33992e1567eSAndrei Matei /* Size of the current stack, in bytes. The stack state is tracked below, in
34092e1567eSAndrei Matei * `stack`. allocated_stack is always a multiple of BPF_REG_SIZE.
34192e1567eSAndrei Matei */
34245b5623fSAndrii Nakryiko int allocated_stack;
34358e2af8bSJakub Kicinski };
34458e2af8bSJakub Kicinski
34541f6f64eSAndrii Nakryiko #define MAX_CALL_FRAMES 8
34641f6f64eSAndrii Nakryiko
34796a30e46SAndrii Nakryiko /* instruction history flags, used in bpf_insn_hist_entry.flags field */
34841f6f64eSAndrii Nakryiko enum {
34941f6f64eSAndrii Nakryiko /* instruction references stack slot through PTR_TO_STACK register;
35041f6f64eSAndrii Nakryiko * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
35141f6f64eSAndrii Nakryiko * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
35241f6f64eSAndrii Nakryiko * 8 bytes per slot, so slot index (spi) is [0, 63])
35341f6f64eSAndrii Nakryiko */
35441f6f64eSAndrii Nakryiko INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */
35541f6f64eSAndrii Nakryiko
35641f6f64eSAndrii Nakryiko INSN_F_SPI_MASK = 0x3f, /* 6 bits */
35741f6f64eSAndrii Nakryiko INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
35841f6f64eSAndrii Nakryiko
35941f6f64eSAndrii Nakryiko INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
360b5dc0163SAlexei Starovoitov };
361b5dc0163SAlexei Starovoitov
36241f6f64eSAndrii Nakryiko static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
36341f6f64eSAndrii Nakryiko static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
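/* A minimal sketch (hypothetical snippet, not a helper defined in this file)
 * of how a stack access would be packed into and recovered from the 10-bit
 * flags field described above:
 *
 *	// encode:
 *	u32 flags = INSN_F_STACK_ACCESS |
 *		    ((spi & INSN_F_SPI_MASK) << INSN_F_SPI_SHIFT) |
 *		    (frameno & INSN_F_FRAMENO_MASK);
 *	// decode:
 *	u32 spi     = (flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
 *	u32 frameno = flags & INSN_F_FRAMENO_MASK;
 */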
36441f6f64eSAndrii Nakryiko
36596a30e46SAndrii Nakryiko struct bpf_insn_hist_entry {
36641f6f64eSAndrii Nakryiko u32 idx;
36741f6f64eSAndrii Nakryiko /* insn idx can't be bigger than 1 million */
36841f6f64eSAndrii Nakryiko u32 prev_idx : 22;
36941f6f64eSAndrii Nakryiko /* special flags, e.g., whether insn is doing register stack spill/load */
37041f6f64eSAndrii Nakryiko u32 flags : 10;
3714bf79f9bSEduard Zingerman /* additional registers that need precision tracking when this
3724bf79f9bSEduard Zingerman * jump is backtracked, vector of six 10-bit records
3734bf79f9bSEduard Zingerman */
3744bf79f9bSEduard Zingerman u64 linked_regs;
37541f6f64eSAndrii Nakryiko };
37641f6f64eSAndrii Nakryiko
3775dd9cdbcSEduard Zingerman /* Maximum number of register states that can exist at once */
3785dd9cdbcSEduard Zingerman #define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
379f4d7e40aSAlexei Starovoitov struct bpf_verifier_state {
380f4d7e40aSAlexei Starovoitov /* call stack tracking */
381f4d7e40aSAlexei Starovoitov struct bpf_func_state *frame[MAX_CALL_FRAMES];
3822589726dSAlexei Starovoitov struct bpf_verifier_state *parent;
3831995edc5SKumar Kartikeya Dwivedi /* Acquired reference states */
3841995edc5SKumar Kartikeya Dwivedi struct bpf_reference_state *refs;
3852589726dSAlexei Starovoitov /*
3862589726dSAlexei Starovoitov * 'branches' field is the number of branches left to explore:
3872589726dSAlexei Starovoitov * 0 - all possible paths from this state reached bpf_exit or
3882589726dSAlexei Starovoitov * were safely pruned
3892589726dSAlexei Starovoitov * 1 - at least one path is being explored.
3902589726dSAlexei Starovoitov * This state hasn't reached bpf_exit
3912589726dSAlexei Starovoitov * 2 - at least two paths are being explored.
3922589726dSAlexei Starovoitov * This state is an immediate parent of two children.
3932589726dSAlexei Starovoitov * One is fallthrough branch with branches==1 and another
3942589726dSAlexei Starovoitov * state is pushed into stack (to be explored later) also with
3952589726dSAlexei Starovoitov * branches==1. The parent of this state has branches==1.
3962589726dSAlexei Starovoitov * The verifier state tree connected via 'parent' pointer looks like:
3972589726dSAlexei Starovoitov * 1
3982589726dSAlexei Starovoitov * 1
3992589726dSAlexei Starovoitov * 2 -> 1 (first 'if' pushed into stack)
4002589726dSAlexei Starovoitov * 1
4012589726dSAlexei Starovoitov * 2 -> 1 (second 'if' pushed into stack)
4022589726dSAlexei Starovoitov * 1
4032589726dSAlexei Starovoitov * 1
4042589726dSAlexei Starovoitov * 1 bpf_exit.
4052589726dSAlexei Starovoitov *
4062589726dSAlexei Starovoitov * Once do_check() reaches bpf_exit, it calls update_branch_counts()
4072589726dSAlexei Starovoitov * and the verifier state tree will look:
4082589726dSAlexei Starovoitov * 1
4092589726dSAlexei Starovoitov * 1
4102589726dSAlexei Starovoitov * 2 -> 1 (first 'if' pushed into stack)
4112589726dSAlexei Starovoitov * 1
4122589726dSAlexei Starovoitov * 1 -> 1 (second 'if' pushed into stack)
4132589726dSAlexei Starovoitov * 0
4142589726dSAlexei Starovoitov * 0
4152589726dSAlexei Starovoitov * 0 bpf_exit.
4162589726dSAlexei Starovoitov * After pop_stack() the do_check() will resume at second 'if'.
4172589726dSAlexei Starovoitov *
4182589726dSAlexei Starovoitov * If is_state_visited() sees a state with branches > 0 it means
4192589726dSAlexei Starovoitov * there is a loop. If such state is exactly equal to the current state
4202589726dSAlexei Starovoitov * it's an infinite loop. Note states_equal() checks for states
4216dbdc9f3SHongyi Lu * equivalency, so two states being 'states_equal' does not mean
4222589726dSAlexei Starovoitov * infinite loop. The exact comparison is provided by
4232589726dSAlexei Starovoitov * states_maybe_looping() function. It's a stronger pre-check and
4242589726dSAlexei Starovoitov * much faster than states_equal().
4252589726dSAlexei Starovoitov *
4262589726dSAlexei Starovoitov * This algorithm may not find all possible infinite loops or
4272589726dSAlexei Starovoitov * loop iteration count may be too high.
4282589726dSAlexei Starovoitov * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
4292589726dSAlexei Starovoitov */
4302589726dSAlexei Starovoitov u32 branches;
431dc2a4ebcSAlexei Starovoitov u32 insn_idx;
432f4d7e40aSAlexei Starovoitov u32 curframe;
4336a3cd331SDave Marchevsky
4341995edc5SKumar Kartikeya Dwivedi u32 acquired_refs;
4351995edc5SKumar Kartikeya Dwivedi u32 active_locks;
4361995edc5SKumar Kartikeya Dwivedi u32 active_preempt_locks;
437c8e2ee1fSKumar Kartikeya Dwivedi u32 active_irq_id;
438*ea21771cSKumar Kartikeya Dwivedi u32 active_lock_id;
439*ea21771cSKumar Kartikeya Dwivedi void *active_lock_ptr;
4409bb00b28SYonghong Song bool active_rcu_lock;
4411995edc5SKumar Kartikeya Dwivedi
4421995edc5SKumar Kartikeya Dwivedi bool speculative;
44381f1d7a5SBenjamin Tissoires bool in_sleepable;
444b5dc0163SAlexei Starovoitov
445b5dc0163SAlexei Starovoitov /* first and last insn idx of this verifier state */
446b5dc0163SAlexei Starovoitov u32 first_insn_idx;
447b5dc0163SAlexei Starovoitov u32 last_insn_idx;
4482a099282SEduard Zingerman /* If this state is a part of states loop this field points to some
4492a099282SEduard Zingerman * parent of this state such that:
4502a099282SEduard Zingerman * - it is also a member of the same states loop;
4512a099282SEduard Zingerman * - DFS states traversal starting from initial state visits loop_entry
4522a099282SEduard Zingerman * state before this state.
4532a099282SEduard Zingerman * Used to compute topmost loop entry for state loops.
4542a099282SEduard Zingerman * State loops might appear because of open coded iterators logic.
4552a099282SEduard Zingerman * See get_loop_entry() for more information.
4562a099282SEduard Zingerman */
4572a099282SEduard Zingerman struct bpf_verifier_state *loop_entry;
45896a30e46SAndrii Nakryiko /* Sub-range of env->insn_hist[] corresponding to this state's
45996a30e46SAndrii Nakryiko * instruction history.
46096a30e46SAndrii Nakryiko * Backtracking uses it to go from last to first.
46196a30e46SAndrii Nakryiko * For most states instruction history is short, 0-3 instructions.
462b5dc0163SAlexei Starovoitov * For loops it can go up to ~40.
463b5dc0163SAlexei Starovoitov */
46496a30e46SAndrii Nakryiko u32 insn_hist_start;
46596a30e46SAndrii Nakryiko u32 insn_hist_end;
4662793a8b0SEduard Zingerman u32 dfs_depth;
467ab5cfac1SEduard Zingerman u32 callback_unroll_depth;
468011832b9SAlexei Starovoitov u32 may_goto_depth;
469408fcf94SEduard Zingerman /* If this state was ever pointed-to by other state's loop_entry field
470408fcf94SEduard Zingerman * this flag would be set to true. Used to avoid freeing such states
471408fcf94SEduard Zingerman * while they are still in use.
472408fcf94SEduard Zingerman */
473408fcf94SEduard Zingerman u32 used_as_loop_entry;
474f4d7e40aSAlexei Starovoitov };
475f4d7e40aSAlexei Starovoitov
476dfab99dfSChuyi Zhou #define bpf_get_spilled_reg(slot, frame, mask) \
477f3709f69SJoe Stringer (((slot < frame->allocated_stack / BPF_REG_SIZE) && \
47832f55dd4SMaxim Mikityanskiy ((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
479f3709f69SJoe Stringer ? &frame->stack[slot].spilled_ptr : NULL)
480f3709f69SJoe Stringer
481f3709f69SJoe Stringer /* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
482dfab99dfSChuyi Zhou #define bpf_for_each_spilled_reg(iter, frame, reg, mask) \
483dfab99dfSChuyi Zhou for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask); \
484f3709f69SJoe Stringer iter < frame->allocated_stack / BPF_REG_SIZE; \
485dfab99dfSChuyi Zhou iter++, reg = bpf_get_spilled_reg(iter, frame, mask))
486f3709f69SJoe Stringer
487dfab99dfSChuyi Zhou #define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \
488b239da34SKumar Kartikeya Dwivedi ({ \
489b239da34SKumar Kartikeya Dwivedi struct bpf_verifier_state *___vstate = __vst; \
490b239da34SKumar Kartikeya Dwivedi int ___i, ___j; \
491b239da34SKumar Kartikeya Dwivedi for (___i = 0; ___i <= ___vstate->curframe; ___i++) { \
492b239da34SKumar Kartikeya Dwivedi struct bpf_reg_state *___regs; \
493b239da34SKumar Kartikeya Dwivedi __state = ___vstate->frame[___i]; \
494b239da34SKumar Kartikeya Dwivedi ___regs = __state->regs; \
495b239da34SKumar Kartikeya Dwivedi for (___j = 0; ___j < MAX_BPF_REG; ___j++) { \
496b239da34SKumar Kartikeya Dwivedi __reg = &___regs[___j]; \
497b239da34SKumar Kartikeya Dwivedi (void)(__expr); \
498b239da34SKumar Kartikeya Dwivedi } \
499dfab99dfSChuyi Zhou bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
500b239da34SKumar Kartikeya Dwivedi if (!__reg) \
501b239da34SKumar Kartikeya Dwivedi continue; \
502b239da34SKumar Kartikeya Dwivedi (void)(__expr); \
503b239da34SKumar Kartikeya Dwivedi } \
504b239da34SKumar Kartikeya Dwivedi } \
505b239da34SKumar Kartikeya Dwivedi })
506b239da34SKumar Kartikeya Dwivedi
507dfab99dfSChuyi Zhou /* Invoke __expr over registers in __vst, setting __state and __reg */
508dfab99dfSChuyi Zhou #define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
509dfab99dfSChuyi Zhou bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
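/* Usage sketch, assuming an 'env' and a hypothetical mark_reg_invalid() helper
 * (the real call sites are in kernel/bpf/verifier.c): walk every register and
 * every spilled register of the current state and clobber those holding a
 * given reference:
 *
 *	struct bpf_func_state *state;
 *	struct bpf_reg_state *reg;
 *
 *	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
 *		if (reg->ref_obj_id == ref_obj_id)
 *			mark_reg_invalid(env, reg);
 *	}));
 */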
510dfab99dfSChuyi Zhou
51158e2af8bSJakub Kicinski /* linked list of verifier states used to prune search */
51258e2af8bSJakub Kicinski struct bpf_verifier_state_list {
51358e2af8bSJakub Kicinski struct bpf_verifier_state state;
5145564ee3aSEduard Zingerman struct list_head node;
515408fcf94SEduard Zingerman u32 miss_cnt;
516408fcf94SEduard Zingerman u32 hit_cnt:31;
517408fcf94SEduard Zingerman u32 in_free_list:1;
51858e2af8bSJakub Kicinski };
51958e2af8bSJakub Kicinski
5201ade2371SEduard Zingerman struct bpf_loop_inline_state {
521f16214c1SMatthieu Baerts unsigned int initialized:1; /* set to true upon first entry */
522f16214c1SMatthieu Baerts unsigned int fit_for_inline:1; /* true if callback function is the same
5231ade2371SEduard Zingerman * at each call and flags are always zero
5241ade2371SEduard Zingerman */
5251ade2371SEduard Zingerman u32 callback_subprogno; /* valid when fit_for_inline is true */
5261ade2371SEduard Zingerman };
5271ade2371SEduard Zingerman
5280a525621SPhilo Lu /* pointer and state for maps */
5290a525621SPhilo Lu struct bpf_map_ptr_state {
5300a525621SPhilo Lu struct bpf_map *map_ptr;
5310a525621SPhilo Lu bool poison;
5320a525621SPhilo Lu bool unpriv;
5330a525621SPhilo Lu };
5340a525621SPhilo Lu
535979d63d5SDaniel Borkmann /* Possible states for alu_state member. */
536801c6058SDaniel Borkmann #define BPF_ALU_SANITIZE_SRC (1U << 0)
537801c6058SDaniel Borkmann #define BPF_ALU_SANITIZE_DST (1U << 1)
538979d63d5SDaniel Borkmann #define BPF_ALU_NEG_VALUE (1U << 2)
539d3bd7413SDaniel Borkmann #define BPF_ALU_NON_POINTER (1U << 3)
540801c6058SDaniel Borkmann #define BPF_ALU_IMMEDIATE (1U << 4)
541979d63d5SDaniel Borkmann #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \
542979d63d5SDaniel Borkmann BPF_ALU_SANITIZE_DST)
543979d63d5SDaniel Borkmann
54458e2af8bSJakub Kicinski struct bpf_insn_aux_data {
54581ed18abSAlexei Starovoitov union {
54658e2af8bSJakub Kicinski enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
5470a525621SPhilo Lu struct bpf_map_ptr_state map_ptr_state;
5481c2a088aSAlexei Starovoitov s32 call_imm; /* saved imm field of call insn */
549979d63d5SDaniel Borkmann u32 alu_limit; /* limit for add/sub register with pointer */
550d8eca5bbSDaniel Borkmann struct {
551d8eca5bbSDaniel Borkmann u32 map_index; /* index into used_maps[] */
552d8eca5bbSDaniel Borkmann u32 map_off; /* offset from value base address */
553d8eca5bbSDaniel Borkmann };
5544976b718SHao Luo struct {
5554976b718SHao Luo enum bpf_reg_type reg_type; /* type of pseudo_btf_id */
5564976b718SHao Luo union {
55722dc4a0fSAndrii Nakryiko struct {
55822dc4a0fSAndrii Nakryiko struct btf *btf;
5594976b718SHao Luo u32 btf_id; /* btf_id for struct typed var */
56022dc4a0fSAndrii Nakryiko };
5614976b718SHao Luo u32 mem_size; /* mem_size for non-struct typed var */
5624976b718SHao Luo };
5634976b718SHao Luo } btf_var;
5641ade2371SEduard Zingerman /* if instruction is a call to bpf_loop this field tracks
5651ade2371SEduard Zingerman * the state of the relevant registers to make decision about inlining
5661ade2371SEduard Zingerman */
5671ade2371SEduard Zingerman struct bpf_loop_inline_state loop_inline_state;
56881ed18abSAlexei Starovoitov };
569d2dcc67dSDave Marchevsky union {
570d2dcc67dSDave Marchevsky /* remember the size of type passed to bpf_obj_new to rewrite R1 */
571d2dcc67dSDave Marchevsky u64 obj_new_size;
572d2dcc67dSDave Marchevsky /* remember the offset of node field within type to rewrite */
573d2dcc67dSDave Marchevsky u64 insert_off;
574d2dcc67dSDave Marchevsky };
575958cf2e2SKumar Kartikeya Dwivedi struct btf_struct_meta *kptr_struct_meta;
576d2e4c1e6SDaniel Borkmann u64 map_key_state; /* constant (32 bit) key tracking for maps */
57723994631SYonghong Song int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
57851c39bb1SAlexei Starovoitov u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
5792039f26fSDaniel Borkmann bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
5805327ed3dSJiong Wang bool zext_dst; /* this insn zero extends dst reg */
5816082b6c3SAlexei Starovoitov bool needs_zext; /* alu op needs to clear upper bits */
5829bb00b28SYonghong Song bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
58306accc87SAndrii Nakryiko bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
58401cc55afSYonghong Song bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
585979d63d5SDaniel Borkmann u8 alu_state; /* used in combination with alu_limit */
5865b5f51bfSEduard Zingerman /* true if STX or LDX instruction is a part of a spill/fill
587ae010757SEduard Zingerman * pattern for a bpf_fastcall call.
5885b5f51bfSEduard Zingerman */
589ae010757SEduard Zingerman u8 fastcall_pattern:1;
5905b5f51bfSEduard Zingerman /* for CALL instructions, a number of spill/fill pairs in the
591ae010757SEduard Zingerman * bpf_fastcall pattern.
5925b5f51bfSEduard Zingerman */
593ae010757SEduard Zingerman u8 fastcall_spills_num:3;
59451c39bb1SAlexei Starovoitov
59551c39bb1SAlexei Starovoitov /* below fields are initialized once */
5969e4c24e7SJakub Kicinski unsigned int orig_idx; /* original instruction index */
597bffdeaa8SAndrii Nakryiko bool jmp_point;
5984b5ce570SAndrii Nakryiko bool prune_point;
5994b5ce570SAndrii Nakryiko /* ensure we check state equivalence and save state checkpoint at
6004b5ce570SAndrii Nakryiko * this instruction, regardless of any heuristics
6014b5ce570SAndrii Nakryiko */
6024b5ce570SAndrii Nakryiko bool force_checkpoint;
603ab5cfac1SEduard Zingerman /* true if instruction is a call to a helper function that
604ab5cfac1SEduard Zingerman * accepts callback function as a parameter.
605ab5cfac1SEduard Zingerman */
606ab5cfac1SEduard Zingerman bool calls_callback;
60714c8552dSEduard Zingerman /* registers alive before this instruction. */
60814c8552dSEduard Zingerman u16 live_regs_before;
60958e2af8bSJakub Kicinski };
61058e2af8bSJakub Kicinski
61158e2af8bSJakub Kicinski #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
612541c3badSAndrii Nakryiko #define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */
61358e2af8bSJakub Kicinski
614a2a7d570SJakub Kicinski #define BPF_VERIFIER_TMP_LOG_SIZE 1024
615a2a7d570SJakub Kicinski
616b9193c1bSMartin KaFai Lau struct bpf_verifier_log {
61712166409SAndrii Nakryiko /* Logical start and end positions of a "log window" of the verifier log.
61812166409SAndrii Nakryiko * start_pos == 0 means we haven't truncated anything.
61912166409SAndrii Nakryiko * Once truncation starts to happen, start_pos + len_total == end_pos,
62012166409SAndrii Nakryiko * except during log reset situations, in which (end_pos - start_pos)
62112166409SAndrii Nakryiko * might get smaller than len_total (see bpf_vlog_reset()).
62212166409SAndrii Nakryiko * Generally, (end_pos - start_pos) gives number of useful data in
62312166409SAndrii Nakryiko * user log buffer.
62412166409SAndrii Nakryiko */
62512166409SAndrii Nakryiko u64 start_pos;
62612166409SAndrii Nakryiko u64 end_pos;
627e7bf8249SJakub Kicinski char __user *ubuf;
62812166409SAndrii Nakryiko u32 level;
629e7bf8249SJakub Kicinski u32 len_total;
630fa1c7d5cSAndrii Nakryiko u32 len_max;
63112166409SAndrii Nakryiko char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
632e7bf8249SJakub Kicinski };
633e7bf8249SJakub Kicinski
63406ee7115SAlexei Starovoitov #define BPF_LOG_LEVEL1 1
63506ee7115SAlexei Starovoitov #define BPF_LOG_LEVEL2 2
63606ee7115SAlexei Starovoitov #define BPF_LOG_STATS 4
63712166409SAndrii Nakryiko #define BPF_LOG_FIXED 8
63806ee7115SAlexei Starovoitov #define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
63912166409SAndrii Nakryiko #define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
6408580ac94SAlexei Starovoitov #define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */
6412e576648SChristy Lee #define BPF_LOG_MIN_ALIGNMENT 8U
6422e576648SChristy Lee #define BPF_LOG_ALIGNMENT 40U
64306ee7115SAlexei Starovoitov
64477d2e05aSMartin KaFai Lau static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
64577d2e05aSMartin KaFai Lau {
646fa1c7d5cSAndrii Nakryiko return log && log->level;
64777d2e05aSMartin KaFai Lau }
64877d2e05aSMartin KaFai Lau
649cc8b0b92SAlexei Starovoitov #define BPF_MAX_SUBPROGS 256
650cc8b0b92SAlexei Starovoitov
6514ba1d0f2SAndrii Nakryiko struct bpf_subprog_arg_info {
6524ba1d0f2SAndrii Nakryiko enum bpf_arg_type arg_type;
6534ba1d0f2SAndrii Nakryiko union {
6544ba1d0f2SAndrii Nakryiko u32 mem_size;
655e2b3c4ffSAndrii Nakryiko u32 btf_id;
6564ba1d0f2SAndrii Nakryiko };
6574ba1d0f2SAndrii Nakryiko };
6584ba1d0f2SAndrii Nakryiko
659a76ab573SYonghong Song enum priv_stack_mode {
660a76ab573SYonghong Song PRIV_STACK_UNKNOWN,
661a76ab573SYonghong Song NO_PRIV_STACK,
662a76ab573SYonghong Song PRIV_STACK_ADAPTIVE,
663a76ab573SYonghong Song };
664a76ab573SYonghong Song
6659c8105bdSJiong Wang struct bpf_subprog_info {
6668c1b6e69SAlexei Starovoitov /* 'start' has to be the first field otherwise find_subprog() won't work */
6679c8105bdSJiong Wang u32 start; /* insn idx of function entry point */
668c454a46bSMartin KaFai Lau u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
6699c8105bdSJiong Wang u16 stack_depth; /* max. stack depth used by this function */
670011832b9SAlexei Starovoitov u16 stack_extra;
671ae010757SEduard Zingerman /* offsets in range [stack_depth .. fastcall_stack_off)
672ae010757SEduard Zingerman * are used for bpf_fastcall spills and fills.
6735b5f51bfSEduard Zingerman */
674ae010757SEduard Zingerman s16 fastcall_stack_off;
675406a6fa4SAndrii Nakryiko bool has_tail_call: 1;
676406a6fa4SAndrii Nakryiko bool tail_call_reachable: 1;
677406a6fa4SAndrii Nakryiko bool has_ld_abs: 1;
678406a6fa4SAndrii Nakryiko bool is_cb: 1;
679406a6fa4SAndrii Nakryiko bool is_async_cb: 1;
680406a6fa4SAndrii Nakryiko bool is_exception_cb: 1;
6814ba1d0f2SAndrii Nakryiko bool args_cached: 1;
682ae010757SEduard Zingerman /* true if bpf_fastcall stack region is used by functions that can't be inlined */
683ae010757SEduard Zingerman bool keep_fastcall_stack: 1;
68451081a3fSEduard Zingerman bool changes_pkt_data: 1;
685e2d8f560SKumar Kartikeya Dwivedi bool might_sleep: 1;
6864ba1d0f2SAndrii Nakryiko
687a76ab573SYonghong Song enum priv_stack_mode priv_stack_mode;
6884ba1d0f2SAndrii Nakryiko u8 arg_cnt;
6894ba1d0f2SAndrii Nakryiko struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS];
6909c8105bdSJiong Wang };
6919c8105bdSJiong Wang
692407958a0SAndrii Nakryiko struct bpf_verifier_env;
693407958a0SAndrii Nakryiko
694407958a0SAndrii Nakryiko struct backtrack_state {
695407958a0SAndrii Nakryiko struct bpf_verifier_env *env;
696407958a0SAndrii Nakryiko u32 frame;
697407958a0SAndrii Nakryiko u32 reg_masks[MAX_CALL_FRAMES];
698407958a0SAndrii Nakryiko u64 stack_masks[MAX_CALL_FRAMES];
699407958a0SAndrii Nakryiko };
700407958a0SAndrii Nakryiko
7011ffc85d9SEduard Zingerman struct bpf_id_pair {
7021ffc85d9SEduard Zingerman u32 old;
7031ffc85d9SEduard Zingerman u32 cur;
7041ffc85d9SEduard Zingerman };
7051ffc85d9SEduard Zingerman
7061ffc85d9SEduard Zingerman struct bpf_idmap {
7071ffc85d9SEduard Zingerman u32 tmp_id_gen;
7081ffc85d9SEduard Zingerman struct bpf_id_pair map[BPF_ID_MAP_SIZE];
7091ffc85d9SEduard Zingerman };
7101ffc85d9SEduard Zingerman
711904e6ddfSEduard Zingerman struct bpf_idset {
712904e6ddfSEduard Zingerman u32 count;
713904e6ddfSEduard Zingerman u32 ids[BPF_ID_MAP_SIZE];
714904e6ddfSEduard Zingerman };
715904e6ddfSEduard Zingerman
71658e2af8bSJakub Kicinski /* single container for all structs
71758e2af8bSJakub Kicinski * one verifier_env per bpf_check() call
71858e2af8bSJakub Kicinski */
71958e2af8bSJakub Kicinski struct bpf_verifier_env {
720c08435ecSDaniel Borkmann u32 insn_idx;
721c08435ecSDaniel Borkmann u32 prev_insn_idx;
72258e2af8bSJakub Kicinski struct bpf_prog *prog; /* eBPF program being verified */
72300176a34SJakub Kicinski const struct bpf_verifier_ops *ops;
724e3f87fdfSKui-Feng Lee struct module *attach_btf_mod; /* The owner module of prog->aux->attach_btf */
72558e2af8bSJakub Kicinski struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
72658e2af8bSJakub Kicinski int stack_size; /* number of states to be processed */
727e07b98d9SDavid S. Miller bool strict_alignment; /* perform strict pointer alignment checks */
72810d274e8SAlexei Starovoitov bool test_state_freq; /* test verifier with different pruning frequency */
729ff8867afSAndrii Nakryiko bool test_reg_invariants; /* fail verification on register invariants violations */
730638f5b90SAlexei Starovoitov struct bpf_verifier_state *cur_state; /* current verifier state */
7315564ee3aSEduard Zingerman /* Search pruning optimization, array of list_heads for
7325564ee3aSEduard Zingerman * lists of struct bpf_verifier_state_list.
7335564ee3aSEduard Zingerman */
7345564ee3aSEduard Zingerman struct list_head *explored_states;
7355564ee3aSEduard Zingerman struct list_head free_list; /* list of struct bpf_verifier_state_list */
73658e2af8bSJakub Kicinski struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
737541c3badSAndrii Nakryiko struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */
73858e2af8bSJakub Kicinski u32 used_map_cnt; /* number of used maps */
739541c3badSAndrii Nakryiko u32 used_btf_cnt; /* number of used BTF objects */
74058e2af8bSJakub Kicinski u32 id_gen; /* used to generate unique reg IDs */
741335d1c5bSKumar Kartikeya Dwivedi u32 hidden_subprog_cnt; /* number of hidden subprogs */
742f18b03faSKumar Kartikeya Dwivedi int exception_callback_subprog;
743e042aa53SDaniel Borkmann bool explore_alu_limits;
74458e2af8bSJakub Kicinski bool allow_ptr_leaks;
74592e1567eSAndrei Matei /* Allow access to uninitialized stack memory. Writes with fixed offset are
74692e1567eSAndrei Matei * always allowed, so this refers to reads (with fixed or variable offset),
74792e1567eSAndrei Matei * to writes with variable offset and to indirect (helper) accesses.
74892e1567eSAndrei Matei */
74901f810acSAndrei Matei bool allow_uninit_stack;
7502c78ee89SAlexei Starovoitov bool bpf_capable;
7512c78ee89SAlexei Starovoitov bool bypass_spec_v1;
7522c78ee89SAlexei Starovoitov bool bypass_spec_v4;
75358e2af8bSJakub Kicinski bool seen_direct_write;
754f18b03faSKumar Kartikeya Dwivedi bool seen_exception;
75558e2af8bSJakub Kicinski struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
756d9762e84SMartin KaFai Lau const struct bpf_line_info *prev_linfo;
757b9193c1bSMartin KaFai Lau struct bpf_verifier_log log;
758335d1c5bSKumar Kartikeya Dwivedi struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
759904e6ddfSEduard Zingerman union {
7601ffc85d9SEduard Zingerman struct bpf_idmap idmap_scratch;
761904e6ddfSEduard Zingerman struct bpf_idset idset_scratch;
762904e6ddfSEduard Zingerman };
7637df737e9SAlexei Starovoitov struct {
7647df737e9SAlexei Starovoitov int *insn_state;
7657df737e9SAlexei Starovoitov int *insn_stack;
76614c8552dSEduard Zingerman /* vector of instruction indexes sorted in post-order */
76714c8552dSEduard Zingerman int *insn_postorder;
7687df737e9SAlexei Starovoitov int cur_stack;
76914c8552dSEduard Zingerman /* current position in the insn_postorder vector */
77014c8552dSEduard Zingerman int cur_postorder;
7717df737e9SAlexei Starovoitov } cfg;
772407958a0SAndrii Nakryiko struct backtrack_state bt;
77396a30e46SAndrii Nakryiko struct bpf_insn_hist_entry *insn_hist;
77496a30e46SAndrii Nakryiko struct bpf_insn_hist_entry *cur_hist_ent;
77596a30e46SAndrii Nakryiko u32 insn_hist_cap;
77651c39bb1SAlexei Starovoitov u32 pass_cnt; /* number of times do_check() was called */
777cc8b0b92SAlexei Starovoitov u32 subprog_cnt;
77806ee7115SAlexei Starovoitov /* number of instructions analyzed by the verifier */
7792589726dSAlexei Starovoitov u32 prev_insn_processed, insn_processed;
7802589726dSAlexei Starovoitov /* number of jmps, calls, exits analyzed so far */
7812589726dSAlexei Starovoitov u32 prev_jmps_processed, jmps_processed;
78206ee7115SAlexei Starovoitov /* total verification time */
78306ee7115SAlexei Starovoitov u64 verification_time;
78406ee7115SAlexei Starovoitov /* maximum number of verifier states kept in 'branching' instructions */
78506ee7115SAlexei Starovoitov u32 max_states_per_insn;
78606ee7115SAlexei Starovoitov /* total number of allocated verifier states */
78706ee7115SAlexei Starovoitov u32 total_states;
78806ee7115SAlexei Starovoitov /* some states are freed during program analysis.
78906ee7115SAlexei Starovoitov * this is peak number of states. this number dominates kernel
79006ee7115SAlexei Starovoitov * memory consumption during verification
79106ee7115SAlexei Starovoitov */
79206ee7115SAlexei Starovoitov u32 peak_states;
79306ee7115SAlexei Starovoitov /* longest register parentage chain walked for liveness marking */
79406ee7115SAlexei Starovoitov u32 longest_mark_read_walk;
795574078b0SEduard Zingerman u32 free_list_size;
796574078b0SEduard Zingerman u32 explored_states_size;
797387544bfSAlexei Starovoitov bpfptr_t fd_array;
7980f55f9edSChristy Lee
7990f55f9edSChristy Lee /* bit mask to keep track of whether a register has been accessed
8000f55f9edSChristy Lee * since the last time the function state was printed
8010f55f9edSChristy Lee */
8020f55f9edSChristy Lee u32 scratched_regs;
8030f55f9edSChristy Lee /* Same as scratched_regs but for stack slots */
8040f55f9edSChristy Lee u64 scratched_stack_slots;
80512166409SAndrii Nakryiko u64 prev_log_pos, prev_insn_print_pos;
80692424801SDaniel Borkmann /* buffer used to temporarily hold constants as scalar registers */
80792424801SDaniel Borkmann struct bpf_reg_state fake_reg[2];
808d9439c21SAndrii Nakryiko /* buffer used to generate temporary string representations,
809d9439c21SAndrii Nakryiko * e.g., in reg_type_str() to generate reg_type string
810d9439c21SAndrii Nakryiko */
811d9439c21SAndrii Nakryiko char tmp_str_buf[TMP_STR_BUF_LEN];
8126f606ffdSMartin KaFai Lau struct bpf_insn insn_buf[INSN_BUF_SIZE];
813169c3176SMartin KaFai Lau struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
81458e2af8bSJakub Kicinski };
81558e2af8bSJakub Kicinski
816e26080d0SAndrii Nakryiko static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
817e26080d0SAndrii Nakryiko {
818e26080d0SAndrii Nakryiko return &env->prog->aux->func_info_aux[subprog];
819e26080d0SAndrii Nakryiko }
820e26080d0SAndrii Nakryiko
8214ba1d0f2SAndrii Nakryiko static inline struct bpf_subprog_info *subprog_info(struct bpf_verifier_env *env, int subprog)
8224ba1d0f2SAndrii Nakryiko {
8234ba1d0f2SAndrii Nakryiko return &env->subprog_info[subprog];
8244ba1d0f2SAndrii Nakryiko }
8254ba1d0f2SAndrii Nakryiko
826be2d04d1SMathieu Malaterre __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
827be2d04d1SMathieu Malaterre const char *fmt, va_list args);
828430e68d1SQuentin Monnet __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
829430e68d1SQuentin Monnet const char *fmt, ...);
8309e15db66SAlexei Starovoitov __printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
8319e15db66SAlexei Starovoitov const char *fmt, ...);
832bdcab414SAndrii Nakryiko int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
833bdcab414SAndrii Nakryiko char __user *log_buf, u32 log_size);
83412166409SAndrii Nakryiko void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
835bdcab414SAndrii Nakryiko int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);
836430e68d1SQuentin Monnet
837db840d38SAndrii Nakryiko __printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
838db840d38SAndrii Nakryiko u32 insn_off,
839db840d38SAndrii Nakryiko const char *prefix_fmt, ...);
840db840d38SAndrii Nakryiko
841fd978bf7SJoe Stringer static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
842638f5b90SAlexei Starovoitov {
843f4d7e40aSAlexei Starovoitov struct bpf_verifier_state *cur = env->cur_state;
844f4d7e40aSAlexei Starovoitov
845fd978bf7SJoe Stringer return cur->frame[cur->curframe];
846fd978bf7SJoe Stringer }
847fd978bf7SJoe Stringer
848fd978bf7SJoe Stringer static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
849fd978bf7SJoe Stringer {
850fd978bf7SJoe Stringer return cur_func(env)->regs;
851638f5b90SAlexei Starovoitov }
852638f5b90SAlexei Starovoitov
853a40a2632SQuentin Monnet int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
854cae1927cSJakub Kicinski int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
855cae1927cSJakub Kicinski int insn_idx, int prev_insn_idx);
856c941ce9cSQuentin Monnet int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
85708ca90afSJakub Kicinski void
85808ca90afSJakub Kicinski bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
85908ca90afSJakub Kicinski struct bpf_insn *insn);
86008ca90afSJakub Kicinski void
86108ca90afSJakub Kicinski bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
862ab3f0063SJakub Kicinski
863f7b12b6fSToke Høiland-Jørgensen /* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
864f7b12b6fSToke Høiland-Jørgensen static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
86522dc4a0fSAndrii Nakryiko struct btf *btf, u32 btf_id)
866f7b12b6fSToke Høiland-Jørgensen {
86722dc4a0fSAndrii Nakryiko if (tgt_prog)
86822dc4a0fSAndrii Nakryiko return ((u64)tgt_prog->aux->id << 32) | btf_id;
86922dc4a0fSAndrii Nakryiko else
87022dc4a0fSAndrii Nakryiko return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
871f7b12b6fSToke Høiland-Jørgensen }
872f7b12b6fSToke Høiland-Jørgensen
873441e8c66SToke Høiland-Jørgensen /* unpack the IDs from the key as constructed above */
874441e8c66SToke Høiland-Jørgensen static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
875441e8c66SToke Høiland-Jørgensen {
876441e8c66SToke Høiland-Jørgensen if (obj_id)
877441e8c66SToke Høiland-Jørgensen *obj_id = key >> 32;
878441e8c66SToke Høiland-Jørgensen if (btf_id)
879441e8c66SToke Høiland-Jørgensen *btf_id = key & 0x7FFFFFFF;
880441e8c66SToke Høiland-Jørgensen }
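/* Illustrative round trip with made-up numbers: for tgt_prog->aux->id == 7 and
 * btf_id == 42, bpf_trampoline_compute_key() returns (7ULL << 32) | 42 and
 * bpf_trampoline_unpack_key() recovers obj_id == 7, btf_id == 42. When there
 * is no target program, the BTF object id is used instead and bit 31 is set,
 * which is why unpacking masks the low word with 0x7FFFFFFF.
 */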
881441e8c66SToke Høiland-Jørgensen
882f7b12b6fSToke Høiland-Jørgensen int bpf_check_attach_target(struct bpf_verifier_log *log,
883f7b12b6fSToke Høiland-Jørgensen const struct bpf_prog *prog,
884f7b12b6fSToke Høiland-Jørgensen const struct bpf_prog *tgt_prog,
885f7b12b6fSToke Høiland-Jørgensen u32 btf_id,
886f7b12b6fSToke Høiland-Jørgensen struct bpf_attach_target_info *tgt_info);
8872357672cSKumar Kartikeya Dwivedi void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
8882357672cSKumar Kartikeya Dwivedi
889eb1f7f71SBenjamin Tissoires int mark_chain_precision(struct bpf_verifier_env *env, int regno);
890eb1f7f71SBenjamin Tissoires
891d639b9d1SHao Luo #define BPF_BASE_TYPE_MASK GENMASK(BPF_BASE_TYPE_BITS - 1, 0)
892d639b9d1SHao Luo
893d639b9d1SHao Luo /* extract base type from bpf_{arg, return, reg}_type. */
894d639b9d1SHao Luo static inline u32 base_type(u32 type)
895d639b9d1SHao Luo {
896d639b9d1SHao Luo return type & BPF_BASE_TYPE_MASK;
897d639b9d1SHao Luo }
898d639b9d1SHao Luo
899d639b9d1SHao Luo /* extract flags from an extended type. See bpf_type_flag in bpf.h. */
900d639b9d1SHao Luo static inline u32 type_flag(u32 type)
901d639b9d1SHao Luo {
902d639b9d1SHao Luo return type & ~BPF_BASE_TYPE_MASK;
903d639b9d1SHao Luo }
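/* Example using types from bpf.h: a register typed
 * PTR_TO_MAP_VALUE | PTR_MAYBE_NULL has base_type() == PTR_TO_MAP_VALUE and
 * type_flag() == PTR_MAYBE_NULL, so helpers such as type_may_be_null() below
 * only need to inspect the flag bits.
 */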
904f7b12b6fSToke Høiland-Jørgensen
9054a9c7bbeSMartin KaFai Lau /* only use after check_attach_btf_id() */
906271de525SMartin KaFai Lau static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
9075c073f26SKumar Kartikeya Dwivedi {
908fdad456cSLeon Hwang return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->saved_dst_prog_type) ?
909fdad456cSLeon Hwang prog->aux->saved_dst_prog_type : prog->type;
9105c073f26SKumar Kartikeya Dwivedi }
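/* For example, a BPF_PROG_TYPE_EXT (freplace) program attached to an XDP
 * program resolves to BPF_PROG_TYPE_XDP here, so it is verified under the
 * rules of the program type it extends; all other programs resolve to their
 * own type.
 */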
9115c073f26SKumar Kartikeya Dwivedi
912271de525SMartin KaFai Lau static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
913271de525SMartin KaFai Lau {
914271de525SMartin KaFai Lau switch (resolve_prog_type(prog)) {
915271de525SMartin KaFai Lau case BPF_PROG_TYPE_TRACING:
916271de525SMartin KaFai Lau return prog->expected_attach_type != BPF_TRACE_ITER;
917271de525SMartin KaFai Lau case BPF_PROG_TYPE_STRUCT_OPS:
9185bd36da1SYonghong Song return prog->aux->jits_use_priv_stack;
919271de525SMartin KaFai Lau case BPF_PROG_TYPE_LSM:
920271de525SMartin KaFai Lau return false;
921271de525SMartin KaFai Lau default:
922271de525SMartin KaFai Lau return true;
923271de525SMartin KaFai Lau }
924271de525SMartin KaFai Lau }
925271de525SMartin KaFai Lau
9262a6d50b5SDave Marchevsky #define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED | NON_OWN_REF)
9273f00c523SDavid Vernet
9283f00c523SDavid Vernet static inline bool bpf_type_has_unsafe_modifiers(u32 type)
9293f00c523SDavid Vernet {
9303f00c523SDavid Vernet return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
9313f00c523SDavid Vernet }
9323f00c523SDavid Vernet
93342feb662SAndrii Nakryiko static inline bool type_is_ptr_alloc_obj(u32 type)
93442feb662SAndrii Nakryiko {
93542feb662SAndrii Nakryiko return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
93642feb662SAndrii Nakryiko }
93742feb662SAndrii Nakryiko
93842feb662SAndrii Nakryiko static inline bool type_is_non_owning_ref(u32 type)
93942feb662SAndrii Nakryiko {
94042feb662SAndrii Nakryiko return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
94142feb662SAndrii Nakryiko }
94242feb662SAndrii Nakryiko
94342feb662SAndrii Nakryiko static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
94442feb662SAndrii Nakryiko {
94542feb662SAndrii Nakryiko type = base_type(type);
94642feb662SAndrii Nakryiko return type == PTR_TO_PACKET ||
94742feb662SAndrii Nakryiko type == PTR_TO_PACKET_META;
94842feb662SAndrii Nakryiko }
94942feb662SAndrii Nakryiko
95042feb662SAndrii Nakryiko static inline bool type_is_sk_pointer(enum bpf_reg_type type)
95142feb662SAndrii Nakryiko {
95242feb662SAndrii Nakryiko return type == PTR_TO_SOCKET ||
95342feb662SAndrii Nakryiko type == PTR_TO_SOCK_COMMON ||
95442feb662SAndrii Nakryiko type == PTR_TO_TCP_SOCK ||
95542feb662SAndrii Nakryiko type == PTR_TO_XDP_SOCK;
95642feb662SAndrii Nakryiko }
95742feb662SAndrii Nakryiko
9581ae497c7SShung-Hsi Yu static inline bool type_may_be_null(u32 type)
9591ae497c7SShung-Hsi Yu {
9601ae497c7SShung-Hsi Yu return type & PTR_MAYBE_NULL;
9611ae497c7SShung-Hsi Yu }
9621ae497c7SShung-Hsi Yu
96342feb662SAndrii Nakryiko static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
96442feb662SAndrii Nakryiko {
96542feb662SAndrii Nakryiko env->scratched_regs |= 1U << regno;
96642feb662SAndrii Nakryiko }
96742feb662SAndrii Nakryiko
96842feb662SAndrii Nakryiko static inline void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
96942feb662SAndrii Nakryiko {
97042feb662SAndrii Nakryiko env->scratched_stack_slots |= 1ULL << spi;
97142feb662SAndrii Nakryiko }
97242feb662SAndrii Nakryiko
97342feb662SAndrii Nakryiko static inline bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
97442feb662SAndrii Nakryiko {
97542feb662SAndrii Nakryiko return (env->scratched_regs >> regno) & 1;
97642feb662SAndrii Nakryiko }
97742feb662SAndrii Nakryiko
97842feb662SAndrii Nakryiko static inline bool stack_slot_scratched(const struct bpf_verifier_env *env, u64 regno)
97942feb662SAndrii Nakryiko {
98042feb662SAndrii Nakryiko return (env->scratched_stack_slots >> regno) & 1;
98142feb662SAndrii Nakryiko }
98242feb662SAndrii Nakryiko
98342feb662SAndrii Nakryiko static inline bool verifier_state_scratched(const struct bpf_verifier_env *env)
98442feb662SAndrii Nakryiko {
98542feb662SAndrii Nakryiko return env->scratched_regs || env->scratched_stack_slots;
98642feb662SAndrii Nakryiko }
98742feb662SAndrii Nakryiko
98842feb662SAndrii Nakryiko static inline void mark_verifier_state_clean(struct bpf_verifier_env *env)
98942feb662SAndrii Nakryiko {
99042feb662SAndrii Nakryiko env->scratched_regs = 0U;
99142feb662SAndrii Nakryiko env->scratched_stack_slots = 0ULL;
99242feb662SAndrii Nakryiko }
99342feb662SAndrii Nakryiko
99442feb662SAndrii Nakryiko /* Used for printing the entire verifier state. */
99542feb662SAndrii Nakryiko static inline void mark_verifier_state_scratched(struct bpf_verifier_env *env)
99642feb662SAndrii Nakryiko {
99742feb662SAndrii Nakryiko env->scratched_regs = ~0U;
99842feb662SAndrii Nakryiko env->scratched_stack_slots = ~0ULL;
99942feb662SAndrii Nakryiko }
100042feb662SAndrii Nakryiko
1001c1e6148cSMaxim Mikityanskiy static inline bool bpf_stack_narrow_access_ok(int off, int fill_size, int spill_size)
1002c1e6148cSMaxim Mikityanskiy {
1003c1e6148cSMaxim Mikityanskiy #ifdef __BIG_ENDIAN
1004c1e6148cSMaxim Mikityanskiy off -= spill_size - fill_size;
1005c1e6148cSMaxim Mikityanskiy #endif
1006c1e6148cSMaxim Mikityanskiy
1007c1e6148cSMaxim Mikityanskiy return !(off % BPF_REG_SIZE);
1008c1e6148cSMaxim Mikityanskiy }
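/* Sketch of the intent, assuming an 8-byte spill read back as 4 bytes: on a
 * little-endian host the low half of a spill at off == -8 also starts at -8,
 * so the alignment test passes as-is; on big-endian the low half starts at
 * off == -4, and the adjustment above shifts it back to the spill's base
 * before testing alignment against BPF_REG_SIZE.
 */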
1009c1e6148cSMaxim Mikityanskiy
101042feb662SAndrii Nakryiko const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type);
101142feb662SAndrii Nakryiko const char *dynptr_type_str(enum bpf_dynptr_type type);
101242feb662SAndrii Nakryiko const char *iter_type_str(const struct btf *btf, u32 btf_id);
101342feb662SAndrii Nakryiko const char *iter_state_str(enum bpf_iter_state state);
101442feb662SAndrii Nakryiko
10151995edc5SKumar Kartikeya Dwivedi void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
10161995edc5SKumar Kartikeya Dwivedi u32 frameno, bool print_all);
10171995edc5SKumar Kartikeya Dwivedi void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
10181995edc5SKumar Kartikeya Dwivedi u32 frameno);
101942feb662SAndrii Nakryiko
102058e2af8bSJakub Kicinski #endif /* _LINUX_BPF_VERIFIER_H */
1021