xref: /linux-6.15/include/linux/filter.h (revision 88044230)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * Linux Socket Filter Data Structures
41da177e4SLinus Torvalds  */
51da177e4SLinus Torvalds #ifndef __LINUX_FILTER_H__
61da177e4SLinus Torvalds #define __LINUX_FILTER_H__
71da177e4SLinus Torvalds 
860063497SArun Sharma #include <linux/atomic.h>
98581fd40SJakub Kicinski #include <linux/bpf.h>
104c355cdfSReshetova, Elena #include <linux/refcount.h>
110c5fe1b4SWill Drewry #include <linux/compat.h>
129f12fbe6SZi Shen Lim #include <linux/skbuff.h>
13b954d834SDaniel Borkmann #include <linux/linkage.h>
14b954d834SDaniel Borkmann #include <linux/printk.h>
15d45ed4a4SAlexei Starovoitov #include <linux/workqueue.h>
16b13138efSDaniel Borkmann #include <linux/sched.h>
172870c4d6SJakub Kicinski #include <linux/sched/clock.h>
184f3446bbSDaniel Borkmann #include <linux/capability.h>
19820a0b24SMichael Ellerman #include <linux/set_memory.h>
207105e828SDaniel Borkmann #include <linux/kallsyms.h>
216d5fc195SToshiaki Makita #include <linux/if_vlan.h>
22d53d2f78SRick Edgecombe #include <linux/vmalloc.h>
23b1ea9ff6SChristoph Hellwig #include <linux/sockptr.h>
24a24d22b2SEric Biggers #include <crypto/sha1.h>
25700d4796SAlexei Starovoitov #include <linux/u64_stats_sync.h>
264f3446bbSDaniel Borkmann 
27ff936a04SAlexei Starovoitov #include <net/sch_generic.h>
28b954d834SDaniel Borkmann 
29d9b8aadaSIlya Leoshkevich #include <asm/byteorder.h>
30b954d834SDaniel Borkmann #include <uapi/linux/filter.h>
3160a3b225SDaniel Borkmann 
3260a3b225SDaniel Borkmann struct sk_buff;
3360a3b225SDaniel Borkmann struct sock;
3460a3b225SDaniel Borkmann struct seccomp_data;
3509756af4SAlexei Starovoitov struct bpf_prog_aux;
36297dd12cSJesper Dangaard Brouer struct xdp_rxq_info;
37106ca27fSJesper Dangaard Brouer struct xdp_buff;
382dbb9b9eSMartin KaFai Lau struct sock_reuseport;
397b146cebSAndrey Ignatov struct ctl_table;
407b146cebSAndrey Ignatov struct ctl_table_header;
41792d4b5cSHeiko Carstens 
4230743837SDaniel Borkmann /* ArgX, context and stack frame pointer register positions. Note,
4330743837SDaniel Borkmann  * Arg1, Arg2, Arg3, etc. are used as argument mappings of function
4430743837SDaniel Borkmann  * calls in BPF_CALL instruction.
4530743837SDaniel Borkmann  */
4630743837SDaniel Borkmann #define BPF_REG_ARG1	BPF_REG_1
4730743837SDaniel Borkmann #define BPF_REG_ARG2	BPF_REG_2
4830743837SDaniel Borkmann #define BPF_REG_ARG3	BPF_REG_3
4930743837SDaniel Borkmann #define BPF_REG_ARG4	BPF_REG_4
5030743837SDaniel Borkmann #define BPF_REG_ARG5	BPF_REG_5
5130743837SDaniel Borkmann #define BPF_REG_CTX	BPF_REG_6
5230743837SDaniel Borkmann #define BPF_REG_FP	BPF_REG_10
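/* Editor's note: an illustrative sketch (not part of the original header)
 * of the calling convention these aliases encode. Ahead of a helper call,
 * arguments go into R1..R5 and the result comes back in R0, e.g. using the
 * macros defined further below (bpf_some_helper is hypothetical):
 *
 *	BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX),  // r1 = ctx
 *	BPF_MOV64_IMM(BPF_REG_ARG2, 0),            // r2 = 0
 *	BPF_EMIT_CALL(bpf_some_helper),            // r0 = bpf_some_helper(r1, r2)
 */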
5330743837SDaniel Borkmann 
5430743837SDaniel Borkmann /* Additional register mappings for converted user programs. */
5530743837SDaniel Borkmann #define BPF_REG_A	BPF_REG_0
5630743837SDaniel Borkmann #define BPF_REG_X	BPF_REG_7
57e0cea7ceSDaniel Borkmann #define BPF_REG_TMP	BPF_REG_2	/* scratch reg */
58e0cea7ceSDaniel Borkmann #define BPF_REG_D	BPF_REG_8	/* data, callee-saved */
59e0cea7ceSDaniel Borkmann #define BPF_REG_H	BPF_REG_9	/* hlen, callee-saved */
60bd4cf0edSAlexei Starovoitov 
619b73bfddSDaniel Borkmann /* Kernel hidden auxiliary/helper register. */
624f3446bbSDaniel Borkmann #define BPF_REG_AX		MAX_BPF_REG
63144cd91cSDaniel Borkmann #define MAX_BPF_EXT_REG		(MAX_BPF_REG + 1)
64144cd91cSDaniel Borkmann #define MAX_BPF_JIT_REG		MAX_BPF_EXT_REG
654f3446bbSDaniel Borkmann 
6671189fa9SAlexei Starovoitov /* unused opcode to mark special call to bpf_tail_call() helper */
6771189fa9SAlexei Starovoitov #define BPF_TAIL_CALL	0xf0
6871189fa9SAlexei Starovoitov 
692a02759eSAlexei Starovoitov /* unused opcode to mark special load instruction. Same as BPF_ABS */
702a02759eSAlexei Starovoitov #define BPF_PROBE_MEM	0x20
712a02759eSAlexei Starovoitov 
721f9a1ea8SYonghong Song /* unused opcode to mark special ldsx instruction. Same as BPF_IND */
731f9a1ea8SYonghong Song #define BPF_PROBE_MEMSX	0x40
741f9a1ea8SYonghong Song 
752fe99eb0SAlexei Starovoitov /* unused opcode to mark special load instruction. Same as BPF_MSH */
762fe99eb0SAlexei Starovoitov #define BPF_PROBE_MEM32	0xa0
772fe99eb0SAlexei Starovoitov 
78d503a04fSAlexei Starovoitov /* unused opcode to mark special atomic instruction */
79d503a04fSAlexei Starovoitov #define BPF_PROBE_ATOMIC 0xe0
80d503a04fSAlexei Starovoitov 
811ea47e01SAlexei Starovoitov /* unused opcode to mark call to interpreter with arguments */
821ea47e01SAlexei Starovoitov #define BPF_CALL_ARGS	0xe0
831ea47e01SAlexei Starovoitov 
84f5e81d11SDaniel Borkmann /* unused opcode to mark speculation barrier for mitigating
85f5e81d11SDaniel Borkmann  * Speculative Store Bypass
86f5e81d11SDaniel Borkmann  */
87f5e81d11SDaniel Borkmann #define BPF_NOSPEC	0xc0
88f5e81d11SDaniel Borkmann 
8974451e66SDaniel Borkmann /* As per nm, we expose JITed images as a text (code) section for
9074451e66SDaniel Borkmann  * kallsyms. That way, tools like perf can find it to match
9174451e66SDaniel Borkmann  * addresses.
9274451e66SDaniel Borkmann  */
9374451e66SDaniel Borkmann #define BPF_SYM_ELF_TYPE	't'
9474451e66SDaniel Borkmann 
95bd4cf0edSAlexei Starovoitov /* BPF program can access up to 512 bytes of stack space. */
96bd4cf0edSAlexei Starovoitov #define MAX_BPF_STACK	512
97bd4cf0edSAlexei Starovoitov 
98f8f6d679SDaniel Borkmann /* Helper macros for filter block array initializers. */
999739eef1SAlexei Starovoitov 
100e430f34eSAlexei Starovoitov /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
1019739eef1SAlexei Starovoitov 
1027058e3a3SYonghong Song #define BPF_ALU64_REG_OFF(OP, DST, SRC, OFF)			\
1032695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
104f8f6d679SDaniel Borkmann 		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
105e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
106e430f34eSAlexei Starovoitov 		.src_reg = SRC,					\
1077058e3a3SYonghong Song 		.off   = OFF,					\
108f8f6d679SDaniel Borkmann 		.imm   = 0 })
1099739eef1SAlexei Starovoitov 
1107058e3a3SYonghong Song #define BPF_ALU64_REG(OP, DST, SRC)				\
1117058e3a3SYonghong Song 	BPF_ALU64_REG_OFF(OP, DST, SRC, 0)
1127058e3a3SYonghong Song 
1137058e3a3SYonghong Song #define BPF_ALU32_REG_OFF(OP, DST, SRC, OFF)			\
1142695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
115f8f6d679SDaniel Borkmann 		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
116e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
117e430f34eSAlexei Starovoitov 		.src_reg = SRC,					\
1187058e3a3SYonghong Song 		.off   = OFF,					\
119f8f6d679SDaniel Borkmann 		.imm   = 0 })
1209739eef1SAlexei Starovoitov 
1217058e3a3SYonghong Song #define BPF_ALU32_REG(OP, DST, SRC)				\
1227058e3a3SYonghong Song 	BPF_ALU32_REG_OFF(OP, DST, SRC, 0)
1237058e3a3SYonghong Song 
124e430f34eSAlexei Starovoitov /* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
1259739eef1SAlexei Starovoitov 
126daabb2b0SPuranjay Mohan #define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF)			\
1272695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
128f8f6d679SDaniel Borkmann 		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
129e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
130e430f34eSAlexei Starovoitov 		.src_reg = 0,					\
131daabb2b0SPuranjay Mohan 		.off   = OFF,					\
132f8f6d679SDaniel Borkmann 		.imm   = IMM })
133daabb2b0SPuranjay Mohan #define BPF_ALU64_IMM(OP, DST, IMM)				\
134daabb2b0SPuranjay Mohan 	BPF_ALU64_IMM_OFF(OP, DST, IMM, 0)
1359739eef1SAlexei Starovoitov 
136daabb2b0SPuranjay Mohan #define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF)			\
1372695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
138f8f6d679SDaniel Borkmann 		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
139e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
140e430f34eSAlexei Starovoitov 		.src_reg = 0,					\
141daabb2b0SPuranjay Mohan 		.off   = OFF,					\
142f8f6d679SDaniel Borkmann 		.imm   = IMM })
143daabb2b0SPuranjay Mohan #define BPF_ALU32_IMM(OP, DST, IMM)				\
144daabb2b0SPuranjay Mohan 	BPF_ALU32_IMM_OFF(OP, DST, IMM, 0)
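/* Editor's note: a minimal usage sketch (not part of the original header).
 * Each macro is a plain struct bpf_insn initializer, so blocks compose as
 * array elements; e.g. r0 = (r0 + 8) * 4 in 64-bit arithmetic:
 *
 *	struct bpf_insn seq[] = {
 *		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
 *		BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 4),
 *	};
 */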
145f8f6d679SDaniel Borkmann 
146f8f6d679SDaniel Borkmann /* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
147f8f6d679SDaniel Borkmann 
148e430f34eSAlexei Starovoitov #define BPF_ENDIAN(TYPE, DST, LEN)				\
1492695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
150f8f6d679SDaniel Borkmann 		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
151e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
152e430f34eSAlexei Starovoitov 		.src_reg = 0,					\
153f8f6d679SDaniel Borkmann 		.off   = 0,					\
154f8f6d679SDaniel Borkmann 		.imm   = LEN })
155f8f6d679SDaniel Borkmann 
156daabb2b0SPuranjay Mohan /* Byte Swap, bswap16/32/64 */
157daabb2b0SPuranjay Mohan 
158daabb2b0SPuranjay Mohan #define BPF_BSWAP(DST, LEN)					\
159daabb2b0SPuranjay Mohan 	((struct bpf_insn) {					\
160daabb2b0SPuranjay Mohan 		.code  = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE),	\
161daabb2b0SPuranjay Mohan 		.dst_reg = DST,					\
162daabb2b0SPuranjay Mohan 		.src_reg = 0,					\
163daabb2b0SPuranjay Mohan 		.off   = 0,					\
164daabb2b0SPuranjay Mohan 		.imm   = LEN })
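/* Editor's note (illustrative, not part of the original header): e.g.
 * BPF_ENDIAN(BPF_TO_BE, BPF_REG_1, 16) emits r1 = be16(r1), a byte swap
 * conditional on host endianness, whereas BPF_BSWAP(BPF_REG_1, 32) emits
 * the unconditional r1 = bswap32(r1).
 */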
165daabb2b0SPuranjay Mohan 
166e430f34eSAlexei Starovoitov /* Short form of mov, dst_reg = src_reg */
167f8f6d679SDaniel Borkmann 
168e430f34eSAlexei Starovoitov #define BPF_MOV64_REG(DST, SRC)					\
1692695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
170f8f6d679SDaniel Borkmann 		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
171e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
172e430f34eSAlexei Starovoitov 		.src_reg = SRC,					\
173f8f6d679SDaniel Borkmann 		.off   = 0,					\
174f8f6d679SDaniel Borkmann 		.imm   = 0 })
175f8f6d679SDaniel Borkmann 
176e430f34eSAlexei Starovoitov #define BPF_MOV32_REG(DST, SRC)					\
1772695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
178f8f6d679SDaniel Borkmann 		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
179e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
180e430f34eSAlexei Starovoitov 		.src_reg = SRC,					\
181f8f6d679SDaniel Borkmann 		.off   = 0,					\
182f8f6d679SDaniel Borkmann 		.imm   = 0 })
183f8f6d679SDaniel Borkmann 
1847bdbf744SAndrii Nakryiko /* Special (internal-only) form of mov, used to resolve per-CPU addrs:
1857bdbf744SAndrii Nakryiko  * dst_reg = src_reg + <percpu_base_off>
1867bdbf744SAndrii Nakryiko  * BPF_ADDR_PERCPU is used as a special insn->off value.
1877bdbf744SAndrii Nakryiko  */
1887bdbf744SAndrii Nakryiko #define BPF_ADDR_PERCPU	(-1)
1897bdbf744SAndrii Nakryiko 
1907bdbf744SAndrii Nakryiko #define BPF_MOV64_PERCPU_REG(DST, SRC)				\
1917bdbf744SAndrii Nakryiko 	((struct bpf_insn) {					\
1927bdbf744SAndrii Nakryiko 		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
1937bdbf744SAndrii Nakryiko 		.dst_reg = DST,					\
1947bdbf744SAndrii Nakryiko 		.src_reg = SRC,					\
1957bdbf744SAndrii Nakryiko 		.off   = BPF_ADDR_PERCPU,			\
1967bdbf744SAndrii Nakryiko 		.imm   = 0 })
1977bdbf744SAndrii Nakryiko 
1987bdbf744SAndrii Nakryiko static inline bool insn_is_mov_percpu_addr(const struct bpf_insn *insn)
1997bdbf744SAndrii Nakryiko {
2007bdbf744SAndrii Nakryiko 	return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->off == BPF_ADDR_PERCPU;
2017bdbf744SAndrii Nakryiko }
2027bdbf744SAndrii Nakryiko 
203e430f34eSAlexei Starovoitov /* Short form of mov, dst_reg = imm32 */
204f8f6d679SDaniel Borkmann 
205e430f34eSAlexei Starovoitov #define BPF_MOV64_IMM(DST, IMM)					\
2062695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
207f8f6d679SDaniel Borkmann 		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
208e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
209e430f34eSAlexei Starovoitov 		.src_reg = 0,					\
210f8f6d679SDaniel Borkmann 		.off   = 0,					\
211f8f6d679SDaniel Borkmann 		.imm   = IMM })
212f8f6d679SDaniel Borkmann 
213e430f34eSAlexei Starovoitov #define BPF_MOV32_IMM(DST, IMM)					\
2142695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
215f8f6d679SDaniel Borkmann 		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
216e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
217e430f34eSAlexei Starovoitov 		.src_reg = 0,					\
218f8f6d679SDaniel Borkmann 		.off   = 0,					\
219f8f6d679SDaniel Borkmann 		.imm   = IMM })
220f8f6d679SDaniel Borkmann 
221daabb2b0SPuranjay Mohan /* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */
222daabb2b0SPuranjay Mohan 
223daabb2b0SPuranjay Mohan #define BPF_MOVSX64_REG(DST, SRC, OFF)				\
224daabb2b0SPuranjay Mohan 	((struct bpf_insn) {					\
225daabb2b0SPuranjay Mohan 		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
226daabb2b0SPuranjay Mohan 		.dst_reg = DST,					\
227daabb2b0SPuranjay Mohan 		.src_reg = SRC,					\
228daabb2b0SPuranjay Mohan 		.off   = OFF,					\
229daabb2b0SPuranjay Mohan 		.imm   = 0 })
230daabb2b0SPuranjay Mohan 
231daabb2b0SPuranjay Mohan #define BPF_MOVSX32_REG(DST, SRC, OFF)				\
232daabb2b0SPuranjay Mohan 	((struct bpf_insn) {					\
233daabb2b0SPuranjay Mohan 		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
234daabb2b0SPuranjay Mohan 		.dst_reg = DST,					\
235daabb2b0SPuranjay Mohan 		.src_reg = SRC,					\
236daabb2b0SPuranjay Mohan 		.off   = OFF,					\
237daabb2b0SPuranjay Mohan 		.imm   = 0 })
238daabb2b0SPuranjay Mohan 
2397d134041SJiong Wang /* Special form of mov32, used for doing explicit zero extension on dst. */
2407d134041SJiong Wang #define BPF_ZEXT_REG(DST)					\
2417d134041SJiong Wang 	((struct bpf_insn) {					\
2427d134041SJiong Wang 		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
2437d134041SJiong Wang 		.dst_reg = DST,					\
2447d134041SJiong Wang 		.src_reg = DST,					\
2457d134041SJiong Wang 		.off   = 0,					\
2467d134041SJiong Wang 		.imm   = 1 })
2477d134041SJiong Wang 
2487d134041SJiong Wang static inline bool insn_is_zext(const struct bpf_insn *insn)
2497d134041SJiong Wang {
2507d134041SJiong Wang 	return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
2517d134041SJiong Wang }
2527d134041SJiong Wang 
253770546aeSPuranjay Mohan /* addr_space_cast from as(0) to as(1) is for converting bpf arena pointers
254770546aeSPuranjay Mohan  * to pointers in user vma.
255770546aeSPuranjay Mohan  */
insn_is_cast_user(const struct bpf_insn * insn)256770546aeSPuranjay Mohan static inline bool insn_is_cast_user(const struct bpf_insn *insn)
257770546aeSPuranjay Mohan {
258770546aeSPuranjay Mohan 	return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
259770546aeSPuranjay Mohan 			      insn->off == BPF_ADDR_SPACE_CAST &&
260770546aeSPuranjay Mohan 			      insn->imm == 1U << 16;
261770546aeSPuranjay Mohan }
262770546aeSPuranjay Mohan 
26302ab695bSAlexei Starovoitov /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
26402ab695bSAlexei Starovoitov #define BPF_LD_IMM64(DST, IMM)					\
26502ab695bSAlexei Starovoitov 	BPF_LD_IMM64_RAW(DST, 0, IMM)
26602ab695bSAlexei Starovoitov 
26702ab695bSAlexei Starovoitov #define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
26802ab695bSAlexei Starovoitov 	((struct bpf_insn) {					\
26902ab695bSAlexei Starovoitov 		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
27002ab695bSAlexei Starovoitov 		.dst_reg = DST,					\
27102ab695bSAlexei Starovoitov 		.src_reg = SRC,					\
27202ab695bSAlexei Starovoitov 		.off   = 0,					\
27302ab695bSAlexei Starovoitov 		.imm   = (__u32) (IMM) }),			\
27402ab695bSAlexei Starovoitov 	((struct bpf_insn) {					\
27502ab695bSAlexei Starovoitov 		.code  = 0, /* zero is reserved opcode */	\
27602ab695bSAlexei Starovoitov 		.dst_reg = 0,					\
27702ab695bSAlexei Starovoitov 		.src_reg = 0,					\
27802ab695bSAlexei Starovoitov 		.off   = 0,					\
27902ab695bSAlexei Starovoitov 		.imm   = ((__u64) (IMM)) >> 32 })
28002ab695bSAlexei Starovoitov 
2810246e64dSAlexei Starovoitov /* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
2820246e64dSAlexei Starovoitov #define BPF_LD_MAP_FD(DST, MAP_FD)				\
2830246e64dSAlexei Starovoitov 	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
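/* Editor's note: an illustrative sketch (not part of the original header).
 * Unlike the other macros here, BPF_LD_IMM64() expands to *two* struct
 * bpf_insn initializers (the 64-bit immediate is split across an insn
 * pair), so it only works inside a brace-enclosed initializer list:
 *
 *	struct bpf_insn seq[] = {
 *		BPF_LD_IMM64(BPF_REG_0, 0x1122334455667788ULL),  // 2 insns
 *		BPF_EXIT_INSN(),  // defined below
 *	};  // ARRAY_SIZE(seq) == 3
 */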
2840246e64dSAlexei Starovoitov 
285e430f34eSAlexei Starovoitov /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
286f8f6d679SDaniel Borkmann 
287e430f34eSAlexei Starovoitov #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
2882695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
289f8f6d679SDaniel Borkmann 		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
290e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
291e430f34eSAlexei Starovoitov 		.src_reg = SRC,					\
292f8f6d679SDaniel Borkmann 		.off   = 0,					\
293f8f6d679SDaniel Borkmann 		.imm   = IMM })
294f8f6d679SDaniel Borkmann 
295e430f34eSAlexei Starovoitov #define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
2962695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
297f8f6d679SDaniel Borkmann 		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
298e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
299e430f34eSAlexei Starovoitov 		.src_reg = SRC,					\
300f8f6d679SDaniel Borkmann 		.off   = 0,					\
301f8f6d679SDaniel Borkmann 		.imm   = IMM })
302f8f6d679SDaniel Borkmann 
303e430f34eSAlexei Starovoitov /* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
304f8f6d679SDaniel Borkmann 
305e430f34eSAlexei Starovoitov #define BPF_LD_ABS(SIZE, IMM)					\
3062695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
307f8f6d679SDaniel Borkmann 		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
308e430f34eSAlexei Starovoitov 		.dst_reg = 0,					\
309e430f34eSAlexei Starovoitov 		.src_reg = 0,					\
310f8f6d679SDaniel Borkmann 		.off   = 0,					\
311e430f34eSAlexei Starovoitov 		.imm   = IMM })
312f8f6d679SDaniel Borkmann 
313e430f34eSAlexei Starovoitov /* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
314f8f6d679SDaniel Borkmann 
315e430f34eSAlexei Starovoitov #define BPF_LD_IND(SIZE, SRC, IMM)				\
3162695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
317f8f6d679SDaniel Borkmann 		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
318e430f34eSAlexei Starovoitov 		.dst_reg = 0,					\
319e430f34eSAlexei Starovoitov 		.src_reg = SRC,					\
320f8f6d679SDaniel Borkmann 		.off   = 0,					\
321e430f34eSAlexei Starovoitov 		.imm   = IMM })
322f8f6d679SDaniel Borkmann 
323e430f34eSAlexei Starovoitov /* Memory load, dst_reg = *(uint *) (src_reg + off16) */
324f8f6d679SDaniel Borkmann 
325e430f34eSAlexei Starovoitov #define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
3262695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
327f8f6d679SDaniel Borkmann 		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
328e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
329e430f34eSAlexei Starovoitov 		.src_reg = SRC,					\
330f8f6d679SDaniel Borkmann 		.off   = OFF,					\
331f8f6d679SDaniel Borkmann 		.imm   = 0 })
332f8f6d679SDaniel Borkmann 
333daabb2b0SPuranjay Mohan /* Memory load, dst_reg = *(signed size *) (src_reg + off16) */
334daabb2b0SPuranjay Mohan 
335daabb2b0SPuranjay Mohan #define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF)			\
336daabb2b0SPuranjay Mohan 	((struct bpf_insn) {					\
337daabb2b0SPuranjay Mohan 		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX,	\
338daabb2b0SPuranjay Mohan 		.dst_reg = DST,					\
339daabb2b0SPuranjay Mohan 		.src_reg = SRC,					\
340daabb2b0SPuranjay Mohan 		.off   = OFF,					\
341daabb2b0SPuranjay Mohan 		.imm   = 0 })
342daabb2b0SPuranjay Mohan 
343e430f34eSAlexei Starovoitov /* Memory store, *(uint *) (dst_reg + off16) = src_reg */
344e430f34eSAlexei Starovoitov 
345e430f34eSAlexei Starovoitov #define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
3462695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
347f8f6d679SDaniel Borkmann 		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
348e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
349e430f34eSAlexei Starovoitov 		.src_reg = SRC,					\
350f8f6d679SDaniel Borkmann 		.off   = OFF,					\
351f8f6d679SDaniel Borkmann 		.imm   = 0 })
352f8f6d679SDaniel Borkmann 
353cffc642dSMichael Holzheu 
35491c960b0SBrendan Jackman /*
35591c960b0SBrendan Jackman  * Atomic operations:
35691c960b0SBrendan Jackman  *
35791c960b0SBrendan Jackman  *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
358981f94c3SBrendan Jackman  *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
359981f94c3SBrendan Jackman  *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
360981f94c3SBrendan Jackman  *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
3615ca419f2SBrendan Jackman  *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
362981f94c3SBrendan Jackman  *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
363981f94c3SBrendan Jackman  *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
364981f94c3SBrendan Jackman  *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
3655ffa2550SBrendan Jackman  *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
3665ffa2550SBrendan Jackman  *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
367*88044230SPeilin Ye  *   BPF_LOAD_ACQ             dst_reg = smp_load_acquire(src_reg + off16)
368*88044230SPeilin Ye  *   BPF_STORE_REL            smp_store_release(dst_reg + off16, src_reg)
36991c960b0SBrendan Jackman  */
37091c960b0SBrendan Jackman 
37191c960b0SBrendan Jackman #define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)			\
372cffc642dSMichael Holzheu 	((struct bpf_insn) {					\
37391c960b0SBrendan Jackman 		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
374cffc642dSMichael Holzheu 		.dst_reg = DST,					\
375cffc642dSMichael Holzheu 		.src_reg = SRC,					\
376cffc642dSMichael Holzheu 		.off   = OFF,					\
37791c960b0SBrendan Jackman 		.imm   = OP })
37891c960b0SBrendan Jackman 
37991c960b0SBrendan Jackman /* Legacy alias */
38091c960b0SBrendan Jackman #define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
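/* Editor's note: an illustrative sketch (not part of the original header).
 * The atomic operation itself is selected via the imm field, e.g.:
 *
 *	// *(u64 *)(r1 + 0) += r2
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2, 0),
 *	// r2 = atomic_fetch_add((u64 *)(r1 + 0), r2)
 *	BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_1, BPF_REG_2, 0),
 */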
381cffc642dSMichael Holzheu 
382e430f34eSAlexei Starovoitov /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
383f8f6d679SDaniel Borkmann 
384e430f34eSAlexei Starovoitov #define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
3852695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
386e430f34eSAlexei Starovoitov 		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
387e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
388e430f34eSAlexei Starovoitov 		.src_reg = 0,					\
389e430f34eSAlexei Starovoitov 		.off   = OFF,					\
390e430f34eSAlexei Starovoitov 		.imm   = IMM })
391e430f34eSAlexei Starovoitov 
392e430f34eSAlexei Starovoitov /* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
393e430f34eSAlexei Starovoitov 
394e430f34eSAlexei Starovoitov #define BPF_JMP_REG(OP, DST, SRC, OFF)				\
3952695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
396f8f6d679SDaniel Borkmann 		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
397e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
398e430f34eSAlexei Starovoitov 		.src_reg = SRC,					\
399f8f6d679SDaniel Borkmann 		.off   = OFF,					\
400f8f6d679SDaniel Borkmann 		.imm   = 0 })
401f8f6d679SDaniel Borkmann 
402e430f34eSAlexei Starovoitov /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
403f8f6d679SDaniel Borkmann 
404e430f34eSAlexei Starovoitov #define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
4052695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
406f8f6d679SDaniel Borkmann 		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
407e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
408e430f34eSAlexei Starovoitov 		.src_reg = 0,					\
409f8f6d679SDaniel Borkmann 		.off   = OFF,					\
410f8f6d679SDaniel Borkmann 		.imm   = IMM })
411f8f6d679SDaniel Borkmann 
412a7b76c88SJiong Wang /* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */
413a7b76c88SJiong Wang 
414a7b76c88SJiong Wang #define BPF_JMP32_REG(OP, DST, SRC, OFF)			\
415a7b76c88SJiong Wang 	((struct bpf_insn) {					\
416a7b76c88SJiong Wang 		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_X,	\
417a7b76c88SJiong Wang 		.dst_reg = DST,					\
418a7b76c88SJiong Wang 		.src_reg = SRC,					\
419a7b76c88SJiong Wang 		.off   = OFF,					\
420a7b76c88SJiong Wang 		.imm   = 0 })
421a7b76c88SJiong Wang 
422a7b76c88SJiong Wang /* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */
423a7b76c88SJiong Wang 
424a7b76c88SJiong Wang #define BPF_JMP32_IMM(OP, DST, IMM, OFF)			\
425a7b76c88SJiong Wang 	((struct bpf_insn) {					\
426a7b76c88SJiong Wang 		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_K,	\
427a7b76c88SJiong Wang 		.dst_reg = DST,					\
428a7b76c88SJiong Wang 		.src_reg = 0,					\
429a7b76c88SJiong Wang 		.off   = OFF,					\
430a7b76c88SJiong Wang 		.imm   = IMM })
431a7b76c88SJiong Wang 
432614d0d77SDaniel Borkmann /* Unconditional jumps, goto pc + off16 */
433614d0d77SDaniel Borkmann 
434614d0d77SDaniel Borkmann #define BPF_JMP_A(OFF)						\
435614d0d77SDaniel Borkmann 	((struct bpf_insn) {					\
436614d0d77SDaniel Borkmann 		.code  = BPF_JMP | BPF_JA,			\
437614d0d77SDaniel Borkmann 		.dst_reg = 0,					\
438614d0d77SDaniel Borkmann 		.src_reg = 0,					\
439614d0d77SDaniel Borkmann 		.off   = OFF,					\
440614d0d77SDaniel Borkmann 		.imm   = 0 })
441614d0d77SDaniel Borkmann 
442169c3176SMartin KaFai Lau /* Unconditional jumps, gotol pc + imm32 */
443169c3176SMartin KaFai Lau 
444169c3176SMartin KaFai Lau #define BPF_JMP32_A(IMM)					\
445169c3176SMartin KaFai Lau 	((struct bpf_insn) {					\
446169c3176SMartin KaFai Lau 		.code  = BPF_JMP32 | BPF_JA,			\
447169c3176SMartin KaFai Lau 		.dst_reg = 0,					\
448169c3176SMartin KaFai Lau 		.src_reg = 0,					\
449169c3176SMartin KaFai Lau 		.off   = 0,					\
450169c3176SMartin KaFai Lau 		.imm   = IMM })
451169c3176SMartin KaFai Lau 
45206be0864SDaniel Borkmann /* Relative call */
45306be0864SDaniel Borkmann 
45406be0864SDaniel Borkmann #define BPF_CALL_REL(TGT)					\
45506be0864SDaniel Borkmann 	((struct bpf_insn) {					\
45606be0864SDaniel Borkmann 		.code  = BPF_JMP | BPF_CALL,			\
45706be0864SDaniel Borkmann 		.dst_reg = 0,					\
45806be0864SDaniel Borkmann 		.src_reg = BPF_PSEUDO_CALL,			\
45906be0864SDaniel Borkmann 		.off   = 0,					\
46006be0864SDaniel Borkmann 		.imm   = TGT })
46106be0864SDaniel Borkmann 
4623d717fadSKees Cook /* Convert function address to BPF immediate */
463f8f6d679SDaniel Borkmann 
4643d717fadSKees Cook #define BPF_CALL_IMM(x)	((void *)(x) - (void *)__bpf_call_base)
46509772d92SDaniel Borkmann 
466f8f6d679SDaniel Borkmann #define BPF_EMIT_CALL(FUNC)					\
4672695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
468f8f6d679SDaniel Borkmann 		.code  = BPF_JMP | BPF_CALL,			\
469e430f34eSAlexei Starovoitov 		.dst_reg = 0,					\
470e430f34eSAlexei Starovoitov 		.src_reg = 0,					\
471f8f6d679SDaniel Borkmann 		.off   = 0,					\
4723d717fadSKees Cook 		.imm   = BPF_CALL_IMM(FUNC) })
473f8f6d679SDaniel Borkmann 
4744e4136c6SAmery Hung /* Kfunc call */
4754e4136c6SAmery Hung 
4764e4136c6SAmery Hung #define BPF_CALL_KFUNC(OFF, IMM)				\
4774e4136c6SAmery Hung 	((struct bpf_insn) {					\
4784e4136c6SAmery Hung 		.code  = BPF_JMP | BPF_CALL,			\
4794e4136c6SAmery Hung 		.dst_reg = 0,					\
4804e4136c6SAmery Hung 		.src_reg = BPF_PSEUDO_KFUNC_CALL,		\
4814e4136c6SAmery Hung 		.off   = OFF,					\
4824e4136c6SAmery Hung 		.imm   = IMM })
4834e4136c6SAmery Hung 
484f8f6d679SDaniel Borkmann /* Raw code statement block */
485f8f6d679SDaniel Borkmann 
486e430f34eSAlexei Starovoitov #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
4872695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
488f8f6d679SDaniel Borkmann 		.code  = CODE,					\
489e430f34eSAlexei Starovoitov 		.dst_reg = DST,					\
490e430f34eSAlexei Starovoitov 		.src_reg = SRC,					\
491f8f6d679SDaniel Borkmann 		.off   = OFF,					\
492f8f6d679SDaniel Borkmann 		.imm   = IMM })
493f8f6d679SDaniel Borkmann 
494f8f6d679SDaniel Borkmann /* Program exit */
4959739eef1SAlexei Starovoitov 
4969739eef1SAlexei Starovoitov #define BPF_EXIT_INSN()						\
4972695fb55SAlexei Starovoitov 	((struct bpf_insn) {					\
498f8f6d679SDaniel Borkmann 		.code  = BPF_JMP | BPF_EXIT,			\
499e430f34eSAlexei Starovoitov 		.dst_reg = 0,					\
500e430f34eSAlexei Starovoitov 		.src_reg = 0,					\
501f8f6d679SDaniel Borkmann 		.off   = 0,					\
502f8f6d679SDaniel Borkmann 		.imm   = 0 })
5039739eef1SAlexei Starovoitov 
504f5e81d11SDaniel Borkmann /* Speculation barrier */
505f5e81d11SDaniel Borkmann 
506f5e81d11SDaniel Borkmann #define BPF_ST_NOSPEC()						\
507f5e81d11SDaniel Borkmann 	((struct bpf_insn) {					\
508f5e81d11SDaniel Borkmann 		.code  = BPF_ST | BPF_NOSPEC,			\
509f5e81d11SDaniel Borkmann 		.dst_reg = 0,					\
510f5e81d11SDaniel Borkmann 		.src_reg = 0,					\
511f5e81d11SDaniel Borkmann 		.off   = 0,					\
512f5e81d11SDaniel Borkmann 		.imm   = 0 })
513f5e81d11SDaniel Borkmann 
514a4afd37bSDaniel Borkmann /* Internal classic blocks for direct assignment */
515a4afd37bSDaniel Borkmann 
516a4afd37bSDaniel Borkmann #define __BPF_STMT(CODE, K)					\
517a4afd37bSDaniel Borkmann 	((struct sock_filter) BPF_STMT(CODE, K))
518a4afd37bSDaniel Borkmann 
519a4afd37bSDaniel Borkmann #define __BPF_JUMP(CODE, K, JT, JF)				\
520a4afd37bSDaniel Borkmann 	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))
521a4afd37bSDaniel Borkmann 
522f8f6d679SDaniel Borkmann #define bytes_to_bpf_size(bytes)				\
523f8f6d679SDaniel Borkmann ({								\
524f8f6d679SDaniel Borkmann 	int bpf_size = -EINVAL;					\
525f8f6d679SDaniel Borkmann 								\
526f8f6d679SDaniel Borkmann 	if (bytes == sizeof(u8))				\
527f8f6d679SDaniel Borkmann 		bpf_size = BPF_B;				\
528f8f6d679SDaniel Borkmann 	else if (bytes == sizeof(u16))				\
529f8f6d679SDaniel Borkmann 		bpf_size = BPF_H;				\
530f8f6d679SDaniel Borkmann 	else if (bytes == sizeof(u32))				\
531f8f6d679SDaniel Borkmann 		bpf_size = BPF_W;				\
532f8f6d679SDaniel Borkmann 	else if (bytes == sizeof(u64))				\
533f8f6d679SDaniel Borkmann 		bpf_size = BPF_DW;				\
534f8f6d679SDaniel Borkmann 								\
535f8f6d679SDaniel Borkmann 	bpf_size;						\
536f8f6d679SDaniel Borkmann })
5379739eef1SAlexei Starovoitov 
538f96da094SDaniel Borkmann #define bpf_size_to_bytes(bpf_size)				\
539f96da094SDaniel Borkmann ({								\
540f96da094SDaniel Borkmann 	int bytes = -EINVAL;					\
541f96da094SDaniel Borkmann 								\
542f96da094SDaniel Borkmann 	if (bpf_size == BPF_B)					\
543f96da094SDaniel Borkmann 		bytes = sizeof(u8);				\
544f96da094SDaniel Borkmann 	else if (bpf_size == BPF_H)				\
545f96da094SDaniel Borkmann 		bytes = sizeof(u16);				\
546f96da094SDaniel Borkmann 	else if (bpf_size == BPF_W)				\
547f96da094SDaniel Borkmann 		bytes = sizeof(u32);				\
548f96da094SDaniel Borkmann 	else if (bpf_size == BPF_DW)				\
549f96da094SDaniel Borkmann 		bytes = sizeof(u64);				\
550f96da094SDaniel Borkmann 								\
551f96da094SDaniel Borkmann 	bytes;							\
552f96da094SDaniel Borkmann })
553f96da094SDaniel Borkmann 
554f035a515SDaniel Borkmann #define BPF_SIZEOF(type)					\
555f035a515SDaniel Borkmann 	({							\
556f035a515SDaniel Borkmann 		const int __size = bytes_to_bpf_size(sizeof(type)); \
557f035a515SDaniel Borkmann 		BUILD_BUG_ON(__size < 0);			\
558f035a515SDaniel Borkmann 		__size;						\
559f035a515SDaniel Borkmann 	})
560f035a515SDaniel Borkmann 
561f035a515SDaniel Borkmann #define BPF_FIELD_SIZEOF(type, field)				\
562f035a515SDaniel Borkmann 	({							\
563c593642cSPankaj Bharadiya 		const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
564f035a515SDaniel Borkmann 		BUILD_BUG_ON(__size < 0);			\
565f035a515SDaniel Borkmann 		__size;						\
566f035a515SDaniel Borkmann 	})
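/* Editor's note: an illustrative sketch (not part of the original header).
 * BPF_SIZEOF()/BPF_FIELD_SIZEOF() resolve to BPF_B/H/W/DW at compile time
 * (with a build error on unsupported sizes), which keeps instruction
 * rewriting in sync with struct layout, e.g.:
 *
 *	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len), dst_reg,
 *		    src_reg, offsetof(struct sk_buff, len)),
 */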
567f035a515SDaniel Borkmann 
568f96da094SDaniel Borkmann #define BPF_LDST_BYTES(insn)					\
569f96da094SDaniel Borkmann 	({							\
570e59ac634SJakub Kicinski 		const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
571f96da094SDaniel Borkmann 		WARN_ON(__size < 0);				\
572f96da094SDaniel Borkmann 		__size;						\
573f96da094SDaniel Borkmann 	})
574f96da094SDaniel Borkmann 
575f3694e00SDaniel Borkmann #define __BPF_MAP_0(m, v, ...) v
576f3694e00SDaniel Borkmann #define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
577f3694e00SDaniel Borkmann #define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
578f3694e00SDaniel Borkmann #define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
579f3694e00SDaniel Borkmann #define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
580f3694e00SDaniel Borkmann #define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)
581f3694e00SDaniel Borkmann 
582f3694e00SDaniel Borkmann #define __BPF_REG_0(...) __BPF_PAD(5)
583f3694e00SDaniel Borkmann #define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
584f3694e00SDaniel Borkmann #define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
585f3694e00SDaniel Borkmann #define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
586f3694e00SDaniel Borkmann #define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
587f3694e00SDaniel Borkmann #define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)
588f3694e00SDaniel Borkmann 
589f3694e00SDaniel Borkmann #define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
590f3694e00SDaniel Borkmann #define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)
591f3694e00SDaniel Borkmann 
592f3694e00SDaniel Borkmann #define __BPF_CAST(t, a)						       \
593f3694e00SDaniel Borkmann 	(__force t)							       \
594f3694e00SDaniel Borkmann 	(__force							       \
595f3694e00SDaniel Borkmann 	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long),      \
596f3694e00SDaniel Borkmann 				      (unsigned long)0, (t)0))) a
597f3694e00SDaniel Borkmann #define __BPF_V void
598f3694e00SDaniel Borkmann #define __BPF_N
599f3694e00SDaniel Borkmann 
600f3694e00SDaniel Borkmann #define __BPF_DECL_ARGS(t, a) t   a
601f3694e00SDaniel Borkmann #define __BPF_DECL_REGS(t, a) u64 a
602f3694e00SDaniel Borkmann 
603f3694e00SDaniel Borkmann #define __BPF_PAD(n)							       \
604f3694e00SDaniel Borkmann 	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,       \
605f3694e00SDaniel Borkmann 		  u64, __ur_3, u64, __ur_4, u64, __ur_5)
606f3694e00SDaniel Borkmann 
607178c5466SYonghong Song #define BPF_CALL_x(x, attr, name, ...)					       \
608f3694e00SDaniel Borkmann 	static __always_inline						       \
609f3694e00SDaniel Borkmann 	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));   \
6107c6a469eSAlexei Starovoitov 	typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
611178c5466SYonghong Song 	attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));    \
612178c5466SYonghong Song 	attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))     \
613f3694e00SDaniel Borkmann 	{								       \
6147c6a469eSAlexei Starovoitov 		return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
615f3694e00SDaniel Borkmann 	}								       \
616f3694e00SDaniel Borkmann 	static __always_inline						       \
617f3694e00SDaniel Borkmann 	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
618f3694e00SDaniel Borkmann 
619178c5466SYonghong Song #define __NOATTR
620178c5466SYonghong Song #define BPF_CALL_0(name, ...)	BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
621178c5466SYonghong Song #define BPF_CALL_1(name, ...)	BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
622178c5466SYonghong Song #define BPF_CALL_2(name, ...)	BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
623178c5466SYonghong Song #define BPF_CALL_3(name, ...)	BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
624178c5466SYonghong Song #define BPF_CALL_4(name, ...)	BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
625178c5466SYonghong Song #define BPF_CALL_5(name, ...)	BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
626178c5466SYonghong Song 
627178c5466SYonghong Song #define NOTRACE_BPF_CALL_1(name, ...)	BPF_CALL_x(1, notrace, name, __VA_ARGS__)
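/* Editor's note: an illustrative sketch (not part of the original header)
 * of defining a helper through these wrappers (bpf_my_helper is
 * hypothetical):
 *
 *	BPF_CALL_2(bpf_my_helper, struct sk_buff *, skb, u32, flags)
 *	{
 *		return skb->len + flags;
 *	}
 *
 * This emits u64 bpf_my_helper(u64, u64, u64, u64, u64), which casts the
 * five generic u64 arguments back to the declared types before invoking
 * the typed ____bpf_my_helper() body.
 */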
628f3694e00SDaniel Borkmann 
629f96da094SDaniel Borkmann #define bpf_ctx_range(TYPE, MEMBER)						\
630f96da094SDaniel Borkmann 	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
631f96da094SDaniel Borkmann #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)				\
632f96da094SDaniel Borkmann 	offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
633b7df9adaSDaniel Borkmann #if BITS_PER_LONG == 64
634b7df9adaSDaniel Borkmann # define bpf_ctx_range_ptr(TYPE, MEMBER)					\
635b7df9adaSDaniel Borkmann 	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
636b7df9adaSDaniel Borkmann #else
637b7df9adaSDaniel Borkmann # define bpf_ctx_range_ptr(TYPE, MEMBER)					\
638b7df9adaSDaniel Borkmann 	offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
639b7df9adaSDaniel Borkmann #endif /* BITS_PER_LONG == 64 */
640f96da094SDaniel Borkmann 
641f96da094SDaniel Borkmann #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)				\
642f96da094SDaniel Borkmann 	({									\
643c593642cSPankaj Bharadiya 		BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE));		\
644f96da094SDaniel Borkmann 		*(PTR_SIZE) = (SIZE);						\
645f96da094SDaniel Borkmann 		offsetof(TYPE, MEMBER);						\
646f96da094SDaniel Borkmann 	})
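/* Editor's note: an illustrative sketch (not part of the original header).
 * bpf_ctx_range() expands to a GCC case range, which is how
 * is_valid_access()-style verifier callbacks typically consume it:
 *
 *	switch (off) {
 *	case bpf_ctx_range(struct __sk_buff, data):
 *		// byte range covering the 'data' member
 *		break;
 *	}
 */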
647f96da094SDaniel Borkmann 
648bd4cf0edSAlexei Starovoitov /* A struct sock_filter is architecture independent. */
6490c5fe1b4SWill Drewry struct compat_sock_fprog {
6500c5fe1b4SWill Drewry 	u16		len;
6510c5fe1b4SWill Drewry 	compat_uptr_t	filter;	/* struct sock_filter * */
6520c5fe1b4SWill Drewry };
6530c5fe1b4SWill Drewry 
654a3ea269bSDaniel Borkmann struct sock_fprog_kern {
655a3ea269bSDaniel Borkmann 	u16			len;
656a3ea269bSDaniel Borkmann 	struct sock_filter	*filter;
657a3ea269bSDaniel Borkmann };
658a3ea269bSDaniel Borkmann 
659b7b3fc8dSIlya Leoshkevich /* Some arches need doubleword alignment for their instructions and/or data */
660b7b3fc8dSIlya Leoshkevich #define BPF_IMAGE_ALIGNMENT 8
661b7b3fc8dSIlya Leoshkevich 
662738cbe72SDaniel Borkmann struct bpf_binary_header {
663ed2d9e1aSSong Liu 	u32 size;
664b7b3fc8dSIlya Leoshkevich 	u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
665738cbe72SDaniel Borkmann };
666738cbe72SDaniel Borkmann 
667700d4796SAlexei Starovoitov struct bpf_prog_stats {
66861a0abaeSEric Dumazet 	u64_stats_t cnt;
66961a0abaeSEric Dumazet 	u64_stats_t nsecs;
67061a0abaeSEric Dumazet 	u64_stats_t misses;
671700d4796SAlexei Starovoitov 	struct u64_stats_sync syncp;
672700d4796SAlexei Starovoitov } __aligned(2 * sizeof(u64));
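/* Editor's note: an illustrative sketch (not part of the original header)
 * of reading these per-CPU stats consistently via the syncp seqcount:
 *
 *	unsigned int start;
 *	u64 cnt, nsecs;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&stats->syncp);
 *		cnt   = u64_stats_read(&stats->cnt);
 *		nsecs = u64_stats_read(&stats->nsecs);
 *	} while (u64_stats_fetch_retry(&stats->syncp, start));
 */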
673700d4796SAlexei Starovoitov 
674e723608bSKumar Kartikeya Dwivedi struct bpf_timed_may_goto {
675e723608bSKumar Kartikeya Dwivedi 	u64 count;
676e723608bSKumar Kartikeya Dwivedi 	u64 timestamp;
677e723608bSKumar Kartikeya Dwivedi };
678e723608bSKumar Kartikeya Dwivedi 
6797ae457c1SAlexei Starovoitov struct sk_filter {
6804c355cdfSReshetova, Elena 	refcount_t	refcnt;
6817ae457c1SAlexei Starovoitov 	struct rcu_head	rcu;
6827ae457c1SAlexei Starovoitov 	struct bpf_prog	*prog;
6837ae457c1SAlexei Starovoitov };
6847ae457c1SAlexei Starovoitov 
685492ecee8SAlexei Starovoitov DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
686492ecee8SAlexei Starovoitov 
687fdf21497SDaniel Xu extern struct mutex nf_conn_btf_access_lock;
6886728aea7SKumar Kartikeya Dwivedi extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
6896728aea7SKumar Kartikeya Dwivedi 				     const struct bpf_reg_state *reg,
690b7e852a9SAlexei Starovoitov 				     int off, int size);
691fdf21497SDaniel Xu 
692fb7dd8bcSAndrii Nakryiko typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
693fb7dd8bcSAndrii Nakryiko 					  const struct bpf_insn *insnsi,
694fb7dd8bcSAndrii Nakryiko 					  unsigned int (*bpf_func)(const void *,
695fb7dd8bcSAndrii Nakryiko 								   const struct bpf_insn *));
6967ae457c1SAlexei Starovoitov 
697fb7dd8bcSAndrii Nakryiko static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
698fb7dd8bcSAndrii Nakryiko 					  const void *ctx,
699fb7dd8bcSAndrii Nakryiko 					  bpf_dispatcher_fn dfunc)
700fb7dd8bcSAndrii Nakryiko {
701fb7dd8bcSAndrii Nakryiko 	u32 ret;
702fb7dd8bcSAndrii Nakryiko 
703fb7dd8bcSAndrii Nakryiko 	cant_migrate();
704fb7dd8bcSAndrii Nakryiko 	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
705fb7dd8bcSAndrii Nakryiko 		struct bpf_prog_stats *stats;
706ce09cbddSJose Fernandez 		u64 duration, start = sched_clock();
707f941eaddSEric Dumazet 		unsigned long flags;
708fb7dd8bcSAndrii Nakryiko 
709fb7dd8bcSAndrii Nakryiko 		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
710ce09cbddSJose Fernandez 
711ce09cbddSJose Fernandez 		duration = sched_clock() - start;
712fb7dd8bcSAndrii Nakryiko 		stats = this_cpu_ptr(prog->stats);
713f941eaddSEric Dumazet 		flags = u64_stats_update_begin_irqsave(&stats->syncp);
71461a0abaeSEric Dumazet 		u64_stats_inc(&stats->cnt);
715ce09cbddSJose Fernandez 		u64_stats_add(&stats->nsecs, duration);
716f941eaddSEric Dumazet 		u64_stats_update_end_irqrestore(&stats->syncp, flags);
717fb7dd8bcSAndrii Nakryiko 	} else {
718fb7dd8bcSAndrii Nakryiko 		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
719fb7dd8bcSAndrii Nakryiko 	}
720fb7dd8bcSAndrii Nakryiko 	return ret;
721fb7dd8bcSAndrii Nakryiko }
722fb7dd8bcSAndrii Nakryiko 
723fb7dd8bcSAndrii Nakryiko static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
724fb7dd8bcSAndrii Nakryiko {
725fb7dd8bcSAndrii Nakryiko 	return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
726fb7dd8bcSAndrii Nakryiko }
7273c58482aSThomas Gleixner 
7283c58482aSThomas Gleixner /*
7293c58482aSThomas Gleixner  * Use in preemptible and therefore migratable context to make sure that
7303c58482aSThomas Gleixner  * the execution of the BPF program runs on one CPU.
7313c58482aSThomas Gleixner  *
7323c58482aSThomas Gleixner  * This uses migrate_disable/enable() explicitly to document that the
7333c58482aSThomas Gleixner  * invocation of a BPF program does not require reentrancy protection
7343c58482aSThomas Gleixner  * against a BPF program which is invoked from a preempting task.
7353c58482aSThomas Gleixner  */
7363c58482aSThomas Gleixner static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
7373c58482aSThomas Gleixner 					  const void *ctx)
7383c58482aSThomas Gleixner {
7393c58482aSThomas Gleixner 	u32 ret;
7403c58482aSThomas Gleixner 
7413c58482aSThomas Gleixner 	migrate_disable();
742fb7dd8bcSAndrii Nakryiko 	ret = bpf_prog_run(prog, ctx);
7433c58482aSThomas Gleixner 	migrate_enable();
7443c58482aSThomas Gleixner 	return ret;
7453c58482aSThomas Gleixner }
7467e6897f9SBjörn Töpel 
74701dd194cSDaniel Borkmann #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
74801dd194cSDaniel Borkmann 
749db58ba45SAlexei Starovoitov struct bpf_skb_data_end {
750db58ba45SAlexei Starovoitov 	struct qdisc_skb_cb qdisc_cb;
751de8f3a83SDaniel Borkmann 	void *data_meta;
752db58ba45SAlexei Starovoitov 	void *data_end;
753db58ba45SAlexei Starovoitov };
754db58ba45SAlexei Starovoitov 
755ba452c9eSToke Høiland-Jørgensen struct bpf_nh_params {
756ba452c9eSToke Høiland-Jørgensen 	u32 nh_family;
757ba452c9eSToke Høiland-Jørgensen 	union {
758ba452c9eSToke Høiland-Jørgensen 		u32 ipv4_nh;
759ba452c9eSToke Høiland-Jørgensen 		struct in6_addr ipv6_nh;
760ba452c9eSToke Høiland-Jørgensen 	};
761ba452c9eSToke Høiland-Jørgensen };
762ba452c9eSToke Høiland-Jørgensen 
763401cb7daSSebastian Andrzej Siewior /* flags for bpf_redirect_info kern_flags */
764401cb7daSSebastian Andrzej Siewior #define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */
765401cb7daSSebastian Andrzej Siewior #define BPF_RI_F_RI_INIT	BIT(1)
7663f9fe37dSSebastian Andrzej Siewior #define BPF_RI_F_CPU_MAP_INIT	BIT(2)
7673f9fe37dSSebastian Andrzej Siewior #define BPF_RI_F_DEV_MAP_INIT	BIT(3)
7683f9fe37dSSebastian Andrzej Siewior #define BPF_RI_F_XSK_MAP_INIT	BIT(4)
769401cb7daSSebastian Andrzej Siewior 
7700b19cc0aSToshiaki Makita struct bpf_redirect_info {
77132637e33SToke Høiland-Jørgensen 	u64 tgt_index;
77243e74c02SToke Høiland-Jørgensen 	void *tgt_value;
773e624d4edSHangbin Liu 	struct bpf_map *map;
77432637e33SToke Høiland-Jørgensen 	u32 flags;
775ee75aef2SBjörn Töpel 	u32 map_id;
776ee75aef2SBjörn Töpel 	enum bpf_map_type map_type;
777ba452c9eSToke Høiland-Jørgensen 	struct bpf_nh_params nh;
778401cb7daSSebastian Andrzej Siewior 	u32 kern_flags;
7790b19cc0aSToshiaki Makita };
7800b19cc0aSToshiaki Makita 
781401cb7daSSebastian Andrzej Siewior struct bpf_net_context {
782401cb7daSSebastian Andrzej Siewior 	struct bpf_redirect_info ri;
7833f9fe37dSSebastian Andrzej Siewior 	struct list_head cpu_map_flush_list;
7843f9fe37dSSebastian Andrzej Siewior 	struct list_head dev_map_flush_list;
7853f9fe37dSSebastian Andrzej Siewior 	struct list_head xskmap_map_flush_list;
786401cb7daSSebastian Andrzej Siewior };
7870b19cc0aSToshiaki Makita 
788401cb7daSSebastian Andrzej Siewior static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx)
789401cb7daSSebastian Andrzej Siewior {
790401cb7daSSebastian Andrzej Siewior 	struct task_struct *tsk = current;
791401cb7daSSebastian Andrzej Siewior 
792401cb7daSSebastian Andrzej Siewior 	if (tsk->bpf_net_context != NULL)
793401cb7daSSebastian Andrzej Siewior 		return NULL;
794401cb7daSSebastian Andrzej Siewior 	bpf_net_ctx->ri.kern_flags = 0;
795401cb7daSSebastian Andrzej Siewior 
796401cb7daSSebastian Andrzej Siewior 	tsk->bpf_net_context = bpf_net_ctx;
797401cb7daSSebastian Andrzej Siewior 	return bpf_net_ctx;
798401cb7daSSebastian Andrzej Siewior }
799401cb7daSSebastian Andrzej Siewior 
800401cb7daSSebastian Andrzej Siewior static inline void bpf_net_ctx_clear(struct bpf_net_context *bpf_net_ctx)
801401cb7daSSebastian Andrzej Siewior {
802401cb7daSSebastian Andrzej Siewior 	if (bpf_net_ctx)
803401cb7daSSebastian Andrzej Siewior 		current->bpf_net_context = NULL;
804401cb7daSSebastian Andrzej Siewior }
805401cb7daSSebastian Andrzej Siewior 
806401cb7daSSebastian Andrzej Siewior static inline struct bpf_net_context *bpf_net_ctx_get(void)
807401cb7daSSebastian Andrzej Siewior {
808401cb7daSSebastian Andrzej Siewior 	return current->bpf_net_context;
809401cb7daSSebastian Andrzej Siewior }
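/* Editor's note: an illustrative sketch (not part of the original header)
 * of the intended pairing around a network/XDP processing section:
 *
 *	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 *
 *	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 *	// ... run XDP/redirect processing ...
 *	bpf_net_ctx_clear(bpf_net_ctx);
 *
 * bpf_net_ctx_set() returns NULL if a context is already installed on the
 * task and bpf_net_ctx_clear(NULL) is a no-op, so nested sections keep
 * using the outermost context.
 */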
810401cb7daSSebastian Andrzej Siewior 
811401cb7daSSebastian Andrzej Siewior static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void)
812401cb7daSSebastian Andrzej Siewior {
813401cb7daSSebastian Andrzej Siewior 	struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
814401cb7daSSebastian Andrzej Siewior 
815401cb7daSSebastian Andrzej Siewior 	if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_RI_INIT)) {
816401cb7daSSebastian Andrzej Siewior 		memset(&bpf_net_ctx->ri, 0, offsetof(struct bpf_net_context, ri.nh));
817401cb7daSSebastian Andrzej Siewior 		bpf_net_ctx->ri.kern_flags |= BPF_RI_F_RI_INIT;
818401cb7daSSebastian Andrzej Siewior 	}
819401cb7daSSebastian Andrzej Siewior 
820401cb7daSSebastian Andrzej Siewior 	return &bpf_net_ctx->ri;
821401cb7daSSebastian Andrzej Siewior }
8222539650fSToshiaki Makita 
8233f9fe37dSSebastian Andrzej Siewior static inline struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void)
8243f9fe37dSSebastian Andrzej Siewior {
8253f9fe37dSSebastian Andrzej Siewior 	struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
8263f9fe37dSSebastian Andrzej Siewior 
8273f9fe37dSSebastian Andrzej Siewior 	if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_CPU_MAP_INIT)) {
8283f9fe37dSSebastian Andrzej Siewior 		INIT_LIST_HEAD(&bpf_net_ctx->cpu_map_flush_list);
8293f9fe37dSSebastian Andrzej Siewior 		bpf_net_ctx->ri.kern_flags |= BPF_RI_F_CPU_MAP_INIT;
8303f9fe37dSSebastian Andrzej Siewior 	}
8313f9fe37dSSebastian Andrzej Siewior 
8323f9fe37dSSebastian Andrzej Siewior 	return &bpf_net_ctx->cpu_map_flush_list;
8333f9fe37dSSebastian Andrzej Siewior }
8343f9fe37dSSebastian Andrzej Siewior 
8353f9fe37dSSebastian Andrzej Siewior static inline struct list_head *bpf_net_ctx_get_dev_flush_list(void)
8363f9fe37dSSebastian Andrzej Siewior {
8373f9fe37dSSebastian Andrzej Siewior 	struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
8383f9fe37dSSebastian Andrzej Siewior 
8393f9fe37dSSebastian Andrzej Siewior 	if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_DEV_MAP_INIT)) {
8403f9fe37dSSebastian Andrzej Siewior 		INIT_LIST_HEAD(&bpf_net_ctx->dev_map_flush_list);
8413f9fe37dSSebastian Andrzej Siewior 		bpf_net_ctx->ri.kern_flags |= BPF_RI_F_DEV_MAP_INIT;
8423f9fe37dSSebastian Andrzej Siewior 	}
8433f9fe37dSSebastian Andrzej Siewior 
8443f9fe37dSSebastian Andrzej Siewior 	return &bpf_net_ctx->dev_map_flush_list;
8453f9fe37dSSebastian Andrzej Siewior }
8463f9fe37dSSebastian Andrzej Siewior 
8473f9fe37dSSebastian Andrzej Siewior static inline struct list_head *bpf_net_ctx_get_xskmap_flush_list(void)
8483f9fe37dSSebastian Andrzej Siewior {
8493f9fe37dSSebastian Andrzej Siewior 	struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
8503f9fe37dSSebastian Andrzej Siewior 
8513f9fe37dSSebastian Andrzej Siewior 	if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_XSK_MAP_INIT)) {
8523f9fe37dSSebastian Andrzej Siewior 		INIT_LIST_HEAD(&bpf_net_ctx->xskmap_map_flush_list);
8533f9fe37dSSebastian Andrzej Siewior 		bpf_net_ctx->ri.kern_flags |= BPF_RI_F_XSK_MAP_INIT;
8543f9fe37dSSebastian Andrzej Siewior 	}
8553f9fe37dSSebastian Andrzej Siewior 
8563f9fe37dSSebastian Andrzej Siewior 	return &bpf_net_ctx->xskmap_map_flush_list;
8573f9fe37dSSebastian Andrzej Siewior }
8583f9fe37dSSebastian Andrzej Siewior 
859d839a731SSebastian Andrzej Siewior static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_map,
860d839a731SSebastian Andrzej Siewior 							struct list_head **lh_dev,
861d839a731SSebastian Andrzej Siewior 							struct list_head **lh_xsk)
862d839a731SSebastian Andrzej Siewior {
863d839a731SSebastian Andrzej Siewior 	struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get();
864d839a731SSebastian Andrzej Siewior 	u32 kern_flags = bpf_net_ctx->ri.kern_flags;
865d839a731SSebastian Andrzej Siewior 	struct list_head *lh;
866d839a731SSebastian Andrzej Siewior 
867d839a731SSebastian Andrzej Siewior 	*lh_map = *lh_dev = *lh_xsk = NULL;
868d839a731SSebastian Andrzej Siewior 
869d839a731SSebastian Andrzej Siewior 	if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
870d839a731SSebastian Andrzej Siewior 		return;
871d839a731SSebastian Andrzej Siewior 
872d839a731SSebastian Andrzej Siewior 	lh = &bpf_net_ctx->dev_map_flush_list;
873d839a731SSebastian Andrzej Siewior 	if (kern_flags & BPF_RI_F_DEV_MAP_INIT && !list_empty(lh))
874d839a731SSebastian Andrzej Siewior 		*lh_dev = lh;
875d839a731SSebastian Andrzej Siewior 
876d839a731SSebastian Andrzej Siewior 	lh = &bpf_net_ctx->cpu_map_flush_list;
877d839a731SSebastian Andrzej Siewior 	if (kern_flags & BPF_RI_F_CPU_MAP_INIT && !list_empty(lh))
878d839a731SSebastian Andrzej Siewior 		*lh_map = lh;
879d839a731SSebastian Andrzej Siewior 
880d839a731SSebastian Andrzej Siewior 	lh = &bpf_net_ctx->xskmap_map_flush_list;
881d839a731SSebastian Andrzej Siewior 	if (IS_ENABLED(CONFIG_XDP_SOCKETS) &&
882d839a731SSebastian Andrzej Siewior 	    kern_flags & BPF_RI_F_XSK_MAP_INIT && !list_empty(lh))
883d839a731SSebastian Andrzej Siewior 		*lh_xsk = lh;
884d839a731SSebastian Andrzej Siewior }
885d839a731SSebastian Andrzej Siewior 
8866aaae2b6SDaniel Borkmann /* Compute the linear packet data range [data, data_end) which
8876aaae2b6SDaniel Borkmann  * will be accessed by various program types (cls_bpf, act_bpf,
8886aaae2b6SDaniel Borkmann  * lwt, ...). Subsystems allowing direct data access must (!)
8896aaae2b6SDaniel Borkmann  * ensure that cb[] area can be written to when BPF program is
8906aaae2b6SDaniel Borkmann  * invoked (otherwise cb[] save/restore is necessary).
891db58ba45SAlexei Starovoitov  */
8926aaae2b6SDaniel Borkmann static inline void bpf_compute_data_pointers(struct sk_buff *skb)
893db58ba45SAlexei Starovoitov {
894db58ba45SAlexei Starovoitov 	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
895db58ba45SAlexei Starovoitov 
896c593642cSPankaj Bharadiya 	BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
897de8f3a83SDaniel Borkmann 	cb->data_meta = skb->data - skb_metadata_len(skb);
898db58ba45SAlexei Starovoitov 	cb->data_end  = skb->data + skb_headlen(skb);
899db58ba45SAlexei Starovoitov }
900db58ba45SAlexei Starovoitov 
901b39b5f41SSong Liu /* Similar to bpf_compute_data_pointers(), except that the original
902b39b5f41SSong Liu  * cb->data_end is saved in *saved_data_end for later restore.
903b39b5f41SSong Liu  */
904b39b5f41SSong Liu static inline void bpf_compute_and_save_data_end(
905b39b5f41SSong Liu 	struct sk_buff *skb, void **saved_data_end)
906b39b5f41SSong Liu {
907b39b5f41SSong Liu 	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
908b39b5f41SSong Liu 
909b39b5f41SSong Liu 	*saved_data_end = cb->data_end;
910b39b5f41SSong Liu 	cb->data_end  = skb->data + skb_headlen(skb);
911b39b5f41SSong Liu }
912b39b5f41SSong Liu 
9139c8c3fa3SAkihiko Odaki /* Restore data saved by bpf_compute_and_save_data_end(). */
914b39b5f41SSong Liu static inline void bpf_restore_data_end(
915b39b5f41SSong Liu 	struct sk_buff *skb, void *saved_data_end)
916b39b5f41SSong Liu {
917b39b5f41SSong Liu 	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
918b39b5f41SSong Liu 
919b39b5f41SSong Liu 	cb->data_end = saved_data_end;
920b39b5f41SSong Liu }
921b39b5f41SSong Liu 
9227d08c2c9SAndrii Nakryiko static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
92301dd194cSDaniel Borkmann {
92401dd194cSDaniel Borkmann 	/* eBPF programs may read/write skb->cb[] area to transfer meta
92501dd194cSDaniel Borkmann 	 * data between tail calls. Since this also needs to work with
92601dd194cSDaniel Borkmann 	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
92701dd194cSDaniel Borkmann 	 *
92801dd194cSDaniel Borkmann 	 * In some socket filter cases, the cb unfortunately needs to be
92901dd194cSDaniel Borkmann 	 * saved/restored so that protocol specific skb->cb[] data won't
93001dd194cSDaniel Borkmann 	 * be lost. In any case, due to unprivileged eBPF programs
93101dd194cSDaniel Borkmann 	 * attached to sockets, we need to clear the bpf_skb_cb() area
93201dd194cSDaniel Borkmann 	 * to not leak previous contents to user space.
93301dd194cSDaniel Borkmann 	 */
934c593642cSPankaj Bharadiya 	BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
935c593642cSPankaj Bharadiya 	BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
936c593642cSPankaj Bharadiya 		     sizeof_field(struct qdisc_skb_cb, data));
93701dd194cSDaniel Borkmann 
93801dd194cSDaniel Borkmann 	return qdisc_skb_cb(skb)->data;
93901dd194cSDaniel Borkmann }
94001dd194cSDaniel Borkmann 
9412a916f2fSDavid Miller /* Must be invoked with migration disabled */
9426cab5e90SAlexei Starovoitov static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
9437d08c2c9SAndrii Nakryiko 					 const void *ctx)
944ff936a04SAlexei Starovoitov {
9457d08c2c9SAndrii Nakryiko 	const struct sk_buff *skb = ctx;
94601dd194cSDaniel Borkmann 	u8 *cb_data = bpf_skb_cb(skb);
94701dd194cSDaniel Borkmann 	u8 cb_saved[BPF_SKB_CB_LEN];
948ff936a04SAlexei Starovoitov 	u32 res;
949ff936a04SAlexei Starovoitov 
950ff936a04SAlexei Starovoitov 	if (unlikely(prog->cb_access)) {
95101dd194cSDaniel Borkmann 		memcpy(cb_saved, cb_data, sizeof(cb_saved));
95201dd194cSDaniel Borkmann 		memset(cb_data, 0, sizeof(cb_saved));
953ff936a04SAlexei Starovoitov 	}
954ff936a04SAlexei Starovoitov 
955fb7dd8bcSAndrii Nakryiko 	res = bpf_prog_run(prog, skb);
956ff936a04SAlexei Starovoitov 
957ff936a04SAlexei Starovoitov 	if (unlikely(prog->cb_access))
95801dd194cSDaniel Borkmann 		memcpy(cb_data, cb_saved, sizeof(cb_saved));
959ff936a04SAlexei Starovoitov 
960ff936a04SAlexei Starovoitov 	return res;
961ff936a04SAlexei Starovoitov }
962ff936a04SAlexei Starovoitov 
9636cab5e90SAlexei Starovoitov static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
9646cab5e90SAlexei Starovoitov 				       struct sk_buff *skb)
9656cab5e90SAlexei Starovoitov {
9666cab5e90SAlexei Starovoitov 	u32 res;
9676cab5e90SAlexei Starovoitov 
9682a916f2fSDavid Miller 	migrate_disable();
9696cab5e90SAlexei Starovoitov 	res = __bpf_prog_run_save_cb(prog, skb);
9702a916f2fSDavid Miller 	migrate_enable();
9716cab5e90SAlexei Starovoitov 	return res;
9726cab5e90SAlexei Starovoitov }
9736cab5e90SAlexei Starovoitov 
974ff936a04SAlexei Starovoitov static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
975ff936a04SAlexei Starovoitov 					struct sk_buff *skb)
976ff936a04SAlexei Starovoitov {
97701dd194cSDaniel Borkmann 	u8 *cb_data = bpf_skb_cb(skb);
9786cab5e90SAlexei Starovoitov 	u32 res;
979ff936a04SAlexei Starovoitov 
980ff936a04SAlexei Starovoitov 	if (unlikely(prog->cb_access))
98101dd194cSDaniel Borkmann 		memset(cb_data, 0, BPF_SKB_CB_LEN);
98201dd194cSDaniel Borkmann 
9833d9f773cSDavid Miller 	res = bpf_prog_run_pin_on_cpu(prog, skb);
9846cab5e90SAlexei Starovoitov 	return res;
985ff936a04SAlexei Starovoitov }
986ff936a04SAlexei Starovoitov 
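/* Editor's example: a simplified sketch of how the socket-filter path uses
 * bpf_prog_run_save_cb() above (cf. sk_filter_trim_cap() in
 * net/core/filter.c); error handling and sk accounting are omitted here.
 *
 *	rcu_read_lock();
 *	filter = rcu_dereference(sk->sk_filter);
 *	if (filter) {
 *		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
 *		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
 *	}
 *	rcu_read_unlock();
 */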
9876a64037dSBjörn Töpel DECLARE_BPF_DISPATCHER(xdp)
9887e6897f9SBjörn Töpel 
989879af96fSJussi Maki DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
990879af96fSJussi Maki 
991879af96fSJussi Maki u32 xdp_master_redirect(struct xdp_buff *xdp);
992879af96fSJussi Maki 
9937e6897f9SBjörn Töpel void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
9947e6897f9SBjörn Töpel 
995aafe6ae9SDaniel Borkmann static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
996aafe6ae9SDaniel Borkmann {
997aafe6ae9SDaniel Borkmann 	return prog->len * sizeof(struct bpf_insn);
998aafe6ae9SDaniel Borkmann }
999aafe6ae9SDaniel Borkmann 
1000f1f7714eSDaniel Borkmann static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
1001aafe6ae9SDaniel Borkmann {
1002aafe6ae9SDaniel Borkmann 	return round_up(bpf_prog_insn_size(prog) +
10036b0b0fa2SEric Biggers 			sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
1004aafe6ae9SDaniel Borkmann }
1005aafe6ae9SDaniel Borkmann 
10067ae457c1SAlexei Starovoitov static inline unsigned int bpf_prog_size(unsigned int proglen)
1007b715631fSStephen Hemminger {
10087ae457c1SAlexei Starovoitov 	return max(sizeof(struct bpf_prog),
10097ae457c1SAlexei Starovoitov 		   offsetof(struct bpf_prog, insns[proglen]));
1010b715631fSStephen Hemminger }
1011b715631fSStephen Hemminger 
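/* Editor's note: bpf_prog_size() covers the bpf_prog header plus the
 * trailing insns[] flexible array; the max() guards the degenerate
 * proglen == 0 case. With sizeof(struct bpf_insn) == 8:
 *
 *	bpf_prog_size(0) == sizeof(struct bpf_prog)
 *	bpf_prog_size(4) == offsetof(struct bpf_prog, insns) + 4 * 8
 */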
10127b36f929SDaniel Borkmann static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
10137b36f929SDaniel Borkmann {
10147b36f929SDaniel Borkmann 	/* When classic BPF programs have been loaded and the arch
10157b36f929SDaniel Borkmann 	 * does not have a classic BPF JIT (anymore), they have been
10167b36f929SDaniel Borkmann 	 * converted via bpf_migrate_filter() to eBPF and thus always
10177b36f929SDaniel Borkmann 	 * have an unspec program type.
10187b36f929SDaniel Borkmann 	 */
10197b36f929SDaniel Borkmann 	return prog->type == BPF_PROG_TYPE_UNSPEC;
10207b36f929SDaniel Borkmann }
10217b36f929SDaniel Borkmann 
1022bc23105cSDaniel Borkmann static inline u32 bpf_ctx_off_adjust_machine(u32 size)
1023f96da094SDaniel Borkmann {
1024bc23105cSDaniel Borkmann 	const u32 size_machine = sizeof(unsigned long);
1025bc23105cSDaniel Borkmann 
1026bc23105cSDaniel Borkmann 	if (size > size_machine && size % size_machine == 0)
1027bc23105cSDaniel Borkmann 		size = size_machine;
1028bc23105cSDaniel Borkmann 
1029bc23105cSDaniel Borkmann 	return size;
1030bc23105cSDaniel Borkmann }
1031bc23105cSDaniel Borkmann 
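/* Editor's note: on a 64-bit machine, bpf_ctx_off_adjust_machine() maps a
 * 16-byte ctx access down to one 8-byte machine word, while sizes of
 * 1/2/4/8 (and sizes not a multiple of the word size) pass through as-is.
 */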
1032bc23105cSDaniel Borkmann static inline bool
1033bc23105cSDaniel Borkmann bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
1034bc23105cSDaniel Borkmann {
103546f53a65SAndrey Ignatov 	return size <= size_default && (size & (size - 1)) == 0;
1036f96da094SDaniel Borkmann }
1037f96da094SDaniel Borkmann 
1038d9b8aadaSIlya Leoshkevich static inline u8
1039d895a0f1SIlya Leoshkevich bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
1040d9b8aadaSIlya Leoshkevich {
1041d895a0f1SIlya Leoshkevich 	u8 access_off = off & (size_default - 1);
1042d9b8aadaSIlya Leoshkevich 
1043d9b8aadaSIlya Leoshkevich #ifdef __LITTLE_ENDIAN
1044d895a0f1SIlya Leoshkevich 	return access_off;
1045d9b8aadaSIlya Leoshkevich #else
1046d895a0f1SIlya Leoshkevich 	return size_default - (access_off + size);
1047d9b8aadaSIlya Leoshkevich #endif
1048d9b8aadaSIlya Leoshkevich }
1049d9b8aadaSIlya Leoshkevich 
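/* Editor's note: a worked example for the two helpers above. For a 2-byte
 * narrow load at off == 6 inside an 8-byte (size_default) ctx field:
 *
 *	bpf_ctx_narrow_access_ok(6, 2, 8)  -> true (2 <= 8, power of two)
 *	access_off = 6 & (8 - 1)           =  6
 *	little endian offset               =  6
 *	big endian offset: 8 - (6 + 2)     =  0
 */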
1050b4399546SStanislav Fomichev #define bpf_ctx_wide_access_ok(off, size, type, field)			\
1051600c70baSStanislav Fomichev 	(size == sizeof(__u64) &&					\
1052600c70baSStanislav Fomichev 	off >= offsetof(type, field) &&					\
1053600c70baSStanislav Fomichev 	off + sizeof(__u64) <= offsetofend(type, field) &&		\
1054600c70baSStanislav Fomichev 	off % sizeof(__u64) == 0)
1055600c70baSStanislav Fomichev 
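/* Editor's example: a hedged usage sketch; verifier is_valid_access()
 * callbacks use bpf_ctx_wide_access_ok() to permit aligned u64 accesses
 * spanning adjacent u32 ctx members, e.g. for bpf_sock_addr's user_ip6[]:
 *
 *	if (!bpf_ctx_wide_access_ok(off, size, struct bpf_sock_addr,
 *				    user_ip6))
 *		return false;
 */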
1056009937e7SAlexei Starovoitov #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
1057a3ea269bSDaniel Borkmann 
10587d2cc63eSChristophe Leroy static inline int __must_check bpf_prog_lock_ro(struct bpf_prog *fp)
105960a3b225SDaniel Borkmann {
1060e1608f3fSDaniel Borkmann #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1061e1608f3fSDaniel Borkmann 	if (!fp->jited) {
1062d53d2f78SRick Edgecombe 		set_vm_flush_reset_perms(fp);
10637d2cc63eSChristophe Leroy 		return set_memory_ro((unsigned long)fp, fp->pages);
106460a3b225SDaniel Borkmann 	}
1065e1608f3fSDaniel Borkmann #endif
10667d2cc63eSChristophe Leroy 	return 0;
1067e1608f3fSDaniel Borkmann }
106860a3b225SDaniel Borkmann 
1069e60adf51SChristophe Leroy static inline int __must_check
1070e60adf51SChristophe Leroy bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
10719d876e79SDaniel Borkmann {
1072d53d2f78SRick Edgecombe 	set_vm_flush_reset_perms(hdr);
1073e60adf51SChristophe Leroy 	return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
10749d876e79SDaniel Borkmann }
10759d876e79SDaniel Borkmann 
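/* Editor's example: a sketch of how an arch JIT typically finalizes its
 * image with the helper above; exact flow and labels vary per arch and are
 * hypothetical here.
 *
 *	if (bpf_jit_binary_lock_ro(header)) {
 *		bpf_jit_binary_free(header);
 *		prog = orig_prog;
 *		goto out_off;
 *	}
 *	prog->bpf_func = (void *)image;
 *	prog->jited = 1;
 */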
1076f4979fceSWillem de Bruijn int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
1077f4979fceSWillem de Bruijn static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
1078f4979fceSWillem de Bruijn {
1079f4979fceSWillem de Bruijn 	return sk_filter_trim_cap(sk, skb, 1);
1080f4979fceSWillem de Bruijn }
1081bd4cf0edSAlexei Starovoitov 
1082d1c55ab5SDaniel Borkmann struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
10837ae457c1SAlexei Starovoitov void bpf_prog_free(struct bpf_prog *fp);
1084bd4cf0edSAlexei Starovoitov 
10855e581dadSDaniel Borkmann bool bpf_opcode_in_insntable(u8 code);
10865e581dadSDaniel Borkmann 
1087c454a46bSMartin KaFai Lau void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
1088c454a46bSMartin KaFai Lau 			       const u32 *insn_to_jit_off);
1089c454a46bSMartin KaFai Lau int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
1090e16301fbSMartin KaFai Lau void bpf_prog_jit_attempt_done(struct bpf_prog *prog);
1091c454a46bSMartin KaFai Lau 
109260a3b225SDaniel Borkmann struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
1093492ecee8SAlexei Starovoitov struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
109460a3b225SDaniel Borkmann struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
109560a3b225SDaniel Borkmann 				  gfp_t gfp_extra_flags);
109660a3b225SDaniel Borkmann void __bpf_prog_free(struct bpf_prog *fp);
109760a3b225SDaniel Borkmann 
109860a3b225SDaniel Borkmann static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
109960a3b225SDaniel Borkmann {
110060a3b225SDaniel Borkmann 	__bpf_prog_free(fp);
110160a3b225SDaniel Borkmann }
110260a3b225SDaniel Borkmann 
1103ac67eb2cSDaniel Borkmann typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
1104ac67eb2cSDaniel Borkmann 				       unsigned int flen);
1105ac67eb2cSDaniel Borkmann 
11067ae457c1SAlexei Starovoitov int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
1107ac67eb2cSDaniel Borkmann int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
1108bab18991SDaniel Borkmann 			      bpf_aux_classic_check_t trans, bool save_orig);
11097ae457c1SAlexei Starovoitov void bpf_prog_destroy(struct bpf_prog *fp);
1110a3ea269bSDaniel Borkmann 
1111fbc907f0SDaniel Borkmann int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
111289aa0758SAlexei Starovoitov int sk_attach_bpf(u32 ufd, struct sock *sk);
1113538950a1SCraig Gallek int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
1114538950a1SCraig Gallek int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
11158217ca65SMartin KaFai Lau void sk_reuseport_prog_free(struct bpf_prog *prog);
1116fbc907f0SDaniel Borkmann int sk_detach_filter(struct sock *sk);
11174ff09db1SMartin KaFai Lau int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len);
1118fbc907f0SDaniel Borkmann 
1119278571baSAlexei Starovoitov bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
1120fbc907f0SDaniel Borkmann void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
11210a14842fSEric Dumazet 
112262258278SAlexei Starovoitov u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
11231ea47e01SAlexei Starovoitov #define __bpf_call_base_args \
11241ea47e01SAlexei Starovoitov 	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
11256943c2b0SAndrii Nakryiko 	 (void *)__bpf_call_base)
1126d1c55ab5SDaniel Borkmann 
1127d1c55ab5SDaniel Borkmann struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
11289383191dSDaniel Borkmann void bpf_jit_compile(struct bpf_prog *prog);
1129a4b1d3c1SJiong Wang bool bpf_jit_needs_zext(void);
11302ddec2c8SPuranjay Mohan bool bpf_jit_inlines_helper_call(s32 imm);
113195acd881STony Ambardar bool bpf_jit_supports_subprog_tailcalls(void);
11327bdbf744SAndrii Nakryiko bool bpf_jit_supports_percpu_insn(void);
1133e6ac2450SMartin KaFai Lau bool bpf_jit_supports_kfunc_call(void);
11341cf3bfc6SIlya Leoshkevich bool bpf_jit_supports_far_kfunc_call(void);
1135fd5d27b7SKumar Kartikeya Dwivedi bool bpf_jit_supports_exceptions(void);
11367c05e7f3SHou Tao bool bpf_jit_supports_ptr_xchg(void);
1137142fd4d2SAlexei Starovoitov bool bpf_jit_supports_arena(void);
1138d503a04fSAlexei Starovoitov bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
1139a76ab573SYonghong Song bool bpf_jit_supports_private_stack(void);
1140e723608bSKumar Kartikeya Dwivedi bool bpf_jit_supports_timed_may_goto(void);
114166e13b61SPuranjay Mohan u64 bpf_arch_uaddress_limit(void);
1142fd5d27b7SKumar Kartikeya Dwivedi void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
1143e723608bSKumar Kartikeya Dwivedi u64 arch_bpf_timed_may_goto(void);
1144e723608bSKumar Kartikeya Dwivedi u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *);
1145b238e187SEduard Zingerman bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id);
114662258278SAlexei Starovoitov 
114763960260SKees Cook static inline bool bpf_dump_raw_ok(const struct cred *cred)
11487105e828SDaniel Borkmann {
11497105e828SDaniel Borkmann 	/* Reconstruction of call-sites is dependent on kallsyms,
11507105e828SDaniel Borkmann 	/* Reconstruction of call-sites depends on kallsyms,
11517105e828SDaniel Borkmann 	 * thus apply the same restriction to the dump.
115263960260SKees Cook 	return kallsyms_show_value(cred);
11537105e828SDaniel Borkmann }
11547105e828SDaniel Borkmann 
1155c237ee5eSDaniel Borkmann struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
1156c237ee5eSDaniel Borkmann 				       const struct bpf_insn *patch, u32 len);
115752875a04SJakub Kicinski int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);
1158814abfabSJohn Fastabend 
11592539650fSToshiaki Makita static inline bool xdp_return_frame_no_direct(void)
11602539650fSToshiaki Makita {
1161401cb7daSSebastian Andrzej Siewior 	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
11622539650fSToshiaki Makita 
11632539650fSToshiaki Makita 	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
11642539650fSToshiaki Makita }
11652539650fSToshiaki Makita 
11662539650fSToshiaki Makita static inline void xdp_set_return_frame_no_direct(void)
11672539650fSToshiaki Makita {
1168401cb7daSSebastian Andrzej Siewior 	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
11692539650fSToshiaki Makita 
11702539650fSToshiaki Makita 	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
11712539650fSToshiaki Makita }
11722539650fSToshiaki Makita 
11732539650fSToshiaki Makita static inline void xdp_clear_return_frame_no_direct(void)
11742539650fSToshiaki Makita {
1175401cb7daSSebastian Andrzej Siewior 	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
11762539650fSToshiaki Makita 
11772539650fSToshiaki Makita 	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
11782539650fSToshiaki Makita }
11792539650fSToshiaki Makita 
1180d8d7218aSToshiaki Makita static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
1181d8d7218aSToshiaki Makita 				 unsigned int pktlen)
11826d5fc195SToshiaki Makita {
11836d5fc195SToshiaki Makita 	unsigned int len;
11846d5fc195SToshiaki Makita 
11856d5fc195SToshiaki Makita 	if (unlikely(!(fwd->flags & IFF_UP)))
11866d5fc195SToshiaki Makita 		return -ENETDOWN;
11876d5fc195SToshiaki Makita 
11886d5fc195SToshiaki Makita 	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
1189d8d7218aSToshiaki Makita 	if (pktlen > len)
11906d5fc195SToshiaki Makita 		return -EMSGSIZE;
11916d5fc195SToshiaki Makita 
11926d5fc195SToshiaki Makita 	return 0;
11936d5fc195SToshiaki Makita }
11946d5fc195SToshiaki Makita 
11951d233886SToke Høiland-Jørgensen /* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
119711393cc9SJohn Fastabend  * same cpu context. Further, for best results, no more than a single map
119711393cc9SJohn Fastabend  * for the do_redirect/do_flush pair should be used. This limitation is
119811393cc9SJohn Fastabend  * because we only track one map and force a flush when the map changes.
11992ddf71e2SJohn Fastabend  * This does not appear to be a real limitation for existing software.
120011393cc9SJohn Fastabend  */
12012facaad6SJesper Dangaard Brouer int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
12027cd1107fSAlexander Lobakin 			    struct xdp_buff *xdp, const struct bpf_prog *prog);
12035acaee0aSJohn Fastabend int xdp_do_redirect(struct net_device *dev,
12045acaee0aSJohn Fastabend 		    struct xdp_buff *xdp,
12057cd1107fSAlexander Lobakin 		    const struct bpf_prog *prog);
12061372d34cSToke Høiland-Jørgensen int xdp_do_redirect_frame(struct net_device *dev,
12071372d34cSToke Høiland-Jørgensen 			  struct xdp_buff *xdp,
12081372d34cSToke Høiland-Jørgensen 			  struct xdp_frame *xdpf,
12097cd1107fSAlexander Lobakin 			  const struct bpf_prog *prog);
12101d233886SToke Høiland-Jørgensen void xdp_do_flush(void);
12111d233886SToke Høiland-Jørgensen 
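/* Editor's example: the pairing described above as seen from a driver's
 * NAPI poll loop; the surrounding driver structure is hypothetical.
 *
 *	while (frames_left) {
 *		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 *		if (act == XDP_REDIRECT)
 *			xdp_do_redirect(netdev, &xdp, xdp_prog);
 *		...
 *	}
 *	xdp_do_flush();	// once, at the end of the poll cycle
 */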
12127cd1107fSAlexander Lobakin void bpf_warn_invalid_xdp_action(const struct net_device *dev,
12137cd1107fSAlexander Lobakin 				 const struct bpf_prog *prog, u32 act);
1214c237ee5eSDaniel Borkmann 
12152dbb9b9eSMartin KaFai Lau #ifdef CONFIG_INET
12162dbb9b9eSMartin KaFai Lau struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
12172dbb9b9eSMartin KaFai Lau 				  struct bpf_prog *prog, struct sk_buff *skb,
1218d5e4ddaeSKuniyuki Iwashima 				  struct sock *migrating_sk,
12192dbb9b9eSMartin KaFai Lau 				  u32 hash);
12202dbb9b9eSMartin KaFai Lau #else
12212dbb9b9eSMartin KaFai Lau static inline struct sock *
12222dbb9b9eSMartin KaFai Lau bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
12232dbb9b9eSMartin KaFai Lau 		     struct bpf_prog *prog, struct sk_buff *skb,
1224d5e4ddaeSKuniyuki Iwashima 		     struct sock *migrating_sk,
12252dbb9b9eSMartin KaFai Lau 		     u32 hash)
12262dbb9b9eSMartin KaFai Lau {
12272dbb9b9eSMartin KaFai Lau 	return NULL;
12282dbb9b9eSMartin KaFai Lau }
12292dbb9b9eSMartin KaFai Lau #endif
12302dbb9b9eSMartin KaFai Lau 
1231b954d834SDaniel Borkmann #ifdef CONFIG_BPF_JIT
1232c94987e4SDaniel Borkmann extern int bpf_jit_enable;
12334f3446bbSDaniel Borkmann extern int bpf_jit_harden;
123474451e66SDaniel Borkmann extern int bpf_jit_kallsyms;
1235fdadd049SDaniel Borkmann extern long bpf_jit_limit;
1236fadb7ff1SLorenz Bauer extern long bpf_jit_limit_max;
1237c94987e4SDaniel Borkmann 
1238b954d834SDaniel Borkmann typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
1239b954d834SDaniel Borkmann 
124019c02415SSong Liu void bpf_jit_fill_hole_with_zero(void *area, unsigned int size);
124119c02415SSong Liu 
1242b954d834SDaniel Borkmann struct bpf_binary_header *
1243b954d834SDaniel Borkmann bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1244b954d834SDaniel Borkmann 		     unsigned int alignment,
1245b954d834SDaniel Borkmann 		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
1246b954d834SDaniel Borkmann void bpf_jit_binary_free(struct bpf_binary_header *hdr);
1247116bfa96SValdis Kletnieks u64 bpf_jit_alloc_exec_limit(void);
1248116bfa96SValdis Kletnieks void *bpf_jit_alloc_exec(unsigned long size);
1249116bfa96SValdis Kletnieks void bpf_jit_free_exec(void *addr);
1250b954d834SDaniel Borkmann void bpf_jit_free(struct bpf_prog *fp);
12511d5f82d9SSong Liu struct bpf_binary_header *
12521d5f82d9SSong Liu bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);
12531d5f82d9SSong Liu 
125419c02415SSong Liu void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
1255f08a1c65SSong Liu void bpf_prog_pack_free(void *ptr, u32 size);
125619c02415SSong Liu 
12571d5f82d9SSong Liu static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
12581d5f82d9SSong Liu {
12591d5f82d9SSong Liu 	return list_empty(&fp->aux->ksym.lnode) ||
12601d5f82d9SSong Liu 	       fp->aux->ksym.lnode.prev == LIST_POISON2;
12611d5f82d9SSong Liu }
1262b954d834SDaniel Borkmann 
126333c98058SSong Liu struct bpf_binary_header *
126433c98058SSong Liu bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
126533c98058SSong Liu 			  unsigned int alignment,
126633c98058SSong Liu 			  struct bpf_binary_header **rw_hdr,
126733c98058SSong Liu 			  u8 **rw_image,
126833c98058SSong Liu 			  bpf_jit_fill_hole_t bpf_fill_ill_insns);
12699919c5c9SRafael Passos int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
127033c98058SSong Liu 				 struct bpf_binary_header *rw_header);
127133c98058SSong Liu void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
127233c98058SSong Liu 			      struct bpf_binary_header *rw_header);
127333c98058SSong Liu 
1274a66886feSDaniel Borkmann int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
1275a66886feSDaniel Borkmann 				struct bpf_jit_poke_descriptor *poke);
1276a66886feSDaniel Borkmann 
1277e2c95a61SDaniel Borkmann int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1278e2c95a61SDaniel Borkmann 			  const struct bpf_insn *insn, bool extra_pass,
1279e2c95a61SDaniel Borkmann 			  u64 *func_addr, bool *func_addr_fixed);
1280e2c95a61SDaniel Borkmann 
12814f3446bbSDaniel Borkmann struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
12824f3446bbSDaniel Borkmann void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
12834f3446bbSDaniel Borkmann 
1284b954d834SDaniel Borkmann static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
1285b954d834SDaniel Borkmann 				u32 pass, void *image)
1286b954d834SDaniel Borkmann {
1287b13138efSDaniel Borkmann 	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
1288b13138efSDaniel Borkmann 	       proglen, pass, image, current->comm, task_pid_nr(current));
1289b13138efSDaniel Borkmann 
1290b954d834SDaniel Borkmann 	if (image)
1291b954d834SDaniel Borkmann 		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
1292b954d834SDaniel Borkmann 			       16, 1, image, proglen, false);
1293b954d834SDaniel Borkmann }
12944f3446bbSDaniel Borkmann 
12954f3446bbSDaniel Borkmann static inline bool bpf_jit_is_ebpf(void)
12964f3446bbSDaniel Borkmann {
12974f3446bbSDaniel Borkmann # ifdef CONFIG_HAVE_EBPF_JIT
12984f3446bbSDaniel Borkmann 	return true;
12994f3446bbSDaniel Borkmann # else
13004f3446bbSDaniel Borkmann 	return false;
13014f3446bbSDaniel Borkmann # endif
13024f3446bbSDaniel Borkmann }
13034f3446bbSDaniel Borkmann 
130481ed18abSAlexei Starovoitov static inline bool ebpf_jit_enabled(void)
130581ed18abSAlexei Starovoitov {
130681ed18abSAlexei Starovoitov 	return bpf_jit_enable && bpf_jit_is_ebpf();
130781ed18abSAlexei Starovoitov }
130881ed18abSAlexei Starovoitov 
130974451e66SDaniel Borkmann static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
131074451e66SDaniel Borkmann {
131174451e66SDaniel Borkmann 	return fp->jited && bpf_jit_is_ebpf();
131274451e66SDaniel Borkmann }
131374451e66SDaniel Borkmann 
131460b58afcSAlexei Starovoitov static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
13154f3446bbSDaniel Borkmann {
13164f3446bbSDaniel Borkmann 	/* These are the prerequisites; should someone ever have the
13174f3446bbSDaniel Borkmann 	 * idea to call blinding outside of them, we make sure to
13184f3446bbSDaniel Borkmann 	 * bail out.
13194f3446bbSDaniel Borkmann 	 */
13204f3446bbSDaniel Borkmann 	if (!bpf_jit_is_ebpf())
13214f3446bbSDaniel Borkmann 		return false;
132260b58afcSAlexei Starovoitov 	if (!prog->jit_requested)
13234f3446bbSDaniel Borkmann 		return false;
13244f3446bbSDaniel Borkmann 	if (!bpf_jit_harden)
13254f3446bbSDaniel Borkmann 		return false;
1326d79a3549SAndrii Nakryiko 	if (bpf_jit_harden == 1 && bpf_token_capable(prog->aux->token, CAP_BPF))
13274f3446bbSDaniel Borkmann 		return false;
13284f3446bbSDaniel Borkmann 
13294f3446bbSDaniel Borkmann 	return true;
13304f3446bbSDaniel Borkmann }
133174451e66SDaniel Borkmann 
133274451e66SDaniel Borkmann static inline bool bpf_jit_kallsyms_enabled(void)
133374451e66SDaniel Borkmann {
133474451e66SDaniel Borkmann 	/* There are a couple of corner cases where kallsyms should
133574451e66SDaniel Borkmann 	 * not be enabled f.e. on hardening.
133674451e66SDaniel Borkmann 	 * not be enabled, e.g. under hardening.
133774451e66SDaniel Borkmann 	if (bpf_jit_harden)
133874451e66SDaniel Borkmann 		return false;
133974451e66SDaniel Borkmann 	if (!bpf_jit_kallsyms)
134074451e66SDaniel Borkmann 		return false;
134174451e66SDaniel Borkmann 	if (bpf_jit_kallsyms == 1)
134274451e66SDaniel Borkmann 		return true;
134374451e66SDaniel Borkmann 
134474451e66SDaniel Borkmann 	return false;
134574451e66SDaniel Borkmann }
134674451e66SDaniel Borkmann 
13477e1f4eb9SArnd Bergmann int __bpf_address_lookup(unsigned long addr, unsigned long *size,
134874451e66SDaniel Borkmann 				 unsigned long *off, char *sym);
134974451e66SDaniel Borkmann bool is_bpf_text_address(unsigned long addr);
135074451e66SDaniel Borkmann int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
135174451e66SDaniel Borkmann 		    char *sym);
1352f18b03faSKumar Kartikeya Dwivedi struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
135374451e66SDaniel Borkmann 
13547e1f4eb9SArnd Bergmann static inline int
135574451e66SDaniel Borkmann bpf_address_lookup(unsigned long addr, unsigned long *size,
135674451e66SDaniel Borkmann 		   unsigned long *off, char **modname, char *sym)
135774451e66SDaniel Borkmann {
13587e1f4eb9SArnd Bergmann 	int ret = __bpf_address_lookup(addr, size, off, sym);
135974451e66SDaniel Borkmann 
136074451e66SDaniel Borkmann 	if (ret && modname)
136174451e66SDaniel Borkmann 		*modname = NULL;
136274451e66SDaniel Borkmann 	return ret;
136374451e66SDaniel Borkmann }
136474451e66SDaniel Borkmann 
136574451e66SDaniel Borkmann void bpf_prog_kallsyms_add(struct bpf_prog *fp);
136674451e66SDaniel Borkmann void bpf_prog_kallsyms_del(struct bpf_prog *fp);
136774451e66SDaniel Borkmann 
136874451e66SDaniel Borkmann #else /* CONFIG_BPF_JIT */
136974451e66SDaniel Borkmann 
137081ed18abSAlexei Starovoitov static inline bool ebpf_jit_enabled(void)
137181ed18abSAlexei Starovoitov {
137281ed18abSAlexei Starovoitov 	return false;
137381ed18abSAlexei Starovoitov }
137481ed18abSAlexei Starovoitov 
1375b8cd76caSDaniel Borkmann static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
1376b8cd76caSDaniel Borkmann {
1377b8cd76caSDaniel Borkmann 	return false;
1378b8cd76caSDaniel Borkmann }
1379b8cd76caSDaniel Borkmann 
138074451e66SDaniel Borkmann static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
138174451e66SDaniel Borkmann {
138274451e66SDaniel Borkmann 	return false;
138374451e66SDaniel Borkmann }
138474451e66SDaniel Borkmann 
1385a66886feSDaniel Borkmann static inline int
1386a66886feSDaniel Borkmann bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
1387a66886feSDaniel Borkmann 			    struct bpf_jit_poke_descriptor *poke)
1388a66886feSDaniel Borkmann {
1389a66886feSDaniel Borkmann 	return -ENOTSUPP;
1390a66886feSDaniel Borkmann }
1391a66886feSDaniel Borkmann 
1392b954d834SDaniel Borkmann static inline void bpf_jit_free(struct bpf_prog *fp)
1393b954d834SDaniel Borkmann {
1394b954d834SDaniel Borkmann 	bpf_prog_unlock_free(fp);
1395b954d834SDaniel Borkmann }
139674451e66SDaniel Borkmann 
139774451e66SDaniel Borkmann static inline bool bpf_jit_kallsyms_enabled(void)
139874451e66SDaniel Borkmann {
139974451e66SDaniel Borkmann 	return false;
140074451e66SDaniel Borkmann }
140174451e66SDaniel Borkmann 
14027e1f4eb9SArnd Bergmann static inline int
140374451e66SDaniel Borkmann __bpf_address_lookup(unsigned long addr, unsigned long *size,
140474451e66SDaniel Borkmann 		     unsigned long *off, char *sym)
140574451e66SDaniel Borkmann {
14067e1f4eb9SArnd Bergmann 	return 0;
140774451e66SDaniel Borkmann }
140874451e66SDaniel Borkmann 
140974451e66SDaniel Borkmann static inline bool is_bpf_text_address(unsigned long addr)
141074451e66SDaniel Borkmann {
141174451e66SDaniel Borkmann 	return false;
141274451e66SDaniel Borkmann }
141374451e66SDaniel Borkmann 
141474451e66SDaniel Borkmann static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
141574451e66SDaniel Borkmann 				  char *type, char *sym)
141674451e66SDaniel Borkmann {
141774451e66SDaniel Borkmann 	return -ERANGE;
141874451e66SDaniel Borkmann }
141974451e66SDaniel Borkmann 
1420f18b03faSKumar Kartikeya Dwivedi static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
1421f18b03faSKumar Kartikeya Dwivedi {
1422f18b03faSKumar Kartikeya Dwivedi 	return NULL;
1423f18b03faSKumar Kartikeya Dwivedi }
1424f18b03faSKumar Kartikeya Dwivedi 
14257e1f4eb9SArnd Bergmann static inline int
142674451e66SDaniel Borkmann bpf_address_lookup(unsigned long addr, unsigned long *size,
142774451e66SDaniel Borkmann 		   unsigned long *off, char **modname, char *sym)
142874451e66SDaniel Borkmann {
14297e1f4eb9SArnd Bergmann 	return 0;
143074451e66SDaniel Borkmann }
143174451e66SDaniel Borkmann 
143274451e66SDaniel Borkmann static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
143374451e66SDaniel Borkmann {
143474451e66SDaniel Borkmann }
143574451e66SDaniel Borkmann 
143674451e66SDaniel Borkmann static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
143774451e66SDaniel Borkmann {
143874451e66SDaniel Borkmann }
14396ee52e2aSSong Liu 
1440b954d834SDaniel Borkmann #endif /* CONFIG_BPF_JIT */
1441b954d834SDaniel Borkmann 
14427d1982b4SDaniel Borkmann void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
14437d1982b4SDaniel Borkmann 
144434805931SDaniel Borkmann #define BPF_ANC		BIT(15)
144534805931SDaniel Borkmann 
144655795ef5SRabin Vincent static inline bool bpf_needs_clear_a(const struct sock_filter *first)
144755795ef5SRabin Vincent {
144855795ef5SRabin Vincent 	switch (first->code) {
144955795ef5SRabin Vincent 	case BPF_RET | BPF_K:
145055795ef5SRabin Vincent 	case BPF_LD | BPF_W | BPF_LEN:
145155795ef5SRabin Vincent 		return false;
145255795ef5SRabin Vincent 
145355795ef5SRabin Vincent 	case BPF_LD | BPF_W | BPF_ABS:
145455795ef5SRabin Vincent 	case BPF_LD | BPF_H | BPF_ABS:
145555795ef5SRabin Vincent 	case BPF_LD | BPF_B | BPF_ABS:
145655795ef5SRabin Vincent 		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
145755795ef5SRabin Vincent 			return true;
145855795ef5SRabin Vincent 		return false;
145955795ef5SRabin Vincent 
146055795ef5SRabin Vincent 	default:
146155795ef5SRabin Vincent 		return true;
146255795ef5SRabin Vincent 	}
146355795ef5SRabin Vincent }
146455795ef5SRabin Vincent 
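/* Editor's note: e.g. a classic filter whose first instruction is
 * BPF_LD | BPF_W | BPF_LEN overwrites A before any possible read, so no
 * explicit clear of A is needed, whereas a filter whose first instruction
 * may read A (the default case above) conservatively requires one.
 */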
146534805931SDaniel Borkmann static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
146634805931SDaniel Borkmann {
146734805931SDaniel Borkmann 	BUG_ON(ftest->code & BPF_ANC);
146834805931SDaniel Borkmann 
146934805931SDaniel Borkmann 	switch (ftest->code) {
147034805931SDaniel Borkmann 	case BPF_LD | BPF_W | BPF_ABS:
147134805931SDaniel Borkmann 	case BPF_LD | BPF_H | BPF_ABS:
147234805931SDaniel Borkmann 	case BPF_LD | BPF_B | BPF_ABS:
147334805931SDaniel Borkmann #define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
147434805931SDaniel Borkmann 				return BPF_ANC | SKF_AD_##CODE
147534805931SDaniel Borkmann 		switch (ftest->k) {
147634805931SDaniel Borkmann 		BPF_ANCILLARY(PROTOCOL);
147734805931SDaniel Borkmann 		BPF_ANCILLARY(PKTTYPE);
147834805931SDaniel Borkmann 		BPF_ANCILLARY(IFINDEX);
147934805931SDaniel Borkmann 		BPF_ANCILLARY(NLATTR);
148034805931SDaniel Borkmann 		BPF_ANCILLARY(NLATTR_NEST);
148134805931SDaniel Borkmann 		BPF_ANCILLARY(MARK);
148234805931SDaniel Borkmann 		BPF_ANCILLARY(QUEUE);
148334805931SDaniel Borkmann 		BPF_ANCILLARY(HATYPE);
148434805931SDaniel Borkmann 		BPF_ANCILLARY(RXHASH);
148534805931SDaniel Borkmann 		BPF_ANCILLARY(CPU);
148634805931SDaniel Borkmann 		BPF_ANCILLARY(ALU_XOR_X);
148734805931SDaniel Borkmann 		BPF_ANCILLARY(VLAN_TAG);
148834805931SDaniel Borkmann 		BPF_ANCILLARY(VLAN_TAG_PRESENT);
148934805931SDaniel Borkmann 		BPF_ANCILLARY(PAY_OFFSET);
149034805931SDaniel Borkmann 		BPF_ANCILLARY(RANDOM);
149127cd5452SMichal Sekletar 		BPF_ANCILLARY(VLAN_TPID);
149234805931SDaniel Borkmann 		}
1493df561f66SGustavo A. R. Silva 		fallthrough;
149434805931SDaniel Borkmann 	default:
149534805931SDaniel Borkmann 		return ftest->code;
149634805931SDaniel Borkmann 	}
149734805931SDaniel Borkmann }
149834805931SDaniel Borkmann 
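/* Editor's note: e.g. the classic instruction
 *	BPF_LD | BPF_H | BPF_ABS, k = SKF_AD_OFF + SKF_AD_PROTOCOL
 * is mapped by bpf_anc_helper() to BPF_ANC | SKF_AD_PROTOCOL, while an
 * ordinary absolute load (k below SKF_AD_OFF) returns its code unchanged.
 */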
14999f12fbe6SZi Shen Lim void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
15009f12fbe6SZi Shen Lim 					   int k, unsigned int size);
15019f12fbe6SZi Shen Lim 
1502ea02f941SMichal Sekletar static inline int bpf_tell_extensions(void)
1503ea02f941SMichal Sekletar {
150437692299SDaniel Borkmann 	return SKF_AD_MAX;
1505ea02f941SMichal Sekletar }
1506ea02f941SMichal Sekletar 
15074fbac77dSAndrey Ignatov struct bpf_sock_addr_kern {
15084fbac77dSAndrey Ignatov 	struct sock *sk;
15094fbac77dSAndrey Ignatov 	struct sockaddr *uaddr;
15104fbac77dSAndrey Ignatov 	/* Temporary "register" to make indirect stores to nested structures
15114fbac77dSAndrey Ignatov 	 * defined above. We need three registers to make such a store, but
15124fbac77dSAndrey Ignatov 	 * only two (src and dst) are available at convert_ctx_access time
15134fbac77dSAndrey Ignatov 	 * only two (src and dst) are available at convert_ctx_access time.
15144fbac77dSAndrey Ignatov 	u64 tmp_reg;
15151cedee13SAndrey Ignatov 	void *t_ctx;	/* Attach type specific context. */
1516fefba7d1SDaan De Meyer 	u32 uaddrlen;
15174fbac77dSAndrey Ignatov };
15184fbac77dSAndrey Ignatov 
151940304b2aSLawrence Brakmo struct bpf_sock_ops_kern {
152040304b2aSLawrence Brakmo 	struct	sock *sk;
152140304b2aSLawrence Brakmo 	union {
1522de525be2SLawrence Brakmo 		u32 args[4];
152340304b2aSLawrence Brakmo 		u32 reply;
152440304b2aSLawrence Brakmo 		u32 replylong[4];
152540304b2aSLawrence Brakmo 	};
15260813a841SMartin KaFai Lau 	struct sk_buff	*syn_skb;
15270813a841SMartin KaFai Lau 	struct sk_buff	*skb;
15280813a841SMartin KaFai Lau 	void	*skb_data_end;
1529c9985d09SMartin KaFai Lau 	u8	op;
1530c9985d09SMartin KaFai Lau 	u8	is_fullsock;
15310813a841SMartin KaFai Lau 	u8	is_locked_tcp_sock;
1532b73042b8SLawrence Brakmo 	u8	remaining_opt_len;
1533b73042b8SLawrence Brakmo 	u64	temp;			/* temp and everything after is not
1534b73042b8SLawrence Brakmo 					 * initialized to 0 before calling
1535b73042b8SLawrence Brakmo 					 * the BPF program. New fields that
1536b73042b8SLawrence Brakmo 					 * should be initialized to 0 should
1537b73042b8SLawrence Brakmo 					 * be inserted before temp.
1538b73042b8SLawrence Brakmo 					 * temp is scratch storage used by
1539b73042b8SLawrence Brakmo 					 * sock_ops_convert_ctx_access
1540b73042b8SLawrence Brakmo 					 * as temporary storage of a register.
154140304b2aSLawrence Brakmo 					 */
154240304b2aSLawrence Brakmo };
15437b146cebSAndrey Ignatov 
15447b146cebSAndrey Ignatov struct bpf_sysctl_kern {
15452c1713a8SThomas Weißschuh 	struct ctl_table_header *head;
15461d11b301SAndrey Ignatov 	const struct ctl_table *table;
15471d11b301SAndrey Ignatov 	void *cur_val;
15484e63acdfSAndrey Ignatov 	size_t cur_len;
15494e63acdfSAndrey Ignatov 	void *new_val;
15504e63acdfSAndrey Ignatov 	size_t new_len;
15517b146cebSAndrey Ignatov 	int new_updated;
1552e1550bfeSAndrey Ignatov 	int write;
1553e1550bfeSAndrey Ignatov 	loff_t *ppos;
1554e1550bfeSAndrey Ignatov 	/* Temporary "register" for indirect stores to ppos. */
15557b146cebSAndrey Ignatov 	u64 tmp_reg;
15567b146cebSAndrey Ignatov };
155720f2505fSStanislav Fomichev 
155820f2505fSStanislav Fomichev #define BPF_SOCKOPT_KERN_BUF_SIZE	32
155920f2505fSStanislav Fomichev struct bpf_sockopt_buf {
156020f2505fSStanislav Fomichev 	u8		data[BPF_SOCKOPT_KERN_BUF_SIZE];
156120f2505fSStanislav Fomichev };
15620d01da6aSStanislav Fomichev 
15630d01da6aSStanislav Fomichev struct bpf_sockopt_kern {
15640d01da6aSStanislav Fomichev 	struct sock	*sk;
15650d01da6aSStanislav Fomichev 	u8		*optval;
15660d01da6aSStanislav Fomichev 	u8		*optval_end;
15670d01da6aSStanislav Fomichev 	s32		level;
15680d01da6aSStanislav Fomichev 	s32		optname;
1569c4dcfdd4SYiFei Zhu 	s32		optlen;
1570c4dcfdd4SYiFei Zhu 	/* for retval in struct bpf_cg_run_ctx */
1571c4dcfdd4SYiFei Zhu 	struct task_struct *current_task;
1572c4dcfdd4SYiFei Zhu 	/* Temporary "register" for indirect stores. */
15730d01da6aSStanislav Fomichev 	u64		tmp_reg;
15740d01da6aSStanislav Fomichev };
1575b1ea9ff6SChristoph Hellwig 
15764d295e54SChristoph Hellwig int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
1577e9ddbb77SJakub Sitnicki 
1578e9ddbb77SJakub Sitnicki struct bpf_sk_lookup_kern {
1579e9ddbb77SJakub Sitnicki 	u16		family;
1580d66423fbSLorenz Bauer 	u16		protocol;
1581d66423fbSLorenz Bauer 	__be16		sport;
1582e9ddbb77SJakub Sitnicki 	u16		dport;
1583e9ddbb77SJakub Sitnicki 	struct {
1584e9ddbb77SJakub Sitnicki 		__be32 saddr;
1585e9ddbb77SJakub Sitnicki 		__be32 daddr;
1586e9ddbb77SJakub Sitnicki 	} v4;
1587e9ddbb77SJakub Sitnicki 	struct {
1588e9ddbb77SJakub Sitnicki 		const struct in6_addr *saddr;
1589e9ddbb77SJakub Sitnicki 		const struct in6_addr *daddr;
1590e9ddbb77SJakub Sitnicki 	} v6;
1591f8931565SMark Pashmfouroush 	struct sock	*selected_sk;
1592e9ddbb77SJakub Sitnicki 	u32		ingress_ifindex;
1593e9ddbb77SJakub Sitnicki 	bool		no_reuseport;
1594e9ddbb77SJakub Sitnicki };
15951559b4aaSJakub Sitnicki 
15961559b4aaSJakub Sitnicki extern struct static_key_false bpf_sk_lookup_enabled;
15971559b4aaSJakub Sitnicki 
15981559b4aaSJakub Sitnicki /* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup.
15991559b4aaSJakub Sitnicki  *
16001559b4aaSJakub Sitnicki  * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and
16011559b4aaSJakub Sitnicki  * SK_DROP. Their meaning is as follows:
16021559b4aaSJakub Sitnicki  *
16031559b4aaSJakub Sitnicki  *  SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result
16041559b4aaSJakub Sitnicki  *  SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup
16051559b4aaSJakub Sitnicki  *  SK_DROP                           : terminate lookup with -ECONNREFUSED
16061559b4aaSJakub Sitnicki  *
16071559b4aaSJakub Sitnicki  * This macro aggregates return values and selected sockets from
16081559b4aaSJakub Sitnicki  * multiple BPF programs according to following rules in order:
16091559b4aaSJakub Sitnicki  *
16101559b4aaSJakub Sitnicki  *  1. If any program returned SK_PASS and a non-NULL ctx.selected_sk,
16111559b4aaSJakub Sitnicki  *     macro result is SK_PASS and last ctx.selected_sk is used.
16121559b4aaSJakub Sitnicki  *  2. If any program returned SK_DROP return value,
16131559b4aaSJakub Sitnicki  *     macro result is SK_DROP.
16141559b4aaSJakub Sitnicki  *  3. Otherwise result is SK_PASS and ctx.selected_sk is NULL.
16151559b4aaSJakub Sitnicki  *
16161559b4aaSJakub Sitnicki  * Caller must ensure that the prog array is non-NULL, and that the
16171559b4aaSJakub Sitnicki  * array as well as the programs it contains remain valid.
16181559b4aaSJakub Sitnicki  */
16191559b4aaSJakub Sitnicki #define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func)			\
16201559b4aaSJakub Sitnicki 	({								\
16211559b4aaSJakub Sitnicki 		struct bpf_sk_lookup_kern *_ctx = &(ctx);		\
16221559b4aaSJakub Sitnicki 		struct bpf_prog_array_item *_item;			\
16231559b4aaSJakub Sitnicki 		struct sock *_selected_sk = NULL;			\
16241559b4aaSJakub Sitnicki 		bool _no_reuseport = false;				\
16251559b4aaSJakub Sitnicki 		struct bpf_prog *_prog;					\
16261559b4aaSJakub Sitnicki 		bool _all_pass = true;					\
16271559b4aaSJakub Sitnicki 		u32 _ret;						\
16281559b4aaSJakub Sitnicki 									\
16291559b4aaSJakub Sitnicki 		migrate_disable();					\
16301559b4aaSJakub Sitnicki 		_item = &(array)->items[0];				\
16311559b4aaSJakub Sitnicki 		while ((_prog = READ_ONCE(_item->prog))) {		\
16321559b4aaSJakub Sitnicki 			/* restore most recent selection */		\
16331559b4aaSJakub Sitnicki 			_ctx->selected_sk = _selected_sk;		\
16341559b4aaSJakub Sitnicki 			_ctx->no_reuseport = _no_reuseport;		\
16351559b4aaSJakub Sitnicki 									\
16361559b4aaSJakub Sitnicki 			_ret = func(_prog, _ctx);			\
16371559b4aaSJakub Sitnicki 			if (_ret == SK_PASS && _ctx->selected_sk) {	\
16381559b4aaSJakub Sitnicki 				/* remember last non-NULL socket */	\
16391559b4aaSJakub Sitnicki 				_selected_sk = _ctx->selected_sk;	\
16401559b4aaSJakub Sitnicki 				_no_reuseport = _ctx->no_reuseport;	\
16411559b4aaSJakub Sitnicki 			} else if (_ret == SK_DROP && _all_pass) {	\
16421559b4aaSJakub Sitnicki 				_all_pass = false;			\
16431559b4aaSJakub Sitnicki 			}						\
16441559b4aaSJakub Sitnicki 			_item++;					\
16451559b4aaSJakub Sitnicki 		}							\
16461559b4aaSJakub Sitnicki 		_ctx->selected_sk = _selected_sk;			\
16471559b4aaSJakub Sitnicki 		_ctx->no_reuseport = _no_reuseport;			\
16481559b4aaSJakub Sitnicki 		migrate_enable();					\
16491559b4aaSJakub Sitnicki 		_all_pass || _selected_sk ? SK_PASS : SK_DROP;		\
16501559b4aaSJakub Sitnicki 	 })
1651d4433e8bSEric Dumazet 
16521559b4aaSJakub Sitnicki static inline bool bpf_sk_lookup_run_v4(const struct net *net, int protocol,
16531559b4aaSJakub Sitnicki 					const __be32 saddr, const __be16 sport,
1654f8931565SMark Pashmfouroush 					const __be32 daddr, const u16 dport,
16551559b4aaSJakub Sitnicki 					const int ifindex, struct sock **psk)
16561559b4aaSJakub Sitnicki {
16571559b4aaSJakub Sitnicki 	struct bpf_prog_array *run_array;
16581559b4aaSJakub Sitnicki 	struct sock *selected_sk = NULL;
16591559b4aaSJakub Sitnicki 	bool no_reuseport = false;
16601559b4aaSJakub Sitnicki 
16611559b4aaSJakub Sitnicki 	rcu_read_lock();
16621559b4aaSJakub Sitnicki 	run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
16631559b4aaSJakub Sitnicki 	if (run_array) {
16641559b4aaSJakub Sitnicki 		struct bpf_sk_lookup_kern ctx = {
16651559b4aaSJakub Sitnicki 			.family		= AF_INET,
16661559b4aaSJakub Sitnicki 			.protocol	= protocol,
16671559b4aaSJakub Sitnicki 			.v4.saddr	= saddr,
16681559b4aaSJakub Sitnicki 			.v4.daddr	= daddr,
16691559b4aaSJakub Sitnicki 			.sport		= sport,
1670f8931565SMark Pashmfouroush 			.dport		= dport,
16711559b4aaSJakub Sitnicki 			.ingress_ifindex	= ifindex,
16721559b4aaSJakub Sitnicki 		};
16731559b4aaSJakub Sitnicki 		u32 act;
1674fb7dd8bcSAndrii Nakryiko 
16751559b4aaSJakub Sitnicki 		act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
16761559b4aaSJakub Sitnicki 		if (act == SK_PASS) {
16771559b4aaSJakub Sitnicki 			selected_sk = ctx.selected_sk;
16781559b4aaSJakub Sitnicki 			no_reuseport = ctx.no_reuseport;
16791559b4aaSJakub Sitnicki 		} else {
16801559b4aaSJakub Sitnicki 			selected_sk = ERR_PTR(-ECONNREFUSED);
16811559b4aaSJakub Sitnicki 		}
16821559b4aaSJakub Sitnicki 	}
16831559b4aaSJakub Sitnicki 	rcu_read_unlock();
16841559b4aaSJakub Sitnicki 	*psk = selected_sk;
16851559b4aaSJakub Sitnicki 	return no_reuseport;
16861559b4aaSJakub Sitnicki }
16871122702fSJakub Sitnicki 
168810b2a44cSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
16891122702fSJakub Sitnicki static inline bool bpf_sk_lookup_run_v6(const struct net *net, int protocol,
16901122702fSJakub Sitnicki 					const struct in6_addr *saddr,
16911122702fSJakub Sitnicki 					const __be16 sport,
16921122702fSJakub Sitnicki 					const struct in6_addr *daddr,
1693f8931565SMark Pashmfouroush 					const u16 dport,
16941122702fSJakub Sitnicki 					const int ifindex, struct sock **psk)
16951122702fSJakub Sitnicki {
16961122702fSJakub Sitnicki 	struct bpf_prog_array *run_array;
16971122702fSJakub Sitnicki 	struct sock *selected_sk = NULL;
16981122702fSJakub Sitnicki 	bool no_reuseport = false;
16991122702fSJakub Sitnicki 
17001122702fSJakub Sitnicki 	rcu_read_lock();
17011122702fSJakub Sitnicki 	run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
17021122702fSJakub Sitnicki 	if (run_array) {
17031122702fSJakub Sitnicki 		struct bpf_sk_lookup_kern ctx = {
17041122702fSJakub Sitnicki 			.family		= AF_INET6,
17051122702fSJakub Sitnicki 			.protocol	= protocol,
17061122702fSJakub Sitnicki 			.v6.saddr	= saddr,
17071122702fSJakub Sitnicki 			.v6.daddr	= daddr,
17081122702fSJakub Sitnicki 			.sport		= sport,
1709f8931565SMark Pashmfouroush 			.dport		= dport,
17101122702fSJakub Sitnicki 			.ingress_ifindex	= ifindex,
17111122702fSJakub Sitnicki 		};
17121122702fSJakub Sitnicki 		u32 act;
1713fb7dd8bcSAndrii Nakryiko 
17141122702fSJakub Sitnicki 		act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
17151122702fSJakub Sitnicki 		if (act == SK_PASS) {
17161122702fSJakub Sitnicki 			selected_sk = ctx.selected_sk;
17171122702fSJakub Sitnicki 			no_reuseport = ctx.no_reuseport;
17181122702fSJakub Sitnicki 		} else {
17191122702fSJakub Sitnicki 			selected_sk = ERR_PTR(-ECONNREFUSED);
17201122702fSJakub Sitnicki 		}
17211122702fSJakub Sitnicki 	}
17221122702fSJakub Sitnicki 	rcu_read_unlock();
17231122702fSJakub Sitnicki 	*psk = selected_sk;
17241122702fSJakub Sitnicki 	return no_reuseport;
17251122702fSJakub Sitnicki }
17261122702fSJakub Sitnicki #endif /* IS_ENABLED(CONFIG_IPV6) */
1727d7ba4cc9SJP Kobryn 
1728e624d4edSHangbin Liu static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
1729e6a4750fSBjörn Töpel 						   u64 flags, const u64 flag_mask,
1730e6a4750fSBjörn Töpel 						   void *lookup_elem(struct bpf_map *map, u32 key))
1731401cb7daSSebastian Andrzej Siewior {
1732e624d4edSHangbin Liu 	struct bpf_redirect_info *ri = bpf_net_ctx_get_ri();
1733e6a4750fSBjörn Töpel 	const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
1734e6a4750fSBjörn Töpel 
1735e624d4edSHangbin Liu 	/* Lower bits of the flags are used as return code on lookup failure */
1736e6a4750fSBjörn Töpel 	if (unlikely(flags & ~(action_mask | flag_mask)))
1737e6a4750fSBjörn Töpel 		return XDP_ABORTED;
173832637e33SToke Høiland-Jørgensen 
1739e624d4edSHangbin Liu 	ri->tgt_value = lookup_elem(map, index);
1740e6a4750fSBjörn Töpel 	if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
1741e6a4750fSBjörn Töpel 		/* If the lookup fails we want to clear out the state in the
1742e6a4750fSBjörn Töpel 		 * redirect_info struct completely, so that if an eBPF program
1743e6a4750fSBjörn Töpel 		 * performs multiple lookups, the last one always takes
1744e6a4750fSBjörn Töpel 		 * precedence.
1745ee75aef2SBjörn Töpel 		 */
1746ee75aef2SBjörn Töpel 		ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
1747e624d4edSHangbin Liu 		ri->map_type = BPF_MAP_TYPE_UNSPEC;
1748e6a4750fSBjörn Töpel 		return flags & action_mask;
1749e6a4750fSBjörn Töpel 	}
175032637e33SToke Høiland-Jørgensen 
1751ee75aef2SBjörn Töpel 	ri->tgt_index = index;
1752ee75aef2SBjörn Töpel 	ri->map_id = map->id;
1753e6a4750fSBjörn Töpel 	ri->map_type = map->map_type;
1754e624d4edSHangbin Liu 
1755e624d4edSHangbin Liu 	if (flags & BPF_F_BROADCAST) {
1756e624d4edSHangbin Liu 		WRITE_ONCE(ri->map, map);
1757e624d4edSHangbin Liu 		ri->flags = flags;
1758e624d4edSHangbin Liu 	} else {
1759e624d4edSHangbin Liu 		WRITE_ONCE(ri->map, NULL);
1760e624d4edSHangbin Liu 		ri->flags = 0;
1761e624d4edSHangbin Liu 	}
1762e6a4750fSBjörn Töpel 
1763e6a4750fSBjörn Töpel 	return XDP_REDIRECT;
1764e6a4750fSBjörn Töpel }
1765b5964b96SJoanne Koong 
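/* Editor's example: a hedged sketch of how a map type wires the helper
 * above into its redirect callback (cf. dev_map_redirect() in
 * kernel/bpf/devmap.c).
 *
 *	static long dev_map_redirect(struct bpf_map *map, u64 ifindex,
 *				     u64 flags)
 *	{
 *		return __bpf_xdp_redirect_map(map, ifindex, flags,
 *					      BPF_F_BROADCAST |
 *					      BPF_F_EXCLUDE_INGRESS,
 *					      __dev_map_lookup_elem);
 *	}
 */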
1766b5964b96SJoanne Koong #ifdef CONFIG_NET
1767b5964b96SJoanne Koong int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len);
1768b5964b96SJoanne Koong int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from,
176905421aecSJoanne Koong 			  u32 len, u64 flags);
177005421aecSJoanne Koong int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
177166e3a13eSJoanne Koong int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len);
177266e3a13eSJoanne Koong void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len);
177366e3a13eSJoanne Koong void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off,
1774b5964b96SJoanne Koong 		      void *buf, unsigned long len, bool flush);
1775b5964b96SJoanne Koong #else /* CONFIG_NET */
1776b5964b96SJoanne Koong static inline int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset,
1777b5964b96SJoanne Koong 				       void *to, u32 len)
1778b5964b96SJoanne Koong {
1779b5964b96SJoanne Koong 	return -EOPNOTSUPP;
1780b5964b96SJoanne Koong }
1781b5964b96SJoanne Koong 
1782b5964b96SJoanne Koong static inline int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset,
1783b5964b96SJoanne Koong 					const void *from, u32 len, u64 flags)
1784b5964b96SJoanne Koong {
1785b5964b96SJoanne Koong 	return -EOPNOTSUPP;
178605421aecSJoanne Koong }
178705421aecSJoanne Koong 
178805421aecSJoanne Koong static inline int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset,
178905421aecSJoanne Koong 				       void *buf, u32 len)
179005421aecSJoanne Koong {
179105421aecSJoanne Koong 	return -EOPNOTSUPP;
179205421aecSJoanne Koong }
179305421aecSJoanne Koong 
179405421aecSJoanne Koong static inline int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset,
179505421aecSJoanne Koong 					void *buf, u32 len)
179605421aecSJoanne Koong {
179705421aecSJoanne Koong 	return -EOPNOTSUPP;
179866e3a13eSJoanne Koong }
179966e3a13eSJoanne Koong 
180066e3a13eSJoanne Koong static inline void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
180166e3a13eSJoanne Koong {
180266e3a13eSJoanne Koong 	return NULL;
180366e3a13eSJoanne Koong }
18048a60a041SKui-Feng Lee 
180566e3a13eSJoanne Koong static inline void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off, void *buf,
180666e3a13eSJoanne Koong 				    unsigned long len, bool flush)
180766e3a13eSJoanne Koong {
1808b5964b96SJoanne Koong }
1809b5964b96SJoanne Koong #endif /* CONFIG_NET */
18101da177e4SLinus Torvalds 
1811 #endif /* __LINUX_FILTER_H__ */
1812