/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/cryptohash.h>

#include <net/sch_generic.h>

#ifdef CONFIG_ARCH_HAS_SET_MEMORY
#include <asm/set_memory.h>
#endif

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc. are used as argument mappings of function
 * calls in the BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* Kernel hidden auxiliary/helper register for hardening step.
 * Only used by eBPF JITs. It's nothing more than a temporary
 * register that JITs use internally, only that here it's part
 * of eBPF instructions that have been rewritten for blinding
 * constants. See JIT pre-step in bpf_jit_blind_constants().
 */
#define BPF_REG_AX		MAX_BPF_REG
#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)

/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL	0xf0

/* As per nm, we expose JITed images as a text (code) section for
 * kallsyms. That way, tools like perf can find it to match
 * addresses.
 */
#define BPF_SYM_ELF_TYPE	't'

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

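/* Usage sketch (illustrative only, not part of the original header;
 * the array name is hypothetical): BPF_LD_IMM64() expands to *two*
 * struct bpf_insn initializers and therefore fills two slots of an
 * instruction array:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_LD_IMM64(BPF_REG_0, 0x1122334455667788ULL),
 *		BPF_EXIT_INSN(),
 *	};
 *
 * Here insns[0] and insns[1] together hold the 64-bit immediate and
 * insns[2] is the exit. BPF_LD_MAP_FD() reuses the same encoding with
 * src_reg = BPF_PSEUDO_MAP_FD, telling the verifier to replace the
 * process-local fd with the actual map address at load time.
 */
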
/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_JA,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Function call */

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

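/* Usage sketch (illustrative only; the array name is hypothetical):
 * the macros above are plain compound-literal initializers, so a small
 * program is just a C array. The program below returns 1 if R1 (the
 * context) is non-NULL, else 0 -- the BPF_JEQ jump skips one insn when
 * R1 == 0:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
 *		BPF_MOV64_IMM(BPF_REG_0, 1),
 *		BPF_EXIT_INSN(),
 *	};
 */
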
/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K)					\
	((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF)				\
	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))

#define bytes_to_bpf_size(bytes)				\
({								\
	int bpf_size = -EINVAL;					\
								\
	if (bytes == sizeof(u8))				\
		bpf_size = BPF_B;				\
	else if (bytes == sizeof(u16))				\
		bpf_size = BPF_H;				\
	else if (bytes == sizeof(u32))				\
		bpf_size = BPF_W;				\
	else if (bytes == sizeof(u64))				\
		bpf_size = BPF_DW;				\
								\
	bpf_size;						\
})

#define bpf_size_to_bytes(bpf_size)				\
({								\
	int bytes = -EINVAL;					\
								\
	if (bpf_size == BPF_B)					\
		bytes = sizeof(u8);				\
	else if (bpf_size == BPF_H)				\
		bytes = sizeof(u16);				\
	else if (bpf_size == BPF_W)				\
		bytes = sizeof(u32);				\
	else if (bpf_size == BPF_DW)				\
		bytes = sizeof(u64);				\
								\
	bytes;							\
})

#define BPF_SIZEOF(type)					\
	({							\
		const int __size = bytes_to_bpf_size(sizeof(type)); \
		BUILD_BUG_ON(__size < 0);			\
		__size;						\
	})

#define BPF_FIELD_SIZEOF(type, field)				\
	({							\
		const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
		BUILD_BUG_ON(__size < 0);			\
		__size;						\
	})

#define BPF_LDST_BYTES(insn)					\
	({							\
		const int __size = bpf_size_to_bytes(BPF_SIZE(insn->code)); \
		WARN_ON(__size < 0);				\
		__size;						\
	})

#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)

#define __BPF_REG_0(...) __BPF_PAD(5)
#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)

#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)

#define __BPF_CAST(t, a)						       \
	(__force t)							       \
	(__force							       \
	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long),      \
				      (unsigned long)0, (t)0))) a
#define __BPF_V void
#define __BPF_N

#define __BPF_DECL_ARGS(t, a) t   a
#define __BPF_DECL_REGS(t, a) u64 a

#define __BPF_PAD(n)							       \
	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,       \
		  u64, __ur_3, u64, __ur_4, u64, __ur_5)

#define BPF_CALL_x(x, name, ...)					       \
	static __always_inline						       \
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__));   \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));	       \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))	       \
	{								       \
		return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
	}								       \
	static __always_inline						       \
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))

#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)

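/* Usage sketch: BPF_CALL_x(x, name, ...) emits a u64-typed wrapper
 * 'name' taking x u64 register arguments, plus an always-inlined body
 * with properly typed arguments. The helper name below is hypothetical;
 * real helpers following this pattern live in e.g. net/core/filter.c:
 *
 *	BPF_CALL_2(bpf_example_helper, struct sk_buff *, skb, u32, flags)
 *	{
 *		return skb->len + flags;
 *	}
 *
 * The wrapper can then be referenced from BPF_EMIT_CALL() or a
 * struct bpf_func_proto's .func member.
 */
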
#define bpf_ctx_range(TYPE, MEMBER)						\
	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)				\
	offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1

#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)				\
	({									\
		BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE));		\
		*(PTR_SIZE) = (SIZE);						\
		offsetof(TYPE, MEMBER);						\
	})

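/* Usage sketch (hypothetical excerpt): bpf_ctx_range() expands to a
 * GCC case range, so a program type's is_valid_access() callback can
 * match a whole context field by offset:
 *
 *	switch (off) {
 *	case bpf_ctx_range(struct __sk_buff, data):
 *		...
 *	case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
 *		...
 *	}
 */
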
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;	/* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

struct bpf_binary_header {
	unsigned int pages;
	u8 image[];
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	kmemcheck_bitfield_begin(meta);
	u16			jited:1,	/* Is our filter JIT'ed? */
				locked:1,	/* Program image locked? */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1;	/* Do we need dst entry? */
	kmemcheck_bitfield_end(meta);
	enum bpf_prog_type	type;		/* Type of BPF program */
	u32			len;		/* Number of filter blocks */
	u32			jited_len;	/* Size of jited insns in bytes */
	u8			tag[BPF_TAG_SIZE];
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	unsigned int		(*bpf_func)(const void *ctx,
					    const struct bpf_insn *insn);
	/* Instructions for interpreter */
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
	};
};

struct sk_filter {
	refcount_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

#define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)

#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
	struct qdisc_skb_cb qdisc_cb;
	void *data_end;
};

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_hard_start;
};

/* compute the linear packet data range [data, data_end) which
 * will be accessed by cls_bpf, act_bpf and lwt programs
 */
static inline void bpf_compute_data_end(struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
	cb->data_end = skb->data + skb_headlen(skb);
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
	/* eBPF programs may read/write skb->cb[] area to transfer
	 * metadata between tail calls. Since this also needs to work
	 * with tc, that scratch memory is mapped to qdisc_skb_cb's
	 * data area.
	 *
	 * In some socket filter cases, the cb unfortunately needs to
	 * be saved/restored so that protocol specific skb->cb[] data
	 * won't be lost. In any case, due to unprivileged eBPF programs
	 * attached to sockets, we need to clear the bpf_skb_cb() area
	 * to not leak previous contents to user space.
	 */
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
		     FIELD_SIZEOF(struct qdisc_skb_cb, data));

	return qdisc_skb_cb(skb)->data;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
	u32 res;

	if (unlikely(prog->cb_access)) {
		memcpy(cb_saved, cb_data, sizeof(cb_saved));
		memset(cb_data, 0, sizeof(cb_saved));
	}

	res = BPF_PROG_RUN(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, cb_saved, sizeof(cb_saved));

	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	return BPF_PROG_RUN(prog, skb);
}

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
					    struct xdp_buff *xdp)
{
	/* Caller needs to hold rcu_read_lock() (!), otherwise program
	 * can be released while still running, or map elements could be
	 * freed early while still having concurrent users. XDP fastpath
	 * already takes rcu_read_lock() when fetching the program, so
	 * it's not necessary here anymore.
	 */
	return BPF_PROG_RUN(prog, xdp);
}

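/* Caller-side sketch (illustrative; the ring/field names are
 * hypothetical): the RCU read lock mentioned above is typically taken
 * around both the program lookup and the run in a driver's receive
 * path:
 *
 *	rcu_read_lock();
 *	prog = READ_ONCE(rx_ring->xdp_prog);
 *	if (prog) {
 *		act = bpf_prog_run_xdp(prog, &xdp);
 *		if (act != XDP_PASS)
 *			... handle XDP_TX/XDP_DROP, calling
 *			    bpf_warn_invalid_xdp_action() on unknown acts ...
 *	}
 *	rcu_read_unlock();
 */
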
static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
	return prog->len * sizeof(struct bpf_insn);
}

static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
	return round_up(bpf_prog_insn_size(prog) +
			sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}

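/* Worked example: both union members at the tail of struct bpf_prog
 * are 8 bytes per entry (sizeof(struct sock_filter) ==
 * sizeof(struct bpf_insn) == 8), so bpf_prog_size(4) yields the header
 * size plus 32 bytes of instruction space; the max() guards the
 * degenerate proglen == 0 case.
 */
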
static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
	/* When classic BPF programs have been loaded and the arch
	 * does not have a classic BPF JIT (anymore), they have been
	 * converted via bpf_migrate_filter() to eBPF and thus always
	 * have an unspec program type.
	 */
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, const u32 size_default)
{
	bool off_ok;
#ifdef __LITTLE_ENDIAN
	off_ok = (off & (size_default - 1)) == 0;
#else
	off_ok = (off & (size_default - 1)) + size == size_default;
#endif
	return off_ok && size <= size_default && (size & (size - 1)) == 0;
}

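/* Worked example: for a 32-bit context field (size_default == 4) on a
 * little-endian machine, a 1-byte access at off 0 passes (0 & 3 == 0,
 * 1 <= 4, power of two), while the same access at off 1 fails the
 * alignment test (1 & 3 != 0). On big-endian, the offset must instead
 * address the least significant end: off 3 with size 1 passes, since
 * (3 & 3) + 1 == 4.
 */
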
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

#ifdef CONFIG_ARCH_HAS_SET_MEMORY
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
	fp->locked = 1;
	WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
	if (fp->locked) {
		WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
		/* In case set_memory_rw() fails, we want to be the first
		 * to crash here instead of some random place later on.
		 */
		fp->locked = 0;
	}
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
	WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
}

static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
	WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
}

static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
}
#endif /* CONFIG_ARCH_HAS_SET_MEMORY */

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr = real_start & PAGE_MASK;

	return (void *)addr;
}

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_ro(fp);
	__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
				       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_pkt_data(void *func);

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);
void bpf_warn_invalid_xdp_action(u32 act);

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_free(struct bpf_prog *fp);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
	       proglen, pass, image, current->comm, task_pid_nr(current));

	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
	return true;
# else
	return false;
# endif
}

static inline bool ebpf_jit_enabled(void)
{
	return bpf_jit_enable && bpf_jit_is_ebpf();
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return fp->jited && bpf_jit_is_ebpf();
}

static inline bool bpf_jit_blinding_enabled(void)
{
	/* These are the prerequisites; should someone ever have the
	 * idea to call blinding outside of them, we make sure to
	 * bail out.
	 */
	if (!bpf_jit_is_ebpf())
		return false;
	if (!bpf_jit_enable)
		return false;
	if (!bpf_jit_harden)
		return false;
	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
		return false;

	return true;
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	/* There are a couple of corner cases where kallsyms should
	 * not be enabled, e.g. when hardening is active.
	 */
	if (bpf_jit_harden)
		return false;
	if (!bpf_jit_kallsyms)
		return false;
	if (bpf_jit_kallsyms == 1)
		return true;

	return false;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym);

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	const char *ret = __bpf_address_lookup(addr, size, off, sym);

	if (ret && modname)
		*modname = NULL;
	return ret;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);

#else /* CONFIG_BPF_JIT */

static inline bool ebpf_jit_enabled(void)
{
	return false;
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return false;
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	return false;
}

static inline const char *
__bpf_address_lookup(unsigned long addr, unsigned long *size,
		     unsigned long *off, char *sym)
{
	return NULL;
}

static inline bool is_bpf_text_address(unsigned long addr)
{
	return false;
}

static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
				  char *type, char *sym)
{
	return -ERANGE;
}

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}

static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}

static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}
#endif /* CONFIG_BPF_JIT */

#define BPF_ANC		BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

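/* Usage sketch: positive k reads from the packet as usual, while
 * negative k selects a special base such as SKF_NET_OFF or SKF_LL_OFF
 * from uapi/linux/filter.h. E.g. fetching the first two bytes of the
 * network header into a local buffer:
 *
 *	u16 buf;
 *	void *ptr = bpf_load_pointer(skb, SKF_NET_OFF, sizeof(buf), &buf);
 *
 *	if (ptr != NULL)
 *		... two bytes available at ptr ...
 */
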
static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

struct bpf_sock_ops_kern {
	struct	sock *sk;
	u32	op;
	union {
		u32 reply;
		u32 replylong[4];
	};
};

#endif /* __LINUX_FILTER_H__ */