/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
};
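
/* For illustration, a sketch of how a concrete map type wires these
 * callbacks up (abbreviated from the array map in kernel/bpf/arraymap.c;
 * treat the exact callback set as an approximation, not the full list):
 *
 *	const struct bpf_map_ops array_map_ops = {
 *		.map_alloc_check	= array_map_alloc_check,
 *		.map_alloc		= array_map_alloc,
 *		.map_free		= array_map_free,
 *		.map_get_next_key	= array_map_get_next_key,
 *		.map_lookup_elem	= array_map_lookup_elem,
 *		.map_update_elem	= array_map_update_elem,
 *		.map_delete_elem	= array_map_delete_elem,
 *		.map_mmap		= array_map_mmap,
 *	};
 */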

struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines hold read-mostly members, some of which
	 * are also accessed in the fast path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22-byte hole */

	/* The 3rd and 4th cachelines hold misc members, kept apart from the
	 * first two to avoid false sharing, particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints are used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints are used to prototype bpf_memcmp() and other
	 * functions that access data on the eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory that does not need to be
				 * initialized; the helper function must fill
				 * all bytes or clear them on error.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_PTR_TO_CTX_OR_NULL,	/* pointer to context or NULL */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	int *btf_id; /* BTF ids of arguments */
};
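
/* Example (a sketch): helpers describe themselves to the verifier with a
 * bpf_func_proto built from the enums above; the bpf_map_lookup_elem()
 * helper in kernel/bpf/helpers.c looks roughly like:
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */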

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	PTR_TO_BTF_ID,		 /* reg points to kernel struct */
	PTR_TO_BTF_ID_OR_NULL,	 /* reg points to kernel struct or NULL */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		u32 btf_id;
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 40

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
int arch_prepare_bpf_trampoline(void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
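
/* Conceptually, a trampoline built with
 * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME behaves like the
 * C sketch below. This is illustration only; the real body is emitted by
 * the arch JIT in arch_prepare_bpf_trampoline():
 *
 *	u64 start;
 *
 *	for each prog in tprogs[BPF_TRAMP_FENTRY]:
 *		start = __bpf_prog_enter();
 *		prog->bpf_func(args);
 *		__bpf_prog_exit(prog, start);
 *	ret = orig_call(args);
 *	for each prog in tprogs[BPF_TRAMP_FEXIT]:
 *		start = __bpf_prog_enter();
 *		prog->bpf_func(args);	// fexit progs also see ret
 *		__bpf_prog_exit(prog, start);
 *	return ret;	// to the parent of orig_call (frame is skipped)
 */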

struct bpf_ksym {
	unsigned long		 start;
	unsigned long		 end;
	char			 name[KSYM_NAME_LEN];
	struct list_head	 lnode;
	struct latch_tree_node	 tnode;
	bool			 prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	void *image;
	u64 selector;
	struct bpf_ksym ksym;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline unsigned int bpf_dispatcher_##name##_func(		\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
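
/* Usage sketch: XDP defines its dispatcher in net/core/filter.c with
 * DEFINE_BPF_DISPATCHER(xdp) and re-patches the trampoline whenever the
 * installed program changes, roughly:
 *
 *	void bpf_prog_change_xdp(struct bpf_prog *prev_prog,
 *				 struct bpf_prog *prog)
 *	{
 *		bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp),
 *					   prev_prog, prog);
 *	}
 */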
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	return NULL;
}
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *ip;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool ip_stable;
	u8 adj_off;
	u16 reason;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
};

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct bpf_prog *linked_prog;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	enum bpf_tramp_prog_type trampoline_prog_type;
	struct bpf_trampoline *trampoline;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace.  linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo.  It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	enum bpf_prog_type type;
	bool jited;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_struct_ops_value;
struct btf_type;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};
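
/* Example (a sketch): TCP congestion control is exposed through this
 * mechanism in net/ipv4/bpf_tcp_ca.c, roughly (callback names follow the
 * in-tree file but details may differ by version):
 *
 *	struct bpf_struct_ops bpf_tcp_congestion_ops = {
 *		.verifier_ops	= &bpf_tcp_ca_verifier_ops,
 *		.init		= bpf_tcp_ca_init,
 *		.check_member	= bpf_tcp_ca_check_member,
 *		.init_member	= bpf_tcp_ca_init_member,
 *		.reg		= bpf_tcp_ca_reg,
 *		.unreg		= bpf_tcp_ca_unreg,
 *		.name		= "tcp_congestion_ops",
 *	};
 */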

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const char *kernel_type_name(u32 btf_type_id);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_read_lock().
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);
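
/* Update sketch following the rules above: build a replacement array with
 * bpf_prog_array_copy(), publish it with xchg() and free the old copy.
 * 'owner' and its 'progs' field are illustrative names:
 *
 *	struct bpf_prog_array *new_array;
 *	int err;
 *
 *	err = bpf_prog_array_copy(old_array, exclude_prog, include_prog,
 *				  &new_array);
 *	if (err < 0)
 *		return err;
 *	old_array = xchg(&owner->progs, new_array);
 *	bpf_prog_array_free(old_array);
 */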

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		migrate_enable();			\
		_ret;					\
	 })

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order bit
 * indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			ret = func(_prog, ctx);		\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		migrate_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 *
 * Use the preemption safe inc/dec variants on RT because migrate disable
 * is preemptible on RT and preemption in the middle of the RMW operation
 * might lead to inconsistent state. Use the raw variants for non RT
 * kernels as migrate_disable() maps to preempt_disable() so the slightly
 * more expensive save operation can be avoided.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_dec(bpf_prog_active);
	else
		__this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
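
/* Usage sketch (modeled on the map update path in kernel/bpf/syscall.c):
 * wrap map mutations that instrumentation-attached programs could
 * otherwise re-enter while a bucket lock is held:
 *
 *	bpf_disable_instrumentation();
 *	rcu_read_lock();
 *	err = map->ops->map_update_elem(map, key, value, flags);
 *	rcu_read_unlock();
 *	bpf_enable_instrumentation();
 */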

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
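
/* The x-macro include above stamps out declarations for every entry in
 * bpf_types.h. For example, an entry such as
 *
 *	BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter,
 *		      struct __sk_buff, struct sk_buff)
 *
 * expands to
 *
 *	extern const struct bpf_prog_ops sk_filter_prog_ops;
 *	extern const struct bpf_verifier_ops sk_filter_verifier_ops;
 */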

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int  generic_map_lookup_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_update_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_delete_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);

extern int sysctl_unprivileged_bpf_disabled;

static inline bool bpf_allow_ptr_leaks(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
	return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...)			\
	extern int bpf_iter_ ## target(args);			\
	int __init bpf_iter_ ## target(args) { return 0; }

typedef int (*bpf_iter_init_seq_priv_t)(void *private_data);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);

#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
	const char *target;
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
	u32 ctx_arg_info_size;
	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
};
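
/* Registration sketch: an iterator target fills in a bpf_iter_reg and
 * registers it once at boot. Names follow the task iterator in
 * kernel/bpf/task_iter.c, but treat the details as illustrative:
 *
 *	static const struct bpf_iter_reg task_reg_info = {
 *		.target		= "task",
 *		.seq_ops	= &task_seq_ops,
 *		.seq_priv_size	= sizeof(struct bpf_iter_seq_task_info),
 *		.ctx_arg_info_size = 1,
 *		.ctx_arg_info	= {
 *			{ offsetof(struct bpf_iter__task, task),
 *			  PTR_TO_BTF_ID_OR_NULL },
 *		},
 *	};
 *
 *	return bpf_iter_reg_target(&task_reg_info);
 */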

struct bpf_iter_meta {
	__bpf_md_ptr(struct seq_file *, seq);
	u64 session_id;
	u64 seq_num;
};

int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, a size that is a
 * multiple of 8, and is forced to use 'long' reads/writes to try to
 * atomically copy long counters.
 * Best-effort only.  No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
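
/* Usage sketch: per-cpu map copies in the syscall path rely on it, e.g.
 * (modeled on bpf_percpu_array_copy() in kernel/bpf/arraymap.c):
 *
 *	u32 size = round_up(map->value_size, 8), off = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 */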

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return the map's NUMA node as specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
int btf_resolve_helper_id(struct bpf_verifier_log *log,
			  const struct bpf_func_proto *fn, int);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
			     struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);

const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
							    u32 key)
{
	return NULL;
}

static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
				enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	return NULL;
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif /* CONFIG_BPF_STREAM_PARSER */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;

const struct bpf_func_proto *bpf_tracing_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

#endif /* _LINUX_BPF_H */