xref: /linux-6.15/include/linux/bpf.h (revision cebdb737)
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map have been used during
	 * verification.  When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the map being inserted has the
	 * same properties that the verifier relied on earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);


	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map,
				     bpf_callback_t callback_fn,
				     void *callback_ctx, u64 flags);

	/* BTF name and id of struct allocated by map_alloc */
	const char * const map_btf_name;
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
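
/* Illustrative sketch (not part of the original header): a minimal
 * bpf_map_ops wiring for a hypothetical map type. The my_map_* callback
 * names are made up; real implementations live in kernel/bpf/ (e.g.
 * arraymap.c, hashtab.c) and usually also set .map_btf_name/.map_btf_id:
 *
 *	static int my_map_alloc_check(union bpf_attr *attr)
 *	{
 *		return attr->max_entries ? 0 : -EINVAL;
 *	}
 *
 *	const struct bpf_map_ops my_map_ops = {
 *		.map_alloc_check	= my_map_alloc_check,
 *		.map_alloc		= my_map_alloc,
 *		.map_free		= my_map_free,
 *		.map_get_next_key	= my_map_get_next_key,
 *		.map_lookup_elem	= my_map_lookup_elem,
 *		.map_update_elem	= my_map_update_elem,
 *		.map_delete_elem	= my_map_delete_elem,
 *	};
 */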

struct bpf_map {
	/* The first two cachelines hold read-mostly members, some of which
	 * are also accessed in the fast path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u64 map_extra; /* any per-map-type extra fields */
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	int timer_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 14 bytes hole */

	/* The 3rd and 4th cachelines hold misc members, grouped here to
	 * avoid false sharing, particularly with the refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	atomic64_t writecnt;
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline bool map_value_has_timer(const struct bpf_map *map)
{
	return map->timer_off >= 0;
}

static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	if (unlikely(map_value_has_spin_lock(map)))
		*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
			(struct bpf_spin_lock){};
	if (unlikely(map_value_has_timer(map)))
		*(struct bpf_timer *)(dst + map->timer_off) =
			(struct bpf_timer){};
}

/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;

	if (unlikely(map_value_has_spin_lock(map))) {
		s_off = map->spin_lock_off;
		s_sz = sizeof(struct bpf_spin_lock);
	}
	if (unlikely(map_value_has_timer(map))) {
		t_off = map->timer_off;
		t_sz = sizeof(struct bpf_timer);
	}

	if (unlikely(s_sz || t_sz)) {
		if (s_off < t_off || !s_sz) {
			swap(s_off, t_off);
			swap(s_sz, t_sz);
		}
		memcpy(dst, src, t_off);
		memcpy(dst + t_off + t_sz,
		       src + t_off + t_sz,
		       s_off - t_off - t_sz);
		memcpy(dst + s_off + s_sz,
		       src + s_off + s_sz,
		       map->value_size - s_off - s_sz);
	} else {
		memcpy(dst, src, map->value_size);
	}
}
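
/* Worked example (illustrative, not part of the original header): a value
 * can carry both a bpf_spin_lock and a bpf_timer, which is why the two
 * checks above are independent. For a map value laid out as
 *
 *	[ 0 ..  7] plain data
 *	[ 8 .. 11] struct bpf_spin_lock   (spin_lock_off = 8, size 4 assumed)
 *	[16 .. 31] struct bpf_timer       (timer_off = 16, size 16 assumed)
 *	[32 .. 63] plain data             (value_size = 64)
 *
 * the swap orders the regions so (t_off, t_sz) is the lower one, and the
 * three memcpy() calls then copy [0, 8), [12, 16) and [32, 64), skipping
 * both special fields. The struct sizes are assumptions for this example.
 */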
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_PTR_TO_CTX_OR_NULL,	/* pointer to context or NULL */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_SOCKET_OR_NULL,	/* pointer to bpf_sock (fullsock) or NULL */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_PTR_TO_ALLOC_MEM_OR_NULL,	/* pointer to dynamically allocated memory or NULL */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK_OR_NULL,	/* pointer to stack or NULL */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	__BPF_ARG_TYPE_MAX,
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
	RET_PTR_TO_ALLOC_MEM_OR_NULL,	/* returns a pointer to dynamically allocated memory or NULL */
	RET_PTR_TO_BTF_ID_OR_NULL,	/* returns a pointer to a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
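
/* Illustrative sketch (adapted from kernel/bpf/helpers.c, shown here as an
 * example and not guaranteed to match that file verbatim): the prototype
 * the verifier consults for bpf_map_lookup_elem() ties the return and
 * argument constraints above to the helper's C implementation:
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */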

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_BTF_ID_OR_NULL,
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_MEM_OR_NULL,	 /* reg points to valid memory region or NULL */
	PTR_TO_RDONLY_BUF,	 /* reg points to a readonly buffer */
	PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
	PTR_TO_RDWR_BUF,	 /* reg points to a read/write buffer */
	PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
	PTR_TO_PERCPU_BTF_ID,	 /* reg points to a percpu kernel variable */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	__BPF_REG_TYPE_MAX,
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
	       insn->src_reg == BPF_PSEUDO_FUNC;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
	bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 38

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
	unsigned long		 start;
	unsigned long		 end;
	char			 name[KSYM_NAME_LEN];
	struct list_head	 lnode;
	struct latch_tree_node	 tnode;
	bool			 prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* If not NULL, this is a BPF_PROG_TYPE_EXT program that extends
	 * another BPF program by replacing one of its functions. func.addr
	 * is the address of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
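/* Usage sketch (illustrative, hypothetical dispatcher name): a subsystem
 * defines one dispatcher and swaps programs into it, roughly the pattern
 * the XDP fast path uses in net/core/filter.c, though the exact call
 * sites there may differ:
 *
 *	DEFINE_BPF_DISPATCHER(my_dispatch)
 *
 *	static void my_change_prog(struct bpf_prog *prev,
 *				   struct bpf_prog *prog)
 *	{
 *		bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_dispatch),
 *					   prev, prog);
 *	}
 *
 * The run site then passes BPF_DISPATCHER_FUNC(my_dispatch) as the
 * bpf_func callback, so hot programs are reached via a patched direct
 * call instead of an indirect one.
 */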
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	u32 verified_insns;
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace.  linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo.  It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of the prog array is claimed by the first program
	 * that is going to use this map or by the first program whose FD
	 * is stored in the map, to make sure that all callers and callees
	 * have the same prog type and JITed flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
	} owner;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};
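
/* Illustrative sketch (hypothetical names): a subsystem exposing one of its
 * kernel structs to BPF implementation defines a bpf_struct_ops instance;
 * tcp_congestion_ops is the in-tree user (net/ipv4/bpf_tcp_ca.c), wired
 * roughly like this, though the real file may differ in detail:
 *
 *	struct bpf_struct_ops bpf_my_ops = {
 *		.verifier_ops	= &my_verifier_ops,
 *		.init		= my_init,
 *		.check_member	= my_check_member,
 *		.init_member	= my_init_member,
 *		.reg		= my_reg,
 *		.unreg		= my_unreg,
 *		.name		= "my_ops",
 *	};
 */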

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_progs *tprogs,
				      struct bpf_prog *prog,
				      const struct btf_func_model *model,
				      void *image, void *image_end);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}

#ifdef CONFIG_NET
/* Define it here to avoid the use of a forward declaration */
struct bpf_dummy_ops_state {
	int val;
};

struct bpf_dummy_ops {
	int (*test_1)(struct bpf_dummy_ops_state *cb);
	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
		      char a3, unsigned long a4);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr);
#endif
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 33

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
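
/* Worked example (illustrative): BPF_F_RDONLY_PROG alone yields
 * BPF_MAP_CAN_READ, BPF_F_WRONLY_PROG alone yields BPF_MAP_CAN_WRITE, and
 * neither flag yields both capabilities. bpf_map_flags_access_ok()
 * rejects only the contradictory combination of both flags:
 *
 *	bpf_map_flags_access_ok(BPF_F_RDONLY_PROG);			// true
 *	bpf_map_flags_access_ok(BPF_F_RDONLY_PROG |
 *				BPF_F_WRONLY_PROG);			// false
 */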

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	union {
		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
		u64 bpf_cookie;
	};
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			u64 bpf_cookie,
			struct bpf_prog_array **new_array);

struct bpf_run_ctx {};

struct bpf_cg_run_ctx {
	struct bpf_run_ctx run_ctx;
	const struct bpf_prog_array_item *prog_item;
};

struct bpf_trace_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 bpf_cookie;
};

static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
{
	struct bpf_run_ctx *old_ctx = NULL;

#ifdef CONFIG_BPF_SYSCALL
	old_ctx = current->bpf_ctx;
	current->bpf_ctx = new_ctx;
#endif
	return old_ctx;
}

static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
{
#ifdef CONFIG_BPF_SYSCALL
	current->bpf_ctx = old_ctx;
#endif
}

/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE			(1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN						(1 << 0)

typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);

static __always_inline u32
BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
			    const void *ctx, bpf_prog_run_fn run_prog,
			    u32 *ret_flags)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 ret = 1;
	u32 func_ret;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		func_ret = run_prog(prog, ctx);
		ret &= (func_ret & 1);
		*(ret_flags) |= (func_ret >> 1);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return ret;
}

static __always_inline u32
BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
		      const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_cg_run_ctx run_ctx;
	u32 ret = 1;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	item = &array->items[0];
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.prog_item = item;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
	rcu_read_unlock();
	migrate_enable();
	return ret;
}

static __always_inline u32
BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
		   const void *ctx, bpf_prog_run_fn run_prog)
{
	const struct bpf_prog_array_item *item;
	const struct bpf_prog *prog;
	const struct bpf_prog_array *array;
	struct bpf_run_ctx *old_run_ctx;
	struct bpf_trace_run_ctx run_ctx;
	u32 ret = 1;

	migrate_disable();
	rcu_read_lock();
	array = rcu_dereference(array_rcu);
	if (unlikely(!array))
		goto out;
	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	item = &array->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		run_ctx.bpf_cookie = item->bpf_cookie;
		ret &= run_prog(prog, ctx);
		item++;
	}
	bpf_reset_run_ctx(old_run_ctx);
out:
	rcu_read_unlock();
	migrate_enable();
	return ret;
}

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order
 * bit indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr().
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT values or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		u32 _flags = 0;				\
		bool _cn;				\
		u32 _ret;				\
		_ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \
		_cn = _flags & BPF_RET_SET_CN;		\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})
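
/* Usage sketch (illustrative, hypothetical call site): an egress filter
 * invocation would look roughly like
 *
 *	ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(cgrp->bpf.effective[atype],
 *						    skb,
 *						    __bpf_prog_run_save_cb);
 *
 * so the NET_XMIT_* value flows back into the TCP stack, which can, e.g.,
 * call tcp_enter_cwr() when it sees NET_XMIT_CN.
 */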

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
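
/* Usage sketch (illustrative): syscall-side map writers bracket the
 * bucket-lock-taking operation, roughly as kernel/bpf/syscall.c does,
 * though the exact surrounding code there may differ:
 *
 *	bpf_disable_instrumentation();
 *	rcu_read_lock();
 *	err = map->ops->map_update_elem(map, key, value, flags);
 *	rcu_read_unlock();
 *	bpf_enable_instrumentation();
 */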

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
bool bpf_map_write_active(const struct bpf_map *map);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int  generic_map_lookup_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_update_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_delete_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags);
#else
static inline void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
		     int node)
{
	return kmalloc_node(size, flags, node);
}

static inline void *
bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	return kzalloc(size, flags);
}

static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
		     gfp_t flags)
{
	return __alloc_percpu_gfp(size, align, flags);
}
#endif

extern int sysctl_unprivileged_bpf_disabled;

static inline bool bpf_allow_ptr_leaks(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_uninit_stack(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_ptr_to_map_access(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
	return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);
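
/* Usage sketch (illustrative, hypothetical ops/variable names): the common
 * link creation pattern used by attach paths in kernel/bpf/syscall.c,
 * with details varying per link type:
 *
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_ITER, &my_link_ops, prog);
 *	err = bpf_link_prime(&link->link, &link_primer);
 *	if (err) {
 *		kfree(link);
 *		return err;
 *	}
 *	// ... type-specific setup; on failure, bpf_link_cleanup(&link_primer)
 *	return bpf_link_settle(&link_primer);	// returns the link FD
 */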

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...)			\
	extern int bpf_iter_ ## target(args);			\
	int __init bpf_iter_ ## target(args) { return 0; }
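
/* Usage sketch: the task iterator declares its ctx arguments this way,
 * matching kernel/bpf/task_iter.c in spirit, if not verbatim:
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 *
 * The stub only exists so BTF records the function signature; BPF
 * programs attach against the bpf_iter_<target> name.
 */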

struct bpf_iter_aux_info {
	struct bpf_map *map;
};

typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
					union bpf_iter_link_info *linfo,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
					struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
					 struct bpf_link_info *info);
typedef const struct bpf_func_proto *
(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
			     const struct bpf_prog *prog);

enum bpf_iter_feature {
	BPF_ITER_RESCHED	= BIT(0),
};

#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
	const char *target;
	bpf_iter_attach_target_t attach_target;
	bpf_iter_detach_target_t detach_target;
	bpf_iter_show_fdinfo_t show_fdinfo;
	bpf_iter_fill_link_info_t fill_link_info;
	bpf_iter_get_func_proto_t get_func_proto;
	u32 ctx_arg_info_size;
	u32 feature;
	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
	const struct bpf_iter_seq_info *seq_info;
};
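
/* Illustrative sketch (hypothetical target and helper names): registering
 * an iterator target ties the attach/detach callbacks and the seq_file
 * glue together:
 *
 *	static const struct bpf_iter_reg my_iter_reg = {
 *		.target			= "my_target",
 *		.attach_target		= my_iter_attach,
 *		.detach_target		= my_iter_detach,
 *		.ctx_arg_info_size	= 1,
 *		.ctx_arg_info		= {
 *			{ offsetof(struct bpf_iter__my_target, obj),
 *			  PTR_TO_BTF_ID_OR_NULL },
 *		},
 *		.seq_info		= &my_iter_seq_info,
 *	};
 *
 * followed by bpf_iter_reg_target(&my_iter_reg) at init time.
 */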
1540 
1541 struct bpf_iter_meta {
1542 	__bpf_md_ptr(struct seq_file *, seq);
1543 	u64 session_id;
1544 	u64 seq_num;
1545 };
1546 
1547 struct bpf_iter__bpf_map_elem {
1548 	__bpf_md_ptr(struct bpf_iter_meta *, meta);
1549 	__bpf_md_ptr(struct bpf_map *, map);
1550 	__bpf_md_ptr(void *, key);
1551 	__bpf_md_ptr(void *, value);
1552 };
1553 
1554 int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
1555 void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
1556 bool bpf_iter_prog_supported(struct bpf_prog *prog);
1557 const struct bpf_func_proto *
1558 bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
1559 int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
1560 int bpf_iter_new_fd(struct bpf_link *link);
1561 bool bpf_link_is_iter(struct bpf_link *link);
1562 struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
1563 int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
1564 void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
1565 			      struct seq_file *seq);
1566 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
1567 				struct bpf_link_info *info);
1568 
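/* Sketch (illustrative, loosely modelled on the in-tree task iterator; a
 * real target also fills in ctx_arg_info and a bpf_iter_seq_info): targets
 * register once at init time via bpf_iter_reg_target().
 *
 *	static const struct bpf_iter_reg task_reg_info = {
 *		.target		= "task",
 *		.feature	= BPF_ITER_RESCHED,
 *		.seq_info	= &task_seq_info,
 *	};
 *
 *	static int __init task_iter_init(void)
 *	{
 *		return bpf_iter_reg_target(&task_reg_info);
 *	}
 */
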
1569 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
1570 				   struct bpf_func_state *caller,
1571 				   struct bpf_func_state *callee);
1572 
1573 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
1574 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
1575 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
1576 			   u64 flags);
1577 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
1578 			    u64 flags);
1579 
1580 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
1581 
1582 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
1583 				 void *key, void *value, u64 map_flags);
1584 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
1585 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
1586 				void *key, void *value, u64 map_flags);
1587 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
1588 
1589 int bpf_get_file_flag(int flags);
1590 int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
1591 			     size_t actual_size);
1592 
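/* Sketch (assumed syscall-handler usage, mirroring kernel/bpf/syscall.c):
 * reject userspace structs whose tail beyond the kernel's known size is
 * non-zero, so that requests for unsupported new features fail loudly.
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
 *	if (err)
 *		return err;
 */
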
1593 /* memcpy that is used with 8-byte aligned pointers and a size that is a
1594  * multiple of 8; forced to use 'long' reads/writes to try to copy long
1595  * counters atomically. Best-effort only: no barriers here, since it _will_
1596  * race with concurrent updates from BPF programs. Called from the bpf
1597  * syscall, mostly with size 8 or 16 bytes, so ask the compiler to inline it.
1598  */
1599 static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
1600 {
1601 	const long *lsrc = src;
1602 	long *ldst = dst;
1603 
1604 	size /= sizeof(long);
1605 	while (size--)
1606 		*ldst++ = *lsrc++;
1607 }
1608 
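/* Sketch (assumed caller; "elem->counter" is illustrative): snapshotting an
 * 8-byte counter that BPF programs may be updating concurrently. Tear-free
 * per long-sized word, but with no ordering guarantees whatsoever.
 *
 *	u64 snapshot;
 *
 *	bpf_long_memcpy(&snapshot, &elem->counter, sizeof(snapshot));
 */
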
1609 /* verify correctness of an eBPF program; may replace *fp while rewriting it */
1610 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
1611 
1612 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1613 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
1614 #endif
1615 
1616 struct btf *bpf_get_btf_vmlinux(void);
1617 
1618 /* Map specifics: devmap and cpumap XDP-redirect plumbing */
1619 struct xdp_buff;
1620 struct sk_buff;
1621 struct bpf_dtab_netdev;
1622 struct bpf_cpu_map_entry;
1623 
1624 void __dev_flush(void);
1625 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
1626 		    struct net_device *dev_rx);
1627 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
1628 		    struct net_device *dev_rx);
1629 int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
1630 			  struct bpf_map *map, bool exclude_ingress);
1631 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
1632 			     struct bpf_prog *xdp_prog);
1633 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
1634 			   struct bpf_prog *xdp_prog, struct bpf_map *map,
1635 			   bool exclude_ingress);
1636 
1637 void __cpu_map_flush(void);
1638 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
1639 		    struct net_device *dev_rx);
1640 int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
1641 			     struct sk_buff *skb);
1642 
1643 /* Return the map's NUMA node as specified by userspace, or NUMA_NO_NODE */
1644 static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
1645 {
1646 	return (attr->map_flags & BPF_F_NUMA_NODE) ?
1647 		attr->numa_node : NUMA_NO_NODE;
1648 }
1649 
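/* Sketch (assumed map_alloc path; "array_size" is illustrative): allocators
 * feed the result straight into NUMA-aware allocation so that
 * BPF_F_NUMA_NODE placement is honoured.
 *
 *	int numa_node = bpf_map_attr_numa_node(attr);
 *
 *	array = bpf_map_area_alloc(array_size, numa_node);
 */
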
1650 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
1651 int array_map_alloc_check(union bpf_attr *attr);
1652 
1653 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1654 			  union bpf_attr __user *uattr);
1655 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
1656 			  union bpf_attr __user *uattr);
1657 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1658 			      const union bpf_attr *kattr,
1659 			      union bpf_attr __user *uattr);
1660 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1661 				     const union bpf_attr *kattr,
1662 				     union bpf_attr __user *uattr);
1663 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
1664 			     const union bpf_attr *kattr,
1665 			     union bpf_attr __user *uattr);
1666 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
1667 				const union bpf_attr *kattr,
1668 				union bpf_attr __user *uattr);
1669 bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner);
1670 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
1671 		    const struct bpf_prog *prog,
1672 		    struct bpf_insn_access_aux *info);
1673 
1674 static inline bool bpf_tracing_ctx_access(int off, int size,
1675 					  enum bpf_access_type type)
1676 {
1677 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
1678 		return false;
1679 	if (type != BPF_READ)
1680 		return false;
1681 	if (off % size != 0)
1682 		return false;
1683 	return true;
1684 }
1685 
1686 static inline bool bpf_tracing_btf_ctx_access(int off, int size,
1687 					      enum bpf_access_type type,
1688 					      const struct bpf_prog *prog,
1689 					      struct bpf_insn_access_aux *info)
1690 {
1691 	if (!bpf_tracing_ctx_access(off, size, type))
1692 		return false;
1693 	return btf_ctx_access(off, size, type, prog, info);
1694 }
1695 
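/* Sketch (assumed verifier_ops wiring): for tracing-style programs the ctx
 * is an array of up to MAX_BPF_FUNC_ARGS u64 arguments, readable only, with
 * naturally aligned accesses; a program type can implement its
 * is_valid_access callback as a thin wrapper.
 *
 *	static bool my_is_valid_access(int off, int size,
 *				       enum bpf_access_type type,
 *				       const struct bpf_prog *prog,
 *				       struct bpf_insn_access_aux *info)
 *	{
 *		return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
 *	}
 */
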
1696 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
1697 		      const struct btf_type *t, int off, int size,
1698 		      enum bpf_access_type atype,
1699 		      u32 *next_btf_id);
1700 bool btf_struct_ids_match(struct bpf_verifier_log *log,
1701 			  const struct btf *btf, u32 id, int off,
1702 			  const struct btf *need_btf, u32 need_type_id);
1703 
1704 int btf_distill_func_proto(struct bpf_verifier_log *log,
1705 			   struct btf *btf,
1706 			   const struct btf_type *func_proto,
1707 			   const char *func_name,
1708 			   struct btf_func_model *m);
1709 
1710 struct bpf_reg_state;
1711 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
1712 				struct bpf_reg_state *regs);
1713 int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
1714 			      const struct btf *btf, u32 func_id,
1715 			      struct bpf_reg_state *regs);
1716 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
1717 			  struct bpf_reg_state *reg);
1718 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
1719 			 struct btf *btf, const struct btf_type *t);
1720 
1721 struct bpf_prog *bpf_prog_by_id(u32 id);
1722 struct bpf_link *bpf_link_by_id(u32 id);
1723 
1724 const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
1725 void bpf_task_storage_free(struct task_struct *task);
1726 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
1727 const struct btf_func_model *
1728 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
1729 			 const struct bpf_insn *insn);
1730 struct bpf_core_ctx {
1731 	struct bpf_verifier_log *log;
1732 	const struct btf *btf;
1733 };
1734 
1735 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
1736 		   int relo_idx, void *insn);
1737 
1738 #else /* !CONFIG_BPF_SYSCALL */
1739 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
1740 {
1741 	return ERR_PTR(-EOPNOTSUPP);
1742 }
1743 
1744 static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
1745 						     enum bpf_prog_type type,
1746 						     bool attach_drv)
1747 {
1748 	return ERR_PTR(-EOPNOTSUPP);
1749 }
1750 
1751 static inline void bpf_prog_add(struct bpf_prog *prog, int i)
1752 {
1753 }
1754 
1755 static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
1756 {
1757 }
1758 
1759 static inline void bpf_prog_put(struct bpf_prog *prog)
1760 {
1761 }
1762 
1763 static inline void bpf_prog_inc(struct bpf_prog *prog)
1764 {
1765 }
1766 
1767 static inline struct bpf_prog *__must_check
1768 bpf_prog_inc_not_zero(struct bpf_prog *prog)
1769 {
1770 	return ERR_PTR(-EOPNOTSUPP);
1771 }
1772 
1773 static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
1774 				 const struct bpf_link_ops *ops,
1775 				 struct bpf_prog *prog)
1776 {
1777 }
1778 
1779 static inline int bpf_link_prime(struct bpf_link *link,
1780 				 struct bpf_link_primer *primer)
1781 {
1782 	return -EOPNOTSUPP;
1783 }
1784 
1785 static inline int bpf_link_settle(struct bpf_link_primer *primer)
1786 {
1787 	return -EOPNOTSUPP;
1788 }
1789 
1790 static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
1791 {
1792 }
1793 
1794 static inline void bpf_link_inc(struct bpf_link *link)
1795 {
1796 }
1797 
1798 static inline void bpf_link_put(struct bpf_link *link)
1799 {
1800 }
1801 
1802 static inline int bpf_obj_get_user(const char __user *pathname, int flags)
1803 {
1804 	return -EOPNOTSUPP;
1805 }
1806 
1807 static inline bool dev_map_can_have_prog(struct bpf_map *map)
1808 {
1809 	return false;
1810 }
1811 
1812 static inline void __dev_flush(void)
1813 {
1814 }
1815 
1816 struct xdp_buff;
1817 struct bpf_dtab_netdev;
1818 struct bpf_cpu_map_entry;
1819 
1820 static inline
1821 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
1822 		    struct net_device *dev_rx)
1823 {
1824 	return 0;
1825 }
1826 
1827 static inline
1828 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
1829 		    struct net_device *dev_rx)
1830 {
1831 	return 0;
1832 }
1833 
1834 static inline
1835 int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
1836 			  struct bpf_map *map, bool exclude_ingress)
1837 {
1838 	return 0;
1839 }
1840 
1841 struct sk_buff;
1842 
1843 static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
1844 					   struct sk_buff *skb,
1845 					   struct bpf_prog *xdp_prog)
1846 {
1847 	return 0;
1848 }
1849 
1850 static inline
1851 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
1852 			   struct bpf_prog *xdp_prog, struct bpf_map *map,
1853 			   bool exclude_ingress)
1854 {
1855 	return 0;
1856 }
1857 
1858 static inline void __cpu_map_flush(void)
1859 {
1860 }
1861 
1862 static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
1863 				  struct xdp_buff *xdp,
1864 				  struct net_device *dev_rx)
1865 {
1866 	return 0;
1867 }
1868 
1869 static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
1870 					   struct sk_buff *skb)
1871 {
1872 	return -EOPNOTSUPP;
1873 }
1874 
1875 static inline bool cpu_map_prog_allowed(struct bpf_map *map)
1876 {
1877 	return false;
1878 }
1879 
1880 static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
1881 				enum bpf_prog_type type)
1882 {
1883 	return ERR_PTR(-EOPNOTSUPP);
1884 }
1885 
1886 static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
1887 					const union bpf_attr *kattr,
1888 					union bpf_attr __user *uattr)
1889 {
1890 	return -ENOTSUPP;
1891 }
1892 
1893 static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
1894 					const union bpf_attr *kattr,
1895 					union bpf_attr __user *uattr)
1896 {
1897 	return -ENOTSUPP;
1898 }
1899 
1900 static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1901 					    const union bpf_attr *kattr,
1902 					    union bpf_attr __user *uattr)
1903 {
1904 	return -ENOTSUPP;
1905 }
1906 
1907 static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1908 						   const union bpf_attr *kattr,
1909 						   union bpf_attr __user *uattr)
1910 {
1911 	return -ENOTSUPP;
1912 }
1913 
1914 static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
1915 					      const union bpf_attr *kattr,
1916 					      union bpf_attr __user *uattr)
1917 {
1918 	return -ENOTSUPP;
1919 }
1920 
1921 static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id,
1922 						  struct module *owner)
1923 {
1924 	return false;
1925 }
1926 
1927 static inline void bpf_map_put(struct bpf_map *map)
1928 {
1929 }
1930 
1931 static inline struct bpf_prog *bpf_prog_by_id(u32 id)
1932 {
1933 	return ERR_PTR(-ENOTSUPP);
1934 }
1935 
1936 static inline const struct bpf_func_proto *
1937 bpf_base_func_proto(enum bpf_func_id func_id)
1938 {
1939 	return NULL;
1940 }
1941 
1942 static inline void bpf_task_storage_free(struct task_struct *task)
1943 {
1944 }
1945 
1946 static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
1947 {
1948 	return false;
1949 }
1950 
1951 static inline const struct btf_func_model *
1952 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
1953 			 const struct bpf_insn *insn)
1954 {
1955 	return NULL;
1956 }
1957 #endif /* CONFIG_BPF_SYSCALL */
1958 
1959 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
1960 			  struct btf_mod_pair *used_btfs, u32 len);
1961 
1962 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
1963 						 enum bpf_prog_type type)
1964 {
1965 	return bpf_prog_get_type_dev(ufd, type, false);
1966 }
1967 
1968 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
1969 			  struct bpf_map **used_maps, u32 len);
1970 
1971 bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
1972 
1973 int bpf_prog_offload_compile(struct bpf_prog *prog);
1974 void bpf_prog_offload_destroy(struct bpf_prog *prog);
1975 int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
1976 			       struct bpf_prog *prog);
1977 
1978 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
1979 
1980 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
1981 int bpf_map_offload_update_elem(struct bpf_map *map,
1982 				void *key, void *value, u64 flags);
1983 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
1984 int bpf_map_offload_get_next_key(struct bpf_map *map,
1985 				 void *key, void *next_key);
1986 
1987 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
1988 
1989 struct bpf_offload_dev *
1990 bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
1991 void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
1992 void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
1993 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
1994 				    struct net_device *netdev);
1995 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
1996 				       struct net_device *netdev);
1997 bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
1998 
1999 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
2000 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
2001 
2002 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
2003 {
2004 	return aux->offload_requested;
2005 }
2006 
2007 static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
2008 {
2009 	return unlikely(map->ops == &bpf_map_offload_ops);
2010 }
2011 
2012 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
2013 void bpf_map_offload_map_free(struct bpf_map *map);
2014 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2015 			      const union bpf_attr *kattr,
2016 			      union bpf_attr __user *uattr);
2017 
2018 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
2019 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
2020 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
2021 void sock_map_unhash(struct sock *sk);
2022 void sock_map_close(struct sock *sk, long timeout);
2023 #else
2024 static inline int bpf_prog_offload_init(struct bpf_prog *prog,
2025 					union bpf_attr *attr)
2026 {
2027 	return -EOPNOTSUPP;
2028 }
2029 
2030 static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
2031 {
2032 	return false;
2033 }
2034 
2035 static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
2036 {
2037 	return false;
2038 }
2039 
2040 static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
2041 {
2042 	return ERR_PTR(-EOPNOTSUPP);
2043 }
2044 
2045 static inline void bpf_map_offload_map_free(struct bpf_map *map)
2046 {
2047 }
2048 
2049 static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2050 					    const union bpf_attr *kattr,
2051 					    union bpf_attr __user *uattr)
2052 {
2053 	return -ENOTSUPP;
2054 }
2055 
2056 #ifdef CONFIG_BPF_SYSCALL
2057 static inline int sock_map_get_from_fd(const union bpf_attr *attr,
2058 				       struct bpf_prog *prog)
2059 {
2060 	return -EINVAL;
2061 }
2062 
2063 static inline int sock_map_prog_detach(const union bpf_attr *attr,
2064 				       enum bpf_prog_type ptype)
2065 {
2066 	return -EOPNOTSUPP;
2067 }
2068 
2069 static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
2070 					   u64 flags)
2071 {
2072 	return -EOPNOTSUPP;
2073 }
2074 #endif /* CONFIG_BPF_SYSCALL */
2075 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
2076 
2077 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
2078 void bpf_sk_reuseport_detach(struct sock *sk);
2079 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
2080 				       void *value);
2081 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
2082 				       void *value, u64 map_flags);
2083 #else
2084 static inline void bpf_sk_reuseport_detach(struct sock *sk)
2085 {
2086 }
2087 
2088 #ifdef CONFIG_BPF_SYSCALL
2089 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
2090 						     void *key, void *value)
2091 {
2092 	return -EOPNOTSUPP;
2093 }
2094 
2095 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
2096 						     void *key, void *value,
2097 						     u64 map_flags)
2098 {
2099 	return -EOPNOTSUPP;
2100 }
2101 #endif /* CONFIG_BPF_SYSCALL */
2102 #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
2103 
2104 /* verifier prototypes for helper functions called from eBPF programs */
2105 extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
2106 extern const struct bpf_func_proto bpf_map_update_elem_proto;
2107 extern const struct bpf_func_proto bpf_map_delete_elem_proto;
2108 extern const struct bpf_func_proto bpf_map_push_elem_proto;
2109 extern const struct bpf_func_proto bpf_map_pop_elem_proto;
2110 extern const struct bpf_func_proto bpf_map_peek_elem_proto;
2111 
2112 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
2113 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
2114 extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
2115 extern const struct bpf_func_proto bpf_tail_call_proto;
2116 extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
2117 extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
2118 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
2119 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
2120 extern const struct bpf_func_proto bpf_get_current_comm_proto;
2121 extern const struct bpf_func_proto bpf_get_stackid_proto;
2122 extern const struct bpf_func_proto bpf_get_stack_proto;
2123 extern const struct bpf_func_proto bpf_get_task_stack_proto;
2124 extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
2125 extern const struct bpf_func_proto bpf_get_stack_proto_pe;
2126 extern const struct bpf_func_proto bpf_sock_map_update_proto;
2127 extern const struct bpf_func_proto bpf_sock_hash_update_proto;
2128 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
2129 extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
2130 extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
2131 extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
2132 extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
2133 extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
2134 extern const struct bpf_func_proto bpf_spin_lock_proto;
2135 extern const struct bpf_func_proto bpf_spin_unlock_proto;
2136 extern const struct bpf_func_proto bpf_get_local_storage_proto;
2137 extern const struct bpf_func_proto bpf_strtol_proto;
2138 extern const struct bpf_func_proto bpf_strtoul_proto;
2139 extern const struct bpf_func_proto bpf_tcp_sock_proto;
2140 extern const struct bpf_func_proto bpf_jiffies64_proto;
2141 extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
2142 extern const struct bpf_func_proto bpf_event_output_data_proto;
2143 extern const struct bpf_func_proto bpf_ringbuf_output_proto;
2144 extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
2145 extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
2146 extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
2147 extern const struct bpf_func_proto bpf_ringbuf_query_proto;
2148 extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
2149 extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
2150 extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
2151 extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
2152 extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
2153 extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
2154 extern const struct bpf_func_proto bpf_copy_from_user_proto;
2155 extern const struct bpf_func_proto bpf_snprintf_btf_proto;
2156 extern const struct bpf_func_proto bpf_snprintf_proto;
2157 extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
2158 extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
2159 extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
2160 extern const struct bpf_func_proto bpf_sock_from_file_proto;
2161 extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
2162 extern const struct bpf_func_proto bpf_task_storage_get_proto;
2163 extern const struct bpf_func_proto bpf_task_storage_delete_proto;
2164 extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
2165 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
2166 extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
2167 extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
2168 extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
2169 extern const struct bpf_func_proto bpf_find_vma_proto;
2170 extern const struct bpf_func_proto bpf_loop_proto;
2171 extern const struct bpf_func_proto bpf_strncmp_proto;
2172 
2173 const struct bpf_func_proto *tracing_prog_func_proto(
2174 	enum bpf_func_id func_id, const struct bpf_prog *prog);
2175 
2176 /* Shared helpers among cBPF and eBPF. */
2177 void bpf_user_rnd_init_once(void);
2178 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
2179 u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
2180 
2181 #if defined(CONFIG_NET)
2182 bool bpf_sock_common_is_valid_access(int off, int size,
2183 				     enum bpf_access_type type,
2184 				     struct bpf_insn_access_aux *info);
2185 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2186 			      struct bpf_insn_access_aux *info);
2187 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
2188 				const struct bpf_insn *si,
2189 				struct bpf_insn *insn_buf,
2190 				struct bpf_prog *prog,
2191 				u32 *target_size);
2192 #else
2193 static inline bool bpf_sock_common_is_valid_access(int off, int size,
2194 						   enum bpf_access_type type,
2195 						   struct bpf_insn_access_aux *info)
2196 {
2197 	return false;
2198 }
2199 static inline bool bpf_sock_is_valid_access(int off, int size,
2200 					    enum bpf_access_type type,
2201 					    struct bpf_insn_access_aux *info)
2202 {
2203 	return false;
2204 }
2205 static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
2206 					      const struct bpf_insn *si,
2207 					      struct bpf_insn *insn_buf,
2208 					      struct bpf_prog *prog,
2209 					      u32 *target_size)
2210 {
2211 	return 0;
2212 }
2213 #endif
2214 
2215 #ifdef CONFIG_INET
2216 struct sk_reuseport_kern {
2217 	struct sk_buff *skb;
2218 	struct sock *sk;
2219 	struct sock *selected_sk;
2220 	struct sock *migrating_sk;
2221 	void *data_end;
2222 	u32 hash;
2223 	u32 reuseport_id;
2224 	bool bind_inany;
2225 };
2226 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2227 				  struct bpf_insn_access_aux *info);
2228 
2229 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
2230 				    const struct bpf_insn *si,
2231 				    struct bpf_insn *insn_buf,
2232 				    struct bpf_prog *prog,
2233 				    u32 *target_size);
2234 
2235 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2236 				  struct bpf_insn_access_aux *info);
2237 
2238 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
2239 				    const struct bpf_insn *si,
2240 				    struct bpf_insn *insn_buf,
2241 				    struct bpf_prog *prog,
2242 				    u32 *target_size);
2243 #else
2244 static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
2245 						enum bpf_access_type type,
2246 						struct bpf_insn_access_aux *info)
2247 {
2248 	return false;
2249 }
2250 
2251 static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
2252 						  const struct bpf_insn *si,
2253 						  struct bpf_insn *insn_buf,
2254 						  struct bpf_prog *prog,
2255 						  u32 *target_size)
2256 {
2257 	return 0;
2258 }
2259 static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
2260 						enum bpf_access_type type,
2261 						struct bpf_insn_access_aux *info)
2262 {
2263 	return false;
2264 }
2265 
2266 static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
2267 						  const struct bpf_insn *si,
2268 						  struct bpf_insn *insn_buf,
2269 						  struct bpf_prog *prog,
2270 						  u32 *target_size)
2271 {
2272 	return 0;
2273 }
2274 #endif /* CONFIG_INET */
2275 
2276 enum bpf_text_poke_type {
2277 	BPF_MOD_CALL,
2278 	BPF_MOD_JUMP,
2279 };
2280 
2281 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2282 		       void *addr1, void *addr2);
2283 
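/* Sketch (assumed trampoline-update path): atomically repointing a BPF call
 * site; a NULL addr1 means the site currently holds a nop, and a NULL addr2
 * means poke it back to a nop.
 *
 *	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_tramp, new_tramp);
 */
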
2284 struct btf_id_set;
2285 bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
2286 
2287 #define MAX_BPRINTF_VARARGS		12
2288 
2289 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
2290 			u32 **bin_buf, u32 num_args);
2291 void bpf_bprintf_cleanup(void);
2292 
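/* Sketch (mirroring how the printing helpers use this API): prepare
 * translates the BPF-side varargs into a per-CPU binary-args buffer,
 * bstr_printf() consumes it, and cleanup must run on the same path to
 * release the buffer.
 *
 *	u32 *bin_args;
 *	int err;
 *
 *	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, &bin_args, num_args);
 *	if (err < 0)
 *		return err;
 *	err = bstr_printf(buf, sizeof(buf), fmt, bin_args);
 *	bpf_bprintf_cleanup();
 */
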
2293 #endif /* _LINUX_BPF_H */
2294