/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;

typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};
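
/* Illustrative sketch (not taken from this file): a seq_file-based iterator
 * target typically wires its pieces into bpf_iter_seq_info like this, where
 * foo_seq_ops, foo_seq_init_priv, foo_seq_fini_priv and struct foo_iter_priv
 * are hypothetical names:
 *
 *	static const struct bpf_iter_seq_info foo_seq_info = {
 *		.seq_ops		= &foo_seq_ops,
 *		.init_seq_private	= foo_seq_init_priv,
 *		.fini_seq_private	= foo_seq_fini_priv,
 *		.seq_priv_size		= sizeof(struct foo_iter_priv),
 *	};
 */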

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map are used during verification.
	 * When inserting an inner map at runtime, map_meta_equal has to
	 * ensure the map being inserted has the same properties that the
	 * verifier relied on earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn,
				     void *callback_ctx, u64 flags);

	/* BTF name and id of struct allocated by map_alloc */
	const char * const map_btf_name;
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};

struct bpf_map {
	/* The first two cachelines contain read-mostly members, some of
	 * which are also accessed on the fast path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct mem_cgroup *memcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cachelines hold misc members, placed here to
	 * avoid false sharing, particularly with the refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
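
/* Worked example for copy_map_value() (assuming a 4-byte struct
 * bpf_spin_lock): with value_size == 24 and spin_lock_off == 8, the copy
 * is split into byte ranges [0, 8) and [12, 24), so the lock word in the
 * middle of dst is never overwritten.
 */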
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints are used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints are used to prototype bpf_memcmp() and
	 * other functions that access data on the eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory that does not need to be
				 * initialized; the helper function must fill
				 * all bytes or clear them in the error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_PTR_TO_CTX_OR_NULL,	/* pointer to context or NULL */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_SOCKET_OR_NULL,	/* pointer to bpf_sock (fullsock) or NULL */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_PTR_TO_ALLOC_MEM_OR_NULL,	/* pointer to dynamically allocated memory or NULL */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK_OR_NULL,	/* pointer to stack or NULL */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	__BPF_ARG_TYPE_MAX,
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
	RET_PTR_TO_ALLOC_MEM_OR_NULL,	/* returns a pointer to dynamically allocated memory or NULL */
	RET_PTR_TO_BTF_ID_OR_NULL,	/* returns a pointer to a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
};

/* eBPF function prototype used by the verifier to allow BPF_CALLs from eBPF
 * programs to in-kernel helper functions and to adjust the imm32 field in
 * BPF_CALL instructions after verification
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
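
/* Example: the proto of the bpf_map_lookup_elem() helper (defined in
 * kernel/bpf/helpers.c) looks roughly like this:
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */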

/* bpf_context is an intentionally undefined structure. A pointer to
 * bpf_context is the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0), then [ptr, ptr + range - off) is safe to access
 * if (id > 0), some 'var' was added
 * if (off > 0), 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is that the
	 * program context will handle a null pointer dereference, typically
	 * via fault handling. The verifier must keep this in mind and can
	 * make no assumptions about null or non-null when doing branch
	 * analysis. Further, when passed into helpers, the helpers cannot,
	 * without additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_BTF_ID_OR_NULL,
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_MEM_OR_NULL,	 /* reg points to valid memory region or NULL */
	PTR_TO_RDONLY_BUF,	 /* reg points to a readonly buffer */
	PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
	PTR_TO_RDWR_BUF,	 /* reg points to a read/write buffer */
	PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
	PTR_TO_PERCPU_BTF_ID,	 /* reg points to a percpu kernel variable */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	__BPF_REG_TYPE_MAX,
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
	bool (*check_kfunc_call)(u32 kfunc_btf_id);
};
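
/* Minimal sketch of an is_valid_access() callback for a program type whose
 * context is a flat, read-only struct (struct foo_ctx is hypothetical):
 *
 *	static bool foo_is_valid_access(int off, int size,
 *					enum bpf_access_type type,
 *					const struct bpf_prog *prog,
 *					struct bpf_insn_access_aux *info)
 *	{
 *		if (type != BPF_READ)
 *			return false;
 *		if (off < 0 || off + size > sizeof(struct foo_ctx))
 *			return false;
 *		return true;
 *	}
 */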

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 38

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling the original function
 *    fexit = a set of programs to run after the original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling the original function
 *      fexit = a set of programs to run after the original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these functions are called from the generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
	unsigned long		 start;
	unsigned long		 end;
	char			 name[KSYM_NAME_LEN];
	struct list_head	 lnode;
	struct latch_tree_node	 tnode;
	bool			 prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if not NULL, this is a BPF_PROG_TYPE_EXT program that extends
	 * another BPF program by replacing one of its functions. func.addr
	 * is the address of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
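
/* Usage sketch: the XDP fast path defines a dispatcher and swaps programs
 * into it, roughly the way net/core/filter.c does:
 *
 *	DEFINE_BPF_DISPATCHER(xdp)
 *
 *	void bpf_prog_change_xdp(struct bpf_prog *prev_prog,
 *				 struct bpf_prog *prog)
 *	{
 *		bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp),
 *					   prev_prog, prog);
 *	}
 */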
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace.  linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo.  It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of the prog array is claimed by the first program
	 * that is going to use this map or by the first program whose FD
	 * is stored in the map, to make sure that all callers and callees
	 * have the same prog type and JITed flag.
	 */
	enum bpf_prog_type type;
	bool jited;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
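
/* Example: a map created with BPF_F_RDONLY_PROG yields BPF_MAP_CAN_READ
 * only, so the verifier rejects program-side writes to it.  With neither
 * flag set the map is both readable and writable from programs; setting
 * both flags is rejected at creation time via bpf_map_flags_access_ok().
 */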

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_read_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep a refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

/* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
#define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE			(1 << 0)
/* BPF program asks to set CN on the packet. */
#define BPF_RET_SET_CN						(1 << 0)

/* For BPF_PROG_RUN_ARRAY_FLAGS and __BPF_PROG_RUN_ARRAY,
 * if bpf_cgroup_storage_set() failed, the rest of the programs
 * will not execute. This should be a really rare scenario
 * as it requires BPF_CGROUP_STORAGE_NEST_MAX number of
 * preemptions all between bpf_cgroup_storage_set() and
 * bpf_cgroup_storage_unset() on the same cpu.
 */
#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags)		\
	({								\
		struct bpf_prog_array_item *_item;			\
		struct bpf_prog *_prog;					\
		struct bpf_prog_array *_array;				\
		u32 _ret = 1;						\
		u32 func_ret;						\
		migrate_disable();					\
		rcu_read_lock();					\
		_array = rcu_dereference(array);			\
		_item = &_array->items[0];				\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage)))	\
				break;					\
			func_ret = func(_prog, ctx);			\
			_ret &= (func_ret & 1);				\
			*(ret_flags) |= (func_ret >> 1);		\
			bpf_cgroup_storage_unset();			\
			_item++;					\
		}							\
		rcu_read_unlock();					\
		migrate_enable();					\
		_ret;							\
	 })
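
/* Usage sketch (hypothetical caller): run an effective cgroup prog array
 * and collect the extra return-flag bits:
 *
 *	u32 flags = 0;
 *	u32 ret;
 *
 *	ret = BPF_PROG_RUN_ARRAY_FLAGS(cgrp->bpf.effective[type], skb,
 *				       BPF_PROG_RUN, &flags);
 *
 * The low bit of each program's return value is ANDed into ret; the
 * remaining bits are ORed into flags.
 */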

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			if (!set_cg_storage) {			\
				_ret &= func(_prog, ctx);	\
			} else {				\
				if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage)))	\
					break;			\
				_ret &= func(_prog, ctx);	\
				bpf_cgroup_storage_unset();	\
			}				\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		migrate_enable();			\
		_ret;					\
	 })

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet, 1 to keep it). This macro changes the behavior so the low order
 * bit indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr().
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		u32 _flags = 0;				\
		bool _cn;				\
		u32 _ret;				\
		_ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \
		_cn = _flags & BPF_RET_SET_CN;		\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 *
 * Use the preemption safe inc/dec variants on RT because migrate disable
 * is preemptible on RT and preemption in the middle of the RMW operation
 * might lead to inconsistent state. Use the raw variants for non RT
 * kernels as migrate_disable() maps to preempt_disable() so the slightly
 * more expensive save operation can be avoided.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_dec(bpf_prog_active);
	else
		__this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
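
/* Typical pattern (illustrative): syscall-side map update paths that can
 * race with instrumentation wrap the critical section roughly as
 *
 *	bpf_disable_instrumentation();
 *	err = map->ops->map_update_elem(map, key, value, flags);
 *	bpf_enable_instrumentation();
 *
 * so a kprobe or tracepoint program cannot re-enter the map's bucket lock
 * on this CPU while the update runs.
 */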

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
extern const struct file_operations bpf_iter_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int  generic_map_lookup_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_update_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
int  generic_map_delete_batch(struct bpf_map *map,
			      const union bpf_attr *attr,
			      union bpf_attr __user *uattr);
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);

#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
			   int node);
void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags);
#else
static inline void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
		     int node)
{
	return kmalloc_node(size, flags, node);
}

static inline void *
bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
{
	return kzalloc(size, flags);
}

static inline void __percpu *
bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
		     gfp_t flags)
{
	return __alloc_percpu_gfp(size, align, flags);
}
#endif

extern int sysctl_unprivileged_bpf_disabled;

static inline bool bpf_allow_ptr_leaks(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_uninit_stack(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_ptr_to_map_access(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
	return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
	return perfmon_capable();
}

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
int bpf_link_settle(struct bpf_link_primer *primer);
void bpf_link_cleanup(struct bpf_link_primer *primer);
void bpf_link_inc(struct bpf_link *link);
void bpf_link_put(struct bpf_link *link);
int bpf_link_new_fd(struct bpf_link *link);
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
struct bpf_link *bpf_link_get_from_fd(u32 ufd);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
#define DEFINE_BPF_ITER_FUNC(target, args...)			\
	extern int bpf_iter_ ## target(args);			\
	int __init bpf_iter_ ## target(args) { return 0; }
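
/* Example: the task iterator declares its target signature (whose arguments
 * also shape the program's BTF-described context) roughly as
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 */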

struct bpf_iter_aux_info {
	struct bpf_map *map;
};

typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
					union bpf_iter_link_info *linfo,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
					struct seq_file *seq);
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
					 struct bpf_link_info *info);

enum bpf_iter_feature {
	BPF_ITER_RESCHED	= BIT(0),
};

#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
	const char *target;
	bpf_iter_attach_target_t attach_target;
	bpf_iter_detach_target_t detach_target;
	bpf_iter_show_fdinfo_t show_fdinfo;
	bpf_iter_fill_link_info_t fill_link_info;
	u32 ctx_arg_info_size;
	u32 feature;
	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
	const struct bpf_iter_seq_info *seq_info;
};

struct bpf_iter_meta {
	__bpf_md_ptr(struct seq_file *, seq);
	u64 session_id;
	u64 seq_num;
};

struct bpf_iter__bpf_map_elem {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(void *, key);
	__bpf_md_ptr(void *, value);
};

int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
			      struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
				struct bpf_link_info *info);

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
				   struct bpf_func_state *caller,
				   struct bpf_func_state *callee);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers and a size that is a
 * multiple of 8, forced to use 'long' read/writes to try to atomically
 * copy long counters.  Best-effort only.  No barriers here, since it
 * _will_ race with concurrent updates from BPF programs. Called from the
 * bpf syscall and mostly used with size 8 or 16 bytes, so ask compiler to
 * inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
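
/* Example (illustrative): copying one 16-byte per-cpu counter pair out of
 * a percpu map value in the syscall path:
 *
 *	struct pair { u64 packets; u64 bytes; };
 *	struct pair tmp;
 *
 *	bpf_long_memcpy(&tmp, per_cpu_ptr(pptr, cpu), sizeof(tmp));
 *
 * Each u64 is copied with a single long access on 64-bit, so a concurrent
 * BPF update can tear the pair as a whole but not an individual counter.
 */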

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
#endif

struct btf *bpf_get_btf_vmlinux(void);

/* Map specifics */
struct xdp_buff;
struct sk_buff;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress);
bool dev_map_can_have_prog(struct bpf_map *map);

void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
bool cpu_map_prog_allowed(struct bpf_map *map);

1518 /* Return the map's NUMA node specified by userspace, or NUMA_NO_NODE */
1519 static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
1520 {
1521 	return (attr->map_flags & BPF_F_NUMA_NODE) ?
1522 		attr->numa_node : NUMA_NO_NODE;
1523 }
1524 
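/*
 * Example (illustrative sketch): a map_alloc() implementation would
 * typically honour the requested node like this. example_alloc_area() is
 * hypothetical; bpf_map_area_alloc() is declared earlier in this header.
 */
static inline void *example_alloc_area(const union bpf_attr *attr, u64 size)
{
	/* Falls back to NUMA_NO_NODE when BPF_F_NUMA_NODE is not set. */
	return bpf_map_area_alloc(size, bpf_map_attr_numa_node(attr));
}
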
1525 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
1526 int array_map_alloc_check(union bpf_attr *attr);
1527 
1528 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
1529 			  union bpf_attr __user *uattr);
1530 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
1531 			  union bpf_attr __user *uattr);
1532 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1533 			      const union bpf_attr *kattr,
1534 			      union bpf_attr __user *uattr);
1535 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1536 				     const union bpf_attr *kattr,
1537 				     union bpf_attr __user *uattr);
1538 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
1539 			     const union bpf_attr *kattr,
1540 			     union bpf_attr __user *uattr);
1541 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
1542 				const union bpf_attr *kattr,
1543 				union bpf_attr __user *uattr);
1544 bool bpf_prog_test_check_kfunc_call(u32 kfunc_id);
1545 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
1546 		    const struct bpf_prog *prog,
1547 		    struct bpf_insn_access_aux *info);
1548 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
1549 		      const struct btf_type *t, int off, int size,
1550 		      enum bpf_access_type atype,
1551 		      u32 *next_btf_id);
1552 bool btf_struct_ids_match(struct bpf_verifier_log *log,
1553 			  const struct btf *btf, u32 id, int off,
1554 			  const struct btf *need_btf, u32 need_type_id);
1555 
1556 int btf_distill_func_proto(struct bpf_verifier_log *log,
1557 			   struct btf *btf,
1558 			   const struct btf_type *func_proto,
1559 			   const char *func_name,
1560 			   struct btf_func_model *m);
1561 
1562 struct bpf_reg_state;
1563 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
1564 				struct bpf_reg_state *regs);
1565 int btf_check_kfunc_arg_match(struct bpf_verifier_env *env,
1566 			      const struct btf *btf, u32 func_id,
1567 			      struct bpf_reg_state *regs);
1568 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
1569 			  struct bpf_reg_state *reg);
1570 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
1571 			 struct btf *btf, const struct btf_type *t);
1572 
1573 struct bpf_prog *bpf_prog_by_id(u32 id);
1574 struct bpf_link *bpf_link_by_id(u32 id);
1575 
1576 const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
1577 void bpf_task_storage_free(struct task_struct *task);
1578 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
1579 const struct btf_func_model *
1580 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
1581 			 const struct bpf_insn *insn);
1582 #else /* !CONFIG_BPF_SYSCALL */
1583 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
1584 {
1585 	return ERR_PTR(-EOPNOTSUPP);
1586 }
1587 
1588 static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
1589 						     enum bpf_prog_type type,
1590 						     bool attach_drv)
1591 {
1592 	return ERR_PTR(-EOPNOTSUPP);
1593 }
1594 
1595 static inline void bpf_prog_add(struct bpf_prog *prog, int i)
1596 {
1597 }
1598 
1599 static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
1600 {
1601 }
1602 
1603 static inline void bpf_prog_put(struct bpf_prog *prog)
1604 {
1605 }
1606 
1607 static inline void bpf_prog_inc(struct bpf_prog *prog)
1608 {
1609 }
1610 
1611 static inline struct bpf_prog *__must_check
1612 bpf_prog_inc_not_zero(struct bpf_prog *prog)
1613 {
1614 	return ERR_PTR(-EOPNOTSUPP);
1615 }
1616 
1617 static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
1618 				 const struct bpf_link_ops *ops,
1619 				 struct bpf_prog *prog)
1620 {
1621 }
1622 
1623 static inline int bpf_link_prime(struct bpf_link *link,
1624 				 struct bpf_link_primer *primer)
1625 {
1626 	return -EOPNOTSUPP;
1627 }
1628 
1629 static inline int bpf_link_settle(struct bpf_link_primer *primer)
1630 {
1631 	return -EOPNOTSUPP;
1632 }
1633 
1634 static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
1635 {
1636 }
1637 
1638 static inline void bpf_link_inc(struct bpf_link *link)
1639 {
1640 }
1641 
1642 static inline void bpf_link_put(struct bpf_link *link)
1643 {
1644 }
1645 
1646 static inline int bpf_obj_get_user(const char __user *pathname, int flags)
1647 {
1648 	return -EOPNOTSUPP;
1649 }
1650 
1651 static inline bool dev_map_can_have_prog(struct bpf_map *map)
1652 {
1653 	return false;
1654 }
1655 
1656 static inline void __dev_flush(void)
1657 {
1658 }
1659 
1660 struct xdp_buff;
1661 struct bpf_dtab_netdev;
1662 struct bpf_cpu_map_entry;
1663 
1664 static inline
1665 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
1666 		    struct net_device *dev_rx)
1667 {
1668 	return 0;
1669 }
1670 
1671 static inline
1672 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
1673 		    struct net_device *dev_rx)
1674 {
1675 	return 0;
1676 }
1677 
1678 static inline
1679 int dev_map_enqueue_multi(struct xdp_buff *xdp, struct net_device *dev_rx,
1680 			  struct bpf_map *map, bool exclude_ingress)
1681 {
1682 	return 0;
1683 }
1684 
1685 struct sk_buff;
1686 
1687 static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
1688 					   struct sk_buff *skb,
1689 					   struct bpf_prog *xdp_prog)
1690 {
1691 	return 0;
1692 }
1693 
1694 static inline
1695 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
1696 			   struct bpf_prog *xdp_prog, struct bpf_map *map,
1697 			   bool exclude_ingress)
1698 {
1699 	return 0;
1700 }
1701 
1702 static inline void __cpu_map_flush(void)
1703 {
1704 }
1705 
1706 static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
1707 				  struct xdp_buff *xdp,
1708 				  struct net_device *dev_rx)
1709 {
1710 	return 0;
1711 }
1712 
1713 static inline bool cpu_map_prog_allowed(struct bpf_map *map)
1714 {
1715 	return false;
1716 }
1717 
1718 static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
1719 				enum bpf_prog_type type)
1720 {
1721 	return ERR_PTR(-EOPNOTSUPP);
1722 }
1723 
1724 static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
1725 					const union bpf_attr *kattr,
1726 					union bpf_attr __user *uattr)
1727 {
1728 	return -ENOTSUPP;
1729 }
1730 
1731 static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
1732 					const union bpf_attr *kattr,
1733 					union bpf_attr __user *uattr)
1734 {
1735 	return -ENOTSUPP;
1736 }
1737 
1738 static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
1739 					    const union bpf_attr *kattr,
1740 					    union bpf_attr __user *uattr)
1741 {
1742 	return -ENOTSUPP;
1743 }
1744 
1745 static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
1746 						   const union bpf_attr *kattr,
1747 						   union bpf_attr __user *uattr)
1748 {
1749 	return -ENOTSUPP;
1750 }
1751 
1752 static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
1753 					      const union bpf_attr *kattr,
1754 					      union bpf_attr __user *uattr)
1755 {
1756 	return -ENOTSUPP;
1757 }
1758 
1759 static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
1760 {
1761 	return false;
1762 }
1763 
1764 static inline void bpf_map_put(struct bpf_map *map)
1765 {
1766 }
1767 
1768 static inline struct bpf_prog *bpf_prog_by_id(u32 id)
1769 {
1770 	return ERR_PTR(-ENOTSUPP);
1771 }
1772 
1773 static inline const struct bpf_func_proto *
1774 bpf_base_func_proto(enum bpf_func_id func_id)
1775 {
1776 	return NULL;
1777 }
1778 
1779 static inline void bpf_task_storage_free(struct task_struct *task)
1780 {
1781 }
1782 
1783 static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
1784 {
1785 	return false;
1786 }
1787 
1788 static inline const struct btf_func_model *
1789 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
1790 			 const struct bpf_insn *insn)
1791 {
1792 	return NULL;
1793 }
1794 #endif /* CONFIG_BPF_SYSCALL */
1795 
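/*
 * Example (illustrative sketch): the !CONFIG_BPF_SYSCALL stubs above let
 * callers be written without ifdefs; the no-op/ERR_PTR stubs simply make
 * every path fail gracefully. example_use_prog() is hypothetical.
 */
static inline int example_use_prog(u32 ufd)
{
	struct bpf_prog *prog = bpf_prog_get(ufd);

	if (IS_ERR(prog))
		return PTR_ERR(prog);	/* -EOPNOTSUPP when BPF is off */

	bpf_prog_put(prog);
	return 0;
}
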
1796 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
1797 			  struct btf_mod_pair *used_btfs, u32 len);
1798 
1799 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
1800 						 enum bpf_prog_type type)
1801 {
1802 	return bpf_prog_get_type_dev(ufd, type, false);
1803 }
1804 
1805 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
1806 			  struct bpf_map **used_maps, u32 len);
1807 
1808 bool bpf_prog_get_ok(struct bpf_prog *prog, enum bpf_prog_type *attach_type, bool attach_drv);
1809 
1810 int bpf_prog_offload_compile(struct bpf_prog *prog);
1811 void bpf_prog_offload_destroy(struct bpf_prog *prog);
1812 int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
1813 			       struct bpf_prog *prog);
1814 
1815 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
1816 
1817 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
1818 int bpf_map_offload_update_elem(struct bpf_map *map,
1819 				void *key, void *value, u64 flags);
1820 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
1821 int bpf_map_offload_get_next_key(struct bpf_map *map,
1822 				 void *key, void *next_key);
1823 
1824 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
1825 
1826 struct bpf_offload_dev *
1827 bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
1828 void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
1829 void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
1830 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
1831 				    struct net_device *netdev);
1832 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
1833 				       struct net_device *netdev);
1834 bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
1835 
1836 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
1837 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
1838 
1839 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
1840 {
1841 	return aux->offload_requested;
1842 }
1843 
1844 static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
1845 {
1846 	return unlikely(map->ops == &bpf_map_offload_ops);
1847 }
1848 
1849 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
1850 void bpf_map_offload_map_free(struct bpf_map *map);
1851 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
1852 			      const union bpf_attr *kattr,
1853 			      union bpf_attr __user *uattr);
1854 #else
1855 static inline int bpf_prog_offload_init(struct bpf_prog *prog,
1856 					union bpf_attr *attr)
1857 {
1858 	return -EOPNOTSUPP;
1859 }
1860 
1861 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
1862 {
1863 	return false;
1864 }
1865 
1866 static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
1867 {
1868 	return false;
1869 }
1870 
1871 static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
1872 {
1873 	return ERR_PTR(-EOPNOTSUPP);
1874 }
1875 
1876 static inline void bpf_map_offload_map_free(struct bpf_map *map)
1877 {
1878 }
1879 
1880 static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
1881 					    const union bpf_attr *kattr,
1882 					    union bpf_attr __user *uattr)
1883 {
1884 	return -ENOTSUPP;
1885 }
1886 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
1887 
1888 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
1889 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
1890 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
1891 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
1892 void sock_map_unhash(struct sock *sk);
1893 void sock_map_close(struct sock *sk, long timeout);
1894 
1895 void bpf_sk_reuseport_detach(struct sock *sk);
1896 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
1897 				       void *value);
1898 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
1899 				       void *value, u64 map_flags);
1900 #else
1901 static inline void bpf_sk_reuseport_detach(struct sock *sk)
1902 {
1903 }
1904 
1905 #ifdef CONFIG_BPF_SYSCALL
1906 static inline int sock_map_get_from_fd(const union bpf_attr *attr,
1907 				       struct bpf_prog *prog)
1908 {
1909 	return -EINVAL;
1910 }
1911 
1912 static inline int sock_map_prog_detach(const union bpf_attr *attr,
1913 				       enum bpf_prog_type ptype)
1914 {
1915 	return -EOPNOTSUPP;
1916 }
1917 
1918 static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
1919 					   u64 flags)
1920 {
1921 	return -EOPNOTSUPP;
1922 }
1923 
1924 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
1925 						     void *key, void *value)
1926 {
1927 	return -EOPNOTSUPP;
1928 }
1929 
1930 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
1931 						     void *key, void *value,
1932 						     u64 map_flags)
1933 {
1934 	return -EOPNOTSUPP;
1935 }
1936 #endif /* CONFIG_BPF_SYSCALL */
1937 #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
1938 
1939 /* verifier prototypes for helper functions called from eBPF programs */
1940 extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
1941 extern const struct bpf_func_proto bpf_map_update_elem_proto;
1942 extern const struct bpf_func_proto bpf_map_delete_elem_proto;
1943 extern const struct bpf_func_proto bpf_map_push_elem_proto;
1944 extern const struct bpf_func_proto bpf_map_pop_elem_proto;
1945 extern const struct bpf_func_proto bpf_map_peek_elem_proto;
1946 
1947 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
1948 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
1949 extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
1950 extern const struct bpf_func_proto bpf_tail_call_proto;
1951 extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
1952 extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
1953 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
1954 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
1955 extern const struct bpf_func_proto bpf_get_current_comm_proto;
1956 extern const struct bpf_func_proto bpf_get_stackid_proto;
1957 extern const struct bpf_func_proto bpf_get_stack_proto;
1958 extern const struct bpf_func_proto bpf_get_task_stack_proto;
1959 extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
1960 extern const struct bpf_func_proto bpf_get_stack_proto_pe;
1961 extern const struct bpf_func_proto bpf_sock_map_update_proto;
1962 extern const struct bpf_func_proto bpf_sock_hash_update_proto;
1963 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
1964 extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
1965 extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
1966 extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
1967 extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
1968 extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
1969 extern const struct bpf_func_proto bpf_spin_lock_proto;
1970 extern const struct bpf_func_proto bpf_spin_unlock_proto;
1971 extern const struct bpf_func_proto bpf_get_local_storage_proto;
1972 extern const struct bpf_func_proto bpf_strtol_proto;
1973 extern const struct bpf_func_proto bpf_strtoul_proto;
1974 extern const struct bpf_func_proto bpf_tcp_sock_proto;
1975 extern const struct bpf_func_proto bpf_jiffies64_proto;
1976 extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
1977 extern const struct bpf_func_proto bpf_event_output_data_proto;
1978 extern const struct bpf_func_proto bpf_ringbuf_output_proto;
1979 extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
1980 extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
1981 extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
1982 extern const struct bpf_func_proto bpf_ringbuf_query_proto;
1983 extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
1984 extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
1985 extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
1986 extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
1987 extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
1988 extern const struct bpf_func_proto bpf_copy_from_user_proto;
1989 extern const struct bpf_func_proto bpf_snprintf_btf_proto;
1990 extern const struct bpf_func_proto bpf_snprintf_proto;
1991 extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
1992 extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
1993 extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
1994 extern const struct bpf_func_proto bpf_sock_from_file_proto;
1995 extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
1996 extern const struct bpf_func_proto bpf_task_storage_get_proto;
1997 extern const struct bpf_func_proto bpf_task_storage_delete_proto;
1998 extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
1999 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
2000 
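/*
 * Example (illustrative sketch): each proto above is defined next to its
 * helper implementation, roughly in this shape. example_func and
 * example_func_proto are hypothetical, and the arg/ret types shown are
 * just one plausible combination.
 */
static u64 example_func(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	return 0;
}

static const struct bpf_func_proto example_func_proto = {
	.func		= example_func,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
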
2001 const struct bpf_func_proto *bpf_tracing_func_proto(
2002 	enum bpf_func_id func_id, const struct bpf_prog *prog);
2003 
2004 const struct bpf_func_proto *tracing_prog_func_proto(
2005 	enum bpf_func_id func_id, const struct bpf_prog *prog);
2006 
2007 /* Shared helpers between cBPF and eBPF. */
2008 void bpf_user_rnd_init_once(void);
2009 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
2010 u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
2011 
2012 #if defined(CONFIG_NET)
2013 bool bpf_sock_common_is_valid_access(int off, int size,
2014 				     enum bpf_access_type type,
2015 				     struct bpf_insn_access_aux *info);
2016 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2017 			      struct bpf_insn_access_aux *info);
2018 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
2019 				const struct bpf_insn *si,
2020 				struct bpf_insn *insn_buf,
2021 				struct bpf_prog *prog,
2022 				u32 *target_size);
2023 #else
2024 static inline bool bpf_sock_common_is_valid_access(int off, int size,
2025 						   enum bpf_access_type type,
2026 						   struct bpf_insn_access_aux *info)
2027 {
2028 	return false;
2029 }
2030 static inline bool bpf_sock_is_valid_access(int off, int size,
2031 					    enum bpf_access_type type,
2032 					    struct bpf_insn_access_aux *info)
2033 {
2034 	return false;
2035 }
2036 static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
2037 					      const struct bpf_insn *si,
2038 					      struct bpf_insn *insn_buf,
2039 					      struct bpf_prog *prog,
2040 					      u32 *target_size)
2041 {
2042 	return 0;
2043 }
2044 #endif /* CONFIG_NET */
2045 
2046 #ifdef CONFIG_INET
2047 struct sk_reuseport_kern {
2048 	struct sk_buff *skb;
2049 	struct sock *sk;
2050 	struct sock *selected_sk;
2051 	struct sock *migrating_sk;
2052 	void *data_end;
2053 	u32 hash;
2054 	u32 reuseport_id;
2055 	bool bind_inany;
2056 };
2057 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2058 				  struct bpf_insn_access_aux *info);
2059 
2060 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
2061 				    const struct bpf_insn *si,
2062 				    struct bpf_insn *insn_buf,
2063 				    struct bpf_prog *prog,
2064 				    u32 *target_size);
2065 
2066 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
2067 				  struct bpf_insn_access_aux *info);
2068 
2069 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
2070 				    const struct bpf_insn *si,
2071 				    struct bpf_insn *insn_buf,
2072 				    struct bpf_prog *prog,
2073 				    u32 *target_size);
2074 #else
2075 static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
2076 						enum bpf_access_type type,
2077 						struct bpf_insn_access_aux *info)
2078 {
2079 	return false;
2080 }
2081 
2082 static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
2083 						  const struct bpf_insn *si,
2084 						  struct bpf_insn *insn_buf,
2085 						  struct bpf_prog *prog,
2086 						  u32 *target_size)
2087 {
2088 	return 0;
2089 }
2090 static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
2091 						enum bpf_access_type type,
2092 						struct bpf_insn_access_aux *info)
2093 {
2094 	return false;
2095 }
2096 
2097 static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
2098 						  const struct bpf_insn *si,
2099 						  struct bpf_insn *insn_buf,
2100 						  struct bpf_prog *prog,
2101 						  u32 *target_size)
2102 {
2103 	return 0;
2104 }
2105 #endif /* CONFIG_INET */
2106 
2107 enum bpf_text_poke_type {
2108 	BPF_MOD_CALL,
2109 	BPF_MOD_JUMP,
2110 };
2111 
2112 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
2113 		       void *addr1, void *addr2);
2114 
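/*
 * Example (illustrative sketch): retargeting an already-patched call
 * site, e.g. when switching from one trampoline to another.
 * example_repoke() and both trampoline pointers are hypothetical.
 */
static inline int example_repoke(void *ip, void *old_tramp, void *new_tramp)
{
	/* Atomically rewrite the call instruction emitted at @ip. */
	return bpf_arch_text_poke(ip, BPF_MOD_CALL, old_tramp, new_tramp);
}
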
2115 struct btf_id_set;
2116 bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
2117 
2118 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
2119 			u32 **bin_buf, u32 num_args);
2120 void bpf_bprintf_cleanup(void);
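/*
 * Example (illustrative sketch): the prepare/cleanup pair brackets one
 * bstr_printf() pass over the pre-bound arguments. example_bprintf() is
 * hypothetical and assumes bstr_printf() is visible here.
 */
static inline int example_bprintf(char *out, u32 out_size, char *fmt,
				  u32 fmt_size, const u64 *raw_args,
				  u32 num_args)
{
	u32 *bin_args;
	int err;

	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, &bin_args, num_args);
	if (err < 0)
		return err;

	err = bstr_printf(out, out_size, fmt, bin_args);
	bpf_bprintf_cleanup();
	return err;
}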
2121 
2122 #endif /* _LINUX_BPF_H */
2123