xref: /linux-6.15/include/linux/bpf.h (revision f5ae2ea6)
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
};
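
/* A map type fills in these ops and is wired up via BPF_MAP_TYPE() in
 * <linux/bpf_types.h>. Minimal sketch, loosely modeled on the array map
 * (kernel/bpf/arraymap.c); the example_map_* callbacks are hypothetical
 * placeholders, not functions defined by this header:
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_alloc_check	= example_map_alloc_check,
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_get_next_key	= example_map_get_next_key,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *		.map_update_elem	= example_map_update_elem,
 *		.map_delete_elem	= example_map_delete_elem,
 *	};
 */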

struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	char name[BPF_OBJ_NAME_LEN];
	bool unpriv_array;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
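
/* Illustration of a map value layout that the helpers above must handle;
 * the struct below is hypothetical, not defined by this header:
 *
 *	struct example_value {
 *		int counter;
 *		struct bpf_spin_lock lock;	(its offset is spin_lock_off)
 *		long stats;
 *	};
 *
 * copy_map_value() copies 'counter' and 'stats' around the lock, so a
 * bpf_spin_lock held concurrently in dst is never overwritten.
 */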

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return map->btf && map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints are used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints are used to prototype bpf_memcmp() and
	 * other functions that access data on the eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory that does not need to be initialized;
				 * the helper function must fill all bytes or clear
				 * them in the error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
};

/* eBPF function prototype used by the verifier to allow BPF_CALLs from eBPF
 * programs to in-kernel helper functions and for adjusting the imm32 field
 * in BPF_CALL instructions after verification
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	int *btf_id; /* BTF ids of arguments */
};
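
/* A helper's prototype is typically defined next to its implementation;
 * sketch along the lines of bpf_map_lookup_elem_proto in
 * kernel/bpf/helpers.c:
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */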

/* bpf_context is an intentionally undefined structure. A pointer to
 * bpf_context is the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	PTR_TO_BTF_ID,		 /* reg points to kernel struct */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		u32 btf_id;
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
};
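
/* Sketch of an is_valid_access() implementation; example_ctx and the
 * read-only policy below are illustrative only:
 *
 *	static bool example_is_valid_access(int off, int size,
 *					    enum bpf_access_type type,
 *					    const struct bpf_prog *prog,
 *					    struct bpf_insn_access_aux *info)
 *	{
 *		if (off < 0 || off + size > sizeof(struct example_ctx))
 *			return false;
 *		return type == BPF_READ;
 *	}
 */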

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
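
/* Readers sample cnt/nsecs under the syncp sequence counter. Sketch of the
 * read side (the real per-CPU aggregation lives in kernel/bpf/syscall.c):
 *
 *	unsigned int start;
 *	u64 cnt, nsecs;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&stats->syncp);
 *		cnt = stats->cnt;
 *		nsecs = stats->nsecs;
 *	} while (u64_stats_fetch_retry(&stats->syncp, start));
 */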

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
				struct bpf_prog **fentry_progs, int fentry_cnt,
				struct bpf_prog **fexit_progs, int fexit_cnt,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
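
/* With BPF_TRAMP_F_CALL_ORIG the generated trampoline roughly performs the
 * following (illustrative pseudo-code; the real layout is arch-specific,
 * see arch_prepare_bpf_trampoline()):
 *
 *	for each fentry prog:
 *		start = __bpf_prog_enter();
 *		fentry_prog(args);
 *		__bpf_prog_exit(fentry_prog, start);
 *	ret = orig_call(args);
 *	for each fexit prog:
 *		start = __bpf_prog_enter();
 *		fexit_prog(args, ret);
 *		__bpf_prog_exit(fexit_prog, start);
 *	return ret;
 */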

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MAX
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	void *image;
	u64 selector;
};
#ifdef CONFIG_BPF_JIT
struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
int bpf_trampoline_link_prog(struct bpf_prog *prog);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#else
static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	return NULL;
}
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
{
	return -ENOTSUPP;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#endif

struct bpf_func_info_aux {
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *ip;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool ip_stable;
	u8 adj_off;
	u16 reason;
};

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	struct bpf_prog *linked_prog;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	enum bpf_tramp_prog_type trampoline_prog_type;
	struct bpf_trampoline *trampoline;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	u32 size_poke_tab;
	struct latch_tree_node ksym_tnode;
	struct list_head ksym_lnode;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace.  linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo.  It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map, to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	enum bpf_prog_type type;
	bool jited;
	/* Programs with direct jumps into programs that are part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ	BIT(0)
#define BPF_MAP_CAN_WRITE	BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
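
/* Example: a map created with BPF_F_RDONLY_PROG maps to BPF_MAP_CAN_READ
 * only, so the verifier rejects writes from programs while the syscall
 * side can still update the map. A creation-time check might look like
 * (sketch):
 *
 *	if (!bpf_map_flags_access_ok(attr->map_flags))
 *		return ERR_PTR(-EINVAL);
 */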

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const char *kernel_type_name(u32 btf_type_id);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_read_lock().
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[0];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			_ret &= func(_prog, ctx);	\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		preempt_enable();			\
		_ret;					\
	})

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet). This macro changes the behavior so the low order bit
 * indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr().
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT codes or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		preempt_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			bpf_cgroup_storage_set(_item->cgroup_storage);	\
			ret = func(_prog, ctx);		\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		preempt_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true)
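
/* Example: run every program attached to a cgroup and fold the results
 * into a single allow/deny decision (sketch; cf. the real callers in
 * kernel/bpf/cgroup.c):
 *
 *	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, BPF_PROG_RUN);
 *	if (ret != 1)
 *		return -EPERM;
 */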

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);

extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;

#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
	extern const struct bpf_prog_ops _name ## _prog_ops; \
	extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
	extern const struct bpf_map_ops _ops;
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE

extern const struct bpf_prog_ops bpf_offload_prog_ops;
extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
extern const struct bpf_verifier_ops xdp_analyzer_ops;

struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
				       bool attach_drv);
void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);

struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
void bpf_map_inc(struct bpf_map *map);
void bpf_map_inc_with_uref(struct bpf_map *map);
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);

extern int sysctl_unprivileged_bpf_disabled;

int bpf_map_new_fd(struct bpf_map *map, int flags);
int bpf_prog_new_fd(struct bpf_prog *prog);

int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
int bpf_obj_get_user(const char __user *pathname, int flags);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, a size that is a
 * multiple of sizeof(long), and is forced to use 'long' reads/writes to
 * try to atomically copy long counters. Best-effort only. No barriers
 * here, since it _will_ race with concurrent updates from BPF programs.
 * Called from the bpf syscall and mostly used with size 8 or 16 bytes,
 * so ask the compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
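
/* Typical caller, copying per-cpu values out in one pass (sketch, cf.
 * bpf_percpu_array_copy() in kernel/bpf/arraymap.c):
 *
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 */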

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(struct bpf_map *map);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);

/* Return the map's NUMA node as specified by userspace */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
int btf_resolve_helper_id(struct bpf_verifier_log *log,
			  const struct bpf_func_proto *fn, int);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog);

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

struct xdp_buff;
struct sk_buff;
struct bpf_dtab_netdev;

static inline struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map,
							    u32 key)
{
	return NULL;
}

static inline struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map,
								 u32 key)
{
	return NULL;
}

static inline void __dev_map_flush(struct bpf_map *map)
{
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(struct bpf_map *map)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
				enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}
#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}
#endif

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);
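
/* Example: prog_array's map_poke_run() retargets tail call sites in the
 * JITed image (sketch, cf. prog_array_map_poke_run() in
 * kernel/bpf/arraymap.c):
 *
 *	ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
 *				 old ? (u8 *)old->bpf_func + poke->adj_off : NULL,
 *				 new ? (u8 *)new->bpf_func + poke->adj_off : NULL);
 */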

#endif /* _LINUX_BPF_H */