/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>
#include <uapi/linux/filter.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/stddef.h>
#include <linux/bpfptr.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/static_call.h>
#include <linux/memcontrol.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;
struct ftrace_ops;
struct cgroup;
struct bpf_token;
struct user_namespace;
struct super_block;
struct inode;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
extern bool bpf_global_ma_set;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
typedef unsigned int (*bpf_func_t)(const void *,
				   const struct bpf_insn *);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
				const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	long (*map_delete_elem)(struct bpf_map *map, void *key);
	long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	long (*map_pop_elem)(struct bpf_map *map, void *value);
	long (*map_peek_elem)(struct bpf_map *map, void *value);
	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	/* If need_defer is true, the implementation should guarantee that
	 * the to-be-put element is still alive before the bpf program, which
	 * may manipulate it, exits.
	 */
	void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map have already been used during
	 * verification.  When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the map being inserted has the same
	 * properties that the verifier relied on earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	long (*map_for_each_callback)(struct bpf_map *map,
				     bpf_callback_t callback_fn,
				     void *callback_ctx, u64 flags);

	u64 (*map_mem_usage)(const struct bpf_map *map);

	/* BTF id of struct allocated by map_alloc */
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
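
/* Illustrative sketch (not part of the kernel sources): a minimal map type
 * only needs the alloc/free and element callbacks wired up; everything else
 * may be left NULL when unsupported. The example_* names are hypothetical;
 * bpf_map_area_alloc() and bpf_map_init_from_attr() are the helpers most
 * real map implementations use:
 *
 *	static struct bpf_map *example_map_alloc(union bpf_attr *attr)
 *	{
 *		struct bpf_map *map;
 *
 *		map = bpf_map_area_alloc(sizeof(*map), NUMA_NO_NODE);
 *		if (!map)
 *			return ERR_PTR(-ENOMEM);
 *		bpf_map_init_from_attr(map, attr);
 *		return map;
 *	}
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_meta_equal		= bpf_map_meta_equal,
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_get_next_key	= example_map_get_next_key,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *		.map_update_elem	= example_map_update_elem,
 *		.map_delete_elem	= example_map_delete_elem,
 *		.map_mem_usage		= example_map_mem_usage,
 *	};
 */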

enum {
	/* Support at most 10 fields in a BTF type */
	BTF_FIELDS_MAX	   = 10,
};

enum btf_field_type {
	BPF_SPIN_LOCK  = (1 << 0),
	BPF_TIMER      = (1 << 1),
	BPF_KPTR_UNREF = (1 << 2),
	BPF_KPTR_REF   = (1 << 3),
	BPF_KPTR_PERCPU = (1 << 4),
	BPF_KPTR       = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
	BPF_LIST_HEAD  = (1 << 5),
	BPF_LIST_NODE  = (1 << 6),
	BPF_RB_ROOT    = (1 << 7),
	BPF_RB_NODE    = (1 << 8),
	BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE,
	BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD,
	BPF_REFCOUNT   = (1 << 9),
};
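
/* These field types correspond to special structs that a BPF program may
 * embed in a map value or allocated object. A hedged BPF-side sketch of a
 * map value carrying three of them (struct foo is hypothetical):
 *
 *	struct map_val {
 *		struct bpf_spin_lock lock;	// BPF_SPIN_LOCK
 *		struct bpf_timer timer;		// BPF_TIMER
 *		struct foo __kptr *ptr;		// BPF_KPTR_REF
 *	};
 *
 * The verifier discovers such fields via BTF and records them in the map's
 * btf_record (see struct btf_record below).
 */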

typedef void (*btf_dtor_kfunc_t)(void *);

struct btf_field_kptr {
	struct btf *btf;
	struct module *module;
	/* dtor used if btf_is_kernel(btf), otherwise the type is
	 * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
	 */
	btf_dtor_kfunc_t dtor;
	u32 btf_id;
};

struct btf_field_graph_root {
	struct btf *btf;
	u32 value_btf_id;
	u32 node_offset;
	struct btf_record *value_rec;
};

struct btf_field {
	u32 offset;
	u32 size;
	enum btf_field_type type;
	union {
		struct btf_field_kptr kptr;
		struct btf_field_graph_root graph_root;
	};
};

struct btf_record {
	u32 cnt;
	u32 field_mask;
	int spin_lock_off;
	int timer_off;
	int refcount_off;
	struct btf_field fields[];
};

/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
struct bpf_rb_node_kern {
	struct rb_node rb_node;
	void *owner;
} __attribute__((aligned(8)));

/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
struct bpf_list_node_kern {
	struct list_head list_head;
	void *owner;
} __attribute__((aligned(8)));

struct bpf_map {
	/* The first two cachelines contain read-mostly members, some of
	 * which are also accessed on the fast path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u64 map_extra; /* any per-map-type extra fields */
	u32 map_flags;
	u32 id;
	struct btf_record *record;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG_KMEM
	struct obj_cgroup *objcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	/* rcu is used before freeing and work is only used during freeing */
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
	struct mutex freeze_mutex;
	atomic64_t writecnt;
	/* 'Ownership' of a program-containing map is claimed by the first
	 * program that is going to use this map or by the first program whose
	 * FD is stored in the map, to make sure that all callers and callees
	 * have the same prog type, JITed flag and xdp_has_frags flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
		bool xdp_has_frags;
	} owner;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	bool free_after_mult_rcu_gp;
	bool free_after_rcu_gp;
	atomic64_t sleepable_refcnt;
	s64 __percpu *elem_count;
};

static inline const char *btf_field_type_name(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return "bpf_spin_lock";
	case BPF_TIMER:
		return "bpf_timer";
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
		return "kptr";
	case BPF_KPTR_PERCPU:
		return "percpu_kptr";
	case BPF_LIST_HEAD:
		return "bpf_list_head";
	case BPF_LIST_NODE:
		return "bpf_list_node";
	case BPF_RB_ROOT:
		return "bpf_rb_root";
	case BPF_RB_NODE:
		return "bpf_rb_node";
	case BPF_REFCOUNT:
		return "bpf_refcount";
	default:
		WARN_ON_ONCE(1);
		return "unknown";
	}
}

static inline u32 btf_field_type_size(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return sizeof(struct bpf_spin_lock);
	case BPF_TIMER:
		return sizeof(struct bpf_timer);
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
		return sizeof(u64);
	case BPF_LIST_HEAD:
		return sizeof(struct bpf_list_head);
	case BPF_LIST_NODE:
		return sizeof(struct bpf_list_node);
	case BPF_RB_ROOT:
		return sizeof(struct bpf_rb_root);
	case BPF_RB_NODE:
		return sizeof(struct bpf_rb_node);
	case BPF_REFCOUNT:
		return sizeof(struct bpf_refcount);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static inline u32 btf_field_type_align(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return __alignof__(struct bpf_spin_lock);
	case BPF_TIMER:
		return __alignof__(struct bpf_timer);
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
		return __alignof__(u64);
	case BPF_LIST_HEAD:
		return __alignof__(struct bpf_list_head);
	case BPF_LIST_NODE:
		return __alignof__(struct bpf_list_node);
	case BPF_RB_ROOT:
		return __alignof__(struct bpf_rb_root);
	case BPF_RB_NODE:
		return __alignof__(struct bpf_rb_node);
	case BPF_REFCOUNT:
		return __alignof__(struct bpf_refcount);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
{
	memset(addr, 0, field->size);

	switch (field->type) {
	case BPF_REFCOUNT:
		refcount_set((refcount_t *)addr, 1);
		break;
	case BPF_RB_NODE:
		RB_CLEAR_NODE((struct rb_node *)addr);
		break;
	case BPF_LIST_HEAD:
	case BPF_LIST_NODE:
		INIT_LIST_HEAD((struct list_head *)addr);
		break;
	case BPF_RB_ROOT:
		/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
	case BPF_SPIN_LOCK:
	case BPF_TIMER:
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}
}

static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type)
{
	if (IS_ERR_OR_NULL(rec))
		return false;
	return rec->field_mask & type;
}
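
/* Typical usage (sketch): callers gate lock-aware paths on the record, e.g.
 *
 *	if (btf_record_has_field(map->record, BPF_SPIN_LOCK))
 *		copy_map_value_locked(map, dst, src, false);
 *	else
 *		copy_map_value(map, dst, src);
 *
 * copy_map_value_locked() and copy_map_value() are declared/defined below.
 */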

static inline void bpf_obj_init(const struct btf_record *rec, void *obj)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++)
		bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset);
}

/* 'dst' must be a temporary buffer and should not point to memory that is being
 * used in parallel by a bpf program or bpf syscall, otherwise the access from
 * the bpf program or bpf syscall may be corrupted by the reinitialization,
 * leading to weird problems. Even if 'dst' is newly allocated from the bpf
 * memory allocator, it is still possible for 'dst' to be used in parallel by a
 * bpf program or bpf syscall.
 */
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_init(map->record, dst);
}

/* memcpy that is used with 8-byte aligned pointers, a size that is a multiple
 * of 8, and forced to use 'long' read/writes to try to atomically copy long
 * counters. Best-effort only. No barriers here, since it _will_ race with
 * concurrent updates from BPF programs. Called from the bpf syscall and mostly
 * used with size 8 or 16 bytes, so ask the compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		data_race(*ldst++ = *lsrc++);
}

/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
static inline void bpf_obj_memcpy(struct btf_record *rec,
				  void *dst, void *src, u32 size,
				  bool long_memcpy)
{
	u32 curr_off = 0;
	int i;

	if (IS_ERR_OR_NULL(rec)) {
		if (long_memcpy)
			bpf_long_memcpy(dst, src, round_up(size, 8));
		else
			memcpy(dst, src, size);
		return;
	}

	for (i = 0; i < rec->cnt; i++) {
		u32 next_off = rec->fields[i].offset;
		u32 sz = next_off - curr_off;

		memcpy(dst + curr_off, src + curr_off, sz);
		curr_off += rec->fields[i].size + sz;
	}
	memcpy(dst + curr_off, src + curr_off, size - curr_off);
}

static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
}

static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
}
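
/* Worked example for the loop above: for a 24-byte value with a single
 * 4-byte bpf_spin_lock at offset 8, the first iteration copies bytes
 * [0, 8), curr_off advances to 12, and the final memcpy() copies bytes
 * [12, 24), so the lock word itself is never overwritten.
 */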

static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
{
	u32 curr_off = 0;
	int i;

	if (IS_ERR_OR_NULL(rec)) {
		memset(dst, 0, size);
		return;
	}

	for (i = 0; i < rec->cnt; i++) {
		u32 next_off = rec->fields[i].offset;
		u32 sz = next_off - curr_off;

		memset(dst + curr_off, 0, sz);
		curr_off += rec->fields[i].size + sz;
	}
	memset(dst + curr_off, 0, size - curr_off);
}

static inline void zero_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_memzero(map->record, dst, map->value_size);
}

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
			struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
		      struct bpf_spin_lock *spin_lock);

int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or memory may be read-only. We classify types into two categories: base
 * types and extended types. Extended types are base types combined with a
 * type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_types.
 */
#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied on bpf_arg, it indicates the arg is
	 * compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),

	/* MEM points to BPF ring buffer reservation. */
	MEM_RINGBUF		= BIT(2 + BPF_BASE_TYPE_BITS),

	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	/* Indicates that the argument will be released. */
	OBJ_RELEASE		= BIT(5 + BPF_BASE_TYPE_BITS),

	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
	 * unreferenced and referenced kptrs loaded from map value using a load
	 * instruction, so that they can only be dereferenced but not escape the
	 * BPF program into the kernel (i.e. cannot be passed as arguments to
	 * kfuncs or bpf helpers).
	 */
	PTR_UNTRUSTED		= BIT(6 + BPF_BASE_TYPE_BITS),

	MEM_UNINIT		= BIT(7 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to memory local to the bpf program. */
	DYNPTR_TYPE_LOCAL	= BIT(8 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to a kernel-produced ringbuf record. */
	DYNPTR_TYPE_RINGBUF	= BIT(9 + BPF_BASE_TYPE_BITS),

	/* Size is known at compile time. */
	MEM_FIXED_SIZE		= BIT(10 + BPF_BASE_TYPE_BITS),

	/* MEM is of an allocated object of type in program BTF. This is used to
	 * tag PTR_TO_BTF_ID allocated using bpf_obj_new.
	 */
	MEM_ALLOC		= BIT(11 + BPF_BASE_TYPE_BITS),

	/* PTR was passed from the kernel in a trusted context, and may be
	 * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
	 * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
	 * PTR_UNTRUSTED refers to a kptr that was read directly from a map
	 * without invoking bpf_kptr_xchg(). What we really need to know is
	 * whether a pointer is safe to pass to a kfunc or BPF helper function.
	 * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF
	 * helpers, they do not cover all possible instances of unsafe
	 * pointers. For example, a pointer that was obtained from walking a
	 * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the
	 * fact that it may be NULL, invalid, etc. This is due to backwards
	 * compatibility requirements, as this was the behavior that was first
	 * introduced when kptrs were added. The behavior is now considered
	 * deprecated, and PTR_UNTRUSTED will eventually be removed.
	 *
	 * PTR_TRUSTED, on the other hand, is a pointer that the kernel
	 * guarantees to be valid and safe to pass to kfuncs and BPF helpers.
	 * For example, pointers passed to tracepoint arguments are considered
	 * PTR_TRUSTED, as are pointers that are passed to struct_ops
	 * callbacks. As alluded to above, pointers that are obtained from
	 * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a
	 * struct task_struct *task is PTR_TRUSTED, then accessing
	 * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored
	 * in a BPF register. Similarly, pointers passed to certain program
	 * types such as kretprobes are not guaranteed to be valid, as they may
	 * for example contain an object that was recently freed.
	 */
	PTR_TRUSTED		= BIT(12 + BPF_BASE_TYPE_BITS),

	/* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
	MEM_RCU			= BIT(13 + BPF_BASE_TYPE_BITS),

	/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
	 * Currently only valid for linked-list and rbtree nodes. If the nodes
	 * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
	 */
	NON_OWN_REF		= BIT(14 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to sk_buff */
	DYNPTR_TYPE_SKB		= BIT(15 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to xdp_buff */
	DYNPTR_TYPE_XDP		= BIT(16 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_FLAG_MAX,
	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,
};

#define DYNPTR_TYPE_FLAG_MASK	(DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
				 | DYNPTR_TYPE_XDP)

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
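
/* An extended type is simply a base type OR'ed with flags, e.g.
 * PTR_TO_MAP_VALUE | PTR_MAYBE_NULL. A hedged sketch of how the two halves
 * are separated again (the verifier defines helpers of this shape, e.g.
 * base_type()/type_flag() in linux/bpf_verifier.h):
 *
 *	#define BPF_BASE_TYPE_MASK	(BPF_BASE_TYPE_LIMIT - 1)
 *
 *	static inline u32 base_type(u32 type)
 *	{
 *		return type & BPF_BASE_TYPE_MASK;
 *	}
 *
 *	static inline u32 type_flag(u32 type)
 *	{
 *		return type & ~BPF_BASE_TYPE_MASK;
 *	}
 */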

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */

	/* Used to prototype bpf_memcmp() and other functions that access data
	 * on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_RINGBUF_MEM,	/* pointer to dynamically reserved ringbuf memory */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
	ARG_PTR_TO_STACK,	/* pointer to stack */
	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
	ARG_PTR_TO_KPTR,	/* pointer to referenced kptr */
	ARG_PTR_TO_DYNPTR,      /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
	__BPF_ARG_TYPE_MAX,

	/* Extended arg_types. */
	ARG_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
	ARG_PTR_TO_MEM_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
	ARG_PTR_TO_CTX_OR_NULL		= PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
	ARG_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
	ARG_PTR_TO_STACK_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
	ARG_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
	/* Pointer to memory that does not need to be initialized; the helper
	 * function must fill all bytes or clear them in the error case.
	 */
	ARG_PTR_TO_UNINIT_MEM		= MEM_UNINIT | ARG_PTR_TO_MEM,
	/* Pointer to valid memory of size known at compile time. */
	ARG_PTR_TO_FIXED_SIZE_MEM	= MEM_FIXED_SIZE | ARG_PTR_TO_MEM,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_ARG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_SOCKET,		/* returns a pointer to a socket */
	RET_PTR_TO_TCP_SOCK,		/* returns a pointer to a tcp_sock */
	RET_PTR_TO_SOCK_COMMON,		/* returns a pointer to a sock_common */
	RET_PTR_TO_MEM,			/* returns a pointer to memory */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
	RET_PTR_TO_BTF_ID,		/* returns a pointer to a btf_id */
	__BPF_RET_TYPE_MAX,

	/* Extended ret_types. */
	RET_PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
	RET_PTR_TO_SOCKET_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
	RET_PTR_TO_TCP_SOCK_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
	RET_PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
	RET_PTR_TO_RINGBUF_MEM_OR_NULL	= PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM,
	RET_PTR_TO_DYNPTR_MEM_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_MEM,
	RET_PTR_TO_BTF_ID_OR_NULL	= PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
	RET_PTR_TO_BTF_ID_TRUSTED	= PTR_TRUSTED	 | RET_PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_RET_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	bool might_sleep;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
		struct {
			size_t arg1_size;
			size_t arg2_size;
			size_t arg3_size;
			size_t arg4_size;
			size_t arg5_size;
		};
		size_t arg_size[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
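
/* Example, following the pattern used by existing helpers in
 * kernel/bpf/helpers.c: the helper body is defined with BPF_CALL_n() and
 * described to the verifier by a bpf_func_proto:
 *
 *	BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
 *	{
 *		return (unsigned long) map->ops->map_lookup_elem(map, key);
 *	}
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */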

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_KEY,		 /* reg points to a map element key */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_BUF,		 /* reg points to a read/write buffer */
	PTR_TO_FUNC,		 /* reg points to a bpf program function */
	CONST_PTR_TO_DYNPTR,	 /* reg points to a const struct bpf_dynptr */
	__BPF_REG_TYPE_MAX,

	/* Extended reg_types. */
	PTR_TO_MAP_VALUE_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
	PTR_TO_SOCKET_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_SOCKET,
	PTR_TO_SOCK_COMMON_OR_NULL	= PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
	PTR_TO_TCP_SOCK_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
	PTR_TO_BTF_ID_OR_NULL		= PTR_MAYBE_NULL | PTR_TO_BTF_ID,

	/* This must be the last entry. Its purpose is to ensure the enum is
	 * wide enough to hold the higher bits reserved for bpf_type_flag.
	 */
	__BPF_REG_TYPE_LIMIT	= BPF_TYPE_LIMIT,
};
static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		struct {
			struct btf *btf;
			u32 btf_id;
		};
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

static inline bool bpf_is_ldimm64(const struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
{
	return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}
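
/* A pseudo-func load is the first half of a 16-byte ldimm64 instruction
 * whose src_reg is BPF_PSEUDO_FUNC. A sketch in BPF_RAW_INSN() form (the
 * exact imm encoding of the subprog offset is handled by the loader and
 * verifier):
 *
 *	BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, BPF_REG_2,
 *		     BPF_PSEUDO_FUNC, 0, subprog_insn_off),
 *	BPF_RAW_INSN(0, 0, 0, 0, 0),
 */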

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_reg_state;
struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct bpf_reg_state *reg,
				 int off, int size);
};
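
/* Sketch of a typical is_valid_access callback: reject out-of-range or
 * misaligned context offsets and only allow reads (struct example_ctx and
 * the function name are hypothetical):
 *
 *	static bool example_is_valid_access(int off, int size,
 *					    enum bpf_access_type type,
 *					    const struct bpf_prog *prog,
 *					    struct bpf_insn_access_aux *info)
 *	{
 *		if (off < 0 || off + size > sizeof(struct example_ctx))
 *			return false;
 *		if (off % size != 0)
 *			return false;
 *		return type == BPF_READ;
 *	}
 */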

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog		*prog;
	struct net_device	*netdev;
	struct bpf_offload_dev	*offdev;
	void			*dev_priv;
	struct list_head	offloads;
	bool			dev_state;
	bool			opt_failed;
	void			*jited_image;
	u32			jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

/* The maximum number of arguments passed through registers
 * a single function may have.
 */
#define MAX_BPF_FUNC_REG_ARGS 5

/* The argument is a structure. */
#define BTF_FMODEL_STRUCT_ARG		BIT(0)

/* The argument is signed. */
#define BTF_FMODEL_SIGNED_ARG		BIT(1)

struct btf_func_model {
	u8 ret_size;
	u8 ret_flags;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
	u8 arg_flags[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent.  Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Store IP address of the caller on the trampoline stack,
 * so it's available for trampoline's programs.
 */
#define BPF_TRAMP_F_IP_ARG		BIT(3)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Get original function from stack instead of from provided direct address.
 * Makes sense for trampolines with fexit or fmod_ret programs.
 */
#define BPF_TRAMP_F_ORIG_STACK		BIT(5)

/* This trampoline is on a function with another ftrace_ops with IPMODIFY,
 * e.g., a live patch. This flag is set and cleared by ftrace callbacks.
 */
#define BPF_TRAMP_F_SHARE_IPMODIFY	BIT(6)

/* Indicates that the current trampoline is in a tail-call context, so it has
 * to cache and restore tail_call_cnt to avoid an infinite tail-call loop.
 */
#define BPF_TRAMP_F_TAIL_CALL_CTX	BIT(7)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.
 */
enum {
#if defined(__s390x__)
	BPF_MAX_TRAMP_LINKS = 27,
#else
	BPF_MAX_TRAMP_LINKS = 38,
#endif
};

struct bpf_tramp_links {
	struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
	int nr_links;
};

struct bpf_tramp_run_ctx;

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling the original function
 *    fexit = a set of programs to run after the original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling the original function
 *      fexit = a set of programs to run after the original function
 */
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *func_addr);
void *arch_alloc_bpf_trampoline(unsigned int size);
void arch_free_bpf_trampoline(void *image, unsigned int size);
void arch_protect_bpf_trampoline(void *image, unsigned int size);
void arch_unprotect_bpf_trampoline(void *image, unsigned int size);
int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
			     struct bpf_tramp_links *tlinks, void *func_addr);
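
/* Sketch of how an arch trampoline image is produced for use case 2 above
 * (fentry + fexit around the traced function); error handling elided:
 *
 *	u32 flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 *	int size = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
 *	void *image = arch_alloc_bpf_trampoline(size);
 *
 *	err = arch_prepare_bpf_trampoline(im, image, image + size,
 *					  m, flags, tlinks, func_addr);
 *	arch_protect_bpf_trampoline(image, size);
 */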

u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
				      struct bpf_tramp_run_ctx *run_ctx);
typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
				      struct bpf_tramp_run_ctx *run_ctx);
bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);

struct bpf_ksym {
	unsigned long		 start;
	unsigned long		 end;
	char			 name[KSYM_NAME_LEN];
	struct list_head	 lnode;
	struct latch_tree_node	 tnode;
	bool			 prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	int size;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	struct ftrace_ops *fops;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u32 flags;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* If not NULL, this is a BPF_PROG_TYPE_EXT program that extends
	 * another BPF program by replacing one of its functions. func.addr is
	 * the address of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	struct module *mod;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	struct module *tgt_mod;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	void *rw_image;
	u32 image_off;
	struct bpf_ksym ksym;
#ifdef CONFIG_HAVE_STATIC_CALL
	struct static_call_key *sc_key;
	void *sc_tramp;
#endif
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	bpf_func_t bpf_func)
{
	return bpf_func(ctx, insnsi);
}

/* the implementation of the opaque uapi struct bpf_dynptr */
struct bpf_dynptr_kern {
	void *data;
	/* Size represents the number of usable bytes of dynptr data.
	 * If for example the offset is at 4 for a local dynptr whose data is
	 * of type u64, the number of usable bytes is 4.
	 *
	 * The upper 8 bits are reserved and the field is laid out as follows:
	 * Bits 0 - 23 = size
	 * Bits 24 - 30 = dynptr type
	 * Bit 31 = whether dynptr is read-only
	 */
	u32 size;
	u32 offset;
} __aligned(8);
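
/* Hypothetical accessors consistent with the layout documented above; the
 * kernel keeps its own equivalents private to kernel/bpf/helpers.c:
 *
 *	static inline u32 dynptr_size(const struct bpf_dynptr_kern *p)
 *	{
 *		return p->size & 0x00FFFFFF;	// bits 0 - 23
 *	}
 *
 *	static inline bool dynptr_is_rdonly(const struct bpf_dynptr_kern *p)
 *	{
 *		return p->size & BIT(31);	// bit 31
 *	}
 */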

enum bpf_dynptr_type {
	BPF_DYNPTR_TYPE_INVALID,
	/* Points to memory that is local to the bpf program */
	BPF_DYNPTR_TYPE_LOCAL,
	/* Underlying data is a ringbuf record */
	BPF_DYNPTR_TYPE_RINGBUF,
	/* Underlying data is a sk_buff */
	BPF_DYNPTR_TYPE_SKB,
	/* Underlying data is a xdp_buff */
	BPF_DYNPTR_TYPE_XDP,
};

int bpf_dynptr_check_size(u32 size);
u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);

#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);

/*
 * When the architecture supports STATIC_CALL, replace the bpf_dispatcher_fn
 * indirection with a direct call to the bpf program. If the architecture does
 * not have STATIC_CALL, avoid a double-indirection.
 */
#ifdef CONFIG_HAVE_STATIC_CALL

#define __BPF_DISPATCHER_SC_INIT(_name)				\
	.sc_key = &STATIC_CALL_KEY(_name),			\
	.sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),

#define __BPF_DISPATCHER_SC(name)				\
	DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)

#define __BPF_DISPATCHER_CALL(name)				\
	static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)

#define __BPF_DISPATCHER_UPDATE(_d, _new)			\
	__static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))

#else
#define __BPF_DISPATCHER_SC_INIT(name)
#define __BPF_DISPATCHER_SC(name)
#define __BPF_DISPATCHER_CALL(name)		bpf_func(ctx, insnsi)
#define __BPF_DISPATCHER_UPDATE(_d, _new)
#endif

#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
	__BPF_DISPATCHER_SC_INIT(_name##_call)			\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	__BPF_DISPATCHER_SC(name);					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func)					\
	{								\
		return __BPF_DISPATCHER_CALL(name);			\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);

#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func);					\
	extern struct bpf_dispatcher bpf_dispatcher_##name;

#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
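
/* Usage sketch: a subsystem defines one dispatcher and re-patches it when
 * the attached program changes; the XDP dispatcher in net/core/filter.c
 * follows this pattern ("example" is a hypothetical name):
 *
 *	DEFINE_BPF_DISPATCHER(example)		// in one .c file
 *	DECLARE_BPF_DISPATCHER(example)		// in a shared header
 *
 *	// run a program through the dispatcher
 *	ret = __bpf_prog_run(prog, ctx, BPF_DISPATCHER_FUNC(example));
 *
 *	// re-point the dispatcher at a new program
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(example), old, new);
 */
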
/* Called only from JIT-enabled code, so there's no need for stubs. */
void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 size);
void bpf_jit_uncharge_modmem(u32 size);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else
static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return NULL;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	return false;
}
#endif

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
	bool called : 1;
	bool verified : 1;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct btf_mod_pair {
	struct btf *btf;
	struct module *module;
};

struct bpf_kfunc_desc_tab;

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 used_btf_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	struct btf *attach_btf;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool dev_bound; /* Program is bound to the netdev. */
	bool offload_requested; /* Program is bound and offloaded to the netdev. */
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	bool xdp_has_frags;
	bool exception_cb;
	bool exception_boundary;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	struct bpf_kfunc_desc_tab *kfunc_tab;
	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct btf_mod_pair *used_btfs;
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	u32 verified_insns;
	int cgroup_atype; /* enum cgroup_bpf_attach_type */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
	unsigned int (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp);
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_token *token;
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace.  linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo.  It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	struct module *mod;
	u32 num_exentries;
	struct exception_table_entry *extable;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	u16			jited:1,	/* Is our filter JIT'ed? */
				jit_requested:1,/* archs need to JIT the prog */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1,	/* Do we need dst entry? */
				blinding_requested:1, /* needs constant blinding */
				blinded:1,	/* Was blinded */
				is_func:1,	/* program is a bpf function */
				kprobe_override:1, /* Do we override a kprobe? */
				has_callchain_buf:1, /* callchain buffer allocated? */
				enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
				call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
				call_get_func_ip:1, /* Do we call get_func_ip() */
				tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
	enum bpf_prog_type	type;		/* Type of BPF program */
	enum bpf_attach_type	expected_attach_type; /* For some prog types */
	u32			len;		/* Number of filter blocks */
	u32			jited_len;	/* Size of jited insns in bytes */
	u8			tag[BPF_TAG_SIZE];
	struct bpf_prog_stats __percpu *stats;
	int __percpu		*active;
	unsigned int		(*bpf_func)(const void *ctx,
					    const struct bpf_insn *insn);
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	/* Instructions for interpreter */
	union {
		DECLARE_FLEX_ARRAY(struct sock_filter, insns);
		DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
	};
};

struct bpf_array_aux {
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
	int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
			  struct bpf_map *old_map);
};

struct bpf_tramp_link {
	struct bpf_link link;
	struct hlist_node tramp_hlist;
	u64 cookie;
};

struct bpf_shim_tramp_link {
	struct bpf_tramp_link link;
	struct bpf_trampoline *trampoline;
};

struct bpf_tracing_link {
	struct bpf_tramp_link link;
	enum bpf_attach_type attach_type;
	struct bpf_trampoline *trampoline;
	struct bpf_prog *tgt_prog;
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};
1596 
1597 struct bpf_mount_opts {
1598 	umode_t mode;
1599 
1600 	/* BPF token-related delegation options */
1601 	u64 delegate_cmds;
1602 	u64 delegate_maps;
1603 	u64 delegate_progs;
1604 	u64 delegate_attachs;
1605 };
1606 
1607 struct bpf_token {
1608 	struct work_struct work;
1609 	atomic64_t refcnt;
1610 	struct user_namespace *userns;
1611 	u64 allowed_cmds;
1612 	u64 allowed_maps;
1613 	u64 allowed_progs;
1614 	u64 allowed_attachs;
1615 #ifdef CONFIG_SECURITY
1616 	void *security;
1617 #endif
1618 };
1619 
1620 struct bpf_struct_ops_value;
1621 struct btf_member;
1622 
1623 #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
1624 /**
1625  * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
1626  *			   define a BPF_MAP_TYPE_STRUCT_OPS map type composed
1627  *			   of BPF_PROG_TYPE_STRUCT_OPS progs.
1628  * @verifier_ops: A structure of callbacks that are invoked by the verifier
1629  *		  when determining whether the struct_ops progs in the
1630  *		  struct_ops map are valid.
1631  * @init: A callback that is invoked a single time, and before any other
1632  *	  callback, to initialize the structure. A nonzero return value means
1633  *	  the subsystem could not be initialized.
1634  * @check_member: When defined, a callback invoked by the verifier to allow
1635  *		  the subsystem to determine if an entry in the struct_ops map
1636  *		  is valid. A nonzero return value means that the map is
1637  *		  invalid and should be rejected by the verifier.
1638  * @init_member: A callback that is invoked for each member of the struct_ops
1639  *		 map to allow the subsystem to initialize the member. A nonzero
1640  *		 value means the member could not be initialized. This callback
1641  *		 is exclusive with the @type, @type_id, @value_type, and
1642  *		 @value_id fields.
1643  * @reg: A callback that is invoked when the struct_ops map has been
1644  *	 initialized and is being attached to. Zero means the struct_ops map
1645  *	 has been successfully registered and is live. A nonzero return value
1646  *	 means the struct_ops map could not be registered.
1647  * @unreg: A callback that is invoked when the struct_ops map should be
1648  *	   unregistered.
1649  * @update: A callback that is invoked when the live struct_ops map is being
1650  *	    updated to contain new values. This callback is only invoked when
1651  *	    the struct_ops map is loaded with BPF_F_LINK. If not defined,
1652  *	    it is assumed that the struct_ops map cannot be updated.
1653  * @validate: A callback that is invoked after all of the members have been
1654  *	      initialized. This callback should perform static checks on the
1655  *	      map, meaning that it should either fail or succeed
1656  *	      deterministically. A struct_ops map that has been validated may
1657  *	      not necessarily succeed in being registered if the call to @reg
1658  *	      fails. For example, a valid struct_ops map may be loaded, but
1659  *	      then fail to be registered because another struct_ops map is
1660  *	      already active in the subsystem. For this
1661  *	      reason, if this callback is not defined, the check is skipped as
1662  *	      the struct_ops map will have final verification performed in
1663  *	      @reg.
1664  * @type: BTF type.
1665  * @value_type: Value type.
1666  * @name: The name of the struct bpf_struct_ops object.
1667  * @func_models: The BTF func models describing each member's signature.
1668  * @type_id: BTF type id.
1669  * @value_id: BTF value id.
1670  */
1671 struct bpf_struct_ops {
1672 	const struct bpf_verifier_ops *verifier_ops;
1673 	int (*init)(struct btf *btf);
1674 	int (*check_member)(const struct btf_type *t,
1675 			    const struct btf_member *member,
1676 			    const struct bpf_prog *prog);
1677 	int (*init_member)(const struct btf_type *t,
1678 			   const struct btf_member *member,
1679 			   void *kdata, const void *udata);
1680 	int (*reg)(void *kdata);
1681 	void (*unreg)(void *kdata);
1682 	int (*update)(void *kdata, void *old_kdata);
1683 	int (*validate)(void *kdata);
1684 	const struct btf_type *type;
1685 	const struct btf_type *value_type;
1686 	const char *name;
1687 	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
1688 	u32 type_id;
1689 	u32 value_id;
1690 };
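
/* A subsystem instantiates one bpf_struct_ops per BTF-defined ops struct.
 * For example, TCP congestion control registers one for
 * struct tcp_congestion_ops (see net/ipv4/bpf_tcp_ca.c).
 */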
1691 
1692 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
1693 #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
1694 const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
1695 void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
1696 bool bpf_struct_ops_get(const void *kdata);
1697 void bpf_struct_ops_put(const void *kdata);
1698 int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
1699 				       void *value);
1700 int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
1701 				      struct bpf_tramp_link *link,
1702 				      const struct btf_func_model *model,
1703 				      void *image, void *image_end);
1704 static inline bool bpf_try_module_get(const void *data, struct module *owner)
1705 {
1706 	if (owner == BPF_MODULE_OWNER)
1707 		return bpf_struct_ops_get(data);
1708 	else
1709 		return try_module_get(owner);
1710 }
1711 static inline void bpf_module_put(const void *data, struct module *owner)
1712 {
1713 	if (owner == BPF_MODULE_OWNER)
1714 		bpf_struct_ops_put(data);
1715 	else
1716 		module_put(owner);
1717 }
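
/* A minimal usage sketch (the 'ops' pointer, 'owner' and some_callback()
 * are hypothetical): a caller pins the struct_ops implementation around
 * an indirect call.
 *
 *	if (!bpf_try_module_get(ops, owner))
 *		return -ENODEV;
 *	err = ops->some_callback();
 *	bpf_module_put(ops, owner);
 *
 * For a BPF-backed implementation, owner is BPF_MODULE_OWNER, so the
 * refcount lands on the struct_ops map rather than on a kernel module.
 */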
1718 int bpf_struct_ops_link_create(union bpf_attr *attr);
1719 
1720 #ifdef CONFIG_NET
1721 /* Define it here to avoid the use of a forward declaration */
1722 struct bpf_dummy_ops_state {
1723 	int val;
1724 };
1725 
1726 struct bpf_dummy_ops {
1727 	int (*test_1)(struct bpf_dummy_ops_state *cb);
1728 	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
1729 		      char a3, unsigned long a4);
1730 	int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
1731 };
1732 
1733 int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
1734 			    union bpf_attr __user *uattr);
1735 #endif
1736 #else
1737 static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
1738 {
1739 	return NULL;
1740 }
1741 static inline void bpf_struct_ops_init(struct btf *btf,
1742 				       struct bpf_verifier_log *log)
1743 {
1744 }
1745 static inline bool bpf_try_module_get(const void *data, struct module *owner)
1746 {
1747 	return try_module_get(owner);
1748 }
1749 static inline void bpf_module_put(const void *data, struct module *owner)
1750 {
1751 	module_put(owner);
1752 }
1753 static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
1754 						     void *key,
1755 						     void *value)
1756 {
1757 	return -EINVAL;
1758 }
1759 static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
1760 {
1761 	return -EOPNOTSUPP;
1762 }
1763 
1764 #endif
1765 
1766 #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
1767 int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
1768 				    int cgroup_atype);
1769 void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
1770 #else
1771 static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
1772 						  int cgroup_atype)
1773 {
1774 	return -EOPNOTSUPP;
1775 }
1776 static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
1777 {
1778 }
1779 #endif
1780 
1781 struct bpf_array {
1782 	struct bpf_map map;
1783 	u32 elem_size;
1784 	u32 index_mask;
1785 	struct bpf_array_aux *aux;
1786 	union {
1787 		DECLARE_FLEX_ARRAY(char, value) __aligned(8);
1788 		DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8);
1789 		DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8);
1790 	};
1791 };
1792 
1793 #define BPF_COMPLEXITY_LIMIT_INSNS      1000000 /* yes. 1M insns */
1794 #define MAX_TAIL_CALL_CNT 33
1795 
1796 /* Maximum number of loops for bpf_loop and bpf_iter_num.
1797  * It's an enum to expose it (and thus make it discoverable) through BTF.
1798  */
1799 enum {
1800 	BPF_MAX_LOOPS = 8 * 1024 * 1024,
1801 };
1802 
1803 #define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
1804 				 BPF_F_RDONLY_PROG |	\
1805 				 BPF_F_WRONLY |		\
1806 				 BPF_F_WRONLY_PROG)
1807 
1808 #define BPF_MAP_CAN_READ	BIT(0)
1809 #define BPF_MAP_CAN_WRITE	BIT(1)
1810 
1811 /* Maximum number of user-producer ring buffer samples that can be drained in
1812  * a call to bpf_user_ringbuf_drain().
1813  */
1814 #define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)
1815 
1816 static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
1817 {
1818 	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
1819 
1820 	/* The combination BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
1821 	 * not valid (see bpf_map_flags_access_ok()).
1822 	 */
1823 	if (access_flags & BPF_F_RDONLY_PROG)
1824 		return BPF_MAP_CAN_READ;
1825 	else if (access_flags & BPF_F_WRONLY_PROG)
1826 		return BPF_MAP_CAN_WRITE;
1827 	else
1828 		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
1829 }
1830 
1831 static inline bool bpf_map_flags_access_ok(u32 access_flags)
1832 {
1833 	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
1834 	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
1835 }
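
/* Illustrative sketch (the call sites here are hypothetical): creation-time
 * flags are sanity-checked first, then per-access checks consult the caps.
 *
 *	if (!bpf_map_flags_access_ok(attr->map_flags))
 *		return -EINVAL;
 *	if (!(bpf_map_flags_to_cap(map) & BPF_MAP_CAN_WRITE))
 *		return -EACCES;
 *
 * i.e. a program-side write to a BPF_F_RDONLY_PROG map is refused.
 */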
1836 
1837 struct bpf_event_entry {
1838 	struct perf_event *event;
1839 	struct file *perf_file;
1840 	struct file *map_file;
1841 	struct rcu_head rcu;
1842 };
1843 
1844 static inline bool map_type_contains_progs(struct bpf_map *map)
1845 {
1846 	return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
1847 	       map->map_type == BPF_MAP_TYPE_DEVMAP ||
1848 	       map->map_type == BPF_MAP_TYPE_CPUMAP;
1849 }
1850 
1851 bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
1852 int bpf_prog_calc_tag(struct bpf_prog *fp);
1853 
1854 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
1855 const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
1856 
1857 typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
1858 					unsigned long off, unsigned long len);
1859 typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
1860 					const struct bpf_insn *src,
1861 					struct bpf_insn *dst,
1862 					struct bpf_prog *prog,
1863 					u32 *target_size);
1864 
1865 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
1866 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
1867 
1868 /* an array of programs to be executed under rcu_read_lock().
1869  *
1870  * Typical usage:
1871  * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
1872  *
1873  * the structure returned by bpf_prog_array_alloc() should be populated
1874  * with program pointers and the last pointer must be NULL.
1875  * The user has to keep refcnt on the program and make sure the program
1876  * is removed from the array before bpf_prog_put().
1877  * The 'struct bpf_prog_array *' should only be replaced with xchg()
1878  * since other cpus are walking the array of pointers in parallel.
1879  */
1880 struct bpf_prog_array_item {
1881 	struct bpf_prog *prog;
1882 	union {
1883 		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
1884 		u64 bpf_cookie;
1885 	};
1886 };
1887 
1888 struct bpf_prog_array {
1889 	struct rcu_head rcu;
1890 	struct bpf_prog_array_item items[];
1891 };
1892 
1893 struct bpf_empty_prog_array {
1894 	struct bpf_prog_array hdr;
1895 	struct bpf_prog *null_prog;
1896 };
1897 
1898 /* To avoid allocating an empty bpf_prog_array for cgroups that
1899  * don't have a bpf program attached, use one global 'bpf_empty_prog_array'.
1900  * It will not be modified by the caller of bpf_prog_array_alloc()
1901  * (since the caller requested prog_cnt == 0). The pointer should still be
1902  * 'freed' via bpf_prog_array_free(), which recognizes the global and skips it.
1903  */
1904 extern struct bpf_empty_prog_array bpf_empty_prog_array;
1905 
1906 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
1907 void bpf_prog_array_free(struct bpf_prog_array *progs);
1908 /* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
1909 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
1910 int bpf_prog_array_length(struct bpf_prog_array *progs);
1911 bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
1912 int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
1913 				__u32 __user *prog_ids, u32 cnt);
1914 
1915 void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
1916 				struct bpf_prog *old_prog);
1917 int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
1918 int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
1919 			     struct bpf_prog *prog);
1920 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
1921 			     u32 *prog_ids, u32 request_cnt,
1922 			     u32 *prog_cnt);
1923 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
1924 			struct bpf_prog *exclude_prog,
1925 			struct bpf_prog *include_prog,
1926 			u64 bpf_cookie,
1927 			struct bpf_prog_array **new_array);
1928 
1929 struct bpf_run_ctx {};
1930 
1931 struct bpf_cg_run_ctx {
1932 	struct bpf_run_ctx run_ctx;
1933 	const struct bpf_prog_array_item *prog_item;
1934 	int retval;
1935 };
1936 
1937 struct bpf_trace_run_ctx {
1938 	struct bpf_run_ctx run_ctx;
1939 	u64 bpf_cookie;
1940 	bool is_uprobe;
1941 };
1942 
1943 struct bpf_tramp_run_ctx {
1944 	struct bpf_run_ctx run_ctx;
1945 	u64 bpf_cookie;
1946 	struct bpf_run_ctx *saved_run_ctx;
1947 };
1948 
1949 static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
1950 {
1951 	struct bpf_run_ctx *old_ctx = NULL;
1952 
1953 #ifdef CONFIG_BPF_SYSCALL
1954 	old_ctx = current->bpf_ctx;
1955 	current->bpf_ctx = new_ctx;
1956 #endif
1957 	return old_ctx;
1958 }
1959 
1960 static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
1961 {
1962 #ifdef CONFIG_BPF_SYSCALL
1963 	current->bpf_ctx = old_ctx;
1964 #endif
1965 }
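
/* The pair above brackets program execution, as bpf_prog_run_array()
 * below does. A minimal sketch:
 *
 *	struct bpf_trace_run_ctx run_ctx = { .bpf_cookie = cookie };
 *	struct bpf_run_ctx *old_run_ctx;
 *
 *	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 *	ret = run_prog(prog, ctx);
 *	bpf_reset_run_ctx(old_run_ctx);
 */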
1966 
1967 /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
1968 #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE			(1 << 0)
1969 /* BPF program asks to set CN on the packet. */
1970 #define BPF_RET_SET_CN						(1 << 0)
1971 
1972 typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
1973 
1974 static __always_inline u32
1975 bpf_prog_run_array(const struct bpf_prog_array *array,
1976 		   const void *ctx, bpf_prog_run_fn run_prog)
1977 {
1978 	const struct bpf_prog_array_item *item;
1979 	const struct bpf_prog *prog;
1980 	struct bpf_run_ctx *old_run_ctx;
1981 	struct bpf_trace_run_ctx run_ctx;
1982 	u32 ret = 1;
1983 
1984 	RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
1985 
1986 	if (unlikely(!array))
1987 		return ret;
1988 
1989 	run_ctx.is_uprobe = false;
1990 
1991 	migrate_disable();
1992 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
1993 	item = &array->items[0];
1994 	while ((prog = READ_ONCE(item->prog))) {
1995 		run_ctx.bpf_cookie = item->bpf_cookie;
1996 		ret &= run_prog(prog, ctx);
1997 		item++;
1998 	}
1999 	bpf_reset_run_ctx(old_run_ctx);
2000 	migrate_enable();
2001 	return ret;
2002 }
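
/* Per the RCU_LOCKDEP_WARN() above, the caller supplies the rcu read
 * section, e.g. (sketch with a hypothetical 'progs' __rcu pointer):
 *
 *	rcu_read_lock();
 *	ret = bpf_prog_run_array(rcu_dereference(progs), ctx, bpf_prog_run);
 *	rcu_read_unlock();
 */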
2003 
2004 /* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
2005  *
2006  * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
2007  * overall. As a result, we must use bpf_prog_array_free_sleepable()
2008  * so that the array is freed only after a tasks_trace rcu grace period.
2009  *
2010  * When a non-sleepable program is inside the array, we take the rcu read
2011  * section and disable preemption for that program alone, so it can access
2012  * rcu-protected dynamically sized maps.
2013  */
2014 static __always_inline u32
2015 bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
2016 			  const void *ctx, bpf_prog_run_fn run_prog)
2017 {
2018 	const struct bpf_prog_array_item *item;
2019 	const struct bpf_prog *prog;
2020 	const struct bpf_prog_array *array;
2021 	struct bpf_run_ctx *old_run_ctx;
2022 	struct bpf_trace_run_ctx run_ctx;
2023 	u32 ret = 1;
2024 
2025 	might_fault();
2026 
2027 	rcu_read_lock_trace();
2028 	migrate_disable();
2029 
2030 	run_ctx.is_uprobe = true;
2031 
2032 	array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
2033 	if (unlikely(!array))
2034 		goto out;
2035 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2036 	item = &array->items[0];
2037 	while ((prog = READ_ONCE(item->prog))) {
2038 		if (!prog->aux->sleepable)
2039 			rcu_read_lock();
2040 
2041 		run_ctx.bpf_cookie = item->bpf_cookie;
2042 		ret &= run_prog(prog, ctx);
2043 		item++;
2044 
2045 		if (!prog->aux->sleepable)
2046 			rcu_read_unlock();
2047 	}
2048 	bpf_reset_run_ctx(old_run_ctx);
2049 out:
2050 	migrate_enable();
2051 	rcu_read_unlock_trace();
2052 	return ret;
2053 }
2054 
2055 #ifdef CONFIG_BPF_SYSCALL
2056 DECLARE_PER_CPU(int, bpf_prog_active);
2057 extern struct mutex bpf_stats_enabled_mutex;
2058 
2059 /*
2060  * Block execution of BPF programs attached to instrumentation (perf,
2061  * kprobes, tracepoints) to prevent deadlocks on map operations as any of
2062  * these events can happen inside a region which holds a map bucket lock
2063  * and can deadlock on it.
2064  */
2065 static inline void bpf_disable_instrumentation(void)
2066 {
2067 	migrate_disable();
2068 	this_cpu_inc(bpf_prog_active);
2069 }
2070 
2071 static inline void bpf_enable_instrumentation(void)
2072 {
2073 	this_cpu_dec(bpf_prog_active);
2074 	migrate_enable();
2075 }
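
/* Sketch: a map update done from the syscall path brackets its bucket
 * lock with the pair above, so instrumentation programs fired inside the
 * locked region are skipped instead of recursing into the same map
 * (the bucket names are hypothetical):
 *
 *	bpf_disable_instrumentation();
 *	raw_spin_lock_irqsave(&b->lock, flags);
 *	...update the bucket...
 *	raw_spin_unlock_irqrestore(&b->lock, flags);
 *	bpf_enable_instrumentation();
 */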
2076 
2077 extern const struct super_operations bpf_super_ops;
2078 extern const struct file_operations bpf_map_fops;
2079 extern const struct file_operations bpf_prog_fops;
2080 extern const struct file_operations bpf_iter_fops;
2081 
2082 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2083 	extern const struct bpf_prog_ops _name ## _prog_ops; \
2084 	extern const struct bpf_verifier_ops _name ## _verifier_ops;
2085 #define BPF_MAP_TYPE(_id, _ops) \
2086 	extern const struct bpf_map_ops _ops;
2087 #define BPF_LINK_TYPE(_id, _name)
2088 #include <linux/bpf_types.h>
2089 #undef BPF_PROG_TYPE
2090 #undef BPF_MAP_TYPE
2091 #undef BPF_LINK_TYPE
2092 
2093 extern const struct bpf_prog_ops bpf_offload_prog_ops;
2094 extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
2095 extern const struct bpf_verifier_ops xdp_analyzer_ops;
2096 
2097 struct bpf_prog *bpf_prog_get(u32 ufd);
2098 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2099 				       bool attach_drv);
2100 void bpf_prog_add(struct bpf_prog *prog, int i);
2101 void bpf_prog_sub(struct bpf_prog *prog, int i);
2102 void bpf_prog_inc(struct bpf_prog *prog);
2103 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
2104 void bpf_prog_put(struct bpf_prog *prog);
2105 
2106 void bpf_prog_free_id(struct bpf_prog *prog);
2107 void bpf_map_free_id(struct bpf_map *map);
2108 
2109 struct btf_field *btf_record_find(const struct btf_record *rec,
2110 				  u32 offset, u32 field_mask);
2111 void btf_record_free(struct btf_record *rec);
2112 void bpf_map_free_record(struct bpf_map *map);
2113 struct btf_record *btf_record_dup(const struct btf_record *rec);
2114 bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
2115 void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
2116 void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
2117 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
2118 
2119 struct bpf_map *bpf_map_get(u32 ufd);
2120 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
2121 struct bpf_map *__bpf_map_get(struct fd f);
2122 void bpf_map_inc(struct bpf_map *map);
2123 void bpf_map_inc_with_uref(struct bpf_map *map);
2124 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
2125 struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
2126 void bpf_map_put_with_uref(struct bpf_map *map);
2127 void bpf_map_put(struct bpf_map *map);
2128 void *bpf_map_area_alloc(u64 size, int numa_node);
2129 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
2130 void bpf_map_area_free(void *base);
2131 bool bpf_map_write_active(const struct bpf_map *map);
2132 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
2133 int  generic_map_lookup_batch(struct bpf_map *map,
2134 			      const union bpf_attr *attr,
2135 			      union bpf_attr __user *uattr);
2136 int  generic_map_update_batch(struct bpf_map *map, struct file *map_file,
2137 			      const union bpf_attr *attr,
2138 			      union bpf_attr __user *uattr);
2139 int  generic_map_delete_batch(struct bpf_map *map,
2140 			      const union bpf_attr *attr,
2141 			      union bpf_attr __user *uattr);
2142 struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
2143 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
2144 
2145 #ifdef CONFIG_MEMCG_KMEM
2146 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
2147 			   int node);
2148 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
2149 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
2150 		       gfp_t flags);
2151 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
2152 				    size_t align, gfp_t flags);
2153 #else
2154 static inline void *
2155 bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
2156 		     int node)
2157 {
2158 	return kmalloc_node(size, flags, node);
2159 }
2160 
2161 static inline void *
2162 bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags)
2163 {
2164 	return kzalloc(size, flags);
2165 }
2166 
2167 static inline void *
2168 bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags)
2169 {
2170 	return kvcalloc(n, size, flags);
2171 }
2172 
2173 static inline void __percpu *
2174 bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align,
2175 		     gfp_t flags)
2176 {
2177 	return __alloc_percpu_gfp(size, align, flags);
2178 }
2179 #endif
2180 
2181 static inline int
2182 bpf_map_init_elem_count(struct bpf_map *map)
2183 {
2184 	size_t size = sizeof(*map->elem_count), align = size;
2185 	gfp_t flags = GFP_USER | __GFP_NOWARN;
2186 
2187 	map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
2188 	if (!map->elem_count)
2189 		return -ENOMEM;
2190 
2191 	return 0;
2192 }
2193 
2194 static inline void
2195 bpf_map_free_elem_count(struct bpf_map *map)
2196 {
2197 	free_percpu(map->elem_count);
2198 }
2199 
2200 static inline void bpf_map_inc_elem_count(struct bpf_map *map)
2201 {
2202 	this_cpu_inc(*map->elem_count);
2203 }
2204 
2205 static inline void bpf_map_dec_elem_count(struct bpf_map *map)
2206 {
2207 	this_cpu_dec(*map->elem_count);
2208 }
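
/* Sketch of the elem_count lifecycle for a map implementation that opts
 * in (the my_* callbacks are hypothetical):
 *
 *	my_map_alloc():    if (bpf_map_init_elem_count(map)) ...fail...;
 *	my_update_elem():  bpf_map_inc_elem_count(map);
 *	my_delete_elem():  bpf_map_dec_elem_count(map);
 *	my_map_free():     bpf_map_free_elem_count(map);
 */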
2209 
2210 extern int sysctl_unprivileged_bpf_disabled;
2211 
2212 bool bpf_token_capable(const struct bpf_token *token, int cap);
2213 
2214 static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
2215 {
2216 	return bpf_token_capable(token, CAP_PERFMON);
2217 }
2218 
2219 static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
2220 {
2221 	return bpf_token_capable(token, CAP_PERFMON);
2222 }
2223 
2224 static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
2225 {
2226 	return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
2227 }
2228 
2229 static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
2230 {
2231 	return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
2232 }
2233 
2234 int bpf_map_new_fd(struct bpf_map *map, int flags);
2235 int bpf_prog_new_fd(struct bpf_prog *prog);
2236 
2237 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2238 		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
2239 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
2240 int bpf_link_settle(struct bpf_link_primer *primer);
2241 void bpf_link_cleanup(struct bpf_link_primer *primer);
2242 void bpf_link_inc(struct bpf_link *link);
2243 void bpf_link_put(struct bpf_link *link);
2244 int bpf_link_new_fd(struct bpf_link *link);
2245 struct bpf_link *bpf_link_get_from_fd(u32 ufd);
2246 struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
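
/* Sketch of the usual link creation sequence built from the helpers
 * above ('ml', 'my_link_ops' and my_attach() are hypothetical):
 *
 *	bpf_link_init(&ml->link, BPF_LINK_TYPE_UNSPEC, &my_link_ops, prog);
 *	err = bpf_link_prime(&ml->link, &primer);
 *	if (err) { kfree(ml); return err; }
 *	err = my_attach(ml);
 *	if (err) { bpf_link_cleanup(&primer); return err; }
 *	return bpf_link_settle(&primer);
 *
 * bpf_link_settle() exposes the fd to userspace; bpf_link_cleanup()
 * unwinds a primed-but-unattached link instead.
 */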
2247 
2248 void bpf_token_inc(struct bpf_token *token);
2249 void bpf_token_put(struct bpf_token *token);
2250 int bpf_token_create(union bpf_attr *attr);
2251 struct bpf_token *bpf_token_get_from_fd(u32 ufd);
2252 
2253 bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
2254 bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
2255 bool bpf_token_allow_prog_type(const struct bpf_token *token,
2256 			       enum bpf_prog_type prog_type,
2257 			       enum bpf_attach_type attach_type);
2258 
2259 int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
2260 int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
2261 struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir,
2262 			    umode_t mode);
2263 
2264 #define BPF_ITER_FUNC_PREFIX "bpf_iter_"
2265 #define DEFINE_BPF_ITER_FUNC(target, args...)			\
2266 	extern int bpf_iter_ ## target(args);			\
2267 	int __init bpf_iter_ ## target(args) { return 0; }
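
/* For example, the task iterator target declares its ctx signature as
 * (mirroring kernel/bpf/task_iter.c):
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 */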
2268 
2269 /*
2270  * The task type of iterators.
2271  *
2272  * BPF task iterators can be parameterized to visit only a subset
2273  * of tasks.
2274  *
2275  * BPF_TASK_ITER_ALL (default)
2276  *	Iterate over resources of every task.
2277  *
2278  * BPF_TASK_ITER_TID
2279  *	Iterate over resources of a task/tid.
2280  *
2281  * BPF_TASK_ITER_TGID
2282  *	Iterate over resources of every task of a process / task group.
2283  */
2284 enum bpf_iter_task_type {
2285 	BPF_TASK_ITER_ALL = 0,
2286 	BPF_TASK_ITER_TID,
2287 	BPF_TASK_ITER_TGID,
2288 };
2289 
2290 struct bpf_iter_aux_info {
2291 	/* for map_elem iter */
2292 	struct bpf_map *map;
2293 
2294 	/* for cgroup iter */
2295 	struct {
2296 		struct cgroup *start; /* starting cgroup */
2297 		enum bpf_cgroup_iter_order order;
2298 	} cgroup;
2299 	struct {
2300 		enum bpf_iter_task_type	type;
2301 		u32 pid;
2302 	} task;
2303 };
2304 
2305 typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
2306 					union bpf_iter_link_info *linfo,
2307 					struct bpf_iter_aux_info *aux);
2308 typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
2309 typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
2310 					struct seq_file *seq);
2311 typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
2312 					 struct bpf_link_info *info);
2313 typedef const struct bpf_func_proto *
2314 (*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
2315 			     const struct bpf_prog *prog);
2316 
2317 enum bpf_iter_feature {
2318 	BPF_ITER_RESCHED	= BIT(0),
2319 };
2320 
2321 #define BPF_ITER_CTX_ARG_MAX 2
2322 struct bpf_iter_reg {
2323 	const char *target;
2324 	bpf_iter_attach_target_t attach_target;
2325 	bpf_iter_detach_target_t detach_target;
2326 	bpf_iter_show_fdinfo_t show_fdinfo;
2327 	bpf_iter_fill_link_info_t fill_link_info;
2328 	bpf_iter_get_func_proto_t get_func_proto;
2329 	u32 ctx_arg_info_size;
2330 	u32 feature;
2331 	struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
2332 	const struct bpf_iter_seq_info *seq_info;
2333 };
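
/* A target registers itself by passing a bpf_iter_reg to
 * bpf_iter_reg_target() below, e.g. (sketch; the my_* names are
 * hypothetical):
 *
 *	static const struct bpf_iter_reg my_iter_reg = {
 *		.target		= "my_target",
 *		.feature	= BPF_ITER_RESCHED,
 *		.seq_info	= &my_seq_info,
 *	};
 *
 *	return bpf_iter_reg_target(&my_iter_reg);
 */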
2334 
2335 struct bpf_iter_meta {
2336 	__bpf_md_ptr(struct seq_file *, seq);
2337 	u64 session_id;
2338 	u64 seq_num;
2339 };
2340 
2341 struct bpf_iter__bpf_map_elem {
2342 	__bpf_md_ptr(struct bpf_iter_meta *, meta);
2343 	__bpf_md_ptr(struct bpf_map *, map);
2344 	__bpf_md_ptr(void *, key);
2345 	__bpf_md_ptr(void *, value);
2346 };
2347 
2348 int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
2349 void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
2350 bool bpf_iter_prog_supported(struct bpf_prog *prog);
2351 const struct bpf_func_proto *
2352 bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
2353 int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
2354 int bpf_iter_new_fd(struct bpf_link *link);
2355 bool bpf_link_is_iter(struct bpf_link *link);
2356 struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
2357 int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
2358 void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
2359 			      struct seq_file *seq);
2360 int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
2361 				struct bpf_link_info *info);
2362 
2363 int map_set_for_each_callback_args(struct bpf_verifier_env *env,
2364 				   struct bpf_func_state *caller,
2365 				   struct bpf_func_state *callee);
2366 
2367 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
2368 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
2369 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2370 			   u64 flags);
2371 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
2372 			    u64 flags);
2373 
2374 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
2375 
2376 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
2377 				 void *key, void *value, u64 map_flags);
2378 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
2379 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2380 				void *key, void *value, u64 map_flags);
2381 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
2382 
2383 int bpf_get_file_flag(int flags);
2384 int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
2385 			     size_t actual_size);
2386 
2387 /* verify correctness of eBPF program */
2388 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size);
2389 
2390 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
2391 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
2392 #endif
2393 
2394 struct btf *bpf_get_btf_vmlinux(void);
2395 
2396 /* Map specifics */
2397 struct xdp_frame;
2398 struct sk_buff;
2399 struct bpf_dtab_netdev;
2400 struct bpf_cpu_map_entry;
2401 
2402 void __dev_flush(void);
2403 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
2404 		    struct net_device *dev_rx);
2405 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
2406 		    struct net_device *dev_rx);
2407 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
2408 			  struct bpf_map *map, bool exclude_ingress);
2409 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
2410 			     struct bpf_prog *xdp_prog);
2411 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
2412 			   struct bpf_prog *xdp_prog, struct bpf_map *map,
2413 			   bool exclude_ingress);
2414 
2415 void __cpu_map_flush(void);
2416 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
2417 		    struct net_device *dev_rx);
2418 int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
2419 			     struct sk_buff *skb);
2420 
2421 /* Return the map's NUMA node as specified by userspace */
2422 static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
2423 {
2424 	return (attr->map_flags & BPF_F_NUMA_NODE) ?
2425 		attr->numa_node : NUMA_NO_NODE;
2426 }
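
/* Typically fed straight into the area allocator (sketch):
 *
 *	array = bpf_map_area_alloc(size, bpf_map_attr_numa_node(attr));
 */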
2427 
2428 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
2429 int array_map_alloc_check(union bpf_attr *attr);
2430 
2431 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
2432 			  union bpf_attr __user *uattr);
2433 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
2434 			  union bpf_attr __user *uattr);
2435 int bpf_prog_test_run_tracing(struct bpf_prog *prog,
2436 			      const union bpf_attr *kattr,
2437 			      union bpf_attr __user *uattr);
2438 int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
2439 				     const union bpf_attr *kattr,
2440 				     union bpf_attr __user *uattr);
2441 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
2442 			     const union bpf_attr *kattr,
2443 			     union bpf_attr __user *uattr);
2444 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
2445 				const union bpf_attr *kattr,
2446 				union bpf_attr __user *uattr);
2447 int bpf_prog_test_run_nf(struct bpf_prog *prog,
2448 			 const union bpf_attr *kattr,
2449 			 union bpf_attr __user *uattr);
2450 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
2451 		    const struct bpf_prog *prog,
2452 		    struct bpf_insn_access_aux *info);
2453 
2454 static inline bool bpf_tracing_ctx_access(int off, int size,
2455 					  enum bpf_access_type type)
2456 {
2457 	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
2458 		return false;
2459 	if (type != BPF_READ)
2460 		return false;
2461 	if (off % size != 0)
2462 		return false;
2463 	return true;
2464 }
2465 
2466 static inline bool bpf_tracing_btf_ctx_access(int off, int size,
2467 					      enum bpf_access_type type,
2468 					      const struct bpf_prog *prog,
2469 					      struct bpf_insn_access_aux *info)
2470 {
2471 	if (!bpf_tracing_ctx_access(off, size, type))
2472 		return false;
2473 	return btf_ctx_access(off, size, type, prog, info);
2474 }
2475 
2476 int btf_struct_access(struct bpf_verifier_log *log,
2477 		      const struct bpf_reg_state *reg,
2478 		      int off, int size, enum bpf_access_type atype,
2479 		      u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name);
2480 bool btf_struct_ids_match(struct bpf_verifier_log *log,
2481 			  const struct btf *btf, u32 id, int off,
2482 			  const struct btf *need_btf, u32 need_type_id,
2483 			  bool strict);
2484 
2485 int btf_distill_func_proto(struct bpf_verifier_log *log,
2486 			   struct btf *btf,
2487 			   const struct btf_type *func_proto,
2488 			   const char *func_name,
2489 			   struct btf_func_model *m);
2490 
2491 struct bpf_reg_state;
2492 int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
2493 				struct bpf_reg_state *regs);
2494 int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
2495 			   struct bpf_reg_state *regs);
2496 int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
2497 			  struct bpf_reg_state *reg, bool is_ex_cb);
2498 int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
2499 			 struct btf *btf, const struct btf_type *t);
2500 const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
2501 				    int comp_idx, const char *tag_key);
2502 
2503 struct bpf_prog *bpf_prog_by_id(u32 id);
2504 struct bpf_link *bpf_link_by_id(u32 id);
2505 
2506 const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id,
2507 						 const struct bpf_prog *prog);
2508 void bpf_task_storage_free(struct task_struct *task);
2509 void bpf_cgrp_storage_free(struct cgroup *cgroup);
2510 bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
2511 const struct btf_func_model *
2512 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2513 			 const struct bpf_insn *insn);
2514 int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
2515 		       u16 btf_fd_idx, u8 **func_addr);
2516 
2517 struct bpf_core_ctx {
2518 	struct bpf_verifier_log *log;
2519 	const struct btf *btf;
2520 };
2521 
2522 bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
2523 				const struct bpf_reg_state *reg,
2524 				const char *field_name, u32 btf_id, const char *suffix);
2525 
2526 bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
2527 			       const struct btf *reg_btf, u32 reg_id,
2528 			       const struct btf *arg_btf, u32 arg_id);
2529 
2530 int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
2531 		   int relo_idx, void *insn);
2532 
2533 static inline bool unprivileged_ebpf_enabled(void)
2534 {
2535 	return !sysctl_unprivileged_bpf_disabled;
2536 }
2537 
2538 /* Not all bpf prog types have a bpf_ctx.
2539  * For the bpf prog types that initialize the bpf_ctx,
2540  * this function can be used to decide if a kernel function
2541  * is being called by a bpf program.
2542  */
2543 static inline bool has_current_bpf_ctx(void)
2544 {
2545 	return !!current->bpf_ctx;
2546 }
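
/* Sketch: a kernel function reachable both normally and as a BPF helper
 * can branch on it (do_bpf_variant() is hypothetical):
 *
 *	if (has_current_bpf_ctx())
 *		return do_bpf_variant();
 */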
2547 
2548 void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
2549 
2550 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
2551 		     enum bpf_dynptr_type type, u32 offset, u32 size);
2552 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
2553 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
2554 
2555 bool dev_check_flush(void);
2556 bool cpu_map_check_flush(void);
2557 #else /* !CONFIG_BPF_SYSCALL */
2558 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
2559 {
2560 	return ERR_PTR(-EOPNOTSUPP);
2561 }
2562 
2563 static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
2564 						     enum bpf_prog_type type,
2565 						     bool attach_drv)
2566 {
2567 	return ERR_PTR(-EOPNOTSUPP);
2568 }
2569 
2570 static inline void bpf_prog_add(struct bpf_prog *prog, int i)
2571 {
2572 }
2573 
2574 static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
2575 {
2576 }
2577 
2578 static inline void bpf_prog_put(struct bpf_prog *prog)
2579 {
2580 }
2581 
2582 static inline void bpf_prog_inc(struct bpf_prog *prog)
2583 {
2584 }
2585 
2586 static inline struct bpf_prog *__must_check
2587 bpf_prog_inc_not_zero(struct bpf_prog *prog)
2588 {
2589 	return ERR_PTR(-EOPNOTSUPP);
2590 }
2591 
2592 static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2593 				 const struct bpf_link_ops *ops,
2594 				 struct bpf_prog *prog)
2595 {
2596 }
2597 
2598 static inline int bpf_link_prime(struct bpf_link *link,
2599 				 struct bpf_link_primer *primer)
2600 {
2601 	return -EOPNOTSUPP;
2602 }
2603 
2604 static inline int bpf_link_settle(struct bpf_link_primer *primer)
2605 {
2606 	return -EOPNOTSUPP;
2607 }
2608 
2609 static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
2610 {
2611 }
2612 
2613 static inline void bpf_link_inc(struct bpf_link *link)
2614 {
2615 }
2616 
2617 static inline void bpf_link_put(struct bpf_link *link)
2618 {
2619 }
2620 
2621 static inline int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags)
2622 {
2623 	return -EOPNOTSUPP;
2624 }
2625 
2626 static inline bool bpf_token_capable(const struct bpf_token *token, int cap)
2627 {
2628 	return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
2629 }
2630 
2631 static inline void bpf_token_inc(struct bpf_token *token)
2632 {
2633 }
2634 
2635 static inline void bpf_token_put(struct bpf_token *token)
2636 {
2637 }
2638 
2639 static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
2640 {
2641 	return ERR_PTR(-EOPNOTSUPP);
2642 }
2643 
2644 static inline void __dev_flush(void)
2645 {
2646 }
2647 
2648 struct xdp_frame;
2649 struct bpf_dtab_netdev;
2650 struct bpf_cpu_map_entry;
2651 
2652 static inline
2653 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
2654 		    struct net_device *dev_rx)
2655 {
2656 	return 0;
2657 }
2658 
2659 static inline
2660 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
2661 		    struct net_device *dev_rx)
2662 {
2663 	return 0;
2664 }
2665 
2666 static inline
2667 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
2668 			  struct bpf_map *map, bool exclude_ingress)
2669 {
2670 	return 0;
2671 }
2672 
2673 struct sk_buff;
2674 
2675 static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
2676 					   struct sk_buff *skb,
2677 					   struct bpf_prog *xdp_prog)
2678 {
2679 	return 0;
2680 }
2681 
2682 static inline
2683 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
2684 			   struct bpf_prog *xdp_prog, struct bpf_map *map,
2685 			   bool exclude_ingress)
2686 {
2687 	return 0;
2688 }
2689 
2690 static inline void __cpu_map_flush(void)
2691 {
2692 }
2693 
2694 static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
2695 				  struct xdp_frame *xdpf,
2696 				  struct net_device *dev_rx)
2697 {
2698 	return 0;
2699 }
2700 
2701 static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
2702 					   struct sk_buff *skb)
2703 {
2704 	return -EOPNOTSUPP;
2705 }
2706 
2707 static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
2708 				enum bpf_prog_type type)
2709 {
2710 	return ERR_PTR(-EOPNOTSUPP);
2711 }
2712 
2713 static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
2714 					const union bpf_attr *kattr,
2715 					union bpf_attr __user *uattr)
2716 {
2717 	return -ENOTSUPP;
2718 }
2719 
2720 static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
2721 					const union bpf_attr *kattr,
2722 					union bpf_attr __user *uattr)
2723 {
2724 	return -ENOTSUPP;
2725 }
2726 
2727 static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
2728 					    const union bpf_attr *kattr,
2729 					    union bpf_attr __user *uattr)
2730 {
2731 	return -ENOTSUPP;
2732 }
2733 
2734 static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
2735 						   const union bpf_attr *kattr,
2736 						   union bpf_attr __user *uattr)
2737 {
2738 	return -ENOTSUPP;
2739 }
2740 
2741 static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
2742 					      const union bpf_attr *kattr,
2743 					      union bpf_attr __user *uattr)
2744 {
2745 	return -ENOTSUPP;
2746 }
2747 
2748 static inline void bpf_map_put(struct bpf_map *map)
2749 {
2750 }
2751 
2752 static inline struct bpf_prog *bpf_prog_by_id(u32 id)
2753 {
2754 	return ERR_PTR(-ENOTSUPP);
2755 }
2756 
2757 static inline int btf_struct_access(struct bpf_verifier_log *log,
2758 				    const struct bpf_reg_state *reg,
2759 				    int off, int size, enum bpf_access_type atype,
2760 				    u32 *next_btf_id, enum bpf_type_flag *flag,
2761 				    const char **field_name)
2762 {
2763 	return -EACCES;
2764 }
2765 
2766 static inline const struct bpf_func_proto *
2767 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2768 {
2769 	return NULL;
2770 }
2771 
2772 static inline void bpf_task_storage_free(struct task_struct *task)
2773 {
2774 }
2775 
2776 static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
2777 {
2778 	return false;
2779 }
2780 
2781 static inline const struct btf_func_model *
2782 bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
2783 			 const struct bpf_insn *insn)
2784 {
2785 	return NULL;
2786 }
2787 
2788 static inline int
2789 bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
2790 		   u16 btf_fd_idx, u8 **func_addr)
2791 {
2792 	return -ENOTSUPP;
2793 }
2794 
2795 static inline bool unprivileged_ebpf_enabled(void)
2796 {
2797 	return false;
2798 }
2799 
2800 static inline bool has_current_bpf_ctx(void)
2801 {
2802 	return false;
2803 }
2804 
2805 static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
2806 {
2807 }
2808 
2809 static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
2810 {
2811 }
2812 
2813 static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
2814 				   enum bpf_dynptr_type type, u32 offset, u32 size)
2815 {
2816 }
2817 
2818 static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
2819 {
2820 }
2821 
2822 static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
2823 {
2824 }
2825 #endif /* CONFIG_BPF_SYSCALL */
2826 
2827 static __always_inline int
2828 bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
2829 {
2830 	int ret = -EFAULT;
2831 
2832 	if (IS_ENABLED(CONFIG_BPF_EVENTS))
2833 		ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
2834 	if (unlikely(ret < 0))
2835 		memset(dst, 0, size);
2836 	return ret;
2837 }
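
/* Because the destination is zeroed on fault, callers may consume the
 * buffer unconditionally and use the return value only for reporting
 * (sketch):
 *
 *	ret = bpf_probe_read_kernel_common(buf, sizeof(buf), unsafe_ptr);
 *	if (ret < 0)
 *		...record the fault; buf is all zeroes...
 */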
2838 
2839 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
2840 			  struct btf_mod_pair *used_btfs, u32 len);
2841 
2842 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
2843 						 enum bpf_prog_type type)
2844 {
2845 	return bpf_prog_get_type_dev(ufd, type, false);
2846 }
2847 
2848 void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2849 			  struct bpf_map **used_maps, u32 len);
2850 
2851 bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
2852 
2853 int bpf_prog_offload_compile(struct bpf_prog *prog);
2854 void bpf_prog_dev_bound_destroy(struct bpf_prog *prog);
2855 int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
2856 			       struct bpf_prog *prog);
2857 
2858 int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
2859 
2860 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
2861 int bpf_map_offload_update_elem(struct bpf_map *map,
2862 				void *key, void *value, u64 flags);
2863 int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
2864 int bpf_map_offload_get_next_key(struct bpf_map *map,
2865 				 void *key, void *next_key);
2866 
2867 bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
2868 
2869 struct bpf_offload_dev *
2870 bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
2871 void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
2872 void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
2873 int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
2874 				    struct net_device *netdev);
2875 void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
2876 				       struct net_device *netdev);
2877 bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
2878 
2879 void unpriv_ebpf_notify(int new_state);
2880 
2881 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
2882 int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
2883 			      struct bpf_prog_aux *prog_aux);
2884 void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
2885 int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
2886 int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
2887 void bpf_dev_bound_netdev_unregister(struct net_device *dev);
2888 
2889 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
2890 {
2891 	return aux->dev_bound;
2892 }
2893 
2894 static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
2895 {
2896 	return aux->offload_requested;
2897 }
2898 
2899 bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs);
2900 
2901 static inline bool bpf_map_is_offloaded(struct bpf_map *map)
2902 {
2903 	return unlikely(map->ops == &bpf_map_offload_ops);
2904 }
2905 
2906 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
2907 void bpf_map_offload_map_free(struct bpf_map *map);
2908 u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
2909 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2910 			      const union bpf_attr *kattr,
2911 			      union bpf_attr __user *uattr);
2912 
2913 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
2914 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
2915 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
2916 int sock_map_bpf_prog_query(const union bpf_attr *attr,
2917 			    union bpf_attr __user *uattr);
2918 
2919 void sock_map_unhash(struct sock *sk);
2920 void sock_map_destroy(struct sock *sk);
2921 void sock_map_close(struct sock *sk, long timeout);
2922 #else
2923 static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
2924 					    struct bpf_prog_aux *prog_aux)
2925 {
2926 	return -EOPNOTSUPP;
2927 }
2928 
2929 static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
2930 						u32 func_id)
2931 {
2932 	return NULL;
2933 }
2934 
2935 static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
2936 					  union bpf_attr *attr)
2937 {
2938 	return -EOPNOTSUPP;
2939 }
2940 
2941 static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
2942 					     struct bpf_prog *old_prog)
2943 {
2944 	return -EOPNOTSUPP;
2945 }
2946 
2947 static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
2948 {
2949 }
2950 
2951 static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
2952 {
2953 	return false;
2954 }
2955 
2956 static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
2957 {
2958 	return false;
2959 }
2960 
2961 static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
2962 {
2963 	return false;
2964 }
2965 
2966 static inline bool bpf_map_is_offloaded(struct bpf_map *map)
2967 {
2968 	return false;
2969 }
2970 
2971 static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
2972 {
2973 	return ERR_PTR(-EOPNOTSUPP);
2974 }
2975 
2976 static inline void bpf_map_offload_map_free(struct bpf_map *map)
2977 {
2978 }
2979 
2980 static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
2981 {
2982 	return 0;
2983 }
2984 
2985 static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
2986 					    const union bpf_attr *kattr,
2987 					    union bpf_attr __user *uattr)
2988 {
2989 	return -ENOTSUPP;
2990 }
2991 
2992 #ifdef CONFIG_BPF_SYSCALL
2993 static inline int sock_map_get_from_fd(const union bpf_attr *attr,
2994 				       struct bpf_prog *prog)
2995 {
2996 	return -EINVAL;
2997 }
2998 
2999 static inline int sock_map_prog_detach(const union bpf_attr *attr,
3000 				       enum bpf_prog_type ptype)
3001 {
3002 	return -EOPNOTSUPP;
3003 }
3004 
3005 static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
3006 					   u64 flags)
3007 {
3008 	return -EOPNOTSUPP;
3009 }
3010 
3011 static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
3012 					  union bpf_attr __user *uattr)
3013 {
3014 	return -EINVAL;
3015 }
3016 #endif /* CONFIG_BPF_SYSCALL */
3017 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
3018 
3019 static __always_inline void
3020 bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
3021 {
3022 	const struct bpf_prog_array_item *item;
3023 	struct bpf_prog *prog;
3024 
3025 	if (unlikely(!array))
3026 		return;
3027 
3028 	item = &array->items[0];
3029 	while ((prog = READ_ONCE(item->prog))) {
3030 		bpf_prog_inc_misses_counter(prog);
3031 		item++;
3032 	}
3033 }
3034 
3035 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
3036 void bpf_sk_reuseport_detach(struct sock *sk);
3037 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
3038 				       void *value);
3039 int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
3040 				       void *value, u64 map_flags);
3041 #else
3042 static inline void bpf_sk_reuseport_detach(struct sock *sk)
3043 {
3044 }
3045 
3046 #ifdef CONFIG_BPF_SYSCALL
3047 static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
3048 						     void *key, void *value)
3049 {
3050 	return -EOPNOTSUPP;
3051 }
3052 
3053 static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
3054 						     void *key, void *value,
3055 						     u64 map_flags)
3056 {
3057 	return -EOPNOTSUPP;
3058 }
3059 #endif /* CONFIG_BPF_SYSCALL */
3060 #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
3061 
3062 /* verifier prototypes for helper functions called from eBPF programs */
3063 extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
3064 extern const struct bpf_func_proto bpf_map_update_elem_proto;
3065 extern const struct bpf_func_proto bpf_map_delete_elem_proto;
3066 extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;
extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
extern const struct bpf_func_proto bpf_set_retval_proto;
extern const struct bpf_func_proto bpf_get_retval_proto;
extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
extern const struct bpf_func_proto bpf_cgrp_storage_get_proto;
extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto;

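/*
 * Each *_proto object above describes one helper to the verifier: the
 * callable entry point plus the expected return and argument types. A
 * hedged sketch of how such an object is defined, mirroring the style
 * of kernel/bpf/helpers.c (treat the exact field values here as
 * illustrative, not authoritative):
 *
 *	BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
 *	{
 *		return map->ops->map_peek_elem(map, value);
 *	}
 *
 *	const struct bpf_func_proto bpf_map_peek_elem_proto = {
 *		.func		= bpf_map_peek_elem,
 *		.gpl_only	= false,
 *		.ret_type	= RET_INTEGER,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
 *	};
 */
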
const struct bpf_func_proto *tracing_prog_func_proto(
  enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

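/*
 * These take five u64 arguments so one signature fits the eBPF calling
 * convention; unused arguments are simply ignored, which lets the same
 * entry points back both the cBPF ancillary loads and eBPF helpers.
 * Illustrative calls only (the bodies live in kernel/bpf/core.c):
 *
 *	bpf_user_rnd_init_once();
 *	u64 rnd = bpf_user_rnd_u32(0, 0, 0, 0, 0);
 *	u64 cpu = bpf_get_raw_cpu_id(0, 0, 0, 0, 0);
 */
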
#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
			       struct bpf_dynptr_kern *ptr);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
					     struct bpf_dynptr_kern *ptr)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_NET */

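/*
 * These hooks slot into a program type's bpf_verifier_ops:
 * is_valid_access() vets each context load/store during verification,
 * and convert_ctx_access() later rewrites the access into real field
 * offsets. Minimal wiring sketch with a hypothetical ops table (the
 * real users live in net/core/filter.c):
 *
 *	const struct bpf_verifier_ops my_verifier_ops = {
 *		.is_valid_access	= bpf_sock_is_valid_access,
 *		.convert_ctx_access	= bpf_sock_convert_ctx_access,
 *	};
 */
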
#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	struct sock *migrating_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */
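
/*
 * A convert_ctx_access() implementation emits replacement instructions
 * into insn_buf and reports the real field width via *target_size.
 * Representative single-load rewrite, hedged sketch only (the actual
 * converters are in net/core/filter.c):
 *
 *	struct bpf_insn *insn = insn_buf;
 *
 *	switch (si->off) {
 *	case offsetof(struct bpf_sock, family):
 *		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
 *				      offsetof(struct sock_common,
 *					       skc_family));
 *		*target_size = sizeof_field(struct sock_common, skc_family);
 *		break;
 *	}
 */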

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);

void *bpf_arch_text_copy(void *dst, void *src, size_t len);
int bpf_arch_text_invalidate(void *dst, size_t len);

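/*
 * bpf_arch_text_poke() atomically retargets a call or jump site, e.g.
 * when a trampoline is attached to or detached from a function. Hedged
 * usage sketch with hypothetical trampoline addresses:
 *
 *	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_tramp, new_tramp);
 *
 * A NULL addr1 means the site is expected to currently be a nop; a
 * NULL addr2 pokes the site back to a nop.
 */
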
struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

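/*
 * btf_id_set_contains() binary-searches a sorted set of BTF IDs. Sets
 * are normally generated with the macros from <linux/btf_ids.h>;
 * sketch with a hypothetical allowlist:
 *
 *	BTF_SET_START(my_allowlist)
 *	BTF_ID(func, bpf_task_storage_get)
 *	BTF_SET_END(my_allowlist)
 *
 *	if (btf_id_set_contains(&my_allowlist, func_id))
 *		...
 */
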
#define MAX_BPRINTF_VARARGS		12
#define MAX_BPRINTF_BUF			1024

struct bpf_bprintf_data {
	u32 *bin_args;
	char *buf;
	bool get_bin_args;
	bool get_buf;
};

int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 num_args, struct bpf_bprintf_data *data);
void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);

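/*
 * bpf_bprintf_prepare() validates the format string and, depending on
 * the get_bin_args/get_buf flags, decodes raw_args into bin_args and
 * hands out a per-CPU output buffer; every successful prepare must be
 * paired with bpf_bprintf_cleanup(). Hedged sketch of the pattern used
 * by the printk-style helpers (see kernel/trace/bpf_trace.c):
 *
 *	struct bpf_bprintf_data data = {
 *		.get_bin_args	= true,
 *		.get_buf	= true,
 *	};
 *
 *	ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data);
 *	if (ret < 0)
 *		return ret;
 *	ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args);
 *	bpf_bprintf_cleanup(&data);
 */
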
#ifdef CONFIG_BPF_LSM
void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
void bpf_cgroup_atype_put(int cgroup_atype);
#else
static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
#endif /* CONFIG_BPF_LSM */

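/*
 * bpf_cgroup_atype_get()/bpf_cgroup_atype_put() pair like a refcount
 * get/put on the cgroup LSM attach-type slot derived from
 * attach_btf_id; the !CONFIG_BPF_LSM stubs compile away, so callers
 * need no ifdefs of their own.
 */
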
struct key;

#ifdef CONFIG_KEYS
struct bpf_key {
	struct key *key;
	bool has_ref;
};
#endif /* CONFIG_KEYS */

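/*
 * struct bpf_key wraps a kernel key for the keyring/signature kfuncs;
 * has_ref records whether @key holds a counted reference the program
 * must drop. Hedged BPF-program-side sketch (kfuncs implemented in
 * kernel/trace/bpf_trace.c):
 *
 *	struct bpf_key *bkey = bpf_lookup_user_key(serial, flags);
 *
 *	if (bkey) {
 *		...
 *		bpf_key_put(bkey);
 *	}
 */
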
static inline bool type_is_alloc(u32 type)
{
	return type & MEM_ALLOC;
}

static inline gfp_t bpf_memcg_flags(gfp_t flags)
{
	if (memcg_bpf_enabled())
		return flags | __GFP_ACCOUNT;
	return flags;
}
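
/*
 * Illustrative use, so allocation sites can stay unconditional on
 * whether BPF memcg accounting is enabled:
 *
 *	ptr = kzalloc(size, bpf_memcg_flags(GFP_USER | __GFP_NOWARN));
 */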
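/*
 * The main program always sits at func_idx 0; bpf-to-bpf subprograms
 * get non-zero indices, which distinguishes them even though they
 * share the same bpf_prog_aux.
 */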
static inline bool bpf_is_subprog(const struct bpf_prog *prog)
{
	return prog->aux->func_idx != 0;
}

#endif /* _LINUX_BPF_H */