/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>
#include <uapi/linux/filter.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/stddef.h>
#include <linux/bpfptr.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/static_call.h>
#include <linux/memcontrol.h>
#include <linux/cfi.h>
#include <asm/rqspinlock.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct bpf_arena;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
struct mem_cgroup;
struct module;
struct bpf_func_state;
struct ftrace_ops;
struct cgroup;
struct bpf_token;
struct user_namespace;
struct super_block;
struct inode;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
extern bool bpf_global_ma_set;

typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
typedef unsigned int (*bpf_func_t)(const void *,
				   const struct bpf_insn *);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};
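
/*
 * Illustrative sketch (not part of this header): a map-type-specific BPF
 * iterator typically pairs its seq_file operations with the private-data
 * init/fini callbacks above. All "example_" names are hypothetical.
 *
 *	static const struct bpf_iter_seq_info example_iter_seq_info = {
 *		.seq_ops		= &example_seq_ops,
 *		.init_seq_private	= example_init_seq_private,
 *		.fini_seq_private	= example_fini_seq_private,
 *		.seq_priv_size		= sizeof(struct example_iter_priv),
 *	};
 */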

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key,
					  void *value, u64 flags);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, struct file *map_file,
				const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	long (*map_delete_elem)(struct bpf_map *map, void *key);
	long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	long (*map_pop_elem)(struct bpf_map *map, void *value);
	long (*map_peek_elem)(struct bpf_map *map, void *value);
	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	/* If need_defer is true, the implementation should guarantee that
	 * the to-be-put element is still alive before the bpf program, which
	 * may manipulate it, exits.
	 */
	void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);
	unsigned long (*map_get_unmapped_area)(struct file *filep, unsigned long addr,
					       unsigned long len, unsigned long pgoff,
					       unsigned long flags);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* Misc helpers. */
	long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map. It is a runtime check to ensure that
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map are used during verification.
	 * When inserting an inner map at runtime, map_meta_equal has to
	 * ensure that the map being inserted has the same properties the
	 * verifier relied on earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
					      struct bpf_func_state *caller,
					      struct bpf_func_state *callee);
	long (*map_for_each_callback)(struct bpf_map *map,
				      bpf_callback_t callback_fn,
				      void *callback_ctx, u64 flags);

	u64 (*map_mem_usage)(const struct bpf_map *map);

	/* BTF id of struct allocated by map_alloc */
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};
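
/*
 * Illustrative sketch (hypothetical map type, not part of this header): a
 * minimal map implementation wires up at least allocation, teardown and
 * element access, plus map_meta_equal if it may be used as an inner map.
 * All "example_" names are made up; bpf_map_meta_equal is the generic
 * helper declared later in this header.
 *
 *	const struct bpf_map_ops example_map_ops = {
 *		.map_meta_equal		= bpf_map_meta_equal,
 *		.map_alloc_check	= example_map_alloc_check,
 *		.map_alloc		= example_map_alloc,
 *		.map_free		= example_map_free,
 *		.map_get_next_key	= example_map_get_next_key,
 *		.map_lookup_elem	= example_map_lookup_elem,
 *		.map_update_elem	= example_map_update_elem,
 *		.map_delete_elem	= example_map_delete_elem,
 *		.map_mem_usage		= example_map_mem_usage,
 *		.map_btf_id		= &example_map_btf_ids[0],
 *	};
 */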

enum {
	/* Support at most 11 fields in a BTF type */
	BTF_FIELDS_MAX = 11,
};

enum btf_field_type {
	BPF_SPIN_LOCK     = (1 << 0),
	BPF_TIMER         = (1 << 1),
	BPF_KPTR_UNREF    = (1 << 2),
	BPF_KPTR_REF      = (1 << 3),
	BPF_KPTR_PERCPU   = (1 << 4),
	BPF_KPTR          = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
	BPF_LIST_HEAD     = (1 << 5),
	BPF_LIST_NODE     = (1 << 6),
	BPF_RB_ROOT       = (1 << 7),
	BPF_RB_NODE       = (1 << 8),
	BPF_GRAPH_NODE    = BPF_RB_NODE | BPF_LIST_NODE,
	BPF_GRAPH_ROOT    = BPF_RB_ROOT | BPF_LIST_HEAD,
	BPF_REFCOUNT      = (1 << 9),
	BPF_WORKQUEUE     = (1 << 10),
	BPF_UPTR          = (1 << 11),
	BPF_RES_SPIN_LOCK = (1 << 12),
};

typedef void (*btf_dtor_kfunc_t)(void *);

struct btf_field_kptr {
	struct btf *btf;
	struct module *module;
	/* dtor used if btf_is_kernel(btf), otherwise the type is
	 * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
	 */
	btf_dtor_kfunc_t dtor;
	u32 btf_id;
};

struct btf_field_graph_root {
	struct btf *btf;
	u32 value_btf_id;
	u32 node_offset;
	struct btf_record *value_rec;
};

struct btf_field {
	u32 offset;
	u32 size;
	enum btf_field_type type;
	union {
		struct btf_field_kptr kptr;
		struct btf_field_graph_root graph_root;
	};
};

struct btf_record {
	u32 cnt;
	u32 field_mask;
	int spin_lock_off;
	int res_spin_lock_off;
	int timer_off;
	int wq_off;
	int refcount_off;
	struct btf_field fields[];
};
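
/*
 * For illustration (hypothetical map value layout): given a map value such
 * as
 *
 *	struct example_val {
 *		struct bpf_spin_lock lock;
 *		struct bpf_timer timer;
 *		__u64 counter;
 *	};
 *
 * the parsed btf_record would have cnt == 2, field_mask == (BPF_SPIN_LOCK |
 * BPF_TIMER), spin_lock_off == offsetof(struct example_val, lock) and
 * timer_off == offsetof(struct example_val, timer), with one btf_field
 * entry per special field. Plain data members like 'counter' are not
 * represented in the record.
 */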

/* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */
struct bpf_rb_node_kern {
	struct rb_node rb_node;
	void *owner;
} __attribute__((aligned(8)));

/* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */
struct bpf_list_node_kern {
	struct list_head list_head;
	void *owner;
} __attribute__((aligned(8)));

struct bpf_map {
	const struct bpf_map_ops *ops;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u64 map_extra; /* any per-map-type extra fields */
	u32 map_flags;
	u32 id;
	struct btf_record *record;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	u32 btf_vmlinux_value_type_id;
	struct btf *btf;
#ifdef CONFIG_MEMCG
	struct obj_cgroup *objcg;
#endif
	char name[BPF_OBJ_NAME_LEN];
	struct mutex freeze_mutex;
	atomic64_t refcnt;
	atomic64_t usercnt;
	/* rcu is used before freeing and work is only used during freeing */
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
	atomic64_t writecnt;
	/* 'Ownership' of a program-containing map is claimed by the first
	 * program that is going to use this map or by the first program
	 * whose FD is stored in the map, to make sure that all callers and
	 * callees have the same prog type, JITed flag and xdp_has_frags flag.
	 */
	struct {
		const struct btf_type *attach_func_proto;
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
		bool xdp_has_frags;
	} owner;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	bool free_after_mult_rcu_gp;
	bool free_after_rcu_gp;
	atomic64_t sleepable_refcnt;
	s64 __percpu *elem_count;
};

static inline const char *btf_field_type_name(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return "bpf_spin_lock";
	case BPF_RES_SPIN_LOCK:
		return "bpf_res_spin_lock";
	case BPF_TIMER:
		return "bpf_timer";
	case BPF_WORKQUEUE:
		return "bpf_wq";
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
		return "kptr";
	case BPF_KPTR_PERCPU:
		return "percpu_kptr";
	case BPF_UPTR:
		return "uptr";
	case BPF_LIST_HEAD:
		return "bpf_list_head";
	case BPF_LIST_NODE:
		return "bpf_list_node";
	case BPF_RB_ROOT:
		return "bpf_rb_root";
	case BPF_RB_NODE:
		return "bpf_rb_node";
	case BPF_REFCOUNT:
		return "bpf_refcount";
	default:
		WARN_ON_ONCE(1);
		return "unknown";
	}
}

static inline u32 btf_field_type_size(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return sizeof(struct bpf_spin_lock);
	case BPF_RES_SPIN_LOCK:
		return sizeof(struct bpf_res_spin_lock);
	case BPF_TIMER:
		return sizeof(struct bpf_timer);
	case BPF_WORKQUEUE:
		return sizeof(struct bpf_wq);
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
	case BPF_UPTR:
		return sizeof(u64);
	case BPF_LIST_HEAD:
		return sizeof(struct bpf_list_head);
	case BPF_LIST_NODE:
		return sizeof(struct bpf_list_node);
	case BPF_RB_ROOT:
		return sizeof(struct bpf_rb_root);
	case BPF_RB_NODE:
		return sizeof(struct bpf_rb_node);
	case BPF_REFCOUNT:
		return sizeof(struct bpf_refcount);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static inline u32 btf_field_type_align(enum btf_field_type type)
{
	switch (type) {
	case BPF_SPIN_LOCK:
		return __alignof__(struct bpf_spin_lock);
	case BPF_RES_SPIN_LOCK:
		return __alignof__(struct bpf_res_spin_lock);
	case BPF_TIMER:
		return __alignof__(struct bpf_timer);
	case BPF_WORKQUEUE:
		return __alignof__(struct bpf_wq);
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
	case BPF_UPTR:
		return __alignof__(u64);
	case BPF_LIST_HEAD:
		return __alignof__(struct bpf_list_head);
	case BPF_LIST_NODE:
		return __alignof__(struct bpf_list_node);
	case BPF_RB_ROOT:
		return __alignof__(struct bpf_rb_root);
	case BPF_RB_NODE:
		return __alignof__(struct bpf_rb_node);
	case BPF_REFCOUNT:
		return __alignof__(struct bpf_refcount);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
{
	memset(addr, 0, field->size);

	switch (field->type) {
	case BPF_REFCOUNT:
		refcount_set((refcount_t *)addr, 1);
		break;
	case BPF_RB_NODE:
		RB_CLEAR_NODE((struct rb_node *)addr);
		break;
	case BPF_LIST_HEAD:
	case BPF_LIST_NODE:
		INIT_LIST_HEAD((struct list_head *)addr);
		break;
	case BPF_RB_ROOT:
		/* RB_ROOT_CACHED 0-inits, no need to do anything after memset */
	case BPF_SPIN_LOCK:
	case BPF_RES_SPIN_LOCK:
	case BPF_TIMER:
	case BPF_WORKQUEUE:
	case BPF_KPTR_UNREF:
	case BPF_KPTR_REF:
	case BPF_KPTR_PERCPU:
	case BPF_UPTR:
		break;
	default:
		WARN_ON_ONCE(1);
		return;
	}
}

static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type)
{
	if (IS_ERR_OR_NULL(rec))
		return false;
	return rec->field_mask & type;
}

static inline void bpf_obj_init(const struct btf_record *rec, void *obj)
{
	int i;

	if (IS_ERR_OR_NULL(rec))
		return;
	for (i = 0; i < rec->cnt; i++)
		bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset);
}

/* 'dst' must be a temporary buffer and should not point to memory that is being
 * used in parallel by a bpf program or bpf syscall, otherwise the access from
 * the bpf program or bpf syscall may be corrupted by the reinitialization,
 * leading to weird problems. Even if 'dst' is newly allocated from the bpf
 * memory allocator, it is still possible for 'dst' to be used in parallel by a
 * bpf program or bpf syscall.
 */
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_init(map->record, dst);
}
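
/*
 * Usage sketch (hypothetical caller, following the rule above): copy the
 * value into a private buffer first via copy_map_value() (defined below),
 * then reinitialize the special fields in that buffer.
 *
 *	void *tmp = kmalloc(map->value_size, GFP_KERNEL);
 *
 *	if (tmp) {
 *		copy_map_value(map, tmp, src_value);
 *		check_and_init_map_value(map, tmp);
 *		...
 *	}
 */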

/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
 * forced to use 'long' read/writes to try to atomically copy long counters.
 * Best-effort only. No barriers here, since it _will_ race with concurrent
 * updates from BPF programs. Called from bpf syscall and mostly used with
 * size 8 or 16 bytes, so ask compiler to inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		data_race(*ldst++ = *lsrc++);
}

/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
static inline void bpf_obj_memcpy(struct btf_record *rec,
				  void *dst, void *src, u32 size,
				  bool long_memcpy)
{
	u32 curr_off = 0;
	int i;

	if (IS_ERR_OR_NULL(rec)) {
		if (long_memcpy)
			bpf_long_memcpy(dst, src, round_up(size, 8));
		else
			memcpy(dst, src, size);
		return;
	}

	for (i = 0; i < rec->cnt; i++) {
		u32 next_off = rec->fields[i].offset;
		u32 sz = next_off - curr_off;

		memcpy(dst + curr_off, src + curr_off, sz);
		curr_off += rec->fields[i].size + sz;
	}
	memcpy(dst + curr_off, src + curr_off, size - curr_off);
}
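
/*
 * Worked example (hypothetical layout): for a 32-byte map value whose only
 * special field is a struct bpf_timer (16 bytes) at offset 8, the loop
 * above copies bytes [0, 8), skips the timer at [8, 24), and the final
 * memcpy copies the remaining bytes [24, 32). fields[] is sorted by
 * offset, which is what makes the single forward pass work.
 */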

static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->record, dst, src, map->value_size, false);
}

static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src)
{
	bpf_obj_memcpy(map->record, dst, src, map->value_size, true);
}

static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src)
{
	unsigned long *src_uptr, *dst_uptr;
	const struct btf_field *field;
	int i;

	if (!btf_record_has_field(rec, BPF_UPTR))
		return;

	for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) {
		if (field->type != BPF_UPTR)
			continue;

		src_uptr = src + field->offset;
		dst_uptr = dst + field->offset;
		swap(*src_uptr, *dst_uptr);
	}
}

static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size)
{
	u32 curr_off = 0;
	int i;

	if (IS_ERR_OR_NULL(rec)) {
		memset(dst, 0, size);
		return;
	}

	for (i = 0; i < rec->cnt; i++) {
		u32 next_off = rec->fields[i].offset;
		u32 sz = next_off - curr_off;

		memset(dst + curr_off, 0, sz);
		curr_off += rec->fields[i].size + sz;
	}
	memset(dst + curr_off, 0, size - curr_off);
}

static inline void zero_map_value(struct bpf_map *map, void *dst)
{
	bpf_obj_memzero(map->record, dst, map->value_size);
}

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
void bpf_timer_cancel_and_free(void *timer);
void bpf_wq_cancel_and_free(void *timer);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
			struct bpf_spin_lock *spin_lock);
void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
		      struct bpf_spin_lock *spin_lock);
u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena);
u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* bpf_type_flag contains a set of flags that are applicable to the values of
 * arg_type, ret_type and reg_type. For example, a pointer value may be null,
 * or a memory region may be read-only. We classify types into two categories:
 * base types and extended types. Extended types are base types combined with
 * a type flag.
 *
 * Currently there are no more than 32 base types in arg_type, ret_type and
 * reg_types.
 */
#define BPF_BASE_TYPE_BITS	8

enum bpf_type_flag {
	/* PTR may be NULL. */
	PTR_MAYBE_NULL		= BIT(0 + BPF_BASE_TYPE_BITS),

	/* MEM is read-only. When applied on bpf_arg, it indicates the arg is
	 * compatible with both mutable and immutable memory.
	 */
	MEM_RDONLY		= BIT(1 + BPF_BASE_TYPE_BITS),

	/* MEM points to BPF ring buffer reservation. */
	MEM_RINGBUF		= BIT(2 + BPF_BASE_TYPE_BITS),

	/* MEM is in user address space. */
	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),

	/* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged
	 * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In
	 * order to drop this tag, it must be passed into bpf_per_cpu_ptr()
	 * or bpf_this_cpu_ptr(), which will return the pointer corresponding
	 * to the specified cpu.
	 */
	MEM_PERCPU		= BIT(4 + BPF_BASE_TYPE_BITS),

	/* Indicates that the argument will be released. */
	OBJ_RELEASE		= BIT(5 + BPF_BASE_TYPE_BITS),

	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
	 * unreferenced and referenced kptr loaded from map value using a load
	 * instruction, so that they can only be dereferenced but not escape the
	 * BPF program into the kernel (i.e. cannot be passed as arguments to
	 * kfunc or bpf helpers).
	 */
	PTR_UNTRUSTED		= BIT(6 + BPF_BASE_TYPE_BITS),

	/* MEM can be uninitialized. */
	MEM_UNINIT		= BIT(7 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to memory local to the bpf program. */
	DYNPTR_TYPE_LOCAL	= BIT(8 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to a kernel-produced ringbuf record. */
	DYNPTR_TYPE_RINGBUF	= BIT(9 + BPF_BASE_TYPE_BITS),

	/* Size is known at compile time. */
	MEM_FIXED_SIZE		= BIT(10 + BPF_BASE_TYPE_BITS),

	/* MEM is of an allocated object of type in program BTF. This is used to
	 * tag PTR_TO_BTF_ID allocated using bpf_obj_new.
	 */
	MEM_ALLOC		= BIT(11 + BPF_BASE_TYPE_BITS),

	/* PTR was passed from the kernel in a trusted context, and may be
	 * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions.
	 * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above.
	 * PTR_UNTRUSTED refers to a kptr that was read directly from a map
	 * without invoking bpf_kptr_xchg(). What we really need to know is
	 * whether a pointer is safe to pass to a kfunc or BPF helper function.
	 * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF
	 * helpers, they do not cover all possible instances of unsafe
	 * pointers. For example, a pointer that was obtained from walking a
	 * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the
	 * fact that it may be NULL, invalid, etc. This is due to backwards
	 * compatibility requirements, as this was the behavior that was first
	 * introduced when kptrs were added. The behavior is now considered
	 * deprecated, and PTR_UNTRUSTED will eventually be removed.
	 *
	 * PTR_TRUSTED, on the other hand, is a pointer that the kernel
	 * guarantees to be valid and safe to pass to kfuncs and BPF helpers.
	 * For example, pointers passed to tracepoint arguments are considered
	 * PTR_TRUSTED, as are pointers that are passed to struct_ops
	 * callbacks. As alluded to above, pointers that are obtained from
	 * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a
	 * struct task_struct *task is PTR_TRUSTED, then accessing
	 * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored
	 * in a BPF register. Similarly, pointers passed to certain program
	 * types such as kretprobes are not guaranteed to be valid, as they may
	 * for example contain an object that was recently freed.
	 */
	PTR_TRUSTED		= BIT(12 + BPF_BASE_TYPE_BITS),

	/* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */
	MEM_RCU			= BIT(13 + BPF_BASE_TYPE_BITS),

	/* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning.
	 * Currently only valid for linked-list and rbtree nodes. If the nodes
	 * have a bpf_refcount_field, they must be tagged MEM_RCU as well.
	 */
	NON_OWN_REF		= BIT(14 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to sk_buff */
	DYNPTR_TYPE_SKB		= BIT(15 + BPF_BASE_TYPE_BITS),

	/* DYNPTR points to xdp_buff */
	DYNPTR_TYPE_XDP		= BIT(16 + BPF_BASE_TYPE_BITS),

	/* Memory must be aligned on some architectures, used in combination with
	 * MEM_FIXED_SIZE.
	 */
	MEM_ALIGNED		= BIT(17 + BPF_BASE_TYPE_BITS),

	/* MEM is being written to, often combined with MEM_UNINIT. Non-presence
	 * of MEM_WRITE means that MEM is only being read. MEM_WRITE without
	 * MEM_UNINIT means that memory needs to be initialized since it is also
	 * read.
	 */
	MEM_WRITE		= BIT(18 + BPF_BASE_TYPE_BITS),

	__BPF_TYPE_FLAG_MAX,
	__BPF_TYPE_LAST_FLAG	= __BPF_TYPE_FLAG_MAX - 1,
};

#define DYNPTR_TYPE_FLAG_MASK	(DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \
				 | DYNPTR_TYPE_XDP)

/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT	(1UL << BPF_BASE_TYPE_BITS)

/* Max number of all types. */
#define BPF_TYPE_LIMIT		(__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1))
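
/*
 * For illustration: an extended type is a base type plus flags in the high
 * bits. E.g. PTR_TO_MAP_VALUE_OR_NULL (defined below) is PTR_MAYBE_NULL |
 * PTR_TO_MAP_VALUE; masking with (BPF_BASE_TYPE_LIMIT - 1) recovers the
 * base type:
 *
 *	u32 type = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE;
 *	u32 base = type & (BPF_BASE_TYPE_LIMIT - 1);	// == PTR_TO_MAP_VALUE
 *	bool maybe_null = type & PTR_MAYBE_NULL;	// true
 */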
75817a52670SAlexei Starovoitov
75917a52670SAlexei Starovoitov /* function argument constraints */
76016d1e00cSJoanne Koong enum bpf_arg_type {
76116d1e00cSJoanne Koong ARG_DONTCARE = 0, /* unused argument in helper function */
76217a52670SAlexei Starovoitov
76339f19ebbSAlexei Starovoitov /* the following constraints used to prototype
7642edc3de6SAlexei Starovoitov * bpf_map_lookup/update/delete_elem() functions
765435faee1SDaniel Borkmann */
76639f19ebbSAlexei Starovoitov ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
76739f19ebbSAlexei Starovoitov ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
76880f1d68cSDaniel Borkmann ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
769608cd71aSAlexei Starovoitov
77080f1d68cSDaniel Borkmann /* Used to prototype bpf_memcmp() and other functions that access data
771d83525caSAlexei Starovoitov * on eBPF program stack
77246f8bc92SMartin KaFai Lau */
7736ac99e8fSMartin KaFai Lau ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
774a7658e1aSAlexei Starovoitov ARG_PTR_TO_ARENA,
775894f2a8bSKumar Kartikeya Dwivedi
776457f4436SAndrii Nakryiko ARG_CONST_SIZE, /* number of bytes accessed from memory */
7771df8f55aSMartin KaFai Lau ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
778eaa6bcb7SHao Luo
77969c087baSYonghong Song ARG_PTR_TO_CTX, /* pointer to context */
78048946bd6SHao Luo ARG_ANYTHING, /* any (initialized) argument is ok */
781fff13c4bSFlorent Revest ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
782b00628b1SAlexei Starovoitov ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
783d59232afSDave Marchevsky ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
78497e03f52SJoanne Koong ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
785f79e7ea5SLorenz Bauer ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */
786d639b9d1SHao Luo ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
78748946bd6SHao Luo ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
78848946bd6SHao Luo ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
78948946bd6SHao Luo ARG_PTR_TO_FUNC, /* pointer to a bpf program function */
79048946bd6SHao Luo ARG_PTR_TO_STACK, /* pointer to stack */
79148946bd6SHao Luo ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */
79248946bd6SHao Luo ARG_PTR_TO_TIMER, /* pointer to bpf_timer */
793c0a5a21cSKumar Kartikeya Dwivedi ARG_KPTR_XCHG_DEST, /* pointer to destination that kptrs are bpf_kptr_xchg'd into */
7946fad274fSDaniel Borkmann ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */
7956fad274fSDaniel Borkmann __BPF_ARG_TYPE_MAX,
79616d1e00cSJoanne Koong
7976fad274fSDaniel Borkmann /* Extended arg_types. */
798508362acSMaxim Mikityanskiy ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE,
799508362acSMaxim Mikityanskiy ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM,
80048946bd6SHao Luo ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX,
801d639b9d1SHao Luo ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
802d639b9d1SHao Luo ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
803d639b9d1SHao Luo ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
804d639b9d1SHao Luo /* Pointer to memory does not need to be initialized, since helper function
80517a52670SAlexei Starovoitov * fills all bytes or clears them in error case.
806d639b9d1SHao Luo */
80717a52670SAlexei Starovoitov ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | MEM_WRITE | ARG_PTR_TO_MEM,
80817a52670SAlexei Starovoitov /* Pointer to valid memory of size known at compile time. */
80917a52670SAlexei Starovoitov ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
81017a52670SAlexei Starovoitov
81117a52670SAlexei Starovoitov /* This must be the last entry. Its purpose is to ensure the enum is
8123e6a4b3eSRoman Gushchin * wide enough to hold the higher bits reserved for bpf_type_flag.
8133c480732SHao Luo */
8143c480732SHao Luo __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT,
8153c480732SHao Luo };
8162de2669bSKumar Kartikeya Dwivedi static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
81763d9b80dSHao Luo
8183ca1032aSKP Singh /* type of values returned from helper functions */
819d639b9d1SHao Luo enum bpf_return_type {
820d639b9d1SHao Luo RET_INTEGER, /* function returns integer */
8213c480732SHao Luo RET_VOID, /* function doesn't return anything */
8223c480732SHao Luo RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
8233c480732SHao Luo RET_PTR_TO_SOCKET, /* returns a pointer to a socket */
8243c480732SHao Luo RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */
8253c480732SHao Luo RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */
826894f2a8bSKumar Kartikeya Dwivedi RET_PTR_TO_MEM, /* returns a pointer to memory */
8272de2669bSKumar Kartikeya Dwivedi RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
8283c480732SHao Luo RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
8293f00c523SDavid Vernet __BPF_RET_TYPE_MAX,
8303c480732SHao Luo
831d639b9d1SHao Luo /* Extended ret_types. */
832d639b9d1SHao Luo RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE,
833d639b9d1SHao Luo RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
834d639b9d1SHao Luo RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
83517a52670SAlexei Starovoitov RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
836d639b9d1SHao Luo RET_PTR_TO_RINGBUF_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM,
83717a52670SAlexei Starovoitov RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MEM,
83809756af4SAlexei Starovoitov RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
83909756af4SAlexei Starovoitov RET_PTR_TO_BTF_ID_TRUSTED = PTR_TRUSTED | RET_PTR_TO_BTF_ID,
84009756af4SAlexei Starovoitov
84109756af4SAlexei Starovoitov /* This must be the last entry. Its purpose is to ensure the enum is
84209756af4SAlexei Starovoitov * wide enough to hold the higher bits reserved for bpf_type_flag.
84309756af4SAlexei Starovoitov */
84409756af4SAlexei Starovoitov __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT,
84536bbef52SDaniel Borkmann };
84601685c5bSYonghong Song static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
847ae010757SEduard Zingerman
848ae010757SEduard Zingerman /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
8495b5f51bfSEduard Zingerman * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
8505b5f51bfSEduard Zingerman * instructions after verifying
8515b5f51bfSEduard Zingerman */
852ae010757SEduard Zingerman struct bpf_func_proto {
85317a52670SAlexei Starovoitov u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
854a7658e1aSAlexei Starovoitov bool gpl_only;
855a7658e1aSAlexei Starovoitov bool pkt_access;
85617a52670SAlexei Starovoitov bool might_sleep;
85717a52670SAlexei Starovoitov /* set to true if helper follows contract for llvm
85817a52670SAlexei Starovoitov * attribute bpf_fastcall:
85917a52670SAlexei Starovoitov * - void functions do not scratch r0
86017a52670SAlexei Starovoitov * - functions taking N arguments scratch only registers r1-rN
86117a52670SAlexei Starovoitov */
862a7658e1aSAlexei Starovoitov bool allow_fastcall;
863a7658e1aSAlexei Starovoitov enum bpf_return_type ret_type;
8649436ef6eSLorenz Bauer union {
8659436ef6eSLorenz Bauer struct {
8669436ef6eSLorenz Bauer enum bpf_arg_type arg1_type;
8679436ef6eSLorenz Bauer enum bpf_arg_type arg2_type;
8689436ef6eSLorenz Bauer enum bpf_arg_type arg3_type;
8699436ef6eSLorenz Bauer enum bpf_arg_type arg4_type;
8709436ef6eSLorenz Bauer enum bpf_arg_type arg5_type;
8719436ef6eSLorenz Bauer };
8729436ef6eSLorenz Bauer enum bpf_arg_type arg_type[5];
873508362acSMaxim Mikityanskiy };
874508362acSMaxim Mikityanskiy union {
875508362acSMaxim Mikityanskiy struct {
876508362acSMaxim Mikityanskiy u32 *arg1_btf_id;
877508362acSMaxim Mikityanskiy u32 *arg2_btf_id;
878508362acSMaxim Mikityanskiy u32 *arg3_btf_id;
879508362acSMaxim Mikityanskiy u32 *arg4_btf_id;
880508362acSMaxim Mikityanskiy u32 *arg5_btf_id;
8819436ef6eSLorenz Bauer };
882af7ec138SYonghong Song u32 *arg_btf_id[5];
883eae2e83eSJiri Olsa struct {
884a7658e1aSAlexei Starovoitov size_t arg1_size;
88517a52670SAlexei Starovoitov size_t arg2_size;
88617a52670SAlexei Starovoitov size_t arg3_size;
88717a52670SAlexei Starovoitov size_t arg4_size;
88817a52670SAlexei Starovoitov size_t arg5_size;
88917a52670SAlexei Starovoitov };
89017a52670SAlexei Starovoitov size_t arg_size[5];
89117a52670SAlexei Starovoitov };
89217a52670SAlexei Starovoitov int *ret_btf_id; /* return value btf_id */
89317a52670SAlexei Starovoitov bool (*allowed)(const struct bpf_prog *prog);
89417a52670SAlexei Starovoitov };
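
/*
 * Illustrative sketch: a helper proto ties a C implementation to the
 * verifier's argument/return contract. A map-lookup-style helper would be
 * described roughly as follows ("example_" names are hypothetical;
 * BPF_CALL_2 comes from <linux/filter.h>):
 *
 *	BPF_CALL_2(example_lookup, struct bpf_map *, map, void *, key)
 *	{
 *		return (unsigned long)map->ops->map_lookup_elem(map, key);
 *	}
 *
 *	static const struct bpf_func_proto example_lookup_proto = {
 *		.func		= example_lookup,
 *		.gpl_only	= false,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 */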
89509756af4SAlexei Starovoitov
89609756af4SAlexei Starovoitov /* bpf_context is intentionally undefined structure. Pointer to bpf_context is
89719de99f7SAlexei Starovoitov * the first argument to eBPF programs.
898f1174f77SEdward Cree * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
899f1174f77SEdward Cree */
900f1174f77SEdward Cree struct bpf_context;
901f1174f77SEdward Cree
902f1174f77SEdward Cree enum bpf_access_type {
903f1174f77SEdward Cree BPF_READ = 1,
904f1174f77SEdward Cree BPF_WRITE = 2
905f1174f77SEdward Cree };
906f1174f77SEdward Cree
90719de99f7SAlexei Starovoitov /* types of values stored in eBPF registers */
90819de99f7SAlexei Starovoitov /* Pointer types represent:
909f1174f77SEdward Cree * pointer
91019de99f7SAlexei Starovoitov * pointer + imm
91119de99f7SAlexei Starovoitov * pointer + (u16) var
91219de99f7SAlexei Starovoitov * pointer + (u16) var + imm
913c25b2ae1SHao Luo * if (range > 0) then [ptr, ptr + range - off) is safe to access
914f1174f77SEdward Cree * if (id > 0) means that some 'var' was added
915de8f3a83SDaniel Borkmann * if (off > 0) means that 'imm' was added
916f1174f77SEdward Cree */
91719de99f7SAlexei Starovoitov enum bpf_reg_type {
918d58e468bSPetar Penkov NOT_INIT = 0, /* nothing was written into register */
919c64b7983SJoe Stringer SCALAR_VALUE, /* reg doesn't contain a valid pointer */
92046f8bc92SMartin KaFai Lau PTR_TO_CTX, /* reg points to bpf_context */
921655a51e5SMartin KaFai Lau CONST_PTR_TO_MAP, /* reg points to struct bpf_map */
9229df1c28bSMatt Mullins PTR_TO_MAP_VALUE, /* reg points to map element value */
923fada7fdcSJonathan Lemon PTR_TO_MAP_KEY, /* reg points to a map element key */
924ba5f4cfeSJohn Fastabend PTR_TO_STACK, /* reg == frame_pointer + offset */
925ba5f4cfeSJohn Fastabend PTR_TO_PACKET_META, /* skb->data - meta_len */
926ba5f4cfeSJohn Fastabend PTR_TO_PACKET, /* reg points to skb->data */
927ba5f4cfeSJohn Fastabend PTR_TO_PACKET_END, /* skb->data + headlen */
928ba5f4cfeSJohn Fastabend PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */
929ba5f4cfeSJohn Fastabend PTR_TO_SOCKET, /* reg points to struct bpf_sock */
930ba5f4cfeSJohn Fastabend PTR_TO_SOCK_COMMON, /* reg points to sock_common */
931ba5f4cfeSJohn Fastabend PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
932ba5f4cfeSJohn Fastabend PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
933ba5f4cfeSJohn Fastabend PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
934ba5f4cfeSJohn Fastabend /* PTR_TO_BTF_ID points to a kernel struct that does not need
935457f4436SAndrii Nakryiko * to be null checked by the BPF program. This does not imply the
9366082b6c3SAlexei Starovoitov * pointer is _not_ null and in practice this can easily be a null
93720b2aff4SHao Luo * pointer when reading pointer chains. The assumption is program
93869c087baSYonghong Song * context will handle null pointer dereference typically via fault
93927060531SKumar Kartikeya Dwivedi * handling. The verifier must keep this in mind and can make no
940e6ac2450SMartin KaFai Lau * assumptions about null or non-null when doing branch analysis.
941d639b9d1SHao Luo * Further, when passed into helpers the helpers can not, without
942c25b2ae1SHao Luo * additional context, assume the value is non-null.
943c25b2ae1SHao Luo */
944c25b2ae1SHao Luo PTR_TO_BTF_ID,
945c25b2ae1SHao Luo PTR_TO_MEM, /* reg points to valid memory region */
946c25b2ae1SHao Luo PTR_TO_ARENA,
947213a6952SMenglong Dong PTR_TO_BUF, /* reg points to a read/write buffer */
948213a6952SMenglong Dong PTR_TO_FUNC, /* reg points to a bpf program function */
949213a6952SMenglong Dong CONST_PTR_TO_DYNPTR, /* reg points to a const struct bpf_dynptr */
950213a6952SMenglong Dong __BPF_REG_TYPE_MAX,
951c25b2ae1SHao Luo
952c25b2ae1SHao Luo /* Extended reg_types. */
953d639b9d1SHao Luo PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE,
954d639b9d1SHao Luo PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET,
955d639b9d1SHao Luo PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON,
956d639b9d1SHao Luo PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK,
95719de99f7SAlexei Starovoitov /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
958d639b9d1SHao Luo * been checked for null. Used primarily to inform the verifier
95919de99f7SAlexei Starovoitov * an explicit null check is required for this struct.
96023994631SYonghong Song */
96123994631SYonghong Song PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID,
96223994631SYonghong Song
96323994631SYonghong Song /* This must be the last entry. Its purpose is to ensure the enum is
96423994631SYonghong Song * wide enough to hold the higher bits reserved for bpf_type_flag.
96592de3608SYonghong Song */
9669e15db66SAlexei Starovoitov __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT,
96723994631SYonghong Song };
96822dc4a0fSAndrii Nakryiko static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT);
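/* Illustrative sketch (not verifier code): a composed reg_type is usually
 * decomposed with the base_type()/type_flag() helpers defined alongside
 * enum bpf_type_flag earlier in this header, e.g.:
 *
 *	if (base_type(reg->type) == PTR_TO_SOCKET &&
 *	    (type_flag(reg->type) & PTR_MAYBE_NULL))
 *		... the program must null-check before dereferencing ...
 */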
96922dc4a0fSAndrii Nakryiko
9709e15db66SAlexei Starovoitov /* The information passed from prog-specific *_is_valid_access
971a687df20SAmery Hung * back to the verifier.
9729e15db66SAlexei Starovoitov */
97322dc4a0fSAndrii Nakryiko struct bpf_insn_access_aux {
9749e15db66SAlexei Starovoitov enum bpf_reg_type reg_type;
9755d99e198SXu Kuohai bool is_ldsx;
97623994631SYonghong Song union {
97723994631SYonghong Song int ctx_field_size;
978f96da094SDaniel Borkmann struct {
979f96da094SDaniel Borkmann struct btf *btf;
980f96da094SDaniel Borkmann u32 btf_id;
981f96da094SDaniel Borkmann u32 ref_obj_id;
982f96da094SDaniel Borkmann };
983f96da094SDaniel Borkmann };
9843feb263bSAndrii Nakryiko struct bpf_verifier_log *log; /* for verbose logs */
9853feb263bSAndrii Nakryiko bool is_retval; /* is accessing the function return value? */
9863feb263bSAndrii Nakryiko };
9873feb263bSAndrii Nakryiko
9883feb263bSAndrii Nakryiko static inline void
9893990ed4cSMartin KaFai Lau bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
9903990ed4cSMartin KaFai Lau {
9913feb263bSAndrii Nakryiko aux->ctx_field_size = size;
9923990ed4cSMartin KaFai Lau }
9933990ed4cSMartin KaFai Lau
99488044230SPeilin Ye static inline bool bpf_is_ldimm64(const struct bpf_insn *insn)
99588044230SPeilin Ye {
99688044230SPeilin Ye return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
99788044230SPeilin Ye }
99888044230SPeilin Ye
99988044230SPeilin Ye static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
100088044230SPeilin Ye {
100188044230SPeilin Ye return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
100288044230SPeilin Ye }
100388044230SPeilin Ye
100488044230SPeilin Ye /* Given a BPF_ATOMIC instruction @atomic_insn, return true if it is an
100588044230SPeilin Ye * atomic load or store, and false if it is a read-modify-write instruction.
100688044230SPeilin Ye */
100788044230SPeilin Ye static inline bool
100888044230SPeilin Ye bpf_atomic_is_load_store(const struct bpf_insn *atomic_insn)
10097de16e3aSJakub Kicinski {
10107de16e3aSJakub Kicinski switch (atomic_insn->imm) {
10117de16e3aSJakub Kicinski case BPF_LOAD_ACQ:
10127de16e3aSJakub Kicinski case BPF_STORE_REL:
10137de16e3aSJakub Kicinski return true;
10146728aea7SKumar Kartikeya Dwivedi default:
101509756af4SAlexei Starovoitov return false;
101609756af4SAlexei Starovoitov }
10175e43f899SAndrey Ignatov }
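/* Example (sketch; the encoding shown is an assumption based on the BPF
 * instruction-set documentation, not a definition from this file): a 64-bit
 * load-acquire is an atomic instruction such as
 *
 *	.code = BPF_STX | BPF_ATOMIC | BPF_DW, .imm = BPF_LOAD_ACQ
 *
 * for which bpf_atomic_is_load_store() returns true, while an atomic add
 * (.imm = BPF_ADD | BPF_FETCH) is a read-modify-write and returns false.
 */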
10185e43f899SAndrey Ignatov
10195e43f899SAndrey Ignatov struct bpf_prog_ops {
102017a52670SAlexei Starovoitov int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
102117a52670SAlexei Starovoitov union bpf_attr __user *uattr);
102217a52670SAlexei Starovoitov };
102317a52670SAlexei Starovoitov
102419de99f7SAlexei Starovoitov struct bpf_reg_state;
10255e43f899SAndrey Ignatov struct bpf_verifier_ops {
102623994631SYonghong Song /* return eBPF function prototype for verification */
102736bbef52SDaniel Borkmann const struct bpf_func_proto *
102836bbef52SDaniel Borkmann (*get_func_proto)(enum bpf_func_id func_id,
1029169c3176SMartin KaFai Lau const struct bpf_prog *prog);
1030169c3176SMartin KaFai Lau
1031e0cea7ceSDaniel Borkmann /* return true if 'size' wide access at offset 'off' within bpf_context
1032e0cea7ceSDaniel Borkmann * with 'type' (read or write) is allowed
10336b8cc1d1SDaniel Borkmann */
10346b8cc1d1SDaniel Borkmann bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
10356b8cc1d1SDaniel Borkmann const struct bpf_prog *prog,
1036f96da094SDaniel Borkmann struct bpf_insn_access_aux *info);
103727ae7997SMartin KaFai Lau int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
10386728aea7SKumar Kartikeya Dwivedi const struct bpf_prog *prog);
1039b7e852a9SAlexei Starovoitov int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog,
104009756af4SAlexei Starovoitov s16 ctx_stack_off);
104109756af4SAlexei Starovoitov int (*gen_ld_abs)(const struct bpf_insn *orig,
1042cae1927cSJakub Kicinski struct bpf_insn *insn_buf);
104308ca90afSJakub Kicinski u32 (*convert_ctx_access)(enum bpf_access_type type,
1044cae1927cSJakub Kicinski const struct bpf_insn *src,
1045cae1927cSJakub Kicinski struct bpf_insn *dst,
1046c941ce9cSQuentin Monnet struct bpf_prog *prog, u32 *target_size);
104708ca90afSJakub Kicinski int (*btf_struct_access)(struct bpf_verifier_log *log,
104808ca90afSJakub Kicinski const struct bpf_reg_state *reg,
104908ca90afSJakub Kicinski int off, int size);
105008ca90afSJakub Kicinski };
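/* Minimal ->is_valid_access() sketch (illustrative; 'struct my_ctx' is a
 * hypothetical context type, not a kernel structure):
 *
 *	static bool my_is_valid_access(int off, int size,
 *				       enum bpf_access_type type,
 *				       const struct bpf_prog *prog,
 *				       struct bpf_insn_access_aux *info)
 *	{
 *		if (off < 0 || off + size > sizeof(struct my_ctx))
 *			return false;
 *		return type == BPF_READ;  // read-only context
 *	}
 */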
105108ca90afSJakub Kicinski
105216a8cb5cSQuentin Monnet struct bpf_prog_offload_ops {
105316a8cb5cSQuentin Monnet /* verifier basic callbacks */
1054eb911947SQuentin Monnet int (*insn_hook)(struct bpf_verifier_env *env,
1055cae1927cSJakub Kicinski int insn_idx, int prev_insn_idx);
1056cae1927cSJakub Kicinski int (*finalize)(struct bpf_verifier_env *env);
10570a9c1991SJakub Kicinski /* verifier optimization callbacks (called after .finalize) */
1058ab3f0063SJakub Kicinski int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
1059ab3f0063SJakub Kicinski struct bpf_insn *insn);
1060341b3e7bSQuentin Monnet int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
1061ab3f0063SJakub Kicinski /* program management callbacks */
1062ab3f0063SJakub Kicinski int (*prepare)(struct bpf_prog *prog);
1063ab3f0063SJakub Kicinski int (*translate)(struct bpf_prog *prog);
106408ca90afSJakub Kicinski void (*destroy)(struct bpf_prog *prog);
1065fcfb126dSJiong Wang };
1066fcfb126dSJiong Wang
1067ab3f0063SJakub Kicinski struct bpf_prog_offload {
1068ab3f0063SJakub Kicinski struct bpf_prog *prog;
10698bad74f9SRoman Gushchin struct net_device *netdev;
10708bad74f9SRoman Gushchin struct bpf_offload_dev *offdev;
1071b741f163SRoman Gushchin void *dev_priv;
10728bad74f9SRoman Gushchin struct list_head offloads;
10738bad74f9SRoman Gushchin bool dev_state;
10748bad74f9SRoman Gushchin bool opt_failed;
10758bad74f9SRoman Gushchin void *jited_image;
10768bad74f9SRoman Gushchin u32 jited_len;
1077f1b9509cSAlexei Starovoitov };
1078f1b9509cSAlexei Starovoitov
1079f1b9509cSAlexei Starovoitov enum bpf_cgroup_storage_type {
1080f1b9509cSAlexei Starovoitov BPF_CGROUP_STORAGE_SHARED,
1081f1b9509cSAlexei Starovoitov BPF_CGROUP_STORAGE_PERCPU,
1082523a4cf4SDmitrii Banshchikov __BPF_CGROUP_STORAGE_MAX
1083523a4cf4SDmitrii Banshchikov };
1084523a4cf4SDmitrii Banshchikov
1085523a4cf4SDmitrii Banshchikov #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
1086523a4cf4SDmitrii Banshchikov
1087720e6a43SYonghong Song /* The longest tracepoint has 12 args.
1088720e6a43SYonghong Song * See include/trace/bpf_probe.h
1089720e6a43SYonghong Song */
109049f67f39SIlya Leoshkevich #define MAX_BPF_FUNC_ARGS 12
109149f67f39SIlya Leoshkevich
109249f67f39SIlya Leoshkevich /* The maximum number of arguments a single function may pass
1093fec56f58SAlexei Starovoitov * through registers.
1094fec56f58SAlexei Starovoitov */
109549f67f39SIlya Leoshkevich #define MAX_BPF_FUNC_REG_ARGS 5
1096fec56f58SAlexei Starovoitov
1097fec56f58SAlexei Starovoitov /* The argument is a structure. */
1098720e6a43SYonghong Song #define BTF_FMODEL_STRUCT_ARG BIT(0)
1099fec56f58SAlexei Starovoitov
1100fec56f58SAlexei Starovoitov /* The argument is signed. */
1101fec56f58SAlexei Starovoitov #define BTF_FMODEL_SIGNED_ARG BIT(1)
1102fec56f58SAlexei Starovoitov
1103fec56f58SAlexei Starovoitov struct btf_func_model {
1104fec56f58SAlexei Starovoitov u8 ret_size;
1105fec56f58SAlexei Starovoitov u8 ret_flags;
1106fec56f58SAlexei Starovoitov u8 nr_args;
1107fec56f58SAlexei Starovoitov u8 arg_size[MAX_BPF_FUNC_ARGS];
1108fec56f58SAlexei Starovoitov u8 arg_flags[MAX_BPF_FUNC_ARGS];
1109fec56f58SAlexei Starovoitov };
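/* Illustrative sketch (hypothetical function, not taken from the kernel):
 * for
 *
 *	long bar(struct st a, int b)
 *
 * where 'struct st' is 8 bytes and passed by value, the model could be:
 *
 *	struct btf_func_model m = {
 *		.ret_size  = 8,
 *		.ret_flags = BTF_FMODEL_SIGNED_ARG,
 *		.nr_args   = 2,
 *		.arg_size  = { 8, 4 },
 *		.arg_flags = { BTF_FMODEL_STRUCT_ARG, BTF_FMODEL_SIGNED_ARG },
 *	};
 */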
1110fec56f58SAlexei Starovoitov
1111fec56f58SAlexei Starovoitov /* Restore arguments before returning from trampoline to let original function
1112fec56f58SAlexei Starovoitov * continue executing. This flag is used for fentry progs when there are no
1113fec56f58SAlexei Starovoitov * fexit progs.
11147e6f3cd8SJiri Olsa */
11157e6f3cd8SJiri Olsa #define BPF_TRAMP_F_RESTORE_REGS BIT(0)
11167e6f3cd8SJiri Olsa /* Call original function after fentry progs, but before fexit progs.
11177e6f3cd8SJiri Olsa * Makes sense for fentry/fexit, normal calls and indirect calls.
1118356ed649SHou Tao */
1119356ed649SHou Tao #define BPF_TRAMP_F_CALL_ORIG BIT(1)
11207e6f3cd8SJiri Olsa /* Skip current frame and return to parent. Makes sense for fentry/fexit
1121316cba62SJiri Olsa * programs only. Should not be used with normal calls and indirect calls.
1122316cba62SJiri Olsa */
1123316cba62SJiri Olsa #define BPF_TRAMP_F_SKIP_FRAME BIT(2)
1124316cba62SJiri Olsa /* Store IP address of the caller on the trampoline stack,
1125316cba62SJiri Olsa * so it's available to the trampoline's programs.
112600963a2eSSong Liu */
112700963a2eSSong Liu #define BPF_TRAMP_F_IP_ARG BIT(3)
112800963a2eSSong Liu /* Return the return value of fentry prog. Only used by bpf_struct_ops. */
112900963a2eSSong Liu #define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
113000963a2eSSong Liu
11312b5dcb31SLeon Hwang /* Get original function from stack instead of from provided direct address.
11322b5dcb31SLeon Hwang * Makes sense for trampolines with fexit or fmod_ret programs.
11332b5dcb31SLeon Hwang */
11342b5dcb31SLeon Hwang #define BPF_TRAMP_F_ORIG_STACK BIT(5)
11352b5dcb31SLeon Hwang
11362cd3e377SPeter Zijlstra /* This trampoline is on a function with another ftrace_ops with IPMODIFY,
11372cd3e377SPeter Zijlstra * e.g., a live patch. This flag is set and cleared by ftrace callbacks.
11382cd3e377SPeter Zijlstra */
11392cd3e377SPeter Zijlstra #define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
11402cd3e377SPeter Zijlstra
11412cd3e377SPeter Zijlstra /* Indicate that the current trampoline is in a tail call context. It then has
11422cd3e377SPeter Zijlstra * to cache and restore tail_call_cnt to avoid an infinite tail call loop.
11432cd3e377SPeter Zijlstra */
11442cd3e377SPeter Zijlstra #define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7)
11452cd3e377SPeter Zijlstra
11462cd3e377SPeter Zijlstra /*
114788fd9e53SKP Singh * Indicate the trampoline should be suitable to receive indirect calls;
1148b23316aaSYuntao Wang * without this, indirectly calling the generated code can result in #UD/#CP,
114988fd9e53SKP Singh * depending on the CFI options.
1150390a07a9SIlya Leoshkevich *
1151528eb2cbSIlya Leoshkevich * Used by bpf_struct_ops.
1152528eb2cbSIlya Leoshkevich *
1153528eb2cbSIlya Leoshkevich * Incompatible with FENTRY usage, overloads @func_addr argument.
1154390a07a9SIlya Leoshkevich */
1155528eb2cbSIlya Leoshkevich #define BPF_TRAMP_F_INDIRECT BIT(8)
1156390a07a9SIlya Leoshkevich
115788fd9e53SKP Singh /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
1158f7e0beafSKui-Feng Lee * bytes on x86.
1159f7e0beafSKui-Feng Lee */
1160f7e0beafSKui-Feng Lee enum {
116188fd9e53SKP Singh #if defined(__s390x__)
116288fd9e53SKP Singh BPF_MAX_TRAMP_LINKS = 27,
1163e384c7b7SKui-Feng Lee #else
1164e384c7b7SKui-Feng Lee BPF_MAX_TRAMP_LINKS = 38,
1165fec56f58SAlexei Starovoitov #endif
1166fec56f58SAlexei Starovoitov };
1167fec56f58SAlexei Starovoitov
1168fec56f58SAlexei Starovoitov struct bpf_tramp_links {
1169fec56f58SAlexei Starovoitov struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS];
1170fec56f58SAlexei Starovoitov int nr_links;
1171fec56f58SAlexei Starovoitov };
1172fec56f58SAlexei Starovoitov
1173fec56f58SAlexei Starovoitov struct bpf_tramp_run_ctx;
1174fec56f58SAlexei Starovoitov
1175fec56f58SAlexei Starovoitov /* Different use cases for BPF trampoline:
1176fec56f58SAlexei Starovoitov * 1. replace nop at the function entry (kprobe equivalent)
1177fec56f58SAlexei Starovoitov * flags = BPF_TRAMP_F_RESTORE_REGS
1178fec56f58SAlexei Starovoitov * fentry = a set of programs to run before returning from trampoline
1179fec56f58SAlexei Starovoitov *
1180fec56f58SAlexei Starovoitov * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
1181fec56f58SAlexei Starovoitov * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
1182fec56f58SAlexei Starovoitov * orig_call = fentry_ip + MCOUNT_INSN_SIZE
1183fec56f58SAlexei Starovoitov * fentry = a set of programs to run before calling the original function
1184fec56f58SAlexei Starovoitov * fexit = a set of programs to run after the original function
1185e21aa341SAlexei Starovoitov *
11867a3d9a15SSong Liu * 3. replace direct call instruction anywhere in the function body
118785d33df3SMartin KaFai Lau * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
1188f7e0beafSKui-Feng Lee * With flags = 0
11897a3d9a15SSong Liu * fentry = a set of programs to run before returning from trampoline
119082583daaSSong Liu * With flags = BPF_TRAMP_F_CALL_ORIG
119182583daaSSong Liu * orig_call = original callback addr or direct function addr
1192c733239fSChristophe Leroy * fentry = a set of programs to run before calling the original function
119396d1b7c0SSong Liu * fexit = a set of programs to run after the original function
119496d1b7c0SSong Liu */
119582583daaSSong Liu struct bpf_tramp_image;
1196271de525SMartin KaFai Lau int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
1197e384c7b7SKui-Feng Lee const struct btf_func_model *m, u32 flags,
1198271de525SMartin KaFai Lau struct bpf_tramp_links *tlinks,
119964696c40SMartin KaFai Lau void *func_addr);
1200e21aa341SAlexei Starovoitov void *arch_alloc_bpf_trampoline(unsigned int size);
1201e21aa341SAlexei Starovoitov void arch_free_bpf_trampoline(void *image, unsigned int size);
1202271de525SMartin KaFai Lau int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size);
1203271de525SMartin KaFai Lau int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
1204271de525SMartin KaFai Lau struct bpf_tramp_links *tlinks, void *func_addr);
1205271de525SMartin KaFai Lau
1206271de525SMartin KaFai Lau u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
1207271de525SMartin KaFai Lau struct bpf_tramp_run_ctx *run_ctx);
1208fec56f58SAlexei Starovoitov void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
1209535911c8SJiri Olsa struct bpf_tramp_run_ctx *run_ctx);
1210535911c8SJiri Olsa void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
1211535911c8SJiri Olsa void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
1212bfea9a85SJiri Olsa typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
1213ecb60d1cSJiri Olsa struct bpf_tramp_run_ctx *run_ctx);
1214ca4424c9SJiri Olsa typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
1215cbd76f8dSJiri Olsa struct bpf_tramp_run_ctx *run_ctx);
1216535911c8SJiri Olsa bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
1217535911c8SJiri Olsa bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
1218fec56f58SAlexei Starovoitov
1219fec56f58SAlexei Starovoitov struct bpf_ksym {
1220fec56f58SAlexei Starovoitov unsigned long start;
1221ae240823SKP Singh unsigned long end;
1222be8704ffSAlexei Starovoitov char name[KSYM_NAME_LEN];
1223be8704ffSAlexei Starovoitov struct list_head lnode;
1224fec56f58SAlexei Starovoitov struct latch_tree_node tnode;
1225fec56f58SAlexei Starovoitov bool prog;
1226e21aa341SAlexei Starovoitov };
1227e21aa341SAlexei Starovoitov
122826ef208cSSong Liu enum bpf_tramp_prog_type {
1229e21aa341SAlexei Starovoitov BPF_TRAMP_FENTRY,
1230e21aa341SAlexei Starovoitov BPF_TRAMP_FEXIT,
1231e21aa341SAlexei Starovoitov BPF_TRAMP_MODIFY_RETURN,
1232e21aa341SAlexei Starovoitov BPF_TRAMP_MAX,
1233e21aa341SAlexei Starovoitov BPF_TRAMP_REPLACE, /* more than MAX */
1234e21aa341SAlexei Starovoitov };
1235e21aa341SAlexei Starovoitov
1236e21aa341SAlexei Starovoitov struct bpf_tramp_image {
1237e21aa341SAlexei Starovoitov void *image;
1238e21aa341SAlexei Starovoitov int size;
1239fec56f58SAlexei Starovoitov struct bpf_ksym ksym;
1240fec56f58SAlexei Starovoitov struct percpu_ref pcref;
1241fec56f58SAlexei Starovoitov void *ip_after_call;
124200963a2eSSong Liu void *ip_epilogue;
1243fec56f58SAlexei Starovoitov union {
1244fec56f58SAlexei Starovoitov struct rcu_head rcu;
1245fec56f58SAlexei Starovoitov struct work_struct work;
124600963a2eSSong Liu };
1247fec56f58SAlexei Starovoitov };
1248fec56f58SAlexei Starovoitov
1249fec56f58SAlexei Starovoitov struct bpf_trampoline {
1250fec56f58SAlexei Starovoitov /* hlist for trampoline_table */
1251b91e014fSAlexei Starovoitov struct hlist_node hlist;
1252fec56f58SAlexei Starovoitov struct ftrace_ops *fops;
1253be8704ffSAlexei Starovoitov /* serializes access to fields of this trampoline */
1254be8704ffSAlexei Starovoitov struct mutex mutex;
1255be8704ffSAlexei Starovoitov refcount_t refcnt;
1256be8704ffSAlexei Starovoitov u32 flags;
1257be8704ffSAlexei Starovoitov u64 key;
1258fec56f58SAlexei Starovoitov struct {
1259fec56f58SAlexei Starovoitov struct btf_func_model model;
1260fec56f58SAlexei Starovoitov void *addr;
1261fec56f58SAlexei Starovoitov bool ftrace_managed;
1262fec56f58SAlexei Starovoitov } func;
1263e21aa341SAlexei Starovoitov /* if !NULL this is a BPF_PROG_TYPE_EXT program that extends another BPF
1264fec56f58SAlexei Starovoitov * program by replacing one of its functions. func.addr is the address
126575ccbef6SBjörn Töpel * of the function it replaced.
1266f7b12b6fSToke Høiland-Jørgensen */
1267f7b12b6fSToke Høiland-Jørgensen struct bpf_prog *extension_prog;
1268f7b12b6fSToke Høiland-Jørgensen /* list of BPF programs using this trampoline */
126931bf1dbcSViktor Malik struct hlist_head progs_hlist[BPF_TRAMP_MAX];
1270f7b12b6fSToke Høiland-Jørgensen /* Number of attached programs. A counter per kind. */
1271f7b12b6fSToke Høiland-Jørgensen int progs_cnt[BPF_TRAMP_MAX];
1272f7b12b6fSToke Høiland-Jørgensen /* Executable image of trampoline */
1273f7b12b6fSToke Høiland-Jørgensen struct bpf_tramp_image *cur_image;
1274116eb788SBjörn Töpel };
127575ccbef6SBjörn Töpel
127675ccbef6SBjörn Töpel struct bpf_attach_target_info {
127775ccbef6SBjörn Töpel struct btf_func_model fmodel;
127875ccbef6SBjörn Töpel long tgt_addr;
127975ccbef6SBjörn Töpel struct module *tgt_mod;
128075ccbef6SBjörn Töpel const char *tgt_name;
128175ccbef6SBjörn Töpel const struct btf_type *tgt_type;
128275ccbef6SBjörn Töpel };
128375ccbef6SBjörn Töpel
128475ccbef6SBjörn Töpel #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
128575ccbef6SBjörn Töpel
128675ccbef6SBjörn Töpel struct bpf_dispatcher_prog {
128775ccbef6SBjörn Töpel struct bpf_prog *prog;
128819c02415SSong Liu refcount_t users;
128975ccbef6SBjörn Töpel };
1290517b75e4SJiri Olsa
1291c86df29dSPeter Zijlstra struct bpf_dispatcher {
1292c86df29dSPeter Zijlstra /* dispatcher mutex */
1293c86df29dSPeter Zijlstra struct mutex mutex;
1294c86df29dSPeter Zijlstra void *func;
129575ccbef6SBjörn Töpel struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
129675ccbef6SBjörn Töpel int num_progs;
12974f9087f1SPeter Zijlstra void *image;
12984f9087f1SPeter Zijlstra void *rw_image;
12994f9087f1SPeter Zijlstra u32 image_off;
13004f9087f1SPeter Zijlstra struct bpf_ksym ksym;
13014f9087f1SPeter Zijlstra #ifdef CONFIG_HAVE_STATIC_CALL
13027e6897f9SBjörn Töpel struct static_call_key *sc_key;
13037e6897f9SBjörn Töpel void *sc_tramp;
1304af3f4134SStanislav Fomichev #endif
13057e6897f9SBjörn Töpel };
13067e6897f9SBjörn Töpel
13077e6897f9SBjörn Töpel #ifndef __bpfcall
1308f7e0beafSKui-Feng Lee #define __bpfcall __nocfi
13098357b366SJoanne Koong #endif
13108357b366SJoanne Koong
13118357b366SJoanne Koong static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func(
13128357b366SJoanne Koong const void *ctx,
13138357b366SJoanne Koong const struct bpf_insn *insnsi,
13148357b366SJoanne Koong bpf_func_t bpf_func)
13158357b366SJoanne Koong {
13168357b366SJoanne Koong return bpf_func(ctx, insnsi);
13178357b366SJoanne Koong }
13188357b366SJoanne Koong
13198357b366SJoanne Koong /* the implementation of the opaque uapi struct bpf_dynptr */
13208357b366SJoanne Koong struct bpf_dynptr_kern {
13218357b366SJoanne Koong void *data;
13228357b366SJoanne Koong /* Size represents the number of usable bytes of dynptr data.
13238357b366SJoanne Koong * If for example the offset is at 4 for a local dynptr whose data is
13248357b366SJoanne Koong * of type u64, the number of usable bytes is 4.
13258357b366SJoanne Koong *
13268357b366SJoanne Koong * The upper 8 bits are reserved. The layout is as follows:
13278357b366SJoanne Koong * Bits 0 - 23 = size
13288357b366SJoanne Koong * Bits 24 - 30 = dynptr type
13298357b366SJoanne Koong * Bit 31 = whether dynptr is read-only
13308357b366SJoanne Koong */
1331b5964b96SJoanne Koong u32 size;
1332b5964b96SJoanne Koong u32 offset;
133305421aecSJoanne Koong } __aligned(8);
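/* Sketch of the bit layout documented above (the mask/shift values are
 * illustrative, derived from the comment rather than from the kernel's
 * internal definitions):
 *
 *	usable_size = size & 0x00ffffff;	// bits 0 - 23
 *	type	    = (size >> 24) & 0x7f;	// bits 24 - 30
 *	rdonly	    = size >> 31;		// bit 31
 */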
133405421aecSJoanne Koong
13358357b366SJoanne Koong enum bpf_dynptr_type {
13368357b366SJoanne Koong BPF_DYNPTR_TYPE_INVALID,
13378357b366SJoanne Koong /* Points to memory that is local to the bpf program */
133826662d73SJoanne Koong BPF_DYNPTR_TYPE_LOCAL,
133974523c06SSong Liu /* Underlying data is a ringbuf record */
134074523c06SSong Liu BPF_DYNPTR_TYPE_RINGBUF,
13413e1c6f35SVadim Fedorenko /* Underlying data is a sk_buff */
13428357b366SJoanne Koong BPF_DYNPTR_TYPE_SKB,
1343fec56f58SAlexei Starovoitov /* Underlying data is a xdp_buff */
1344d6083f04SLeon Hwang BPF_DYNPTR_TYPE_XDP,
1345d6083f04SLeon Hwang };
1346d6083f04SLeon Hwang
1347d6083f04SLeon Hwang int bpf_dynptr_check_size(u32 size);
1348d6083f04SLeon Hwang u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr);
1349d6083f04SLeon Hwang const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len);
1350f7b12b6fSToke Høiland-Jørgensen void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len);
1351f7b12b6fSToke Høiland-Jørgensen bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr);
1352fec56f58SAlexei Starovoitov
135319c02415SSong Liu #ifdef CONFIG_BPF_JIT
1354c86df29dSPeter Zijlstra int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
1355c86df29dSPeter Zijlstra struct bpf_trampoline *tr,
1356c86df29dSPeter Zijlstra struct bpf_prog *tgt_prog);
1357c86df29dSPeter Zijlstra int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
1358c86df29dSPeter Zijlstra struct bpf_trampoline *tr,
1359c86df29dSPeter Zijlstra struct bpf_prog *tgt_prog);
1360c86df29dSPeter Zijlstra struct bpf_trampoline *bpf_trampoline_get(u64 key,
1361c86df29dSPeter Zijlstra struct bpf_attach_target_info *tgt_info);
1362c86df29dSPeter Zijlstra void bpf_trampoline_put(struct bpf_trampoline *tr);
1363c86df29dSPeter Zijlstra int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
1364c86df29dSPeter Zijlstra
1365c86df29dSPeter Zijlstra /*
1366c86df29dSPeter Zijlstra * When the architecture supports STATIC_CALL, replace the bpf_dispatcher_fn
1367c86df29dSPeter Zijlstra * indirection with a direct call to the BPF program. If the architecture does
1368c86df29dSPeter Zijlstra * not have STATIC_CALL, avoid the double indirection.
1369c86df29dSPeter Zijlstra */
1370c86df29dSPeter Zijlstra #ifdef CONFIG_HAVE_STATIC_CALL
1371c86df29dSPeter Zijlstra
1372c86df29dSPeter Zijlstra #define __BPF_DISPATCHER_SC_INIT(_name) \
1373c86df29dSPeter Zijlstra .sc_key = &STATIC_CALL_KEY(_name), \
1374c86df29dSPeter Zijlstra .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
1375c86df29dSPeter Zijlstra
1376c86df29dSPeter Zijlstra #define __BPF_DISPATCHER_SC(name) \
1377c86df29dSPeter Zijlstra DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)
1378c86df29dSPeter Zijlstra
1379c86df29dSPeter Zijlstra #define __BPF_DISPATCHER_CALL(name) \
1380c86df29dSPeter Zijlstra static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
1381dbe69b29SJiri Olsa
1382517b75e4SJiri Olsa #define __BPF_DISPATCHER_UPDATE(_d, _new) \
1383517b75e4SJiri Olsa __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))
1384517b75e4SJiri Olsa
138575ccbef6SBjörn Töpel #else
138675ccbef6SBjörn Töpel #define __BPF_DISPATCHER_SC_INIT(name)
138775ccbef6SBjörn Töpel #define __BPF_DISPATCHER_SC(name)
1388517b75e4SJiri Olsa #define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi)
1389517b75e4SJiri Olsa #define __BPF_DISPATCHER_UPDATE(_d, _new)
1390517b75e4SJiri Olsa #endif
1391517b75e4SJiri Olsa
1392517b75e4SJiri Olsa #define BPF_DISPATCHER_INIT(_name) { \
1393c86df29dSPeter Zijlstra .mutex = __MUTEX_INITIALIZER(_name.mutex), \
139475ccbef6SBjörn Töpel .func = &_name##_func, \
139575ccbef6SBjörn Töpel .progs = {}, \
139675ccbef6SBjörn Töpel .num_progs = 0, \
1397c86df29dSPeter Zijlstra .image = NULL, \
13984f9087f1SPeter Zijlstra .image_off = 0, \
139975ccbef6SBjörn Töpel .ksym = { \
140075ccbef6SBjörn Töpel .name = #_name, \
1401af3f4134SStanislav Fomichev .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
140275ccbef6SBjörn Töpel }, \
1403c86df29dSPeter Zijlstra __BPF_DISPATCHER_SC_INIT(_name##_call) \
140475ccbef6SBjörn Töpel }
14056a64037dSBjörn Töpel
14066a64037dSBjörn Töpel #define DEFINE_BPF_DISPATCHER(name) \
140718acb7faSPeter Zijlstra __BPF_DISPATCHER_SC(name); \
1408dbe69b29SJiri Olsa noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \
140975ccbef6SBjörn Töpel const void *ctx, \
14106a64037dSBjörn Töpel const struct bpf_insn *insnsi, \
141175ccbef6SBjörn Töpel bpf_func_t bpf_func) \
141275ccbef6SBjörn Töpel { \
1413af3f4134SStanislav Fomichev return __BPF_DISPATCHER_CALL(name); \
14146a64037dSBjörn Töpel } \
1415c86df29dSPeter Zijlstra EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
14166a64037dSBjörn Töpel struct bpf_dispatcher bpf_dispatcher_##name = \
14176a64037dSBjörn Töpel BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
141875ccbef6SBjörn Töpel
141975ccbef6SBjörn Töpel #define DECLARE_BPF_DISPATCHER(name) \
1420dba122fbSJiri Olsa unsigned int bpf_dispatcher_##name##_func( \
14217c8ce4ffSXu Kuohai const void *ctx, \
14227c8ce4ffSXu Kuohai const struct bpf_insn *insnsi, \
1423a108f7dcSJiri Olsa bpf_func_t bpf_func); \
1424dba122fbSJiri Olsa extern struct bpf_dispatcher bpf_dispatcher_##name;
1425dba122fbSJiri Olsa
14263486beddSSong Liu #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
14273486beddSSong Liu #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
1428f92c1e18SJiri Olsa void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
1429fec56f58SAlexei Starovoitov struct bpf_prog *to);
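/* Typical dispatcher usage (sketch, modeled on the XDP dispatcher in
 * net/core/filter.c):
 *
 *	DEFINE_BPF_DISPATCHER(xdp)	// in one translation unit
 *	DECLARE_BPF_DISPATCHER(xdp)	// in a shared header
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev, next);
 *
 * The hot path then calls programs through BPF_DISPATCHER_FUNC(xdp).
 */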
1430f7e0beafSKui-Feng Lee /* Called only from JIT-enabled code, so there's no need for stubs. */
1431d6083f04SLeon Hwang void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym);
1432d6083f04SLeon Hwang void bpf_image_ksym_add(struct bpf_ksym *ksym);
1433fec56f58SAlexei Starovoitov void bpf_image_ksym_del(struct bpf_ksym *ksym);
1434fec56f58SAlexei Starovoitov void bpf_ksym_add(struct bpf_ksym *ksym);
1435fec56f58SAlexei Starovoitov void bpf_ksym_del(struct bpf_ksym *ksym);
1436f7e0beafSKui-Feng Lee int bpf_jit_charge_modmem(u32 size);
1437d6083f04SLeon Hwang void bpf_jit_uncharge_modmem(u32 size);
1438d6083f04SLeon Hwang bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
1439fec56f58SAlexei Starovoitov #else
1440fec56f58SAlexei Starovoitov static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
1441fec56f58SAlexei Starovoitov struct bpf_trampoline *tr,
1442f7b12b6fSToke Høiland-Jørgensen struct bpf_prog *tgt_prog)
1443f7b12b6fSToke Høiland-Jørgensen {
1444f7b12b6fSToke Høiland-Jørgensen return -ENOTSUPP;
1445b724a641SLeon Hwang }
1446f7b12b6fSToke Høiland-Jørgensen static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
1447fec56f58SAlexei Starovoitov struct bpf_trampoline *tr,
144875ccbef6SBjörn Töpel struct bpf_prog *tgt_prog)
144975ccbef6SBjörn Töpel {
14506a64037dSBjörn Töpel return -ENOTSUPP;
145175ccbef6SBjörn Töpel }
145275ccbef6SBjörn Töpel static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
145375ccbef6SBjörn Töpel struct bpf_attach_target_info *tgt_info)
145475ccbef6SBjörn Töpel {
1455e9b4e606SJiri Olsa return NULL;
1456e9b4e606SJiri Olsa }
1457e9b4e606SJiri Olsa static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
1458e9b4e606SJiri Olsa #define DEFINE_BPF_DISPATCHER(name)
1459f92c1e18SJiri Olsa #define DECLARE_BPF_DISPATCHER(name)
1460f92c1e18SJiri Olsa #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
1461f92c1e18SJiri Olsa #define BPF_DISPATCHER_PTR(name) NULL
1462f92c1e18SJiri Olsa static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
1463fec56f58SAlexei Starovoitov struct bpf_prog *from,
1464fec56f58SAlexei Starovoitov struct bpf_prog *to) {}
14658c1b6e69SAlexei Starovoitov static inline bool is_bpf_image_address(unsigned long address)
146651c39bb1SAlexei Starovoitov {
14678c1b6e69SAlexei Starovoitov return false;
14682afae08cSAndrii Nakryiko }
14692afae08cSAndrii Nakryiko static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
14708c1b6e69SAlexei Starovoitov {
14718c1b6e69SAlexei Starovoitov return false;
1472a66886feSDaniel Borkmann }
1473a66886feSDaniel Borkmann #endif
1474a66886feSDaniel Borkmann
1475a66886feSDaniel Borkmann struct bpf_func_info_aux {
1476a66886feSDaniel Borkmann u16 linkage;
1477a66886feSDaniel Borkmann bool unreliable;
1478cf71b174SMaciej Fijalkowski bool called : 1;
1479ebf7d1f5SMaciej Fijalkowski bool verified : 1;
1480ebf7d1f5SMaciej Fijalkowski };
1481f263a814SJohn Fastabend
1482a66886feSDaniel Borkmann enum bpf_jit_poke_reason {
1483a66886feSDaniel Borkmann BPF_POKE_REASON_TAIL_CALL,
1484a66886feSDaniel Borkmann };
1485a66886feSDaniel Borkmann
1486a66886feSDaniel Borkmann /* Descriptor of pokes pointing /into/ the JITed image. */
1487a66886feSDaniel Borkmann struct bpf_jit_poke_descriptor {
1488cf71b174SMaciej Fijalkowski void *tailcall_target;
1489a66886feSDaniel Borkmann void *tailcall_bypass;
1490a66886feSDaniel Borkmann void *bypass_addr;
1491a748c697SMaciej Fijalkowski void *aux;
1492a66886feSDaniel Borkmann union {
1493a66886feSDaniel Borkmann struct {
14943c32cc1bSYonghong Song struct bpf_map *map;
14953c32cc1bSYonghong Song u32 key;
14963c32cc1bSYonghong Song } tail_call;
14973c32cc1bSYonghong Song };
149877c0208eSKui-Feng Lee bool tailcall_target_stable;
1499951cf368SYonghong Song u8 adj_off;
1500a687df20SAmery Hung u16 reason;
1501a687df20SAmery Hung u32 insn_idx;
15023c32cc1bSYonghong Song };
15033c32cc1bSYonghong Song
1504541c3badSAndrii Nakryiko /* reg_type info for ctx arguments */
1505541c3badSAndrii Nakryiko struct bpf_ctx_arg_aux {
1506541c3badSAndrii Nakryiko u32 offset;
1507541c3badSAndrii Nakryiko enum bpf_reg_type reg_type;
1508541c3badSAndrii Nakryiko struct btf *btf;
1509e6ac2450SMartin KaFai Lau u32 btf_id;
1510e6ac2450SMartin KaFai Lau u32 ref_obj_id;
151109756af4SAlexei Starovoitov bool refcounted;
151285192dbfSAndrii Nakryiko };
151324701eceSDaniel Borkmann
1514541c3badSAndrii Nakryiko struct btf_mod_pair {
151532bbe007SAlexei Starovoitov struct btf *btf;
1516e647815aSJiong Wang struct module *module;
15179df1c28bSMatt Mullins };
15188726679aSAlexei Starovoitov
1519dc4bb0e2SMartin KaFai Lau struct bpf_kfunc_desc_tab;
1520ba64e7d8SYonghong Song
1521335d1c5bSKumar Kartikeya Dwivedi struct bpf_prog_aux {
1522ba64e7d8SYonghong Song atomic64_t refcnt;
1523ccfe29ebSAlexei Starovoitov u32 used_map_cnt;
1524*51d65049SJuntong Deng u32 used_btf_cnt;
15253c32cc1bSYonghong Song u32 max_ctx_offset;
1526afbf21dcSYonghong Song u32 max_pkt_offset;
1527afbf21dcSYonghong Song u32 max_tp_access;
152822dc4a0fSAndrii Nakryiko u32 stack_depth;
152943205180SAmery Hung u32 id;
15307d1cd70dSYonghong Song u32 func_cnt; /* used by non-func prog as the number of func progs */
15313aac1eadSToke Høiland-Jørgensen u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
15323aac1eadSToke Høiland-Jørgensen u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
15333aac1eadSToke Høiland-Jørgensen u32 attach_btf_id; /* in-kernel BTF type id to attach to */
15344a1e7c0cSToke Høiland-Jørgensen u32 attach_st_ops_member_off;
15354a1e7c0cSToke Høiland-Jørgensen u32 ctx_arg_info_size;
1536a4b1d3c1SJiong Wang u32 max_rdonly_access;
15372b3486bcSStanislav Fomichev u32 max_rdwr_access;
15382b3486bcSStanislav Fomichev struct btf *attach_btf;
153938207291SMartin KaFai Lau struct bpf_ctx_arg_aux *ctx_arg_info;
154019bfcdf9SDmitrii Dolgov void __percpu *priv_stack_ptr;
15418c1b6e69SAlexei Starovoitov struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
1542ebf7d1f5SMaciej Fijalkowski struct bpf_prog *dst_prog;
1543c2f2cdbeSLorenzo Bianconi struct bpf_trampoline *dst_trampoline;
1544f18b03faSKumar Kartikeya Dwivedi enum bpf_prog_type saved_dst_prog_type;
1545f18b03faSKumar Kartikeya Dwivedi enum bpf_attach_type saved_dst_attach_type;
1546d6083f04SLeon Hwang bool verifier_zext; /* Zero extensions has been inserted by verifier. */
1547e00931c0SYonghong Song bool dev_bound; /* Program is bound to the netdev. */
15485bd36da1SYonghong Song bool offload_requested; /* Program is bound and offloaded to the netdev. */
154981f6d053SEduard Zingerman bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
1550e2d8f560SKumar Kartikeya Dwivedi bool attach_tracing_prog; /* true if tracing another tracing program */
1551d6083f04SLeon Hwang bool func_proto_unreliable;
1552d6083f04SLeon Hwang bool tail_call_reachable;
15532fe99eb0SAlexei Starovoitov bool xdp_has_frags;
15545bd36da1SYonghong Song bool exception_cb;
155538207291SMartin KaFai Lau bool exception_boundary;
155638207291SMartin KaFai Lau bool is_extended; /* true if extended by freplace program */
155738207291SMartin KaFai Lau bool jits_use_priv_stack;
155838207291SMartin KaFai Lau bool priv_stack_requested;
15591c2a088aSAlexei Starovoitov bool changes_pkt_data;
15601c2a088aSAlexei Starovoitov bool might_sleep;
1561a66886feSDaniel Borkmann u64 prog_array_member_cnt; /* counts how many times as member of prog_array */
1562e6ac2450SMartin KaFai Lau struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */
15632357672cSKumar Kartikeya Dwivedi struct bpf_arena *arena;
1564a66886feSDaniel Borkmann void (*recursion_detected)(struct bpf_prog *prog); /* callback if recursion is detected */
15654f9087f1SPeter Zijlstra /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
15664f9087f1SPeter Zijlstra const struct btf_type *attach_func_proto;
15674f9087f1SPeter Zijlstra /* function name for valid attach_btf_id */
1568535911c8SJiri Olsa const char *attach_func_name;
15697de16e3aSJakub Kicinski struct bpf_prog **func;
1570*51d65049SJuntong Deng void *jit_data; /* JIT specific data. arch dependent */
157109756af4SAlexei Starovoitov struct bpf_jit_poke_descriptor *poke_tab;
1572984fe94fSYiFei Zhu struct bpf_kfunc_desc_tab *kfunc_tab;
1573541c3badSAndrii Nakryiko struct bpf_kfunc_btf_tab *kfunc_btf_tab;
157409756af4SAlexei Starovoitov u32 size_poke_tab;
1575aaac3ba9SAlexei Starovoitov #ifdef CONFIG_FINEIBT
1576cb4d2b3fSMartin KaFai Lau struct bpf_ksym ksym_prefix;
1577aba64c7dSDave Marchevsky #endif
157869fd337aSStanislav Fomichev struct bpf_ksym ksym;
15798bad74f9SRoman Gushchin const struct bpf_prog_ops *ops;
1580067cae47SMartin KaFai Lau const struct bpf_struct_ops *st_ops;
1581852486b3SAlexei Starovoitov struct bpf_map **used_maps;
1582afdb09c7SChenbo Feng struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
1583afdb09c7SChenbo Feng struct btf_mod_pair *used_btfs;
1584afdb09c7SChenbo Feng struct bpf_prog *prog;
1585caf8f28eSAndrii Nakryiko struct user_struct *user;
15860a9c1991SJakub Kicinski u64 load_time; /* ns since boottime */
1587838e9690SYonghong Song u32 verified_insns;
1588ba64e7d8SYonghong Song int cgroup_atype; /* enum cgroup_bpf_attach_type */
15898c1b6e69SAlexei Starovoitov struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
1590c454a46bSMartin KaFai Lau char name[BPF_OBJ_NAME_LEN];
1591c454a46bSMartin KaFai Lau u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64);
1592c454a46bSMartin KaFai Lau #ifdef CONFIG_SECURITY
1593c454a46bSMartin KaFai Lau void *security;
1594c454a46bSMartin KaFai Lau #endif
1595c454a46bSMartin KaFai Lau struct bpf_token *token;
1596c454a46bSMartin KaFai Lau struct bpf_prog_offload *offload;
1597c454a46bSMartin KaFai Lau struct btf *btf;
1598c454a46bSMartin KaFai Lau struct bpf_func_info *func_info;
1599c454a46bSMartin KaFai Lau struct bpf_func_info_aux *func_info_aux;
1600c454a46bSMartin KaFai Lau /* bpf_line_info loaded from userspace. linfo->insn_off
1601c454a46bSMartin KaFai Lau * has the xlated insn offset.
1602c454a46bSMartin KaFai Lau * Both the main and sub prog share the same linfo.
1603c454a46bSMartin KaFai Lau * The subprog can access its first linfo by
1604c454a46bSMartin KaFai Lau * using the linfo_idx.
1605ba64e7d8SYonghong Song */
1606c454a46bSMartin KaFai Lau struct bpf_line_info *linfo;
1607c454a46bSMartin KaFai Lau /* jited_linfo is the jited addr of the linfo. It has a
1608c454a46bSMartin KaFai Lau * one to one mapping to linfo:
1609c454a46bSMartin KaFai Lau * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
1610c454a46bSMartin KaFai Lau * Both the main and sub prog share the same jited_linfo.
1611c454a46bSMartin KaFai Lau * The subprog can access its first jited_linfo by
161231bf1dbcSViktor Malik * using the linfo_idx.
16133dec541bSAlexei Starovoitov */
16143dec541bSAlexei Starovoitov void **jited_linfo;
1615abf2e7d6SAlexei Starovoitov u32 func_info_cnt;
161609756af4SAlexei Starovoitov u32 nr_linfo;
1617abf2e7d6SAlexei Starovoitov /* subprog can use linfo_idx to access its first linfo and
1618abf2e7d6SAlexei Starovoitov * jited_linfo.
161909756af4SAlexei Starovoitov * main prog always has linfo_idx == 0
162009756af4SAlexei Starovoitov */
1621d687f621SDelyan Kratunov u32 linfo_idx;
1622d687f621SDelyan Kratunov struct module *mod;
1623d687f621SDelyan Kratunov u32 num_exentries;
1624d687f621SDelyan Kratunov struct exception_table_entry *extable;
1625d687f621SDelyan Kratunov union {
1626d687f621SDelyan Kratunov struct work_struct work;
1627d687f621SDelyan Kratunov struct rcu_head rcu;
1628d687f621SDelyan Kratunov };
1629d687f621SDelyan Kratunov };
1630d687f621SDelyan Kratunov
1631d687f621SDelyan Kratunov struct bpf_prog {
1632d687f621SDelyan Kratunov u16 pages; /* Number of allocated pages */
1633d687f621SDelyan Kratunov u16 jited:1, /* Is our filter JIT'ed? */
1634d687f621SDelyan Kratunov jit_requested:1,/* archs need to JIT the prog */
1635d687f621SDelyan Kratunov gpl_compatible:1, /* Is filter GPL compatible? */
163666c84731SAndrii Nakryiko cb_access:1, /* Is control block accessed? */
163766c84731SAndrii Nakryiko dst_needed:1, /* Do we need dst entry? */
1638d687f621SDelyan Kratunov blinding_requested:1, /* needs constant blinding */
1639d687f621SDelyan Kratunov blinded:1, /* Was blinded */
1640d687f621SDelyan Kratunov is_func:1, /* program is a bpf function */
1641d687f621SDelyan Kratunov kprobe_override:1, /* Do we override a kprobe? */
1642d687f621SDelyan Kratunov has_callchain_buf:1, /* callchain buffer allocated? */
1643d687f621SDelyan Kratunov enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
1644d687f621SDelyan Kratunov call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
1645d687f621SDelyan Kratunov call_get_func_ip:1, /* Do we call get_func_ip() */
1646d687f621SDelyan Kratunov tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
1647d687f621SDelyan Kratunov sleepable:1; /* BPF program is sleepable */
1648d687f621SDelyan Kratunov enum bpf_prog_type type; /* Type of BPF program */
1649d687f621SDelyan Kratunov enum bpf_attach_type expected_attach_type; /* For some prog types */
1650d687f621SDelyan Kratunov u32 len; /* Number of filter blocks */
1651d687f621SDelyan Kratunov u32 jited_len; /* Size of jited insns in bytes */
1652d687f621SDelyan Kratunov u8 tag[BPF_TAG_SIZE];
1653d687f621SDelyan Kratunov struct bpf_prog_stats __percpu *stats;
1654d687f621SDelyan Kratunov int __percpu *active;
1655d687f621SDelyan Kratunov unsigned int (*bpf_func)(const void *ctx,
16562beee5f5SDaniel Borkmann const struct bpf_insn *insn);
1657da765a2fSDaniel Borkmann struct bpf_prog_aux *aux; /* Auxiliary fields */
1658da765a2fSDaniel Borkmann struct sock_fprog_kern *orig_prog; /* Original BPF program */
1659da765a2fSDaniel Borkmann /* Instructions for interpreter */
1660da765a2fSDaniel Borkmann union {
1661da765a2fSDaniel Borkmann DECLARE_FLEX_ARRAY(struct sock_filter, insns);
16622beee5f5SDaniel Borkmann DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi);
16632beee5f5SDaniel Borkmann };
16646cc7d1e8SAndrii Nakryiko };
16656cc7d1e8SAndrii Nakryiko
16666cc7d1e8SAndrii Nakryiko struct bpf_array_aux {
16676cc7d1e8SAndrii Nakryiko /* Programs with direct jumps into programs part of this array. */
16686cc7d1e8SAndrii Nakryiko struct list_head poke_progs;
16696cc7d1e8SAndrii Nakryiko struct bpf_map *map;
167061c6fefaSAndrii Nakryiko struct mutex poke_mutex;
167161c6fefaSAndrii Nakryiko struct work_struct work;
167261c6fefaSAndrii Nakryiko };
167361c6fefaSAndrii Nakryiko
167461c6fefaSAndrii Nakryiko struct bpf_link {
16751a80dbcbSAndrii Nakryiko atomic64_t refcnt;
16761a80dbcbSAndrii Nakryiko u32 id;
16771a80dbcbSAndrii Nakryiko enum bpf_link_type type;
16781a80dbcbSAndrii Nakryiko const struct bpf_link_ops *ops;
16791a80dbcbSAndrii Nakryiko struct bpf_prog *prog;
16806cc7d1e8SAndrii Nakryiko /* whether the BPF link itself has "sleepable" semantics, which can differ
16816cc7d1e8SAndrii Nakryiko * from the underlying BPF program's "sleepable" semantics, as the BPF
16821a80dbcbSAndrii Nakryiko * link's semantics is determined by the target attach hook
16836cc7d1e8SAndrii Nakryiko */
16846cc7d1e8SAndrii Nakryiko bool sleepable;
16856cc7d1e8SAndrii Nakryiko /* rcu is used before freeing, work can be used to schedule that
16861a80dbcbSAndrii Nakryiko * RCU-based freeing before that, so they never overlap
16871a80dbcbSAndrii Nakryiko */
16881a80dbcbSAndrii Nakryiko union {
16896cc7d1e8SAndrii Nakryiko struct rcu_head rcu;
16901a80dbcbSAndrii Nakryiko struct work_struct work;
169161c6fefaSAndrii Nakryiko };
169261c6fefaSAndrii Nakryiko };
169361c6fefaSAndrii Nakryiko
169461c6fefaSAndrii Nakryiko struct bpf_link_ops {
16951a80dbcbSAndrii Nakryiko void (*release)(struct bpf_link *link);
16961a80dbcbSAndrii Nakryiko /* deallocate link resources callback, called without RCU grace period
169773b11c2aSAndrii Nakryiko * waiting
16986cc7d1e8SAndrii Nakryiko */
16996cc7d1e8SAndrii Nakryiko void (*dealloc)(struct bpf_link *link);
17006cc7d1e8SAndrii Nakryiko /* deallocate link resources callback, called after RCU grace period;
17016cc7d1e8SAndrii Nakryiko * if either the underlying BPF program is sleepable or BPF link's
17026cc7d1e8SAndrii Nakryiko * target hook is sleepable, we'll go through tasks trace RCU GP and
1703aef56f2eSKui-Feng Lee * then "classic" RCU GP; this need for chaining tasks trace and
1704aef56f2eSKui-Feng Lee * classic RCU GPs is designated by setting the bpf_link->sleepable flag
17051adddc97SKui-Feng Lee */
17066cc7d1e8SAndrii Nakryiko void (*dealloc_deferred)(struct bpf_link *link);
17076cc7d1e8SAndrii Nakryiko int (*detach)(struct bpf_link *link);
1708f7e0beafSKui-Feng Lee int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
1709f7e0beafSKui-Feng Lee struct bpf_prog *old_prog);
1710f7e0beafSKui-Feng Lee void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
17112fcc8241SKui-Feng Lee int (*fill_link_info)(const struct bpf_link *link,
1712f7e0beafSKui-Feng Lee struct bpf_link_info *info);
1713f7e0beafSKui-Feng Lee int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
171469fd337aSStanislav Fomichev struct bpf_map *old_map);
171569fd337aSStanislav Fomichev __poll_t (*poll)(struct file *file, struct poll_table_struct *pts);
171669fd337aSStanislav Fomichev };
171769fd337aSStanislav Fomichev
171869fd337aSStanislav Fomichev struct bpf_tramp_link {
1719f7e0beafSKui-Feng Lee struct bpf_link link;
1720f7e0beafSKui-Feng Lee struct hlist_node tramp_hlist;
1721f7e0beafSKui-Feng Lee u64 cookie;
1722f7e0beafSKui-Feng Lee };
1723f7e0beafSKui-Feng Lee
1724f7e0beafSKui-Feng Lee struct bpf_shim_tramp_link {
1725f7e0beafSKui-Feng Lee struct bpf_tramp_link link;
1726d4dfc570SAndrii Nakryiko struct bpf_trampoline *trampoline;
1727d4dfc570SAndrii Nakryiko };
1728d4dfc570SAndrii Nakryiko
172968ca5d4eSAndrii Nakryiko struct bpf_tracing_link {
1730d4dfc570SAndrii Nakryiko struct bpf_tramp_link link;
1731d4dfc570SAndrii Nakryiko enum bpf_attach_type attach_type;
17326cc7d1e8SAndrii Nakryiko struct bpf_trampoline *trampoline;
17336cc7d1e8SAndrii Nakryiko struct bpf_prog *tgt_prog;
17346cc7d1e8SAndrii Nakryiko };
17356cc7d1e8SAndrii Nakryiko
17366cc7d1e8SAndrii Nakryiko struct bpf_raw_tp_link {
17376cc7d1e8SAndrii Nakryiko struct bpf_link link;
17386cc7d1e8SAndrii Nakryiko struct bpf_raw_event_map *btp;
17396fe01d3cSAndrii Nakryiko u64 cookie;
17406fe01d3cSAndrii Nakryiko };
17416fe01d3cSAndrii Nakryiko
17426fe01d3cSAndrii Nakryiko struct bpf_link_primer {
17436fe01d3cSAndrii Nakryiko struct bpf_link *link;
17446fe01d3cSAndrii Nakryiko struct file *file;
17456fe01d3cSAndrii Nakryiko int fd;
17466fe01d3cSAndrii Nakryiko u32 id;
17476fe01d3cSAndrii Nakryiko };
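/* Typical link-creation flow (sketch; relies on the bpf_link_init(),
 * bpf_link_prime(), bpf_link_settle() and bpf_link_cleanup() helpers
 * declared elsewhere in this header):
 *
 *	bpf_link_init(&l->link, BPF_LINK_TYPE_..., &my_link_ops, prog);
 *	err = bpf_link_prime(&l->link, &primer);
 *	...attach to the target hook...
 *	return bpf_link_settle(&primer);	// publish the FD to userspace
 *
 * On attach failure, bpf_link_cleanup(&primer) undoes the priming.
 */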
17486fe01d3cSAndrii Nakryiko
17496fe01d3cSAndrii Nakryiko struct bpf_mount_opts {
17506fe01d3cSAndrii Nakryiko kuid_t uid;
175135f96de0SAndrii Nakryiko kgid_t gid;
175235f96de0SAndrii Nakryiko umode_t mode;
175335f96de0SAndrii Nakryiko
175435f96de0SAndrii Nakryiko /* BPF token-related delegation options */
175535f96de0SAndrii Nakryiko u64 delegate_cmds;
1756a177fc2bSAndrii Nakryiko u64 delegate_maps;
1757caf8f28eSAndrii Nakryiko u64 delegate_progs;
1758caf8f28eSAndrii Nakryiko u64 delegate_attachs;
1759f568a3d4SAndrii Nakryiko };
1760f568a3d4SAndrii Nakryiko
1761f568a3d4SAndrii Nakryiko struct bpf_token {
176235f96de0SAndrii Nakryiko struct work_struct work;
176335f96de0SAndrii Nakryiko atomic64_t refcnt;
176485d33df3SMartin KaFai Lau struct user_namespace *userns;
176527ae7997SMartin KaFai Lau u64 allowed_cmds;
176627ae7997SMartin KaFai Lau u64 allowed_maps;
176727ae7997SMartin KaFai Lau u64 allowed_progs;
1768bb48cf16SDavid Vernet u64 allowed_attachs;
1769bb48cf16SDavid Vernet #ifdef CONFIG_SECURITY
1770bb48cf16SDavid Vernet void *security;
1771bb48cf16SDavid Vernet #endif
1772bb48cf16SDavid Vernet };
1773bb48cf16SDavid Vernet
1774bb48cf16SDavid Vernet struct bpf_struct_ops_value;
1775bb48cf16SDavid Vernet struct btf_member;
1776bb48cf16SDavid Vernet
1777bb48cf16SDavid Vernet #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
1778bb48cf16SDavid Vernet /**
1779bb48cf16SDavid Vernet * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to
1780bb48cf16SDavid Vernet * define a BPF_MAP_TYPE_STRUCT_OPS map type composed
1781bb48cf16SDavid Vernet * of BPF_PROG_TYPE_STRUCT_OPS progs.
1782bb48cf16SDavid Vernet * @verifier_ops: A structure of callbacks that are invoked by the verifier
1783bb48cf16SDavid Vernet * when determining whether the struct_ops progs in the
1784bb48cf16SDavid Vernet * struct_ops map are valid.
1785bb48cf16SDavid Vernet * @init: A callback that is invoked a single time, and before any other
1786bb48cf16SDavid Vernet * callback, to initialize the structure. A nonzero return value means
1787bb48cf16SDavid Vernet * the subsystem could not be initialized.
1788bb48cf16SDavid Vernet * @check_member: When defined, a callback invoked by the verifier to allow
1789bb48cf16SDavid Vernet * the subsystem to determine if an entry in the struct_ops map
1790bb48cf16SDavid Vernet * is valid. A nonzero return value means that the map is
1791bb48cf16SDavid Vernet * invalid and should be rejected by the verifier.
1792bb48cf16SDavid Vernet * @init_member: A callback that is invoked for each member of the struct_ops
1793bb48cf16SDavid Vernet * map to allow the subsystem to initialize the member. A nonzero
1794bb48cf16SDavid Vernet * value means the member could not be initialized. This callback
1795bb48cf16SDavid Vernet * is exclusive with the @type, @type_id, @value_type, and
1796bb48cf16SDavid Vernet * @value_id fields.
1797bb48cf16SDavid Vernet * @reg: A callback that is invoked when the struct_ops map has been
1798bb48cf16SDavid Vernet * initialized and is being attached to. Zero means the struct_ops map
1799bb48cf16SDavid Vernet * has been successfully registered and is live. A nonzero return value
1800bb48cf16SDavid Vernet * means the struct_ops map could not be registered.
1801bb48cf16SDavid Vernet * @unreg: A callback that is invoked when the struct_ops map should be
1802bb48cf16SDavid Vernet * unregistered.
1803bb48cf16SDavid Vernet * @update: A callback that is invoked when the live struct_ops map is being
1804bb48cf16SDavid Vernet * updated to contain new values. This callback is only invoked when
1805bb48cf16SDavid Vernet * the struct_ops map is loaded with BPF_F_LINK. If not defined,
1806bb48cf16SDavid Vernet * it is assumed that the struct_ops map cannot be updated.
1807bb48cf16SDavid Vernet * @validate: A callback that is invoked after all of the members have been
1808bb48cf16SDavid Vernet * initialized. This callback should perform static checks on the
1809bb48cf16SDavid Vernet * map, meaning that it should either fail or succeed
1810bb48cf16SDavid Vernet * deterministically. A struct_ops map that has been validated may
1811bb48cf16SDavid Vernet * not necessarily succeed in being registered if the call to @reg
1812bb48cf16SDavid Vernet * fails. For example, a valid struct_ops map may be loaded, but
1813bb48cf16SDavid Vernet * then fail to be registered due to there being another active
1814bb48cf16SDavid Vernet * struct_ops map on the system in the subsystem already. For this
181527ae7997SMartin KaFai Lau * reason, if this callback is not defined, the check is skipped as
181627ae7997SMartin KaFai Lau * the struct_ops map will have final verification performed in
181727ae7997SMartin KaFai Lau * @reg.
181827ae7997SMartin KaFai Lau * @type: BTF type.
181951a52a29SDavid Vernet * @value_type: Value type.
182051a52a29SDavid Vernet * @name: The name of the struct bpf_struct_ops object.
182185d33df3SMartin KaFai Lau * @func_models: Func models
182285d33df3SMartin KaFai Lau * @type_id: BTF type id.
182385d33df3SMartin KaFai Lau * @value_id: BTF value id.
182473287fe2SKui-Feng Lee */
182573287fe2SKui-Feng Lee struct bpf_struct_ops {
182673287fe2SKui-Feng Lee const struct bpf_verifier_ops *verifier_ops;
182768b04864SKui-Feng Lee int (*init)(struct btf *btf);
18284c5763edSKui-Feng Lee int (*check_member)(const struct btf_type *t,
1829e3f87fdfSKui-Feng Lee const struct btf_member *member,
183027ae7997SMartin KaFai Lau const struct bpf_prog *prog);
183127ae7997SMartin KaFai Lau int (*init_member)(const struct btf_type *t,
18324c5763edSKui-Feng Lee const struct btf_member *member,
18334c5763edSKui-Feng Lee void *kdata, const void *udata);
183416116035SKui-Feng Lee int (*reg)(void *kdata, struct bpf_link *link);
183516116035SKui-Feng Lee void (*unreg)(void *kdata, struct bpf_link *link);
183616116035SKui-Feng Lee int (*update)(void *kdata, void *old_kdata, struct bpf_link *link);
183716116035SKui-Feng Lee int (*validate)(void *kdata);
183816116035SKui-Feng Lee void *cfi_stubs;
183916116035SKui-Feng Lee struct module *owner;
184016116035SKui-Feng Lee const char *name;
184116116035SKui-Feng Lee struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
184216116035SKui-Feng Lee };
184316116035SKui-Feng Lee
184416116035SKui-Feng Lee /* Every member of a struct_ops type has an instance even if the member is
184516116035SKui-Feng Lee * not an operator (function pointer). The "info" field will be assigned to
184616116035SKui-Feng Lee * prog->aux->ctx_arg_info of BPF struct_ops programs to provide the
18474c5763edSKui-Feng Lee * argument information required by the verifier to verify the program.
18484c5763edSKui-Feng Lee *
18494c5763edSKui-Feng Lee * btf_ctx_access() will look up prog->aux->ctx_arg_info to find the
18504c5763edSKui-Feng Lee * corresponding entry for a given argument.
18514c5763edSKui-Feng Lee */
185227ae7997SMartin KaFai Lau struct bpf_struct_ops_arg_info {
185385d33df3SMartin KaFai Lau struct bpf_ctx_arg_aux *info;
185416116035SKui-Feng Lee u32 cnt;
185516116035SKui-Feng Lee };
185616116035SKui-Feng Lee
185727ae7997SMartin KaFai Lau struct bpf_struct_ops_desc {
185827ae7997SMartin KaFai Lau struct bpf_struct_ops *st_ops;
1859612d087dSKui-Feng Lee
1860612d087dSKui-Feng Lee const struct btf_type *type;
1861612d087dSKui-Feng Lee const struct btf_type *value_type;
1862612d087dSKui-Feng Lee u32 type_id;
1863612d087dSKui-Feng Lee u32 value_id;
1864612d087dSKui-Feng Lee
1865612d087dSKui-Feng Lee /* Collection of argument information for each member */
1866612d087dSKui-Feng Lee struct bpf_struct_ops_arg_info *arg_info;
1867612d087dSKui-Feng Lee };
1868612d087dSKui-Feng Lee
1869612d087dSKui-Feng Lee enum bpf_struct_ops_state {
1870612d087dSKui-Feng Lee BPF_STRUCT_OPS_STATE_INIT,
187127ae7997SMartin KaFai Lau BPF_STRUCT_OPS_STATE_INUSE,
1872f6be98d1SKui-Feng Lee BPF_STRUCT_OPS_STATE_TOBEFREE,
1873f6be98d1SKui-Feng Lee BPF_STRUCT_OPS_STATE_READY,
1874f6be98d1SKui-Feng Lee };
1875f6be98d1SKui-Feng Lee
1876f6be98d1SKui-Feng Lee struct bpf_struct_ops_common_value {
1877f6be98d1SKui-Feng Lee refcount_t refcnt;
1878f6be98d1SKui-Feng Lee enum bpf_struct_ops_state state;
1879f6be98d1SKui-Feng Lee };
1880f6be98d1SKui-Feng Lee
1881f6be98d1SKui-Feng Lee #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
1882f6be98d1SKui-Feng Lee /* This macro helps developers register a struct_ops type and generate its
1883f6be98d1SKui-Feng Lee * type information correctly. Use this macro to register a struct_ops type
1884f6be98d1SKui-Feng Lee * instead of calling __register_bpf_struct_ops() directly.
188585d33df3SMartin KaFai Lau */
188685d33df3SMartin KaFai Lau #define register_bpf_struct_ops(st_ops, type) \
188785d33df3SMartin KaFai Lau ({ \
1888e42ac141SMartin KaFai Lau struct bpf_struct_ops_##type { \
188985d33df3SMartin KaFai Lau struct bpf_struct_ops_common_value common; \
189085d33df3SMartin KaFai Lau struct type data ____cacheline_aligned_in_smp; \
1891f7e0beafSKui-Feng Lee }; \
1892f7e0beafSKui-Feng Lee BTF_TYPE_EMIT(struct bpf_struct_ops_##type); \
189331a645aeSHou Tao __register_bpf_struct_ops(st_ops); \
18942cd3e377SPeter Zijlstra })
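/* Typical usage (an illustrative sketch, not part of this header; the
 * "my_ops" type and its callbacks are assumed for the example):
 *
 *	static struct bpf_struct_ops bpf_my_ops = {
 *		.verifier_ops	= &my_verifier_ops,
 *		.init		= my_ops_init,
 *		.reg		= my_ops_reg,
 *		.unreg		= my_ops_unreg,
 *		.name		= "my_ops",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init my_ops_module_init(void)
 *	{
 *		return register_bpf_struct_ops(&bpf_my_ops, my_ops);
 *	}
 *
 * The macro emits BTF for 'struct bpf_struct_ops_my_ops' (the common value
 * header followed by 'struct my_ops' data) before registering st_ops.
 */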
1895187e2af0SKui-Feng Lee #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
1896187e2af0SKui-Feng Lee bool bpf_struct_ops_get(const void *kdata);
1897187e2af0SKui-Feng Lee void bpf_struct_ops_put(const void *kdata);
189885d33df3SMartin KaFai Lau int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff);
189985d33df3SMartin KaFai Lau int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
190085d33df3SMartin KaFai Lau void *value);
190185d33df3SMartin KaFai Lau int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
190285d33df3SMartin KaFai Lau struct bpf_tramp_link *link,
190385d33df3SMartin KaFai Lau const struct btf_func_model *model,
190485d33df3SMartin KaFai Lau void *stub_func,
190585d33df3SMartin KaFai Lau void **image, u32 *image_off,
190685d33df3SMartin KaFai Lau bool allow_alloc);
190785d33df3SMartin KaFai Lau void bpf_struct_ops_image_free(void *image);
190885d33df3SMartin KaFai Lau static inline bool bpf_try_module_get(const void *data, struct module *owner)
190985d33df3SMartin KaFai Lau {
191085d33df3SMartin KaFai Lau if (owner == BPF_MODULE_OWNER)
191185d33df3SMartin KaFai Lau return bpf_struct_ops_get(data);
191268b04864SKui-Feng Lee else
1913c196906dSHou Tao return try_module_get(owner);
1914c196906dSHou Tao }
1915c196906dSHou Tao static inline void bpf_module_put(const void *data, struct module *owner)
1916c196906dSHou Tao {
1917c196906dSHou Tao if (owner == BPF_MODULE_OWNER)
1918c196906dSHou Tao bpf_struct_ops_put(data);
1919c196906dSHou Tao else
1920c196906dSHou Tao module_put(owner);
1921c196906dSHou Tao }
1922c196906dSHou Tao int bpf_struct_ops_link_create(union bpf_attr *attr);
1923c196906dSHou Tao
19247dd88059SDavid Vernet #ifdef CONFIG_NET
1925c196906dSHou Tao /* Define it here to avoid the use of a forward declaration */
1926c196906dSHou Tao struct bpf_dummy_ops_state {
1927c196906dSHou Tao int val;
1928c196906dSHou Tao };
1929c196906dSHou Tao
1930f6be98d1SKui-Feng Lee struct bpf_dummy_ops {
1931f6be98d1SKui-Feng Lee int (*test_1)(struct bpf_dummy_ops_state *cb);
1932f6be98d1SKui-Feng Lee int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
19331338b933SKui-Feng Lee char a3, unsigned long a4);
193416116035SKui-Feng Lee int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
193527ae7997SMartin KaFai Lau };
1936f6be98d1SKui-Feng Lee
193785d33df3SMartin KaFai Lau int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
193885d33df3SMartin KaFai Lau union bpf_attr __user *uattr);
193985d33df3SMartin KaFai Lau #endif
194085d33df3SMartin KaFai Lau int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
194185d33df3SMartin KaFai Lau struct btf *btf,
194285d33df3SMartin KaFai Lau struct bpf_verifier_log *log);
194385d33df3SMartin KaFai Lau void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map);
194485d33df3SMartin KaFai Lau void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc);
1945e42ac141SMartin KaFai Lau #else
1946e42ac141SMartin KaFai Lau #define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; })
1947e42ac141SMartin KaFai Lau static inline bool bpf_try_module_get(const void *data, struct module *owner)
1948e42ac141SMartin KaFai Lau {
194985d33df3SMartin KaFai Lau return try_module_get(owner);
195085d33df3SMartin KaFai Lau }
195185d33df3SMartin KaFai Lau static inline void bpf_module_put(const void *data, struct module *owner)
195285d33df3SMartin KaFai Lau {
195385d33df3SMartin KaFai Lau module_put(owner);
195485d33df3SMartin KaFai Lau }
195568b04864SKui-Feng Lee static inline int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
195668b04864SKui-Feng Lee {
195768b04864SKui-Feng Lee return -ENOTSUPP;
195868b04864SKui-Feng Lee }
19591338b933SKui-Feng Lee static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
19601338b933SKui-Feng Lee void *key,
19611338b933SKui-Feng Lee void *value)
196268b04864SKui-Feng Lee {
196316116035SKui-Feng Lee return -EINVAL;
196416116035SKui-Feng Lee }
196516116035SKui-Feng Lee static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
196616116035SKui-Feng Lee {
19679cb61fdaSStanislav Fomichev return -EOPNOTSUPP;
19689cb61fdaSStanislav Fomichev }
196943205180SAmery Hung static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
197043205180SAmery Hung {
197143205180SAmery Hung }
19729cb61fdaSStanislav Fomichev
19739cb61fdaSStanislav Fomichev static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
19749cb61fdaSStanislav Fomichev {
19759cb61fdaSStanislav Fomichev }
19769cb61fdaSStanislav Fomichev
197769fd337aSStanislav Fomichev #endif
197869fd337aSStanislav Fomichev
197969fd337aSStanislav Fomichev int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
198069fd337aSStanislav Fomichev const struct bpf_ctx_arg_aux *info, u32 cnt);
198169fd337aSStanislav Fomichev
198269fd337aSStanislav Fomichev #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
198369fd337aSStanislav Fomichev int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
198469fd337aSStanislav Fomichev int cgroup_atype);
198527ae7997SMartin KaFai Lau void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
198627ae7997SMartin KaFai Lau #else
198704fd61abSAlexei Starovoitov static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
198804fd61abSAlexei Starovoitov int cgroup_atype)
198904fd61abSAlexei Starovoitov {
1990b2157399SAlexei Starovoitov return -EOPNOTSUPP;
19912beee5f5SDaniel Borkmann }
199204fd61abSAlexei Starovoitov static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
1993129d868eSKees Cook {
1994129d868eSKees Cook }
1995129d868eSKees Cook #endif
199604fd61abSAlexei Starovoitov
199704fd61abSAlexei Starovoitov struct bpf_array {
19983b1efb19SDaniel Borkmann struct bpf_map map;
1999c04c0d2bSAlexei Starovoitov u32 elem_size;
2000ebf7f6f0STiezhu Yang u32 index_mask;
200104fd61abSAlexei Starovoitov struct bpf_array_aux *aux;
20026018e1f4SAndrii Nakryiko union {
20036018e1f4SAndrii Nakryiko DECLARE_FLEX_ARRAY(char, value) __aligned(8);
20046018e1f4SAndrii Nakryiko DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8);
20056018e1f4SAndrii Nakryiko DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8);
20066018e1f4SAndrii Nakryiko };
2007e723608bSKumar Kartikeya Dwivedi };
20086018e1f4SAndrii Nakryiko
20091ade2371SEduard Zingerman #define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
2010591fe988SDaniel Borkmann #define MAX_TAIL_CALL_CNT 33
2011591fe988SDaniel Borkmann
2012591fe988SDaniel Borkmann /* Maximum number of loops for bpf_loop and bpf_iter_num.
2013591fe988SDaniel Borkmann  * It's an enum to expose it (and thus make it discoverable) through BTF.
2014591fe988SDaniel Borkmann */
2015591fe988SDaniel Borkmann enum {
2016591fe988SDaniel Borkmann BPF_MAX_LOOPS = 8 * 1024 * 1024,
2017591fe988SDaniel Borkmann BPF_MAX_TIMED_LOOPS = 0xffff,
201820571567SDavid Vernet };
201920571567SDavid Vernet
202020571567SDavid Vernet #define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
202120571567SDavid Vernet BPF_F_RDONLY_PROG | \
202220571567SDavid Vernet BPF_F_WRONLY | \
2023591fe988SDaniel Borkmann BPF_F_WRONLY_PROG)
2024591fe988SDaniel Borkmann
2025591fe988SDaniel Borkmann #define BPF_MAP_CAN_READ BIT(0)
2026591fe988SDaniel Borkmann #define BPF_MAP_CAN_WRITE BIT(1)
2027591fe988SDaniel Borkmann
2028591fe988SDaniel Borkmann /* Maximum number of user-producer ring buffer samples that can be drained in
2029591fe988SDaniel Borkmann * a call to bpf_user_ringbuf_drain().
2030591fe988SDaniel Borkmann */
2031591fe988SDaniel Borkmann #define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024)
2032591fe988SDaniel Borkmann
2033591fe988SDaniel Borkmann static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
2034591fe988SDaniel Borkmann {
2035591fe988SDaniel Borkmann u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
2036591fe988SDaniel Borkmann
2037591fe988SDaniel Borkmann /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
2038591fe988SDaniel Borkmann * not possible.
2039591fe988SDaniel Borkmann */
2040591fe988SDaniel Borkmann if (access_flags & BPF_F_RDONLY_PROG)
2041591fe988SDaniel Borkmann return BPF_MAP_CAN_READ;
2042591fe988SDaniel Borkmann else if (access_flags & BPF_F_WRONLY_PROG)
2043591fe988SDaniel Borkmann return BPF_MAP_CAN_WRITE;
20443b1efb19SDaniel Borkmann else
20453b1efb19SDaniel Borkmann return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
20463b1efb19SDaniel Borkmann }
20473b1efb19SDaniel Borkmann
20483b1efb19SDaniel Borkmann static inline bool bpf_map_flags_access_ok(u32 access_flags)
20493b1efb19SDaniel Borkmann {
20503b1efb19SDaniel Borkmann return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
2051f45d5b6cSToke Hoiland-Jorgensen (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
2052f45d5b6cSToke Hoiland-Jorgensen }
2053f45d5b6cSToke Hoiland-Jorgensen
2054f45d5b6cSToke Hoiland-Jorgensen struct bpf_event_entry {
2055f45d5b6cSToke Hoiland-Jorgensen struct perf_event *event;
2056f45d5b6cSToke Hoiland-Jorgensen struct file *perf_file;
2057f45d5b6cSToke Hoiland-Jorgensen struct file *map_file;
2058f45d5b6cSToke Hoiland-Jorgensen struct rcu_head rcu;
2059f1f7714eSDaniel Borkmann };
2060bd570ff9SDaniel Borkmann
20610756ea3eSAlexei Starovoitov static inline bool map_type_contains_progs(struct bpf_map *map)
206210aceb62SDave Marchevsky {
2063555c8a86SDaniel Borkmann return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
2064ae0a457fSEmil Tsalapatis map->map_type == BPF_MAP_TYPE_DEVMAP ||
2065ae0a457fSEmil Tsalapatis map->map_type == BPF_MAP_TYPE_CPUMAP;
2066555c8a86SDaniel Borkmann }
2067aa7145c1SDaniel Borkmann
2068c64b7983SJoe Stringer bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
2069c64b7983SJoe Stringer int bpf_prog_calc_tag(struct bpf_prog *fp);
2070c64b7983SJoe Stringer
2071c64b7983SJoe Stringer const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
2072c64b7983SJoe Stringer const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
2073555c8a86SDaniel Borkmann
2074555c8a86SDaniel Borkmann const struct bpf_func_proto *bpf_get_perf_event_read_value_proto(void);
2075555c8a86SDaniel Borkmann
207604fd61abSAlexei Starovoitov typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
2077324bda9eSAlexei Starovoitov unsigned long off, unsigned long len);
2078324bda9eSAlexei Starovoitov typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
2079324bda9eSAlexei Starovoitov const struct bpf_insn *src,
2080055eb955SStanislav Fomichev struct bpf_insn *dst,
2081324bda9eSAlexei Starovoitov struct bpf_prog *prog,
2082324bda9eSAlexei Starovoitov u32 *target_size);
2083324bda9eSAlexei Starovoitov
2084324bda9eSAlexei Starovoitov u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2085324bda9eSAlexei Starovoitov void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
2086324bda9eSAlexei Starovoitov
2087324bda9eSAlexei Starovoitov /* An array of programs to be executed under the RCU lock.
2088324bda9eSAlexei Starovoitov *
2089394e40a2SRoman Gushchin * Typical usage:
2090394e40a2SRoman Gushchin * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run);
209182e6b1eeSAndrii Nakryiko *
20928bad74f9SRoman Gushchin  * The structure returned by bpf_prog_array_alloc() should be populated
209382e6b1eeSAndrii Nakryiko  * with program pointers, and the last pointer must be NULL.
209482e6b1eeSAndrii Nakryiko  * The user has to keep a refcount on each program and make sure the program
2095394e40a2SRoman Gushchin * is removed from the array before bpf_prog_put().
2096394e40a2SRoman Gushchin * The 'struct bpf_prog_array *' should only be replaced with xchg()
2097324bda9eSAlexei Starovoitov * since other cpus are walking the array of pointers in parallel.
2098324bda9eSAlexei Starovoitov */
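/* Expanded form of the typical usage above (a sketch; 'my_prog_array' is an
 * assumed 'struct bpf_prog_array __rcu *' pointer):
 *
 *	u32 ret;
 *
 *	rcu_read_lock();
 *	ret = bpf_prog_run_array(rcu_dereference(my_prog_array),
 *				 ctx, bpf_prog_run);
 *	rcu_read_unlock();
 *
 * ret is the bitwise AND of all program return values (1 for an empty array).
 */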
2099d7f10df8SGustavo A. R. Silva struct bpf_prog_array_item {
2100324bda9eSAlexei Starovoitov struct bpf_prog *prog;
2101324bda9eSAlexei Starovoitov union {
210246531a30SPavel Begunkov struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
210346531a30SPavel Begunkov u64 bpf_cookie;
210446531a30SPavel Begunkov };
210546531a30SPavel Begunkov };
210646531a30SPavel Begunkov
210746531a30SPavel Begunkov struct bpf_prog_array {
210846531a30SPavel Begunkov struct rcu_head rcu;
210946531a30SPavel Begunkov struct bpf_prog_array_item items[];
211046531a30SPavel Begunkov };
211146531a30SPavel Begunkov
211246531a30SPavel Begunkov struct bpf_empty_prog_array {
211346531a30SPavel Begunkov struct bpf_prog_array hdr;
211446531a30SPavel Begunkov struct bpf_prog *null_prog;
2115d29ab6e1SRoman Gushchin };
211654e9c9d4SStanislav Fomichev
21178c7dcb84SDelyan Kratunov /* To avoid allocating an empty bpf_prog_array for cgroups that don't
21188c7dcb84SDelyan Kratunov  * have a bpf program attached, one global 'bpf_empty_prog_array' is used.
211954e9c9d4SStanislav Fomichev  * It will not be modified by the caller of bpf_prog_array_alloc()
21200d01da6aSStanislav Fomichev  * (since the caller requested prog_cnt == 0);
212154e9c9d4SStanislav Fomichev  * that pointer should still be 'freed' by bpf_prog_array_free().
2122468e2f64SAlexei Starovoitov */
2123324bda9eSAlexei Starovoitov extern struct bpf_empty_prog_array bpf_empty_prog_array;
212454e9c9d4SStanislav Fomichev
2125e87c6bc3SYonghong Song struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
2126ce3aa9ccSJakub Sitnicki void bpf_prog_array_free(struct bpf_prog_array *progs);
2127ce3aa9ccSJakub Sitnicki /* Use when traversal over the bpf_prog_array uses tasks_trace rcu */
2128ce3aa9ccSJakub Sitnicki void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs);
212954e9c9d4SStanislav Fomichev int bpf_prog_array_length(struct bpf_prog_array *progs);
21303a38bb98SYonghong Song bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
21313a38bb98SYonghong Song int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
213254e9c9d4SStanislav Fomichev __u32 __user *prog_ids, u32 cnt);
2133e87c6bc3SYonghong Song
2134e87c6bc3SYonghong Song void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
213582e6b1eeSAndrii Nakryiko struct bpf_prog *old_prog);
2136e87c6bc3SYonghong Song int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
2137e87c6bc3SYonghong Song int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2138c7603cfaSAndrii Nakryiko struct bpf_prog *prog);
2139c7603cfaSAndrii Nakryiko int bpf_prog_array_copy_info(struct bpf_prog_array *array,
2140c7603cfaSAndrii Nakryiko u32 *prog_ids, u32 request_cnt,
2141c7603cfaSAndrii Nakryiko u32 *prog_cnt);
21427d08c2c9SAndrii Nakryiko int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2143c4dcfdd4SYiFei Zhu struct bpf_prog *exclude_prog,
2144c7603cfaSAndrii Nakryiko struct bpf_prog *include_prog,
2145c7603cfaSAndrii Nakryiko u64 bpf_cookie,
214682e6b1eeSAndrii Nakryiko struct bpf_prog_array **new_array);
214782e6b1eeSAndrii Nakryiko
214882e6b1eeSAndrii Nakryiko struct bpf_run_ctx {};
2149a3c485a5SJiri Olsa
215082e6b1eeSAndrii Nakryiko struct bpf_cg_run_ctx {
215182e6b1eeSAndrii Nakryiko struct bpf_run_ctx run_ctx;
2152e384c7b7SKui-Feng Lee const struct bpf_prog_array_item *prog_item;
2153e384c7b7SKui-Feng Lee int retval;
2154e384c7b7SKui-Feng Lee };
2155e384c7b7SKui-Feng Lee
2156e384c7b7SKui-Feng Lee struct bpf_trace_run_ctx {
2157e384c7b7SKui-Feng Lee struct bpf_run_ctx run_ctx;
21587d08c2c9SAndrii Nakryiko u64 bpf_cookie;
21597d08c2c9SAndrii Nakryiko bool is_uprobe;
21607d08c2c9SAndrii Nakryiko };
21617d08c2c9SAndrii Nakryiko
21627d08c2c9SAndrii Nakryiko struct bpf_tramp_run_ctx {
21637d08c2c9SAndrii Nakryiko struct bpf_run_ctx run_ctx;
21647d08c2c9SAndrii Nakryiko u64 bpf_cookie;
21657d08c2c9SAndrii Nakryiko struct bpf_run_ctx *saved_run_ctx;
21667d08c2c9SAndrii Nakryiko };
21677d08c2c9SAndrii Nakryiko
21687d08c2c9SAndrii Nakryiko static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
21697d08c2c9SAndrii Nakryiko {
21707d08c2c9SAndrii Nakryiko struct bpf_run_ctx *old_ctx = NULL;
21717d08c2c9SAndrii Nakryiko
21727d08c2c9SAndrii Nakryiko #ifdef CONFIG_BPF_SYSCALL
21737d08c2c9SAndrii Nakryiko old_ctx = current->bpf_ctx;
21747d08c2c9SAndrii Nakryiko current->bpf_ctx = new_ctx;
21757d08c2c9SAndrii Nakryiko #endif
217677241217SStanislav Fomichev return old_ctx;
217777241217SStanislav Fomichev }
217877241217SStanislav Fomichev
217977241217SStanislav Fomichev static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
218077241217SStanislav Fomichev {
21817d08c2c9SAndrii Nakryiko #ifdef CONFIG_BPF_SYSCALL
218277241217SStanislav Fomichev current->bpf_ctx = old_ctx;
21837d08c2c9SAndrii Nakryiko #endif
2184055eb955SStanislav Fomichev }
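/* Save/restore pairing (a sketch mirroring bpf_prog_run_array() below):
 *
 *	struct bpf_trace_run_ctx run_ctx;
 *	struct bpf_run_ctx *old_run_ctx;
 *
 *	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 *	... run programs; helpers reach run_ctx via current->bpf_ctx ...
 *	bpf_reset_run_ctx(old_run_ctx);
 */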
21857d08c2c9SAndrii Nakryiko
21867d08c2c9SAndrii Nakryiko /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
21877d08c2c9SAndrii Nakryiko #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0)
21887d08c2c9SAndrii Nakryiko /* BPF program asks to set CN on the packet. */
218982e6b1eeSAndrii Nakryiko #define BPF_RET_SET_CN (1 << 0)
219082e6b1eeSAndrii Nakryiko
21917d08c2c9SAndrii Nakryiko typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
21927d08c2c9SAndrii Nakryiko
2193055eb955SStanislav Fomichev static __always_inline u32
2194055eb955SStanislav Fomichev bpf_prog_run_array(const struct bpf_prog_array *array,
21957d08c2c9SAndrii Nakryiko const void *ctx, bpf_prog_run_fn run_prog)
2196055eb955SStanislav Fomichev {
2197055eb955SStanislav Fomichev const struct bpf_prog_array_item *item;
2198a3c485a5SJiri Olsa const struct bpf_prog *prog;
2199a3c485a5SJiri Olsa struct bpf_run_ctx *old_run_ctx;
2200055eb955SStanislav Fomichev struct bpf_trace_run_ctx run_ctx;
220182e6b1eeSAndrii Nakryiko u32 ret = 1;
22027d08c2c9SAndrii Nakryiko
22037d08c2c9SAndrii Nakryiko RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held");
220482e6b1eeSAndrii Nakryiko
22057d08c2c9SAndrii Nakryiko if (unlikely(!array))
22067d08c2c9SAndrii Nakryiko return ret;
22077d08c2c9SAndrii Nakryiko
220882e6b1eeSAndrii Nakryiko run_ctx.is_uprobe = false;
22097d08c2c9SAndrii Nakryiko
22107d08c2c9SAndrii Nakryiko migrate_disable();
22117d08c2c9SAndrii Nakryiko old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
2212324bda9eSAlexei Starovoitov item = &array->items[0];
22138c7dcb84SDelyan Kratunov while ((prog = READ_ONCE(item->prog))) {
22148c7dcb84SDelyan Kratunov run_ctx.bpf_cookie = item->bpf_cookie;
22158c7dcb84SDelyan Kratunov ret &= run_prog(prog, ctx);
22168c7dcb84SDelyan Kratunov item++;
22178c7dcb84SDelyan Kratunov }
22188c7dcb84SDelyan Kratunov bpf_reset_run_ctx(old_run_ctx);
22198c7dcb84SDelyan Kratunov migrate_enable();
22208c7dcb84SDelyan Kratunov return ret;
22218c7dcb84SDelyan Kratunov }
22228c7dcb84SDelyan Kratunov
22238c7dcb84SDelyan Kratunov /* Notes on RCU design for bpf_prog_arrays containing sleepable programs:
22247d0d6736SJann Horn *
22258c7dcb84SDelyan Kratunov * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array
22268c7dcb84SDelyan Kratunov * overall. As a result, we must use the bpf_prog_array_free_sleepable
22278c7dcb84SDelyan Kratunov * in order to use the tasks_trace rcu grace period.
22288c7dcb84SDelyan Kratunov *
22298c7dcb84SDelyan Kratunov * When a non-sleepable program is inside the array, we take the rcu read
22308c7dcb84SDelyan Kratunov * section and disable preemption for that program alone, so it can access
22318c7dcb84SDelyan Kratunov * rcu-protected dynamically sized maps.
22328c7dcb84SDelyan Kratunov */
22338c7dcb84SDelyan Kratunov static __always_inline u32
22347d0d6736SJann Horn bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
22358c7dcb84SDelyan Kratunov const void *ctx, bpf_prog_run_fn run_prog)
22367d0d6736SJann Horn {
22377d0d6736SJann Horn const struct bpf_prog_array_item *item;
22387d0d6736SJann Horn const struct bpf_prog *prog;
22398c7dcb84SDelyan Kratunov struct bpf_run_ctx *old_run_ctx;
22408c7dcb84SDelyan Kratunov struct bpf_trace_run_ctx run_ctx;
2241a3c485a5SJiri Olsa u32 ret = 1;
2242a3c485a5SJiri Olsa
22438c7dcb84SDelyan Kratunov might_fault();
22448c7dcb84SDelyan Kratunov RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held");
22458c7dcb84SDelyan Kratunov
224666c84731SAndrii Nakryiko if (unlikely(!array))
22478c7dcb84SDelyan Kratunov return ret;
22488c7dcb84SDelyan Kratunov
22498c7dcb84SDelyan Kratunov migrate_disable();
22508c7dcb84SDelyan Kratunov
22518c7dcb84SDelyan Kratunov run_ctx.is_uprobe = true;
22528c7dcb84SDelyan Kratunov
225366c84731SAndrii Nakryiko old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
22548c7dcb84SDelyan Kratunov item = &array->items[0];
22558c7dcb84SDelyan Kratunov while ((prog = READ_ONCE(item->prog))) {
22568c7dcb84SDelyan Kratunov if (!prog->sleepable)
22578c7dcb84SDelyan Kratunov rcu_read_lock();
22588c7dcb84SDelyan Kratunov
22598c7dcb84SDelyan Kratunov run_ctx.bpf_cookie = item->bpf_cookie;
22608c7dcb84SDelyan Kratunov ret &= run_prog(prog, ctx);
226189aa0758SAlexei Starovoitov item++;
2262b121d1e7SAlexei Starovoitov
2263d46edd67SSong Liu if (!prog->sleepable)
2264b121d1e7SAlexei Starovoitov rcu_read_unlock();
2265c518cfa0SThomas Gleixner }
2266c518cfa0SThomas Gleixner bpf_reset_run_ctx(old_run_ctx);
2267c518cfa0SThomas Gleixner migrate_enable();
2268c518cfa0SThomas Gleixner return ret;
2269c518cfa0SThomas Gleixner }
2270c518cfa0SThomas Gleixner
2271c518cfa0SThomas Gleixner #ifdef CONFIG_BPF_SYSCALL
2272c518cfa0SThomas Gleixner DECLARE_PER_CPU(int, bpf_prog_active);
2273c518cfa0SThomas Gleixner extern struct mutex bpf_stats_enabled_mutex;
2274c518cfa0SThomas Gleixner
2275c518cfa0SThomas Gleixner /*
2276c518cfa0SThomas Gleixner * Block execution of BPF programs attached to instrumentation (perf,
2277c518cfa0SThomas Gleixner * kprobes, tracepoints) to prevent deadlocks on map operations as any of
2278c518cfa0SThomas Gleixner * these events can happen inside a region which holds a map bucket lock
2279c518cfa0SThomas Gleixner * and can deadlock on it.
2280c518cfa0SThomas Gleixner */
2281c518cfa0SThomas Gleixner static inline void bpf_disable_instrumentation(void)
2282c518cfa0SThomas Gleixner {
228335f96de0SAndrii Nakryiko migrate_disable();
2284f66e448cSChenbo Feng this_cpu_inc(bpf_prog_active);
2285f66e448cSChenbo Feng }
2286367ec3e4SYonghong Song
2287f66e448cSChenbo Feng static inline void bpf_enable_instrumentation(void)
228891cc1a99SAlexei Starovoitov {
22897de16e3aSJakub Kicinski this_cpu_dec(bpf_prog_active);
22907de16e3aSJakub Kicinski migrate_enable();
229140077e0cSJohannes Berg }
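/* Intended pairing (sketch): wrap map operations that take bucket locks so
 * instrumentation-attached programs cannot recurse into them:
 *
 *	bpf_disable_instrumentation();
 *	... update/delete map elements under bucket locks ...
 *	bpf_enable_instrumentation();
 */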
229240077e0cSJohannes Berg
2293f2e10bffSAndrii Nakryiko extern const struct super_operations bpf_super_ops;
2294be9370a7SJohannes Berg extern const struct file_operations bpf_map_fops;
2295be9370a7SJohannes Berg extern const struct file_operations bpf_prog_fops;
229640077e0cSJohannes Berg extern const struct file_operations bpf_iter_fops;
2297f2e10bffSAndrii Nakryiko
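/* X-macro expansion: declare the prog_ops/verifier_ops/map_ops tables for
 * every type listed in <linux/bpf_types.h>.
 */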
22980fc174deSDaniel Borkmann #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
2299ab3f0063SJakub Kicinski extern const struct bpf_prog_ops _name ## _prog_ops; \
23004f9218aaSJakub Kicinski extern const struct bpf_verifier_ops _name ## _verifier_ops;
23014f9218aaSJakub Kicinski #define BPF_MAP_TYPE(_id, _ops) \
23024f9218aaSJakub Kicinski extern const struct bpf_map_ops _ops;
230309756af4SAlexei Starovoitov #define BPF_LINK_TYPE(_id, _name)
2304248f346fSJakub Kicinski #include <linux/bpf_types.h>
2305288b3de5SJakub Kicinski #undef BPF_PROG_TYPE
230685192dbfSAndrii Nakryiko #undef BPF_MAP_TYPE
2307c540594fSDaniel Borkmann #undef BPF_LINK_TYPE
230885192dbfSAndrii Nakryiko
2309a6f6df69SJohn Fastabend extern const struct bpf_prog_ops bpf_offload_prog_ops;
231061e021f3SDaniel Borkmann extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
231161e021f3SDaniel Borkmann extern const struct bpf_verifier_ops xdp_analyzer_ops;
2312e7895f01SPaul Moore
2313158e5e9eSTobias Klauser struct bpf_prog *bpf_prog_get(u32 ufd);
2314ad8ad79fSJakub Kicinski struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
2315aa3496acSKumar Kartikeya Dwivedi bool attach_drv);
231674843b57SDave Marchevsky void bpf_prog_add(struct bpf_prog *prog, int i);
2317aa3496acSKumar Kartikeya Dwivedi void bpf_prog_sub(struct bpf_prog *prog, int i);
2318aa3496acSKumar Kartikeya Dwivedi void bpf_prog_inc(struct bpf_prog *prog);
2319aa3496acSKumar Kartikeya Dwivedi struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
2320aa3496acSKumar Kartikeya Dwivedi void bpf_prog_put(struct bpf_prog *prog);
2321db559117SKumar Kartikeya Dwivedi
2322246331e3SBenjamin Tissoires void bpf_prog_free_id(struct bpf_prog *prog);
2323aa3496acSKumar Kartikeya Dwivedi void bpf_map_free_id(struct bpf_map *map);
2324e383a459SHou Tao
232561df10c7SKumar Kartikeya Dwivedi struct btf_field *btf_record_find(const struct btf_record *rec,
23261ed4d924SMartin KaFai Lau u32 offset, u32 field_mask);
2327c9da161cSDaniel Borkmann void btf_record_free(struct btf_record *rec);
232855f32595SAl Viro void bpf_map_free_record(struct bpf_map *map);
23294e885fabSAnton Protopopov struct btf_record *btf_record_dup(const struct btf_record *rec);
23304e885fabSAnton Protopopov bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
23314e885fabSAnton Protopopov void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
23324e885fabSAnton Protopopov void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj);
23334e885fabSAnton Protopopov void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
23344e885fabSAnton Protopopov void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
23354e885fabSAnton Protopopov
23364e885fabSAnton Protopopov struct bpf_map *bpf_map_get(u32 ufd);
233755f32595SAl Viro struct bpf_map *bpf_map_get_with_uref(u32 ufd);
233855f32595SAl Viro
233955f32595SAl Viro /*
234055f32595SAl Viro * The __bpf_map_get() and __btf_get_by_fd() functions parse a file
234155f32595SAl Viro * descriptor and return a corresponding map or btf object.
234255f32595SAl Viro * Their names are double underscored to emphasize the fact that they
234355f32595SAl Viro * do not increase refcnt. To also increase refcnt use corresponding
234455f32595SAl Viro * bpf_map_get() and btf_get_by_fd() functions.
234555f32595SAl Viro */
23464e885fabSAnton Protopopov
23474e885fabSAnton Protopopov static inline struct bpf_map *__bpf_map_get(struct fd f)
23484e885fabSAnton Protopopov {
23494e885fabSAnton Protopopov if (fd_empty(f))
23504e885fabSAnton Protopopov return ERR_PTR(-EBADF);
23514e885fabSAnton Protopopov if (unlikely(fd_file(f)->f_op != &bpf_map_fops))
23524e885fabSAnton Protopopov return ERR_PTR(-EINVAL);
23534e885fabSAnton Protopopov return fd_file(f)->private_data;
23544e885fabSAnton Protopopov }
23551e0bd5a0SAndrii Nakryiko
23561e0bd5a0SAndrii Nakryiko static inline struct btf *__btf_get_by_fd(struct fd f)
2357b671c206SKui-Feng Lee {
23581e0bd5a0SAndrii Nakryiko if (fd_empty(f))
2359c9da161cSDaniel Borkmann return ERR_PTR(-EBADF);
236061e021f3SDaniel Borkmann if (unlikely(fd_file(f)->f_op != &btf_fops))
2361196e8ca7SDaniel Borkmann return ERR_PTR(-EINVAL);
2362196e8ca7SDaniel Borkmann return fd_file(f)->private_data;
2363d407bd25SDaniel Borkmann }
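/* Usage sketch (assumes the scope-based fd guard from <linux/cleanup.h>;
 * roughly what bpf_map_get() does):
 *
 *	CLASS(fd, f)(ufd);
 *	struct bpf_map *map = __bpf_map_get(f);
 *
 *	if (!IS_ERR(map))
 *		bpf_map_inc(map);	(keep the map alive past fd release)
 *	return map;
 */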
2364353050beSDaniel Borkmann
2365bd475643SJakub Kicinski void bpf_map_inc(struct bpf_map *map);
2366cb4d03abSBrian Vazquez void bpf_map_inc_with_uref(struct bpf_map *map);
2367cb4d03abSBrian Vazquez struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
2368cb4d03abSBrian Vazquez struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
23693af43ba4SHou Tao void bpf_map_put_with_uref(struct bpf_map *map);
2370aa2e93b8SBrian Vazquez void bpf_map_put(struct bpf_map *map);
2371aa2e93b8SBrian Vazquez void *bpf_map_area_alloc(u64 size, int numa_node);
2372aa2e93b8SBrian Vazquez void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
2373aa2e93b8SBrian Vazquez void bpf_map_area_free(void *base);
2374aa2e93b8SBrian Vazquez bool bpf_map_write_active(const struct bpf_map *map);
23756086d29dSYonghong Song void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
2376a228a64fSAlexei Starovoitov int generic_map_lookup_batch(struct bpf_map *map,
237761e021f3SDaniel Borkmann const union bpf_attr *attr,
237831746031SAlexei Starovoitov union bpf_attr __user *uattr);
237931746031SAlexei Starovoitov int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
23803a3b7fecSJohannes Weiner const union bpf_attr *attr,
238148edc1f7SRoman Gushchin union bpf_attr __user *uattr);
238248edc1f7SRoman Gushchin int generic_map_delete_batch(struct bpf_map *map,
238348edc1f7SRoman Gushchin const union bpf_attr *attr,
2384ddef81b5SYafang Shao union bpf_attr __user *uattr);
2385ddef81b5SYafang Shao struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
238648edc1f7SRoman Gushchin struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
238748edc1f7SRoman Gushchin
238848edc1f7SRoman Gushchin int bpf_map_alloc_pages(const struct bpf_map *map, int nid,
23893b0ba54dSSuren Baghdasaryan unsigned long nr_pages, struct page **page_array);
23903b0ba54dSSuren Baghdasaryan #ifdef CONFIG_MEMCG
23913b0ba54dSSuren Baghdasaryan void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
23923b0ba54dSSuren Baghdasaryan int node);
23932c321f3fSSuren Baghdasaryan void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags);
23942c321f3fSSuren Baghdasaryan void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
23952c321f3fSSuren Baghdasaryan gfp_t flags);
23962c321f3fSSuren Baghdasaryan void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
23972c321f3fSSuren Baghdasaryan size_t align, gfp_t flags);
23982c321f3fSSuren Baghdasaryan #else
23992c321f3fSSuren Baghdasaryan /*
24002c321f3fSSuren Baghdasaryan * These specialized allocators have to be macros for their allocations to be
240148edc1f7SRoman Gushchin * accounted separately (to have separate alloc_tag).
240248edc1f7SRoman Gushchin */
240325954730SAnton Protopopov #define bpf_map_kmalloc_node(_map, _size, _flags, _node) \
240425954730SAnton Protopopov kmalloc_node(_size, _flags, _node)
240525954730SAnton Protopopov #define bpf_map_kzalloc(_map, _size, _flags) \
240625954730SAnton Protopopov kzalloc(_size, _flags)
240725954730SAnton Protopopov #define bpf_map_kvcalloc(_map, _n, _size, _flags) \
240825954730SAnton Protopopov kvcalloc(_n, _size, _flags)
240925954730SAnton Protopopov #define bpf_map_alloc_percpu(_map, _size, _align, _flags) \
241025954730SAnton Protopopov __alloc_percpu_gfp(_size, _align, _flags)
241125954730SAnton Protopopov #endif
241225954730SAnton Protopopov
241325954730SAnton Protopopov static inline int
241425954730SAnton Protopopov bpf_map_init_elem_count(struct bpf_map *map)
241525954730SAnton Protopopov {
241625954730SAnton Protopopov size_t size = sizeof(*map->elem_count), align = size;
241725954730SAnton Protopopov gfp_t flags = GFP_USER | __GFP_NOWARN;
241825954730SAnton Protopopov
241925954730SAnton Protopopov map->elem_count = bpf_map_alloc_percpu(map, size, align, flags);
242025954730SAnton Protopopov if (!map->elem_count)
242125954730SAnton Protopopov return -ENOMEM;
242225954730SAnton Protopopov
242325954730SAnton Protopopov return 0;
242425954730SAnton Protopopov }
242525954730SAnton Protopopov
242625954730SAnton Protopopov static inline void
242725954730SAnton Protopopov bpf_map_free_elem_count(struct bpf_map *map)
242825954730SAnton Protopopov {
242925954730SAnton Protopopov free_percpu(map->elem_count);
243025954730SAnton Protopopov }
243125954730SAnton Protopopov
24321be7f75dSAlexei Starovoitov static inline void bpf_map_inc_elem_count(struct bpf_map *map)
24331be7f75dSAlexei Starovoitov {
243435f96de0SAndrii Nakryiko this_cpu_inc(*map->elem_count);
243535f96de0SAndrii Nakryiko }
2436d79a3549SAndrii Nakryiko
24372c78ee89SAlexei Starovoitov static inline void bpf_map_dec_elem_count(struct bpf_map *map)
2438d79a3549SAndrii Nakryiko {
24392c78ee89SAlexei Starovoitov this_cpu_dec(*map->elem_count);
24402c78ee89SAlexei Starovoitov }
2441d79a3549SAndrii Nakryiko
244201f810acSAndrei Matei extern int sysctl_unprivileged_bpf_disabled;
2443d79a3549SAndrii Nakryiko
244401f810acSAndrei Matei bool bpf_token_capable(const struct bpf_token *token, int cap);
244501f810acSAndrei Matei
2446d79a3549SAndrii Nakryiko static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token)
24472c78ee89SAlexei Starovoitov {
2448d79a3549SAndrii Nakryiko return bpf_token_capable(token, CAP_PERFMON);
24492c78ee89SAlexei Starovoitov }
24502c78ee89SAlexei Starovoitov
2451d79a3549SAndrii Nakryiko static inline bool bpf_allow_uninit_stack(const struct bpf_token *token)
24522c78ee89SAlexei Starovoitov {
2453d79a3549SAndrii Nakryiko return bpf_token_capable(token, CAP_PERFMON);
24542c78ee89SAlexei Starovoitov }
24552c78ee89SAlexei Starovoitov
24566e71b04aSChenbo Feng static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
2457b2197755SDaniel Borkmann {
2458b2197755SDaniel Borkmann return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
2459f2e10bffSAndrii Nakryiko }
2460a3b80e10SAndrii Nakryiko
246161c6fefaSAndrii Nakryiko static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
246261c6fefaSAndrii Nakryiko {
246361c6fefaSAndrii Nakryiko return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
2464a3b80e10SAndrii Nakryiko }
2465a3b80e10SAndrii Nakryiko
2466a3b80e10SAndrii Nakryiko int bpf_map_new_fd(struct bpf_map *map, int flags);
246770ed506cSAndrii Nakryiko int bpf_prog_new_fd(struct bpf_prog *prog);
246867c3e835SKui-Feng Lee
246970ed506cSAndrii Nakryiko void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
247070ed506cSAndrii Nakryiko const struct bpf_link_ops *ops, struct bpf_prog *prog);
247170ed506cSAndrii Nakryiko void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
24729f883612SDmitrii Dolgov const struct bpf_link_ops *ops, struct bpf_prog *prog,
247370ed506cSAndrii Nakryiko bool sleepable);
247435f96de0SAndrii Nakryiko int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
247535f96de0SAndrii Nakryiko int bpf_link_settle(struct bpf_link_primer *primer);
247635f96de0SAndrii Nakryiko void bpf_link_cleanup(struct bpf_link_primer *primer);
247735f96de0SAndrii Nakryiko void bpf_link_inc(struct bpf_link *link);
247835f96de0SAndrii Nakryiko struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link);
247935f96de0SAndrii Nakryiko void bpf_link_put(struct bpf_link *link);
2480a177fc2bSAndrii Nakryiko int bpf_link_new_fd(struct bpf_link *link);
2481caf8f28eSAndrii Nakryiko struct bpf_link *bpf_link_get_from_fd(u32 ufd);
2482caf8f28eSAndrii Nakryiko struct bpf_link *bpf_link_get_curr_or_next(u32 *id);
2483caf8f28eSAndrii Nakryiko
248435f96de0SAndrii Nakryiko void bpf_token_inc(struct bpf_token *token);
2485cb8edce2SAndrii Nakryiko void bpf_token_put(struct bpf_token *token);
2486cb8edce2SAndrii Nakryiko int bpf_token_create(union bpf_attr *attr);
248735f96de0SAndrii Nakryiko struct bpf_token *bpf_token_get_from_fd(u32 ufd);
248835f96de0SAndrii Nakryiko
2489b2197755SDaniel Borkmann bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd);
249021aef70eSYonghong Song bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type);
2491e5158d98SYonghong Song bool bpf_token_allow_prog_type(const struct bpf_token *token,
249221aef70eSYonghong Song enum bpf_prog_type prog_type,
249321aef70eSYonghong Song enum bpf_attach_type attach_type);
249415d83c4dSYonghong Song
2495f0d74c4dSKui-Feng Lee int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname);
2496f0d74c4dSKui-Feng Lee int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags);
2497f0d74c4dSKui-Feng Lee struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir,
2498f0d74c4dSKui-Feng Lee umode_t mode);
2499f0d74c4dSKui-Feng Lee
2500f0d74c4dSKui-Feng Lee #define BPF_ITER_FUNC_PREFIX "bpf_iter_"
2501f0d74c4dSKui-Feng Lee #define DEFINE_BPF_ITER_FUNC(target, args...) \
2502f0d74c4dSKui-Feng Lee extern int bpf_iter_ ## target(args); \
2503f0d74c4dSKui-Feng Lee int __init bpf_iter_ ## target(args) { return 0; }
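/* Example (mirrors the in-kernel task iterator target):
 *
 *	DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta,
 *			     struct task_struct *task)
 *
 * This declares bpf_iter_task(), whose BTF describes the ctx arguments
 * seen by "iter/task" programs.
 */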
2504f0d74c4dSKui-Feng Lee
2505f0d74c4dSKui-Feng Lee /*
2506f0d74c4dSKui-Feng Lee * The task type of iterators.
2507f0d74c4dSKui-Feng Lee *
2508f0d74c4dSKui-Feng Lee  * BPF task iterators can be parameterized to visit
2509f0d74c4dSKui-Feng Lee  * only a subset of tasks.
2510f0d74c4dSKui-Feng Lee *
2511f0d74c4dSKui-Feng Lee * BPF_TASK_ITER_ALL (default)
2512f0d74c4dSKui-Feng Lee * Iterate over resources of every task.
2513f0d74c4dSKui-Feng Lee *
2514f0d74c4dSKui-Feng Lee * BPF_TASK_ITER_TID
2515f0d74c4dSKui-Feng Lee * Iterate over resources of a task/tid.
2516f9c79272SYonghong Song *
2517d4ccaf58SHao Luo * BPF_TASK_ITER_TGID
2518a5cbe05aSYonghong Song * Iterate over resources of every task of a process / task group.
2519d4ccaf58SHao Luo */
2520d4ccaf58SHao Luo enum bpf_iter_task_type {
2521d4ccaf58SHao Luo BPF_TASK_ITER_ALL = 0,
2522d4ccaf58SHao Luo BPF_TASK_ITER_TID,
2523d4ccaf58SHao Luo BPF_TASK_ITER_TGID,
2524d4ccaf58SHao Luo };
2525f0d74c4dSKui-Feng Lee
2526f0d74c4dSKui-Feng Lee struct bpf_iter_aux_info {
2527f0d74c4dSKui-Feng Lee /* for map_elem iter */
2528f0d74c4dSKui-Feng Lee struct bpf_map *map;
2529f9c79272SYonghong Song
2530f9c79272SYonghong Song /* for cgroup iter */
25315e7b3020SYonghong Song struct {
25325e7b3020SYonghong Song struct cgroup *start; /* starting cgroup */
2533a5cbe05aSYonghong Song enum bpf_cgroup_iter_order order;
25345e7b3020SYonghong Song } cgroup;
25356b0a249aSYonghong Song struct {
25366b0a249aSYonghong Song enum bpf_iter_task_type type;
25376b0a249aSYonghong Song u32 pid;
25386b0a249aSYonghong Song } task;
25393cee6fb8SMartin KaFai Lau };
25403cee6fb8SMartin KaFai Lau
25413cee6fb8SMartin KaFai Lau typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
2542a5cbe05aSYonghong Song union bpf_iter_link_info *linfo,
2543cf83b2d2SYonghong Song struct bpf_iter_aux_info *aux);
2544cf83b2d2SYonghong Song typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
2545cf83b2d2SYonghong Song typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
2546cf83b2d2SYonghong Song struct seq_file *seq);
25473c32cc1bSYonghong Song typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
2548ae24345dSYonghong Song struct bpf_link_info *info);
2549ae24345dSYonghong Song typedef const struct bpf_func_proto *
25505e7b3020SYonghong Song (*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id,
25515e7b3020SYonghong Song const struct bpf_prog *prog);
25526b0a249aSYonghong Song
25536b0a249aSYonghong Song enum bpf_iter_feature {
25543cee6fb8SMartin KaFai Lau BPF_ITER_RESCHED = BIT(0),
25553c32cc1bSYonghong Song };
2556cf83b2d2SYonghong Song
25573c32cc1bSYonghong Song #define BPF_ITER_CTX_ARG_MAX 2
255814fc6bd6SYonghong Song struct bpf_iter_reg {
2559ae24345dSYonghong Song const char *target;
2560ae24345dSYonghong Song bpf_iter_attach_target_t attach_target;
2561e5158d98SYonghong Song bpf_iter_detach_target_t detach_target;
2562e5158d98SYonghong Song bpf_iter_show_fdinfo_t show_fdinfo;
2563e5158d98SYonghong Song bpf_iter_fill_link_info_t fill_link_info;
2564e5158d98SYonghong Song bpf_iter_get_func_proto_t get_func_proto;
2565e5158d98SYonghong Song u32 ctx_arg_info_size;
2566e5158d98SYonghong Song u32 feature;
2567a5cbe05aSYonghong Song struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
2568a5cbe05aSYonghong Song const struct bpf_iter_seq_info *seq_info;
2569a5cbe05aSYonghong Song };
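/* Registration sketch (modeled on the in-kernel iterators; the "my_target"
 * names are assumptions):
 *
 *	static struct bpf_iter_reg my_target_reg_info = {
 *		.target			= "my_target",
 *		.attach_target		= my_target_attach,
 *		.detach_target		= my_target_detach,
 *		.ctx_arg_info_size	= 1,
 *		.ctx_arg_info		= {
 *			{ offsetof(struct bpf_iter__my_target, obj),
 *			  PTR_TO_BTF_ID_OR_NULL },
 *		},
 *		.seq_info		= &my_target_seq_info,
 *	};
 *
 * and passed to bpf_iter_reg_target() (declared below) at init time.
 */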
2570a5cbe05aSYonghong Song
2571a5cbe05aSYonghong Song struct bpf_iter_meta {
2572a5cbe05aSYonghong Song __bpf_md_ptr(struct seq_file *, seq);
2573a5cbe05aSYonghong Song u64 session_id;
257415172a46SYonghong Song u64 seq_num;
2575ab2ee4fcSYonghong Song };
257643205180SAmery Hung
25773cee6fb8SMartin KaFai Lau struct bpf_iter__bpf_map_elem {
25783cee6fb8SMartin KaFai Lau __bpf_md_ptr(struct bpf_iter_meta *, meta);
2579af2ac3e1SAlexei Starovoitov __bpf_md_ptr(struct bpf_map *, map);
2580ac51d99bSYonghong Song __bpf_md_ptr(void *, key);
2581367ec3e4SYonghong Song __bpf_md_ptr(void *, value);
2582e5158d98SYonghong Song };
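/* BPF-side view (sketch; SEC() and BPF_SEQ_PRINTF() are libbpf conventions,
 * not part of this header):
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		if (ctx->value)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%u\n",
 *				       *(__u32 *)ctx->value);
 *		return 0;
 *	}
 */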
2583e5158d98SYonghong Song
2584b76f2226SYonghong Song int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
2585b76f2226SYonghong Song void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
2586b76f2226SYonghong Song int bpf_iter_prog_supported(struct bpf_prog *prog);
2587b76f2226SYonghong Song const struct bpf_func_proto *
2588ae24345dSYonghong Song bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
2589314ee05eSYonghong Song int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
2590314ee05eSYonghong Song int bpf_iter_new_fd(struct bpf_link *link);
2591314ee05eSYonghong Song bool bpf_link_is_iter(struct bpf_link *link);
2592314ee05eSYonghong Song struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
259315a07b33SAlexei Starovoitov int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
259415a07b33SAlexei Starovoitov void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
259515a07b33SAlexei Starovoitov struct seq_file *seq);
259615a07b33SAlexei Starovoitov int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
259715a07b33SAlexei Starovoitov struct bpf_link_info *info);
259815a07b33SAlexei Starovoitov
2599d056a788SDaniel Borkmann int map_set_for_each_callback_args(struct bpf_verifier_env *env,
2600557c0c6eSAlexei Starovoitov struct bpf_func_state *caller,
260115a07b33SAlexei Starovoitov struct bpf_func_state *callee);
2602d056a788SDaniel Borkmann
2603d056a788SDaniel Borkmann int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
260414dc6f04SMartin KaFai Lau int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
2605bcc6b1b7SMartin KaFai Lau int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2606bcc6b1b7SMartin KaFai Lau u64 flags);
260714dc6f04SMartin KaFai Lau int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
2608d056a788SDaniel Borkmann u64 flags);
26096e71b04aSChenbo Feng
2610af2ac3e1SAlexei Starovoitov int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
2611dcab51f1SMartin KaFai Lau
26126e71b04aSChenbo Feng int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
261361e021f3SDaniel Borkmann void *key, void *value, u64 map_flags);
261447a71c1fSAndrii Nakryiko int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
2615a643bff7SAndrii Nakryiko int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2616a643bff7SAndrii Nakryiko void *key, void *value, u64 map_flags);
26171ea47e01SAlexei Starovoitov int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
2618a643bff7SAndrii Nakryiko
261946f55cffSJohn Fastabend int bpf_get_file_flag(int flags);
262076654e67SAlan Maguire int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
262176654e67SAlan Maguire size_t actual_size);
262246f55cffSJohn Fastabend
2623d53ad5d8SToke Høiland-Jørgensen /* verify correctness of eBPF program */
26246d5fc195SToshiaki Makita int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size);
2625e6a4750fSBjörn Töpel
2626e6a4750fSBjörn Töpel #ifndef CONFIG_BPF_JIT_ALWAYS_ON
262767f29e07SJesper Dangaard Brouer void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
2628d839a731SSebastian Andrzej Siewior #endif
2629d53ad5d8SToke Høiland-Jørgensen
26301d233886SToke Høiland-Jørgensen struct btf *bpf_get_btf_vmlinux(void);
2631d53ad5d8SToke Høiland-Jørgensen
263238edddb8SJesper Dangaard Brouer /* Map specifics */
2633d53ad5d8SToke Høiland-Jørgensen struct xdp_frame;
2634e624d4edSHangbin Liu struct sk_buff;
26356d5fc195SToshiaki Makita struct bpf_dtab_netdev;
26367cd1107fSAlexander Lobakin struct bpf_cpu_map_entry;
2637e624d4edSHangbin Liu
26387cd1107fSAlexander Lobakin void __dev_flush(struct list_head *flush_list);
26397cd1107fSAlexander Lobakin int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
264046f55cffSJohn Fastabend struct net_device *dev_rx);
2641d839a731SSebastian Andrzej Siewior int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
2642d53ad5d8SToke Høiland-Jørgensen struct net_device *dev_rx);
26439c270af3SJesper Dangaard Brouer int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
264411941f8aSKumar Kartikeya Dwivedi struct bpf_map *map, bool exclude_ingress);
264511941f8aSKumar Kartikeya Dwivedi int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
26469c270af3SJesper Dangaard Brouer const struct bpf_prog *xdp_prog);
264796eabe7aSMartin KaFai Lau int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
264896eabe7aSMartin KaFai Lau const struct bpf_prog *xdp_prog,
264996eabe7aSMartin KaFai Lau struct bpf_map *map, bool exclude_ingress);
265096eabe7aSMartin KaFai Lau
265196eabe7aSMartin KaFai Lau void __cpu_map_flush(struct list_head *flush_list);
265296eabe7aSMartin KaFai Lau int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
265396eabe7aSMartin KaFai Lau struct net_device *dev_rx);
2654040ee692SAl Viro int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
26555dc4c4b7SMartin KaFai Lau struct sk_buff *skb);
2656040ee692SAl Viro
2657c695865cSStanislav Fomichev /* Return the map's NUMA node as specified by userspace */
2658c695865cSStanislav Fomichev static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
2659c695865cSStanislav Fomichev {
2660c695865cSStanislav Fomichev return (attr->map_flags & BPF_F_NUMA_NODE) ?
2661da00d2f1SKP Singh attr->numa_node : NUMA_NO_NODE;
2662da00d2f1SKP Singh }
2663da00d2f1SKP Singh
2664c695865cSStanislav Fomichev struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
2665c695865cSStanislav Fomichev int array_map_alloc_check(union bpf_attr *attr);
2666c695865cSStanislav Fomichev
26671b4d60ecSSong Liu int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
26681b4d60ecSSong Liu union bpf_attr __user *uattr);
26691b4d60ecSSong Liu int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
26707c32e8f8SLorenz Bauer union bpf_attr __user *uattr);
26717c32e8f8SLorenz Bauer int bpf_prog_test_run_tracing(struct bpf_prog *prog,
26727c32e8f8SLorenz Bauer const union bpf_attr *kattr,
26732b99ef22SFlorian Westphal union bpf_attr __user *uattr);
26742b99ef22SFlorian Westphal int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
26752b99ef22SFlorian Westphal const union bpf_attr *kattr,
26769e15db66SAlexei Starovoitov union bpf_attr __user *uattr);
26779e15db66SAlexei Starovoitov int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
26789e15db66SAlexei Starovoitov const union bpf_attr *kattr,
267935346ab6SHou Tao union bpf_attr __user *uattr);
268035346ab6SHou Tao int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
268135346ab6SHou Tao const union bpf_attr *kattr,
268235346ab6SHou Tao union bpf_attr __user *uattr);
268335346ab6SHou Tao int bpf_prog_test_run_nf(struct bpf_prog *prog,
268435346ab6SHou Tao const union bpf_attr *kattr,
268535346ab6SHou Tao union bpf_attr __user *uattr);
268635346ab6SHou Tao bool btf_ctx_access(int off, int size, enum bpf_access_type type,
268735346ab6SHou Tao const struct bpf_prog *prog,
268835346ab6SHou Tao struct bpf_insn_access_aux *info);
268935346ab6SHou Tao
269035346ab6SHou Tao static inline bool bpf_tracing_ctx_access(int off, int size,
269135346ab6SHou Tao enum bpf_access_type type)
269235346ab6SHou Tao {
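	/* A tracing ctx is an array of up to MAX_BPF_FUNC_ARGS u64 slots;
	 * only aligned BPF_READs within that window are allowed.
	 */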
269335346ab6SHou Tao if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
269435346ab6SHou Tao return false;
269535346ab6SHou Tao if (type != BPF_READ)
269635346ab6SHou Tao return false;
269735346ab6SHou Tao if (off % size != 0)
269835346ab6SHou Tao return false;
269935346ab6SHou Tao return true;
270035346ab6SHou Tao }
270135346ab6SHou Tao
27026728aea7SKumar Kartikeya Dwivedi static inline bool bpf_tracing_btf_ctx_access(int off, int size,
27036728aea7SKumar Kartikeya Dwivedi enum bpf_access_type type,
27046728aea7SKumar Kartikeya Dwivedi const struct bpf_prog *prog,
270563260df1SAlexei Starovoitov struct bpf_insn_access_aux *info)
2706faaf4a79SJiri Olsa {
270722dc4a0fSAndrii Nakryiko if (!bpf_tracing_ctx_access(off, size, type))
27082ab3b380SKumar Kartikeya Dwivedi return false;
27092ab3b380SKumar Kartikeya Dwivedi return btf_ctx_access(off, size, type, prog, info);
27109e15db66SAlexei Starovoitov }
2711fec56f58SAlexei Starovoitov
2712fec56f58SAlexei Starovoitov int btf_struct_access(struct bpf_verifier_log *log,
2713fec56f58SAlexei Starovoitov const struct bpf_reg_state *reg,
2714fec56f58SAlexei Starovoitov int off, int size, enum bpf_access_type atype,
2715fec56f58SAlexei Starovoitov u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name);
2716fec56f58SAlexei Starovoitov bool btf_struct_ids_match(struct bpf_verifier_log *log,
271751c39bb1SAlexei Starovoitov const struct btf *btf, u32 id, int off,
27184ba1d0f2SAndrii Nakryiko const struct btf *need_btf, u32 need_type_id,
2719efc68158SToke Høiland-Jørgensen bool strict);
2720be8704ffSAlexei Starovoitov
2721b9ae0c9dSKumar Kartikeya Dwivedi int btf_distill_func_proto(struct bpf_verifier_log *log,
2722b9ae0c9dSKumar Kartikeya Dwivedi struct btf *btf,
2723522bb2c1SAndrii Nakryiko const struct btf_type *func_proto,
2724522bb2c1SAndrii Nakryiko const char *func_name,
27258c1b6e69SAlexei Starovoitov struct btf_func_model *m);
27267e6897f9SBjörn Töpel
2727005142b8SAlexei Starovoitov struct bpf_reg_state;
27287e6897f9SBjörn Töpel int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog);
2729bbc1d247SAndrii Nakryiko int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
2730bbc1d247SAndrii Nakryiko struct btf *btf, const struct btf_type *t);
2731a10787e6SSong Liu const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
2732c4bcfb38SYonghong Song int comp_idx, const char *tag_key);
2733e6ac2450SMartin KaFai Lau int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt,
2734e6ac2450SMartin KaFai Lau int comp_idx, const char *tag_key, int last_id);
2735e6ac2450SMartin KaFai Lau
2736e6ac2450SMartin KaFai Lau struct bpf_prog *bpf_prog_by_id(u32 id);
27371cf3bfc6SIlya Leoshkevich struct bpf_link *bpf_link_by_id(u32 id);
27381cf3bfc6SIlya Leoshkevich
27391cf3bfc6SIlya Leoshkevich const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id,
2740fbd94c7aSAlexei Starovoitov const struct bpf_prog *prog);
2741fbd94c7aSAlexei Starovoitov void bpf_task_storage_free(struct task_struct *task);
2742fbd94c7aSAlexei Starovoitov void bpf_cgrp_storage_free(struct cgroup *cgroup);
2743fbd94c7aSAlexei Starovoitov bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog);
2744fbd94c7aSAlexei Starovoitov const struct btf_func_model *
274557539b1cSDavid Vernet bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
274657539b1cSDavid Vernet const struct bpf_insn *insn);
274763260df1SAlexei Starovoitov int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
274857539b1cSDavid Vernet u16 btf_fd_idx, u8 **func_addr);
2749b613d335SDavid Vernet
2750b613d335SDavid Vernet struct bpf_core_ctx {
2751b613d335SDavid Vernet struct bpf_verifier_log *log;
2752b613d335SDavid Vernet const struct btf *btf;
2753fbd94c7aSAlexei Starovoitov };
2754fbd94c7aSAlexei Starovoitov
2755fbd94c7aSAlexei Starovoitov bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
275644a3918cSJosh Poimboeuf const struct bpf_reg_state *reg,
275744a3918cSJosh Poimboeuf const char *field_name, u32 btf_id, const char *suffix);
275844a3918cSJosh Poimboeuf
275944a3918cSJosh Poimboeuf bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
276044a3918cSJosh Poimboeuf const struct btf *reg_btf, u32 reg_id,
276124426654SMartin KaFai Lau const struct btf *arg_btf, u32 arg_id);
276224426654SMartin KaFai Lau
276324426654SMartin KaFai Lau int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
276424426654SMartin KaFai Lau int relo_idx, void *insn);
276524426654SMartin KaFai Lau
276624426654SMartin KaFai Lau static inline bool unprivileged_ebpf_enabled(void)
276724426654SMartin KaFai Lau {
276824426654SMartin KaFai Lau return !sysctl_unprivileged_bpf_disabled;
276924426654SMartin KaFai Lau }
277005b24ff9SJiri Olsa
277105b24ff9SJiri Olsa /* Not all bpf prog types have a bpf_ctx.
27728357b366SJoanne Koong  * For the bpf prog types that have initialized the bpf_ctx,
27738357b366SJoanne Koong * this function can be used to decide if a kernel function
27748357b366SJoanne Koong * is called by a bpf program.
27758357b366SJoanne Koong */
27768357b366SJoanne Koong static inline bool has_current_bpf_ctx(void)
27779a675ba5SSebastian Andrzej Siewior {
27789c270af3SJesper Dangaard Brouer return !!current->bpf_ctx;
27790fc174deSDaniel Borkmann }
27800fc174deSDaniel Borkmann
27810fc174deSDaniel Borkmann void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
27820fc174deSDaniel Borkmann
27830fc174deSDaniel Borkmann void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
2784248f346fSJakub Kicinski enum bpf_dynptr_type type, u32 offset, u32 size);
2785248f346fSJakub Kicinski void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
2786288b3de5SJakub Kicinski void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
2787248f346fSJakub Kicinski
2788248f346fSJakub Kicinski #else /* !CONFIG_BPF_SYSCALL */
2789248f346fSJakub Kicinski static inline struct bpf_prog *bpf_prog_get(u32 ufd)
2790248f346fSJakub Kicinski {
279185192dbfSAndrii Nakryiko return ERR_PTR(-EOPNOTSUPP);
2792cc2e0b3fSBrenden Blanco }
2793cc2e0b3fSBrenden Blanco
2794113214beSDaniel Borkmann static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
2795c540594fSDaniel Borkmann enum bpf_prog_type type,
2796c540594fSDaniel Borkmann bool attach_drv)
2797c540594fSDaniel Borkmann {
2798c540594fSDaniel Borkmann return ERR_PTR(-EOPNOTSUPP);
27990fc174deSDaniel Borkmann }
28000fc174deSDaniel Borkmann
bpf_prog_add(struct bpf_prog * prog,int i)28010fc174deSDaniel Borkmann static inline void bpf_prog_add(struct bpf_prog *prog, int i)
28026d67942dSDaniel Borkmann {
280385192dbfSAndrii Nakryiko }
2804aa6a5f3cSAlexei Starovoitov
bpf_prog_sub(struct bpf_prog * prog,int i)2805aa6a5f3cSAlexei Starovoitov static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
28065ccb071eSDaniel Borkmann {
2807a6f6df69SJohn Fastabend }
2808a6f6df69SJohn Fastabend
bpf_prog_put(struct bpf_prog * prog)2809a6f6df69SJohn Fastabend static inline void bpf_prog_put(struct bpf_prog *prog)
2810a6f6df69SJohn Fastabend {
2811a6f6df69SJohn Fastabend }
2812a6f6df69SJohn Fastabend
bpf_prog_inc(struct bpf_prog * prog)28136cc7d1e8SAndrii Nakryiko static inline void bpf_prog_inc(struct bpf_prog *prog)
28146cc7d1e8SAndrii Nakryiko {
28156cc7d1e8SAndrii Nakryiko }
28166cc7d1e8SAndrii Nakryiko
28176cc7d1e8SAndrii Nakryiko static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog * prog)28186cc7d1e8SAndrii Nakryiko bpf_prog_inc_not_zero(struct bpf_prog *prog)
281961c6fefaSAndrii Nakryiko {
282061c6fefaSAndrii Nakryiko return ERR_PTR(-EOPNOTSUPP);
282161c6fefaSAndrii Nakryiko }
282261c6fefaSAndrii Nakryiko
bpf_link_init(struct bpf_link * link,enum bpf_link_type type,const struct bpf_link_ops * ops,struct bpf_prog * prog)282361c6fefaSAndrii Nakryiko static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
282461c6fefaSAndrii Nakryiko const struct bpf_link_ops *ops,
28256cc7d1e8SAndrii Nakryiko struct bpf_prog *prog)
28266cc7d1e8SAndrii Nakryiko {
28276cc7d1e8SAndrii Nakryiko }
28286cc7d1e8SAndrii Nakryiko
bpf_link_init_sleepable(struct bpf_link * link,enum bpf_link_type type,const struct bpf_link_ops * ops,struct bpf_prog * prog,bool sleepable)28296cc7d1e8SAndrii Nakryiko static inline void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
28306cc7d1e8SAndrii Nakryiko const struct bpf_link_ops *ops, struct bpf_prog *prog,
28316cc7d1e8SAndrii Nakryiko bool sleepable)
28326cc7d1e8SAndrii Nakryiko {
28336cc7d1e8SAndrii Nakryiko }
28346cc7d1e8SAndrii Nakryiko
bpf_link_prime(struct bpf_link * link,struct bpf_link_primer * primer)28356cc7d1e8SAndrii Nakryiko static inline int bpf_link_prime(struct bpf_link *link,
28366cc7d1e8SAndrii Nakryiko struct bpf_link_primer *primer)
28376cc7d1e8SAndrii Nakryiko {
28386cc7d1e8SAndrii Nakryiko return -EOPNOTSUPP;
28396cc7d1e8SAndrii Nakryiko }
28406cc7d1e8SAndrii Nakryiko
bpf_link_settle(struct bpf_link_primer * primer)28416cc7d1e8SAndrii Nakryiko static inline int bpf_link_settle(struct bpf_link_primer *primer)
28426cc7d1e8SAndrii Nakryiko {
28436cc7d1e8SAndrii Nakryiko return -EOPNOTSUPP;
284467c3e835SKui-Feng Lee }
284567c3e835SKui-Feng Lee
bpf_link_cleanup(struct bpf_link_primer * primer)284667c3e835SKui-Feng Lee static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
284767c3e835SKui-Feng Lee {
284867c3e835SKui-Feng Lee }
28496cc7d1e8SAndrii Nakryiko
bpf_link_inc(struct bpf_link * link)28506cc7d1e8SAndrii Nakryiko static inline void bpf_link_inc(struct bpf_link *link)
28516cc7d1e8SAndrii Nakryiko {
28526cc7d1e8SAndrii Nakryiko }
28536e71b04aSChenbo Feng
bpf_link_inc_not_zero(struct bpf_link * link)285498589a09SShmulik Ladkani static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
285598589a09SShmulik Ladkani {
285698589a09SShmulik Ladkani return NULL;
285798589a09SShmulik Ladkani }
285835f96de0SAndrii Nakryiko
bpf_link_put(struct bpf_link * link)285935f96de0SAndrii Nakryiko static inline void bpf_link_put(struct bpf_link *link)
286035f96de0SAndrii Nakryiko {
286135f96de0SAndrii Nakryiko }
286235f96de0SAndrii Nakryiko
bpf_obj_get_user(const char __user * pathname,int flags)286335f96de0SAndrii Nakryiko static inline int bpf_obj_get_user(const char __user *pathname, int flags)
286435f96de0SAndrii Nakryiko {
286535f96de0SAndrii Nakryiko return -EOPNOTSUPP;
286635f96de0SAndrii Nakryiko }
286735f96de0SAndrii Nakryiko
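/*
 * Without CONFIG_BPF_SYSCALL there are no BPF tokens, so the token-aware
 * capability check below degrades to a plain capable() test; note that
 * CAP_SYS_ADMIN still satisfies any other requested capability.
 */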
static inline bool bpf_token_capable(const struct bpf_token *token, int cap)
{
	return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN));
}

static inline void bpf_token_inc(struct bpf_token *token)
{
}

static inline void bpf_token_put(struct bpf_token *token)
{
}

static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void __dev_flush(struct list_head *flush_list)
{
}

struct xdp_frame;
struct bpf_dtab_netdev;
struct bpf_cpu_map_entry;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   const struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   const struct bpf_prog *xdp_prog,
			   struct bpf_map *map, bool exclude_ingress)
{
	return 0;
}

static inline void __cpu_map_flush(struct list_head *flush_list)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_frame *xdpf,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
					   struct sk_buff *skb)
{
	return -EOPNOTSUPP;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
					      const union bpf_attr *kattr,
					      union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline int btf_struct_access(struct bpf_verifier_log *log,
				    const struct bpf_reg_state *reg,
				    int off, int size, enum bpf_access_type atype,
				    u32 *next_btf_id, enum bpf_type_flag *flag,
				    const char **field_name)
{
	return -EACCES;
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return NULL;
}

static inline void bpf_task_storage_free(struct task_struct *task)
{
}

static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog)
{
	return false;
}

static inline const struct btf_func_model *
bpf_jit_find_kfunc_model(const struct bpf_prog *prog,
			 const struct bpf_insn *insn)
{
	return NULL;
}

static inline int
bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id,
		   u16 btf_fd_idx, u8 **func_addr)
{
	return -ENOTSUPP;
}

static inline bool unprivileged_ebpf_enabled(void)
{
	return false;
}

static inline bool has_current_bpf_ctx(void)
{
	return false;
}

static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
{
}

static inline void bpf_cgrp_storage_free(struct cgroup *cgroup)
{
}

static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
				   enum bpf_dynptr_type type, u32 offset, u32 size)
{
}

static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
{
}

static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
{
}
#endif /* CONFIG_BPF_SYSCALL */

static __always_inline int
bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
{
	int ret = -EFAULT;

	if (IS_ENABLED(CONFIG_BPF_EVENTS))
		ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
		memset(dst, 0, size);
	return ret;
}
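
/*
 * Usage sketch (hypothetical caller, for illustration): the helper above
 * either fills @dst completely or zeroes it, so callers may consume the
 * buffer even on failure without leaking stale stack contents.
 */
static inline u64 example_read_kernel_u64(const void *unsafe_ptr)
{
	u64 val;

	/* val is zero-filled if the probe read faults. */
	bpf_probe_read_kernel_common(&val, sizeof(val), unsafe_ptr);
	return val;
}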

void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len);

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_dev_bound_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

void unpriv_ebpf_notify(int new_state);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
			      struct bpf_prog_aux *prog_aux);
void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id);
int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr);
int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog);
void bpf_dev_bound_netdev_unregister(struct net_device *dev);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->dev_bound;
}

static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs);

static inline bool bpf_map_is_offloaded(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);

int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
int sock_map_bpf_prog_query(const union bpf_attr *attr,
			    union bpf_attr __user *uattr);
int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog);

void sock_map_unhash(struct sock *sk);
void sock_map_destroy(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log,
					    struct bpf_prog_aux *prog_aux)
{
	return -EOPNOTSUPP;
}

static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog,
						u32 func_id)
{
	return NULL;
}

static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog,
					  union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog,
					     struct bpf_prog *old_prog)
{
	return -EOPNOTSUPP;
}

static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev)
{
}

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs)
{
	return false;
}

static inline bool bpf_map_is_offloaded(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}

static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
{
	return 0;
}

static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

#ifdef CONFIG_BPF_SYSCALL
static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
				       enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
					   u64 flags)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
					  union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

static __always_inline void
bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
{
	const struct bpf_prog_array_item *item;
	struct bpf_prog *prog;

	if (unlikely(!array))
		return;

	item = &array->items[0];
	while ((prog = READ_ONCE(item->prog))) {
		bpf_prog_inc_misses_counter(prog);
		item++;
	}
}
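
/*
 * Usage sketch (hypothetical caller, for illustration): when an attach point
 * skips running its programs, e.g. due to recursion protection, each program
 * in the (RCU-protected) array gets one miss accounted.
 */
static inline void example_account_skipped_run(struct bpf_prog_array __rcu *progs)
{
	rcu_read_lock();
	/* A NULL array is tolerated; see the check above. */
	bpf_prog_inc_misses_counters(rcu_dereference(progs));
	rcu_read_unlock();
}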

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;
extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_stack_sleepable_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_snprintf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
extern const struct bpf_func_proto bpf_loop_proto;
extern const struct bpf_func_proto bpf_copy_from_user_task_proto;
extern const struct bpf_func_proto bpf_set_retval_proto;
extern const struct bpf_func_proto bpf_get_retval_proto;
extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
extern const struct bpf_func_proto bpf_cgrp_storage_get_proto;
extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
			       struct bpf_dynptr *ptr);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags,
					     struct bpf_dynptr *ptr)
{
	return -EOPNOTSUPP;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	struct sock *migrating_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);
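
/*
 * Illustrative sketch (hypothetical values): live-patching a direct call
 * emitted by the JIT. @ip is the patch site, addr1/addr2 the expected old
 * and new targets; a NULL address on either side means the site holds, or
 * is left as, a nop:
 *
 *	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_tramp, new_tramp);
 */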

void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
			       struct bpf_prog *new, struct bpf_prog *old);

void *bpf_arch_text_copy(void *dst, void *src, size_t len);
int bpf_arch_text_invalidate(void *dst, size_t len);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

#define MAX_BPRINTF_VARARGS	12
#define MAX_BPRINTF_BUF		1024

struct bpf_bprintf_data {
	u32 *bin_args;
	char *buf;
	bool get_bin_args;
	bool get_buf;
};

int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
			u32 num_args, struct bpf_bprintf_data *data);
void bpf_bprintf_cleanup(struct bpf_bprintf_data *data);
3536f3cf4134SRoberto Sassu
3537f3cf4134SRoberto Sassu #ifdef CONFIG_BPF_LSM
3538f3cf4134SRoberto Sassu void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype);
3539f3cf4134SRoberto Sassu void bpf_cgroup_atype_put(int cgroup_atype);
3540f3cf4134SRoberto Sassu #else
bpf_cgroup_atype_get(u32 attach_btf_id,int cgroup_atype)3541f3cf4134SRoberto Sassu static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {}
bpf_cgroup_atype_put(int cgroup_atype)3542f3cf4134SRoberto Sassu static inline void bpf_cgroup_atype_put(int cgroup_atype) {}
3543282de143SKumar Kartikeya Dwivedi #endif /* CONFIG_BPF_LSM */
3544282de143SKumar Kartikeya Dwivedi
3545282de143SKumar Kartikeya Dwivedi struct key;
3546282de143SKumar Kartikeya Dwivedi
3547282de143SKumar Kartikeya Dwivedi #ifdef CONFIG_KEYS
3548282de143SKumar Kartikeya Dwivedi struct bpf_key {
3549ee53cbfbSYafang Shao struct key *key;
3550ee53cbfbSYafang Shao bool has_ref;
3551ee53cbfbSYafang Shao };
3552ee53cbfbSYafang Shao #endif /* CONFIG_KEYS */
3553ee53cbfbSYafang Shao
type_is_alloc(u32 type)3554ee53cbfbSYafang Shao static inline bool type_is_alloc(u32 type)
3555ee53cbfbSYafang Shao {
35569af27da6SKumar Kartikeya Dwivedi return type & MEM_ALLOC;
35579af27da6SKumar Kartikeya Dwivedi }
35589af27da6SKumar Kartikeya Dwivedi
bpf_memcg_flags(gfp_t flags)35599af27da6SKumar Kartikeya Dwivedi static inline gfp_t bpf_memcg_flags(gfp_t flags)
35609af27da6SKumar Kartikeya Dwivedi {
356199c55f7dSAlexei Starovoitov if (memcg_bpf_enabled())
3562 return flags | __GFP_ACCOUNT;
3563 return flags;
3564 }
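
/*
 * Usage sketch (hypothetical, for illustration): map implementations route
 * their allocations through bpf_memcg_flags() so bpf memory is charged to
 * the memory cgroup only when memcg-based accounting is enabled.
 */
static inline void *example_map_alloc(size_t size)
{
	/* Becomes GFP_KERNEL | __GFP_ACCOUNT under memcg accounting. */
	return kmalloc(size, bpf_memcg_flags(GFP_KERNEL));
}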

static inline bool bpf_is_subprog(const struct bpf_prog *prog)
{
	return prog->aux->func_idx != 0;
}

#endif /* _LINUX_BPF_H */