/* kernel/bpf/core.c (Linux 6.15, revision ae0a457f) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <[email protected]>
 *	Alexei Starovoitov <[email protected]>
 *	Daniel Borkmann <[email protected]>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/prandom.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/objtool.h>
#include <linux/overflow.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
#include <linux/nodemask.h>
#include <linux/nospec.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <linux/execmem.h>

#include <asm/barrier.h>
#include <linux/unaligned.h>

/* Registers */
#define BPF_R0	regs[BPF_REG_0]
#define BPF_R1	regs[BPF_REG_1]
#define BPF_R2	regs[BPF_REG_2]
#define BPF_R3	regs[BPF_REG_3]
#define BPF_R4	regs[BPF_REG_4]
#define BPF_R5	regs[BPF_REG_5]
#define BPF_R6	regs[BPF_REG_6]
#define BPF_R7	regs[BPF_REG_7]
#define BPF_R8	regs[BPF_REG_8]
#define BPF_R9	regs[BPF_REG_9]
#define BPF_R10	regs[BPF_REG_10]

/* Named registers */
#define DST	regs[insn->dst_reg]
#define SRC	regs[insn->src_reg]
#define FP	regs[BPF_REG_FP]
#define AX	regs[BPF_REG_AX]
#define ARG1	regs[BPF_REG_ARG1]
#define CTX	regs[BPF_REG_CTX]
#define OFF	insn->off
#define IMM	insn->imm

struct bpf_mem_alloc bpf_global_ma;
bool bpf_global_ma_set;

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF) {
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	} else if (k >= SKF_LL_OFF) {
		if (unlikely(!skb_mac_header_was_set(skb)))
			return NULL;
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
	}
	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
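
/* Illustrative example: a classic BPF ancillary load with k = SKF_NET_OFF + 4
 * resolves to offset 4 into the network header, and k = SKF_LL_OFF resolves
 * to the first byte of the MAC header; the pointer is only returned when the
 * whole [ptr, ptr + size) range still lies within the linear skb data.
 */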

/* tell bpf programs that include vmlinux.h kernel's PAGE_SIZE */
enum page_size_enum {
	__PAGE_SIZE = PAGE_SIZE
};

struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog_aux *aux;
	struct bpf_prog *fp;

	size = round_up(size, __PAGE_SIZE);
	fp = __vmalloc(size, gfp_flags);
	if (fp == NULL)
		return NULL;

	aux = kzalloc(sizeof(*aux), bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
	if (aux == NULL) {
		vfree(fp);
		return NULL;
	}
	fp->active = alloc_percpu_gfp(int, bpf_memcg_flags(GFP_KERNEL | gfp_extra_flags));
	if (!fp->active) {
		vfree(fp);
		kfree(aux);
		return NULL;
	}

	fp->pages = size / PAGE_SIZE;
	fp->aux = aux;
	fp->aux->prog = fp;
	fp->jit_requested = ebpf_jit_enabled();
	fp->blinding_requested = bpf_jit_blinding_enabled(fp);
#ifdef CONFIG_CGROUP_BPF
	aux->cgroup_atype = CGROUP_BPF_ATTACH_TYPE_INVALID;
#endif

	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
#ifdef CONFIG_FINEIBT
	INIT_LIST_HEAD_RCU(&fp->aux->ksym_prefix.lnode);
#endif
	mutex_init(&fp->aux->used_maps_mutex);
	mutex_init(&fp->aux->ext_mutex);
	mutex_init(&fp->aux->dst_mutex);

	return fp;
}

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog *prog;
	int cpu;

	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
	if (!prog)
		return NULL;

	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
	if (!prog->stats) {
		free_percpu(prog->active);
		kfree(prog->aux);
		vfree(prog);
		return NULL;
	}

	for_each_possible_cpu(cpu) {
		struct bpf_prog_stats *pstats;

		pstats = per_cpu_ptr(prog->stats, cpu);
		u64_stats_init(&pstats->syncp);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);

int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
{
	if (!prog->aux->nr_linfo || !prog->jit_requested)
		return 0;

	prog->aux->jited_linfo = kvcalloc(prog->aux->nr_linfo,
					  sizeof(*prog->aux->jited_linfo),
					  bpf_memcg_flags(GFP_KERNEL | __GFP_NOWARN));
	if (!prog->aux->jited_linfo)
		return -ENOMEM;

	return 0;
}

void bpf_prog_jit_attempt_done(struct bpf_prog *prog)
{
	if (prog->aux->jited_linfo &&
	    (!prog->jited || !prog->aux->jited_linfo[0])) {
		kvfree(prog->aux->jited_linfo);
		prog->aux->jited_linfo = NULL;
	}

	kfree(prog->aux->kfunc_tab);
	prog->aux->kfunc_tab = NULL;
}

/* The JIT engine is responsible for providing an array
 * for the insn_off to jited_off mapping (insn_to_jit_off).
 *
 * The idx into this array is the insn_off.  Hence, the insn_off
 * here is relative to the prog itself instead of the main prog.
 * This array has one entry for each xlated bpf insn.
 *
 * jited_off is the byte off to the end of the jited insn.
 *
 * Hence, with
 * insn_start:
 *      The first bpf insn off of the prog.  The insn off
 *      here is relative to the main prog.
 *      e.g. if prog is a subprog, insn_start > 0
 * linfo_idx:
 *      The prog's idx to prog->aux->linfo and jited_linfo
 *
 * jited_linfo[linfo_idx] = prog->bpf_func
 *
 * For i > linfo_idx,
 *
 * jited_linfo[i] = prog->bpf_func +
 *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 */
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off)
{
	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
	const struct bpf_line_info *linfo;
	void **jited_linfo;

	if (!prog->aux->jited_linfo || prog->aux->func_idx > prog->aux->func_cnt)
		/* Userspace did not provide linfo */
		return;

	linfo_idx = prog->aux->linfo_idx;
	linfo = &prog->aux->linfo[linfo_idx];
	insn_start = linfo[0].insn_off;
	insn_end = insn_start + prog->len;

	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
	jited_linfo[0] = prog->bpf_func;

	nr_linfo = prog->aux->nr_linfo - linfo_idx;

	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
		/* The verifier ensures that linfo[i].insn_off is
		 * strictly increasing
		 */
		jited_linfo[i] = prog->bpf_func +
			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
}
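
/* Illustrative example: per the mapping above, the jited address recorded
 * for line info entry i > 0 of this prog is
 *
 *	prog->bpf_func + insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
 *
 * i.e. the end offset of the previous jited insn, which is exactly where
 * the insn at linfo[i].insn_off starts in the jited image.
 */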

struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags)
{
	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
	struct bpf_prog *fp;
	u32 pages;

	size = round_up(size, PAGE_SIZE);
	pages = size / PAGE_SIZE;
	if (pages <= fp_old->pages)
		return fp_old;

	fp = __vmalloc(size, gfp_flags);
	if (fp) {
		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
		fp->pages = pages;
		fp->aux->prog = fp;

		/* We keep fp->aux from fp_old around in the new
		 * reallocated structure.
		 */
		fp_old->aux = NULL;
		fp_old->stats = NULL;
		fp_old->active = NULL;
		__bpf_prog_free(fp_old);
	}

	return fp;
}

void __bpf_prog_free(struct bpf_prog *fp)
{
	if (fp->aux) {
		mutex_destroy(&fp->aux->used_maps_mutex);
		mutex_destroy(&fp->aux->dst_mutex);
		kfree(fp->aux->poke_tab);
		kfree(fp->aux);
	}
	free_percpu(fp->stats);
	free_percpu(fp->active);
	vfree(fp);
}

int bpf_prog_calc_tag(struct bpf_prog *fp)
{
	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
	u32 raw_size = bpf_prog_tag_scratch_size(fp);
	u32 digest[SHA1_DIGEST_WORDS];
	u32 ws[SHA1_WORKSPACE_WORDS];
	u32 i, bsize, psize, blocks;
	struct bpf_insn *dst;
	bool was_ld_map;
	u8 *raw, *todo;
	__be32 *result;
	__be64 *bits;

	raw = vmalloc(raw_size);
	if (!raw)
		return -ENOMEM;

	sha1_init(digest);
	memset(ws, 0, sizeof(ws));

	/* We need to take out the map fds for the digest calculation
	 * since they are unstable from the user space side.
	 */
	dst = (void *)raw;
	for (i = 0, was_ld_map = false; i < fp->len; i++) {
		dst[i] = fp->insnsi[i];
		if (!was_ld_map &&
		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
		    (dst[i].src_reg == BPF_PSEUDO_MAP_FD ||
		     dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) {
			was_ld_map = true;
			dst[i].imm = 0;
		} else if (was_ld_map &&
			   dst[i].code == 0 &&
			   dst[i].dst_reg == 0 &&
			   dst[i].src_reg == 0 &&
			   dst[i].off == 0) {
			was_ld_map = false;
			dst[i].imm = 0;
		} else {
			was_ld_map = false;
		}
	}

	psize = bpf_prog_insn_size(fp);
	memset(&raw[psize], 0, raw_size - psize);
	raw[psize++] = 0x80;

	bsize  = round_up(psize, SHA1_BLOCK_SIZE);
	blocks = bsize / SHA1_BLOCK_SIZE;
	todo   = raw;
	if (bsize - psize >= sizeof(__be64)) {
		bits = (__be64 *)(todo + bsize - sizeof(__be64));
	} else {
		bits = (__be64 *)(todo + bsize + bits_offset);
		blocks++;
	}
	*bits = cpu_to_be64((psize - 1) << 3);

	while (blocks--) {
		sha1_transform(digest, todo, ws);
		todo += SHA1_BLOCK_SIZE;
	}

	result = (__force __be32 *)digest;
	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
		result[i] = cpu_to_be32(digest[i]);
	memcpy(fp->tag, result, sizeof(fp->tag));

	vfree(raw);
	return 0;
}
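
/* Worked example (illustrative): a program of two insns occupies 16 bytes,
 * so psize becomes 17 after the 0x80 padding byte is appended, bsize rounds
 * up to one 64-byte SHA1 block, the big-endian bit count (16 << 3 = 128) is
 * stored in the last 8 bytes of that block, a single sha1_transform() pass
 * runs, and the first 8 digest bytes become fp->tag (the "prog_tag" shown
 * to user space, e.g. in the prog fd's fdinfo).
 */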

static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
	s32 delta = end_new - end_old;
	s64 imm = insn->imm;

	if (curr < pos && curr + imm + 1 >= end_old)
		imm += delta;
	else if (curr >= end_new && curr + imm + 1 < end_new)
		imm -= delta;
	if (imm < imm_min || imm > imm_max)
		return -ERANGE;
	if (!probe_pass)
		insn->imm = imm;
	return 0;
}

static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
				s32 end_new, s32 curr, const bool probe_pass)
{
	s64 off_min, off_max, off;
	s32 delta = end_new - end_old;

	if (insn->code == (BPF_JMP32 | BPF_JA)) {
		off = insn->imm;
		off_min = S32_MIN;
		off_max = S32_MAX;
	} else {
		off = insn->off;
		off_min = S16_MIN;
		off_max = S16_MAX;
	}

	if (curr < pos && curr + off + 1 >= end_old)
		off += delta;
	else if (curr >= end_new && curr + off + 1 < end_new)
		off -= delta;
	if (off < off_min || off > off_max)
		return -ERANGE;
	if (!probe_pass) {
		if (insn->code == (BPF_JMP32 | BPF_JA))
			insn->imm = off;
		else
			insn->off = off;
	}
	return 0;
}

static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
			    s32 end_new, const bool probe_pass)
{
	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
	struct bpf_insn *insn = prog->insnsi;
	int ret = 0;

	for (i = 0; i < insn_cnt; i++, insn++) {
		u8 code;

		/* In the probing pass we still operate on the original,
		 * unpatched image in order to check overflows before we
		 * do any other adjustments. Therefore skip the patchlet.
		 */
		if (probe_pass && i == pos) {
			i = end_new;
			insn = prog->insnsi + end_old;
		}
		if (bpf_pseudo_func(insn)) {
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
			if (ret)
				return ret;
			continue;
		}
		code = insn->code;
		if ((BPF_CLASS(code) != BPF_JMP &&
		     BPF_CLASS(code) != BPF_JMP32) ||
		    BPF_OP(code) == BPF_EXIT)
			continue;
		/* Adjust offset of jmps if we cross patch boundaries. */
		if (BPF_OP(code) == BPF_CALL) {
			if (insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
						   end_new, i, probe_pass);
		} else {
			ret = bpf_adj_delta_to_off(insn, pos, end_old,
						   end_new, i, probe_pass);
		}
		if (ret)
			break;
	}

	return ret;
}
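
/* Illustrative example: replacing the single insn at pos with a three insn
 * patchlet gives end_old = pos + 1, end_new = pos + 3, delta = 2.  A jump
 * sitting before pos whose target was at or beyond end_old gets its offset
 * (or imm, for pseudo calls and gotol) increased by 2, a jump at or beyond
 * end_new whose target was before end_new gets it decreased by 2, and
 * -ERANGE is returned if the adjusted value no longer fits the field.
 */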

static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
{
	struct bpf_line_info *linfo;
	u32 i, nr_linfo;

	nr_linfo = prog->aux->nr_linfo;
	if (!nr_linfo || !delta)
		return;

	linfo = prog->aux->linfo;

	for (i = 0; i < nr_linfo; i++)
		if (off < linfo[i].insn_off)
			break;

	/* Push all off < linfo[i].insn_off by delta */
	for (; i < nr_linfo; i++)
		linfo[i].insn_off += delta;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len)
{
	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
	const u32 cnt_max = S16_MAX;
	struct bpf_prog *prog_adj;
	int err;

	/* Since our patchlet doesn't expand the image, we're done. */
	if (insn_delta == 0) {
		memcpy(prog->insnsi + off, patch, sizeof(*patch));
		return prog;
	}

	insn_adj_cnt = prog->len + insn_delta;

	/* Reject anything that would potentially let the insn->off
	 * target overflow when we have excessive program expansions.
	 * We need to probe here before we do any reallocation where
	 * we afterwards may not fail anymore.
	 */
	if (insn_adj_cnt > cnt_max &&
	    (err = bpf_adj_branches(prog, off, off + 1, off + len, true)))
		return ERR_PTR(err);

	/* Several new instructions need to be inserted. Make room
	 * for them. Likely, there's no need for a new allocation as
	 * last page could have large enough tailroom.
	 */
	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
				    GFP_USER);
	if (!prog_adj)
		return ERR_PTR(-ENOMEM);

	prog_adj->len = insn_adj_cnt;

	/* Patching happens in 3 steps:
	 *
	 * 1) Move over tail of insnsi from next instruction onwards,
	 *    so we can patch the single target insn with one or more
	 *    new ones (patching is always from 1 to n insns, n > 0).
	 * 2) Inject new instructions at the target location.
	 * 3) Adjust branch offsets if necessary.
	 */
	insn_rest = insn_adj_cnt - off - len;

	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
		sizeof(*patch) * insn_rest);
	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);

	/* We are guaranteed to not fail at this point, otherwise
	 * the ship has sailed to reverse to the original state. An
	 * overflow cannot happen at this point.
	 */
	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));

	bpf_adj_linfo(prog_adj, off, insn_delta);

	return prog_adj;
}
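
/* Usage sketch (illustrative; roughly how the verifier's insn rewriting
 * uses this helper, error handling trimmed):
 *
 *	struct bpf_insn patchlet[] = { ... };	// 1..n replacement insns
 *	struct bpf_prog *new_prog;
 *
 *	new_prog = bpf_patch_insn_single(prog, off, patchlet,
 *					 ARRAY_SIZE(patchlet));
 *	if (IS_ERR(new_prog))
 *		return PTR_ERR(new_prog);
 *	prog = new_prog;	// a grown image is a new allocation
 *
 * The caller must continue with the returned program since an expanding
 * patch reallocates the image and frees the old one.
 */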

int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
{
	int err;

	/* Branch offsets can't overflow when program is shrinking, no need
	 * to call bpf_adj_branches(..., true) here
	 */
	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
		sizeof(struct bpf_insn) * (prog->len - off - cnt));
	prog->len -= cnt;

	err = bpf_adj_branches(prog, off, off + cnt, off, false);
	WARN_ON_ONCE(err);
	return err;
}

static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
{
	int i;

	for (i = 0; i < fp->aux->real_func_cnt; i++)
		bpf_prog_kallsyms_del(fp->aux->func[i]);
}

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
{
	bpf_prog_kallsyms_del_subprogs(fp);
	bpf_prog_kallsyms_del(fp);
}

#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden   __read_mostly;
long bpf_jit_limit   __read_mostly;
long bpf_jit_limit_max __read_mostly;

static void
bpf_prog_ksym_set_addr(struct bpf_prog *prog)
{
	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));

	prog->aux->ksym.start = (unsigned long) prog->bpf_func;
	prog->aux->ksym.end   = prog->aux->ksym.start + prog->jited_len;
}

static void
bpf_prog_ksym_set_name(struct bpf_prog *prog)
{
	char *sym = prog->aux->ksym.name;
	const char *end = sym + KSYM_NAME_LEN;
	const struct btf_type *type;
	const char *func_name;

	BUILD_BUG_ON(sizeof("bpf_prog_") +
		     sizeof(prog->tag) * 2 +
		     /* name has been null terminated.
		      * We would need +1 for the '_' preceding
		      * the name.  However, the null character
		      * is double counted between the name and the
		      * sizeof("bpf_prog_") above, so we omit
		      * the +1 here.
		      */
		     sizeof(prog->aux->name) > KSYM_NAME_LEN);

	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));

	/* prog->aux->name will be ignored if full btf name is available */
	if (prog->aux->func_info_cnt && prog->aux->func_idx < prog->aux->func_info_cnt) {
		type = btf_type_by_id(prog->aux->btf,
				      prog->aux->func_info[prog->aux->func_idx].type_id);
		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
		return;
	}

	if (prog->aux->name[0])
		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
	else
		*sym = 0;
}
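
/* Illustrative result: a jited program whose user-supplied name is
 * "my_prog" ends up in kallsyms as something like
 *
 *	bpf_prog_8d27e2b0f39e5a14_my_prog
 *
 * (the 16 hex digit tag here is made up); with BTF func info the BTF
 * function name is used instead of prog->aux->name, and with neither the
 * symbol is plain bpf_prog_<tag>.
 */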

static unsigned long bpf_get_ksym_start(struct latch_tree_node *n)
{
	return container_of(n, struct bpf_ksym, tnode)->start;
}

static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
					  struct latch_tree_node *b)
{
	return bpf_get_ksym_start(a) < bpf_get_ksym_start(b);
}

static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	const struct bpf_ksym *ksym;

	ksym = container_of(n, struct bpf_ksym, tnode);

	if (val < ksym->start)
		return -1;
	/* Ensure that we detect return addresses as part of the program, when
	 * the final instruction is a call for a program part of the stack
	 * trace. Therefore, do val > ksym->end instead of val >= ksym->end.
	 */
	if (val > ksym->end)
		return  1;

	return 0;
}

static const struct latch_tree_ops bpf_tree_ops = {
	.less	= bpf_tree_less,
	.comp	= bpf_tree_comp,
};

static DEFINE_SPINLOCK(bpf_lock);
static LIST_HEAD(bpf_kallsyms);
static struct latch_tree_root bpf_tree __cacheline_aligned;

void bpf_ksym_add(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	WARN_ON_ONCE(!list_empty(&ksym->lnode));
	list_add_tail_rcu(&ksym->lnode, &bpf_kallsyms);
	latch_tree_insert(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	spin_unlock_bh(&bpf_lock);
}

static void __bpf_ksym_del(struct bpf_ksym *ksym)
{
	if (list_empty(&ksym->lnode))
		return;

	latch_tree_erase(&ksym->tnode, &bpf_tree, &bpf_tree_ops);
	list_del_rcu(&ksym->lnode);
}

void bpf_ksym_del(struct bpf_ksym *ksym)
{
	spin_lock_bh(&bpf_lock);
	__bpf_ksym_del(ksym);
	spin_unlock_bh(&bpf_lock);
}

static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
{
	return fp->jited && !bpf_prog_was_classic(fp);
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp) ||
	    !bpf_token_capable(fp->aux->token, CAP_BPF))
		return;

	bpf_prog_ksym_set_addr(fp);
	bpf_prog_ksym_set_name(fp);
	fp->aux->ksym.prog = true;

	bpf_ksym_add(&fp->aux->ksym);

#ifdef CONFIG_FINEIBT
	/*
	 * With FineIBT, code at the __cfi_foo() symbols can get executed
	 * and hence the unwinder needs help.
	 */
	if (cfi_mode != CFI_FINEIBT)
		return;

	snprintf(fp->aux->ksym_prefix.name, KSYM_NAME_LEN,
		 "__cfi_%s", fp->aux->ksym.name);

	fp->aux->ksym_prefix.start = (unsigned long) fp->bpf_func - 16;
	fp->aux->ksym_prefix.end   = (unsigned long) fp->bpf_func;

	bpf_ksym_add(&fp->aux->ksym_prefix);
#endif
}
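
/* Illustrative effect: one ksym named bpf_prog_<tag>_<name> covering
 * [bpf_func, bpf_func + jited_len) is registered, and on FineIBT enabled
 * kernels a second "__cfi_bpf_prog_<tag>_<name>" ksym covering the 16 byte
 * CFI preamble just before bpf_func is registered too, so the unwinder can
 * attribute addresses that land inside the preamble.
 */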

void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
	if (!bpf_prog_kallsyms_candidate(fp))
		return;

	bpf_ksym_del(&fp->aux->ksym);
#ifdef CONFIG_FINEIBT
	if (cfi_mode != CFI_FINEIBT)
		return;
	bpf_ksym_del(&fp->aux->ksym_prefix);
#endif
}

static struct bpf_ksym *bpf_ksym_find(unsigned long addr)
{
	struct latch_tree_node *n;

	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
	return n ? container_of(n, struct bpf_ksym, tnode) : NULL;
}

int __bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym)
{
	struct bpf_ksym *ksym;
	int ret = 0;

	rcu_read_lock();
	ksym = bpf_ksym_find(addr);
	if (ksym) {
		unsigned long symbol_start = ksym->start;
		unsigned long symbol_end = ksym->end;

		ret = strscpy(sym, ksym->name, KSYM_NAME_LEN);

		if (size)
			*size = symbol_end - symbol_start;
		if (off)
			*off  = addr - symbol_start;
	}
	rcu_read_unlock();

	return ret;
}

bool is_bpf_text_address(unsigned long addr)
{
	bool ret;

	rcu_read_lock();
	ret = bpf_ksym_find(addr) != NULL;
	rcu_read_unlock();

	return ret;
}

struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
{
	struct bpf_ksym *ksym = bpf_ksym_find(addr);

	return ksym && ksym->prog ?
	       container_of(ksym, struct bpf_prog_aux, ksym)->prog :
	       NULL;
}

const struct exception_table_entry *search_bpf_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct bpf_prog *prog;

	rcu_read_lock();
	prog = bpf_prog_ksym_find(addr);
	if (!prog)
		goto out;
	if (!prog->aux->num_exentries)
		goto out;

	e = search_extable(prog->aux->extable, prog->aux->num_exentries, addr);
out:
	rcu_read_unlock();
	return e;
}

int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym)
{
	struct bpf_ksym *ksym;
	unsigned int it = 0;
	int ret = -ERANGE;

	if (!bpf_jit_kallsyms_enabled())
		return ret;

	rcu_read_lock();
	list_for_each_entry_rcu(ksym, &bpf_kallsyms, lnode) {
		if (it++ != symnum)
			continue;

		strscpy(sym, ksym->name, KSYM_NAME_LEN);

		*value = ksym->start;
		*type  = BPF_SYM_ELF_TYPE;

		ret = 0;
		break;
	}
	rcu_read_unlock();

	return ret;
}

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke)
{
	struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
	static const u32 poke_tab_max = 1024;
	u32 slot = prog->aux->size_poke_tab;
	u32 size = slot + 1;

	if (size > poke_tab_max)
		return -ENOSPC;
	if (poke->tailcall_target || poke->tailcall_target_stable ||
	    poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
		return -EINVAL;

	switch (poke->reason) {
	case BPF_POKE_REASON_TAIL_CALL:
		if (!poke->tail_call.map)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	tab = krealloc_array(tab, size, sizeof(*poke), GFP_KERNEL);
	if (!tab)
		return -ENOMEM;

	memcpy(&tab[slot], poke, sizeof(*poke));
	prog->aux->size_poke_tab = size;
	prog->aux->poke_tab = tab;

	return slot;
}

/*
 * BPF program pack allocator.
 *
 * Most BPF programs are pretty small. Allocating a whole page for each
 * program is sometimes a waste. Many small BPF programs also add pressure
 * to the instruction TLB. To solve this issue, we introduce a BPF program
 * pack allocator. The prog_pack allocator uses HPAGE_PMD_SIZE pages (2MB
 * on x86) to host BPF programs.
 */
#define BPF_PROG_CHUNK_SHIFT	6
#define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))

struct bpf_prog_pack {
	struct list_head list;
	void *ptr;
	unsigned long bitmap[];
};

void bpf_jit_fill_hole_with_zero(void *area, unsigned int size)
{
	memset(area, 0, size);
}

#define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)

static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);

/* PMD_SIZE is not available in some special config, e.g. ARCH=arm with
 * CONFIG_MMU=n. Use PAGE_SIZE in these cases.
 */
#ifdef PMD_SIZE
/* PMD_SIZE is really big for some archs. It doesn't make sense to
 * reserve too much memory in one allocation. Hardcode BPF_PROG_PACK_SIZE to
 * 2MiB * num_possible_nodes(). On most architectures PMD_SIZE will be
 * greater than or equal to 2MB.
 */
#define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
#else
#define BPF_PROG_PACK_SIZE PAGE_SIZE
#endif

#define BPF_PROG_CHUNK_COUNT (BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)
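
/* Worked numbers (illustrative, for the common PMD_SIZE case on a single
 * node system): BPF_PROG_PACK_SIZE is 2 MiB and the chunk size is 64 bytes,
 * so BPF_PROG_CHUNK_COUNT is 32768 chunks and the bitmap in struct
 * bpf_prog_pack takes BITS_TO_LONGS(32768) = 512 longs, i.e. 4 KiB on a
 * 64-bit kernel.
 */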

static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	struct bpf_prog_pack *pack;
	int err;

	pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)),
		       GFP_KERNEL);
	if (!pack)
		return NULL;
	pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE);
	if (!pack->ptr)
		goto out;
	bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE);
	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);

	set_vm_flush_reset_perms(pack->ptr);
	err = set_memory_rox((unsigned long)pack->ptr,
			     BPF_PROG_PACK_SIZE / PAGE_SIZE);
	if (err)
		goto out;
	list_add_tail(&pack->list, &pack_list);
	return pack;

out:
	bpf_jit_free_exec(pack->ptr);
	kfree(pack);
	return NULL;
}

void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
	unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
	struct bpf_prog_pack *pack;
	unsigned long pos;
	void *ptr = NULL;

	mutex_lock(&pack_mutex);
	if (size > BPF_PROG_PACK_SIZE) {
		size = round_up(size, PAGE_SIZE);
		ptr = bpf_jit_alloc_exec(size);
		if (ptr) {
			int err;

			bpf_fill_ill_insns(ptr, size);
			set_vm_flush_reset_perms(ptr);
			err = set_memory_rox((unsigned long)ptr,
					     size / PAGE_SIZE);
			if (err) {
				bpf_jit_free_exec(ptr);
				ptr = NULL;
			}
		}
		goto out;
	}
	list_for_each_entry(pack, &pack_list, list) {
		pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
						 nbits, 0);
		if (pos < BPF_PROG_CHUNK_COUNT)
			goto found_free_area;
	}

	pack = alloc_new_pack(bpf_fill_ill_insns);
	if (!pack)
		goto out;

	pos = 0;

found_free_area:
	bitmap_set(pack->bitmap, pos, nbits);
	ptr = (void *)(pack->ptr) + (pos << BPF_PROG_CHUNK_SHIFT);

out:
	mutex_unlock(&pack_mutex);
	return ptr;
}
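
/* Illustrative example: a request for a 100 byte image needs
 * BPF_PROG_SIZE_TO_NBITS(100) = 2 chunks (128 bytes), carved out of the
 * first pack that has a free run of that length; only requests larger than
 * BPF_PROG_PACK_SIZE fall back to a dedicated bpf_jit_alloc_exec()
 * allocation of their own.
 */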

void bpf_prog_pack_free(void *ptr, u32 size)
{
	struct bpf_prog_pack *pack = NULL, *tmp;
	unsigned int nbits;
	unsigned long pos;

	mutex_lock(&pack_mutex);
	if (size > BPF_PROG_PACK_SIZE) {
		bpf_jit_free_exec(ptr);
		goto out;
	}

	list_for_each_entry(tmp, &pack_list, list) {
		if (ptr >= tmp->ptr && (tmp->ptr + BPF_PROG_PACK_SIZE) > ptr) {
			pack = tmp;
			break;
		}
	}

	if (WARN_ONCE(!pack, "bpf_prog_pack bug\n"))
		goto out;

	nbits = BPF_PROG_SIZE_TO_NBITS(size);
	pos = ((unsigned long)ptr - (unsigned long)pack->ptr) >> BPF_PROG_CHUNK_SHIFT;

	WARN_ONCE(bpf_arch_text_invalidate(ptr, size),
		  "bpf_prog_pack bug: missing bpf_arch_text_invalidate?\n");

	bitmap_clear(pack->bitmap, pos, nbits);
	if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
				       BPF_PROG_CHUNK_COUNT, 0) == 0) {
		list_del(&pack->list);
		bpf_jit_free_exec(pack->ptr);
		kfree(pack);
	}
out:
	mutex_unlock(&pack_mutex);
}

static atomic_long_t bpf_jit_current;

/* Can be overridden by an arch's JIT compiler if it has a custom,
 * dedicated BPF backend memory area, or if neither of the two
 * below apply.
 */
u64 __weak bpf_jit_alloc_exec_limit(void)
{
#if defined(MODULES_VADDR)
	return MODULES_END - MODULES_VADDR;
#else
	return VMALLOC_END - VMALLOC_START;
#endif
}

static int __init bpf_jit_charge_init(void)
{
	/* Only used as heuristic here to derive limit. */
	bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
	bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
					    PAGE_SIZE), LONG_MAX);
	return 0;
}
pure_initcall(bpf_jit_charge_init);
1052ede95a63SDaniel Borkmann 
10533486beddSSong Liu int bpf_jit_charge_modmem(u32 size)
1054ede95a63SDaniel Borkmann {
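	/* Account this allocation against the global bpf_jit_current
	 * counter. Unprivileged callers that would push the total above
	 * bpf_jit_limit get -EPERM; bpf_capable() callers may exceed it.
	 */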
10550947ae11SKuniyuki Iwashima 	if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
10568a98ae12SLorenz Bauer 		if (!bpf_capable()) {
10573486beddSSong Liu 			atomic_long_sub(size, &bpf_jit_current);
1058ede95a63SDaniel Borkmann 			return -EPERM;
1059ede95a63SDaniel Borkmann 		}
1060ede95a63SDaniel Borkmann 	}
1061ede95a63SDaniel Borkmann 
1062ede95a63SDaniel Borkmann 	return 0;
1063ede95a63SDaniel Borkmann }
1064ede95a63SDaniel Borkmann 
10653486beddSSong Liu void bpf_jit_uncharge_modmem(u32 size)
1066ede95a63SDaniel Borkmann {
10673486beddSSong Liu 	atomic_long_sub(size, &bpf_jit_current);
1068ede95a63SDaniel Borkmann }
1069ede95a63SDaniel Borkmann 
1070dc002bb6SArd Biesheuvel void *__weak bpf_jit_alloc_exec(unsigned long size)
1071dc002bb6SArd Biesheuvel {
107212af2b83SMike Rapoport (IBM) 	return execmem_alloc(EXECMEM_BPF, size);
1073dc002bb6SArd Biesheuvel }
1074dc002bb6SArd Biesheuvel 
1075dc002bb6SArd Biesheuvel void __weak bpf_jit_free_exec(void *addr)
1076dc002bb6SArd Biesheuvel {
107712af2b83SMike Rapoport (IBM) 	execmem_free(addr);
1078dc002bb6SArd Biesheuvel }
1079dc002bb6SArd Biesheuvel 
1080738cbe72SDaniel Borkmann struct bpf_binary_header *
1081738cbe72SDaniel Borkmann bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
1082738cbe72SDaniel Borkmann 		     unsigned int alignment,
1083738cbe72SDaniel Borkmann 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
1084738cbe72SDaniel Borkmann {
1085738cbe72SDaniel Borkmann 	struct bpf_binary_header *hdr;
1086ed2d9e1aSSong Liu 	u32 size, hole, start;
1087738cbe72SDaniel Borkmann 
1088b7b3fc8dSIlya Leoshkevich 	WARN_ON_ONCE(!is_power_of_2(alignment) ||
1089b7b3fc8dSIlya Leoshkevich 		     alignment > BPF_IMAGE_ALIGNMENT);
1090b7b3fc8dSIlya Leoshkevich 
1091738cbe72SDaniel Borkmann 	/* Most BPF filters are really small, but if some of them
1092738cbe72SDaniel Borkmann 	 * fill a page, allow at least 128 extra bytes to insert a
1093738cbe72SDaniel Borkmann 	 * random section of illegal instructions.
1094738cbe72SDaniel Borkmann 	 */
1095738cbe72SDaniel Borkmann 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
1096ede95a63SDaniel Borkmann 
10973486beddSSong Liu 	if (bpf_jit_charge_modmem(size))
1098738cbe72SDaniel Borkmann 		return NULL;
1099dc002bb6SArd Biesheuvel 	hdr = bpf_jit_alloc_exec(size);
1100ede95a63SDaniel Borkmann 	if (!hdr) {
11013486beddSSong Liu 		bpf_jit_uncharge_modmem(size);
1102ede95a63SDaniel Borkmann 		return NULL;
1103ede95a63SDaniel Borkmann 	}
1104738cbe72SDaniel Borkmann 
1105738cbe72SDaniel Borkmann 	/* Fill space with illegal/arch-dep instructions. */
1106738cbe72SDaniel Borkmann 	bpf_fill_ill_insns(hdr, size);
1107738cbe72SDaniel Borkmann 
1108ed2d9e1aSSong Liu 	hdr->size = size;
1109738cbe72SDaniel Borkmann 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
1110738cbe72SDaniel Borkmann 		     PAGE_SIZE - sizeof(*hdr));
11118032bf12SJason A. Donenfeld 	start = get_random_u32_below(hole) & ~(alignment - 1);
1112738cbe72SDaniel Borkmann 
1113738cbe72SDaniel Borkmann 	/* Leave a random number of instructions before BPF code. */
1114738cbe72SDaniel Borkmann 	*image_ptr = &hdr->image[start];
1115738cbe72SDaniel Borkmann 
1116738cbe72SDaniel Borkmann 	return hdr;
1117738cbe72SDaniel Borkmann }
1118738cbe72SDaniel Borkmann 
1119738cbe72SDaniel Borkmann void bpf_jit_binary_free(struct bpf_binary_header *hdr)
1120738cbe72SDaniel Borkmann {
1121ed2d9e1aSSong Liu 	u32 size = hdr->size;
1122ede95a63SDaniel Borkmann 
1123dc002bb6SArd Biesheuvel 	bpf_jit_free_exec(hdr);
1124ed2d9e1aSSong Liu 	bpf_jit_uncharge_modmem(size);
1125738cbe72SDaniel Borkmann }
11264f3446bbSDaniel Borkmann 
112733c98058SSong Liu /* Allocate jit binary from bpf_prog_pack allocator.
112833c98058SSong Liu  * Since the allocated memory is RO+X, the JIT engine cannot write directly
112933c98058SSong Liu  * to the memory. To solve this problem, a RW buffer is also allocated at
113033c98058SSong Liu  * the same time. The JIT engine should calculate offsets based on the
113133c98058SSong Liu  * RO memory address, but write the JITed program to the RW buffer. Once the
113233c98058SSong Liu  * JIT engine finishes, it calls bpf_jit_binary_pack_finalize, which copies
113333c98058SSong Liu  * the JITed program to the RO memory.
113433c98058SSong Liu  */
113533c98058SSong Liu struct bpf_binary_header *
113633c98058SSong Liu bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
113733c98058SSong Liu 			  unsigned int alignment,
113833c98058SSong Liu 			  struct bpf_binary_header **rw_header,
113933c98058SSong Liu 			  u8 **rw_image,
114033c98058SSong Liu 			  bpf_jit_fill_hole_t bpf_fill_ill_insns)
114133c98058SSong Liu {
114233c98058SSong Liu 	struct bpf_binary_header *ro_header;
114333c98058SSong Liu 	u32 size, hole, start;
114433c98058SSong Liu 
114533c98058SSong Liu 	WARN_ON_ONCE(!is_power_of_2(alignment) ||
114633c98058SSong Liu 		     alignment > BPF_IMAGE_ALIGNMENT);
114733c98058SSong Liu 
114833c98058SSong Liu 	/* add 16 bytes for a random section of illegal instructions */
114933c98058SSong Liu 	size = round_up(proglen + sizeof(*ro_header) + 16, BPF_PROG_CHUNK_SIZE);
115033c98058SSong Liu 
115133c98058SSong Liu 	if (bpf_jit_charge_modmem(size))
115233c98058SSong Liu 		return NULL;
1153d88bb5eeSSong Liu 	ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
115433c98058SSong Liu 	if (!ro_header) {
115533c98058SSong Liu 		bpf_jit_uncharge_modmem(size);
115633c98058SSong Liu 		return NULL;
115733c98058SSong Liu 	}
115833c98058SSong Liu 
115933c98058SSong Liu 	*rw_header = kvmalloc(size, GFP_KERNEL);
116033c98058SSong Liu 	if (!*rw_header) {
1161f08a1c65SSong Liu 		bpf_prog_pack_free(ro_header, size);
116233c98058SSong Liu 		bpf_jit_uncharge_modmem(size);
116333c98058SSong Liu 		return NULL;
116433c98058SSong Liu 	}
116533c98058SSong Liu 
116633c98058SSong Liu 	/* Fill space with illegal/arch-dep instructions. */
116733c98058SSong Liu 	bpf_fill_ill_insns(*rw_header, size);
116833c98058SSong Liu 	(*rw_header)->size = size;
116933c98058SSong Liu 
117033c98058SSong Liu 	hole = min_t(unsigned int, size - (proglen + sizeof(*ro_header)),
117133c98058SSong Liu 		     BPF_PROG_CHUNK_SIZE - sizeof(*ro_header));
11728032bf12SJason A. Donenfeld 	start = get_random_u32_below(hole) & ~(alignment - 1);
117333c98058SSong Liu 
117433c98058SSong Liu 	*image_ptr = &ro_header->image[start];
117533c98058SSong Liu 	*rw_image = &(*rw_header)->image[start];
117633c98058SSong Liu 
117733c98058SSong Liu 	return ro_header;
117833c98058SSong Liu }
117933c98058SSong Liu 
118033c98058SSong Liu /* Copy JITed text from rw_header to its final location, the ro_header. */
11819919c5c9SRafael Passos int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header,
118233c98058SSong Liu 				 struct bpf_binary_header *rw_header)
118333c98058SSong Liu {
118433c98058SSong Liu 	void *ptr;
118533c98058SSong Liu 
118633c98058SSong Liu 	ptr = bpf_arch_text_copy(ro_header, rw_header, rw_header->size);
118733c98058SSong Liu 
118833c98058SSong Liu 	kvfree(rw_header);
118933c98058SSong Liu 
119033c98058SSong Liu 	if (IS_ERR(ptr)) {
1191f08a1c65SSong Liu 		bpf_prog_pack_free(ro_header, ro_header->size);
119233c98058SSong Liu 		return PTR_ERR(ptr);
119333c98058SSong Liu 	}
119433c98058SSong Liu 	return 0;
119533c98058SSong Liu }
119633c98058SSong Liu 
119733c98058SSong Liu /* bpf_jit_binary_pack_free is called in two different scenarios:
119833c98058SSong Liu  *   1) when the program is freed (after bpf_jit_binary_pack_finalize);
119933c98058SSong Liu  *   2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
120033c98058SSong Liu  * For case 2), we need to free both the RO memory and the RW buffer.
1201676b2daaSSong Liu  *
1202676b2daaSSong Liu  * bpf_jit_binary_pack_free requires proper ro_header->size. However,
1203676b2daaSSong Liu  * bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
1204676b2daaSSong Liu  * must be set with either bpf_jit_binary_pack_finalize (normal path) or
1205676b2daaSSong Liu  * bpf_arch_text_copy (when jit fails).
120633c98058SSong Liu  */
120733c98058SSong Liu void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
120833c98058SSong Liu 			      struct bpf_binary_header *rw_header)
120933c98058SSong Liu {
1210676b2daaSSong Liu 	u32 size = ro_header->size;
121133c98058SSong Liu 
1212f08a1c65SSong Liu 	bpf_prog_pack_free(ro_header, size);
121333c98058SSong Liu 	kvfree(rw_header);
121433c98058SSong Liu 	bpf_jit_uncharge_modmem(size);
121533c98058SSong Liu }
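/* Illustrative sketch (not part of this file) of how an arch JIT is
 * expected to drive the pack API above; jit_emit() stands in for the
 * arch-specific code emission and error handling is elided:
 *
 *	hdr = bpf_jit_binary_pack_alloc(proglen, &image, align,
 *					&rw_hdr, &rw_image, jit_fill_hole);
 *	jit_emit(rw_image, ...);	(JIT writes into the RW buffer)
 *	err = bpf_jit_binary_pack_finalize(hdr, rw_hdr);
 *					(text copied into the RO+X pack,
 *					 rw_hdr is freed either way)
 *	if (!err)
 *		prog->bpf_func = (void *)image;
 */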
121633c98058SSong Liu 
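/* The binary header always sits at the start of the allocation, so it can
 * be recovered from fp->bpf_func by rounding down: to the pack chunk size
 * for bpf_prog_pack images (below), or to the page size for regular
 * bpf_jit_binary_alloc() images (further below).
 */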
12171d5f82d9SSong Liu struct bpf_binary_header *
12181d5f82d9SSong Liu bpf_jit_binary_pack_hdr(const struct bpf_prog *fp)
12191d5f82d9SSong Liu {
12201d5f82d9SSong Liu 	unsigned long real_start = (unsigned long)fp->bpf_func;
12211d5f82d9SSong Liu 	unsigned long addr;
12221d5f82d9SSong Liu 
12231d5f82d9SSong Liu 	addr = real_start & BPF_PROG_CHUNK_MASK;
12241d5f82d9SSong Liu 	return (void *)addr;
12251d5f82d9SSong Liu }
12261d5f82d9SSong Liu 
122733c98058SSong Liu static inline struct bpf_binary_header *
122833c98058SSong Liu bpf_jit_binary_hdr(const struct bpf_prog *fp)
122933c98058SSong Liu {
123033c98058SSong Liu 	unsigned long real_start = (unsigned long)fp->bpf_func;
123133c98058SSong Liu 	unsigned long addr;
123233c98058SSong Liu 
123333c98058SSong Liu 	addr = real_start & PAGE_MASK;
123433c98058SSong Liu 	return (void *)addr;
123533c98058SSong Liu }
123633c98058SSong Liu 
123774451e66SDaniel Borkmann /* This symbol is only overridden by archs that have different
123874451e66SDaniel Borkmann  * requirements than the usual eBPF JITs, e.g. when they only
123974451e66SDaniel Borkmann  * implement a cBPF JIT, do not set images read-only, etc.
124074451e66SDaniel Borkmann  */
124174451e66SDaniel Borkmann void __weak bpf_jit_free(struct bpf_prog *fp)
124274451e66SDaniel Borkmann {
124374451e66SDaniel Borkmann 	if (fp->jited) {
124474451e66SDaniel Borkmann 		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
124574451e66SDaniel Borkmann 
124674451e66SDaniel Borkmann 		bpf_jit_binary_free(hdr);
124774451e66SDaniel Borkmann 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
124874451e66SDaniel Borkmann 	}
124974451e66SDaniel Borkmann 
125074451e66SDaniel Borkmann 	bpf_prog_unlock_free(fp);
125174451e66SDaniel Borkmann }
125274451e66SDaniel Borkmann 
1253e2c95a61SDaniel Borkmann int bpf_jit_get_func_addr(const struct bpf_prog *prog,
1254e2c95a61SDaniel Borkmann 			  const struct bpf_insn *insn, bool extra_pass,
1255e2c95a61SDaniel Borkmann 			  u64 *func_addr, bool *func_addr_fixed)
1256e2c95a61SDaniel Borkmann {
1257e2c95a61SDaniel Borkmann 	s16 off = insn->off;
1258e2c95a61SDaniel Borkmann 	s32 imm = insn->imm;
1259e2c95a61SDaniel Borkmann 	u8 *addr;
12601cf3bfc6SIlya Leoshkevich 	int err;
1261e2c95a61SDaniel Borkmann 
1262e2c95a61SDaniel Borkmann 	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
1263e2c95a61SDaniel Borkmann 	if (!*func_addr_fixed) {
1264e2c95a61SDaniel Borkmann 		/* Place-holder address till the last pass has collected
1265e2c95a61SDaniel Borkmann 		 * all addresses for JITed subprograms, in which case we
1266e2c95a61SDaniel Borkmann 		 * can pick them up from prog->aux.
1267e2c95a61SDaniel Borkmann 		 */
1268e2c95a61SDaniel Borkmann 		if (!extra_pass)
1269e2c95a61SDaniel Borkmann 			addr = NULL;
1270e2c95a61SDaniel Borkmann 		else if (prog->aux->func &&
1271335d1c5bSKumar Kartikeya Dwivedi 			 off >= 0 && off < prog->aux->real_func_cnt)
1272e2c95a61SDaniel Borkmann 			addr = (u8 *)prog->aux->func[off]->bpf_func;
1273e2c95a61SDaniel Borkmann 		else
1274e2c95a61SDaniel Borkmann 			return -EINVAL;
12751cf3bfc6SIlya Leoshkevich 	} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
12761cf3bfc6SIlya Leoshkevich 		   bpf_jit_supports_far_kfunc_call()) {
12771cf3bfc6SIlya Leoshkevich 		err = bpf_get_kfunc_addr(prog, insn->imm, insn->off, &addr);
12781cf3bfc6SIlya Leoshkevich 		if (err)
12791cf3bfc6SIlya Leoshkevich 			return err;
1280e2c95a61SDaniel Borkmann 	} else {
1281e2c95a61SDaniel Borkmann 		/* Address of a BPF helper call. Since part of the core
1282e2c95a61SDaniel Borkmann 		 * kernel, it's always at a fixed location. __bpf_call_base
1283e2c95a61SDaniel Borkmann 		 * and the helper with imm relative to it are both in the core
1284e2c95a61SDaniel Borkmann 		 * kernel.
1285e2c95a61SDaniel Borkmann 		 */
1286e2c95a61SDaniel Borkmann 		addr = (u8 *)__bpf_call_base + imm;
1287e2c95a61SDaniel Borkmann 	}
1288e2c95a61SDaniel Borkmann 
1289e2c95a61SDaniel Borkmann 	*func_addr = (unsigned long)addr;
1290e2c95a61SDaniel Borkmann 	return 0;
1291e2c95a61SDaniel Borkmann }
1292e2c95a61SDaniel Borkmann 
12934f3446bbSDaniel Borkmann static int bpf_jit_blind_insn(const struct bpf_insn *from,
12944f3446bbSDaniel Borkmann 			      const struct bpf_insn *aux,
1295ede7c460SNaveen N. Rao 			      struct bpf_insn *to_buff,
1296ede7c460SNaveen N. Rao 			      bool emit_zext)
12974f3446bbSDaniel Borkmann {
12984f3446bbSDaniel Borkmann 	struct bpf_insn *to = to_buff;
1299a251c17aSJason A. Donenfeld 	u32 imm_rnd = get_random_u32();
13004f3446bbSDaniel Borkmann 	s16 off;
13014f3446bbSDaniel Borkmann 
13024f3446bbSDaniel Borkmann 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
13034f3446bbSDaniel Borkmann 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
13044f3446bbSDaniel Borkmann 
13059b73bfddSDaniel Borkmann 	/* Constraints on AX register:
13069b73bfddSDaniel Borkmann 	 *
13079b73bfddSDaniel Borkmann 	 * AX register is inaccessible from user space. It is mapped in
13089b73bfddSDaniel Borkmann 	 * all JITs, and used here for constant blinding rewrites. It is
13099b73bfddSDaniel Borkmann 	 * typically "stateless" meaning its contents are only valid within
13109b73bfddSDaniel Borkmann 	 * the executed instruction, but not across several instructions.
13119b73bfddSDaniel Borkmann 	 * There are a few exceptions however which are further detailed
13129b73bfddSDaniel Borkmann 	 * below.
13139b73bfddSDaniel Borkmann 	 *
13149b73bfddSDaniel Borkmann 	 * Constant blinding is only used by JITs, not in the interpreter.
13159b73bfddSDaniel Borkmann 	 * The interpreter uses AX on some occasions as a local temporary
13169b73bfddSDaniel Borkmann 	 * register, e.g. in DIV or MOD instructions.
13179b73bfddSDaniel Borkmann 	 *
13189b73bfddSDaniel Borkmann 	 * In restricted circumstances, the verifier can also use the AX
13199b73bfddSDaniel Borkmann 	 * register for rewrites as long as they do not interfere with
13209b73bfddSDaniel Borkmann 	 * the above cases!
13219b73bfddSDaniel Borkmann 	 */
13229b73bfddSDaniel Borkmann 	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
13239b73bfddSDaniel Borkmann 		goto out;
13249b73bfddSDaniel Borkmann 
13254f3446bbSDaniel Borkmann 	if (from->imm == 0 &&
13264f3446bbSDaniel Borkmann 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
13274f3446bbSDaniel Borkmann 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
13284f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
13294f3446bbSDaniel Borkmann 		goto out;
13304f3446bbSDaniel Borkmann 	}
13314f3446bbSDaniel Borkmann 
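	/* Each rewrite below follows the same pattern: load the blinded
	 * value (imm_rnd ^ imm) into AX, XOR AX with imm_rnd to recover
	 * the original immediate, then emit the X-form of the instruction
	 * with AX as source. The user-supplied constant therefore never
	 * appears literally in the JITed image.
	 */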
13324f3446bbSDaniel Borkmann 	switch (from->code) {
13334f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_ADD | BPF_K:
13344f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_SUB | BPF_K:
13354f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_AND | BPF_K:
13364f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_OR  | BPF_K:
13374f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_XOR | BPF_K:
13384f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_MUL | BPF_K:
13394f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_MOV | BPF_K:
13404f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_DIV | BPF_K:
13414f3446bbSDaniel Borkmann 	case BPF_ALU | BPF_MOD | BPF_K:
13424f3446bbSDaniel Borkmann 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
13434f3446bbSDaniel Borkmann 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
13447058e3a3SYonghong Song 		*to++ = BPF_ALU32_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
13454f3446bbSDaniel Borkmann 		break;
13464f3446bbSDaniel Borkmann 
13474f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_ADD | BPF_K:
13484f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_SUB | BPF_K:
13494f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_AND | BPF_K:
13504f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_OR  | BPF_K:
13514f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_XOR | BPF_K:
13524f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_MUL | BPF_K:
13534f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_MOV | BPF_K:
13544f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_DIV | BPF_K:
13554f3446bbSDaniel Borkmann 	case BPF_ALU64 | BPF_MOD | BPF_K:
13564f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
13574f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
13587058e3a3SYonghong Song 		*to++ = BPF_ALU64_REG_OFF(from->code, from->dst_reg, BPF_REG_AX, from->off);
13594f3446bbSDaniel Borkmann 		break;
13604f3446bbSDaniel Borkmann 
13614f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JEQ  | BPF_K:
13624f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JNE  | BPF_K:
13634f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JGT  | BPF_K:
136492b31a9aSDaniel Borkmann 	case BPF_JMP | BPF_JLT  | BPF_K:
13654f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JGE  | BPF_K:
136692b31a9aSDaniel Borkmann 	case BPF_JMP | BPF_JLE  | BPF_K:
13674f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JSGT | BPF_K:
136892b31a9aSDaniel Borkmann 	case BPF_JMP | BPF_JSLT | BPF_K:
13694f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JSGE | BPF_K:
137092b31a9aSDaniel Borkmann 	case BPF_JMP | BPF_JSLE | BPF_K:
13714f3446bbSDaniel Borkmann 	case BPF_JMP | BPF_JSET | BPF_K:
13724f3446bbSDaniel Borkmann 		/* Accommodate the extra offset in case of a backjump. */
13734f3446bbSDaniel Borkmann 		off = from->off;
13744f3446bbSDaniel Borkmann 		if (off < 0)
13754f3446bbSDaniel Borkmann 			off -= 2;
13764f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
13774f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
13784f3446bbSDaniel Borkmann 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
13794f3446bbSDaniel Borkmann 		break;
13804f3446bbSDaniel Borkmann 
1381a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JEQ  | BPF_K:
1382a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JNE  | BPF_K:
1383a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JGT  | BPF_K:
1384a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JLT  | BPF_K:
1385a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JGE  | BPF_K:
1386a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JLE  | BPF_K:
1387a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JSGT | BPF_K:
1388a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JSLT | BPF_K:
1389a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JSGE | BPF_K:
1390a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JSLE | BPF_K:
1391a7b76c88SJiong Wang 	case BPF_JMP32 | BPF_JSET | BPF_K:
1392a7b76c88SJiong Wang 		/* Accommodate the extra offset in case of a backjump. */
1393a7b76c88SJiong Wang 		off = from->off;
1394a7b76c88SJiong Wang 		if (off < 0)
1395a7b76c88SJiong Wang 			off -= 2;
1396a7b76c88SJiong Wang 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
1397a7b76c88SJiong Wang 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1398a7b76c88SJiong Wang 		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
1399a7b76c88SJiong Wang 				      off);
1400a7b76c88SJiong Wang 		break;
1401a7b76c88SJiong Wang 
14024f3446bbSDaniel Borkmann 	case BPF_LD | BPF_IMM | BPF_DW:
14034f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
14044f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
14054f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
14064f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
14074f3446bbSDaniel Borkmann 		break;
14084f3446bbSDaniel Borkmann 	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
14094f3446bbSDaniel Borkmann 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
14104f3446bbSDaniel Borkmann 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
1411ede7c460SNaveen N. Rao 		if (emit_zext)
1412ede7c460SNaveen N. Rao 			*to++ = BPF_ZEXT_REG(BPF_REG_AX);
14134f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
14144f3446bbSDaniel Borkmann 		break;
14154f3446bbSDaniel Borkmann 
14164f3446bbSDaniel Borkmann 	case BPF_ST | BPF_MEM | BPF_DW:
14174f3446bbSDaniel Borkmann 	case BPF_ST | BPF_MEM | BPF_W:
14184f3446bbSDaniel Borkmann 	case BPF_ST | BPF_MEM | BPF_H:
14194f3446bbSDaniel Borkmann 	case BPF_ST | BPF_MEM | BPF_B:
14204f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
14214f3446bbSDaniel Borkmann 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
14224f3446bbSDaniel Borkmann 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
14234f3446bbSDaniel Borkmann 		break;
14244f3446bbSDaniel Borkmann 	}
14254f3446bbSDaniel Borkmann out:
14264f3446bbSDaniel Borkmann 	return to - to_buff;
14274f3446bbSDaniel Borkmann }
14284f3446bbSDaniel Borkmann 
14294f3446bbSDaniel Borkmann static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
14304f3446bbSDaniel Borkmann 					      gfp_t gfp_extra_flags)
14314f3446bbSDaniel Borkmann {
143219809c2dSMichal Hocko 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
14334f3446bbSDaniel Borkmann 	struct bpf_prog *fp;
14344f3446bbSDaniel Borkmann 
143588dca4caSChristoph Hellwig 	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
14364f3446bbSDaniel Borkmann 	if (fp != NULL) {
14374f3446bbSDaniel Borkmann 		/* aux->prog still points to the fp_other one, so
14384f3446bbSDaniel Borkmann 		 * when promoting the clone to the real program,
14394f3446bbSDaniel Borkmann 		 * this still needs to be adapted.
14404f3446bbSDaniel Borkmann 		 */
14414f3446bbSDaniel Borkmann 		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
14424f3446bbSDaniel Borkmann 	}
14434f3446bbSDaniel Borkmann 
14444f3446bbSDaniel Borkmann 	return fp;
14454f3446bbSDaniel Borkmann }
14464f3446bbSDaniel Borkmann 
14474f3446bbSDaniel Borkmann static void bpf_prog_clone_free(struct bpf_prog *fp)
14484f3446bbSDaniel Borkmann {
14494f3446bbSDaniel Borkmann 	/* aux was stolen by the other clone, so we cannot free
14504f3446bbSDaniel Borkmann 	 * it from this path! It will be freed eventually by the
14514f3446bbSDaniel Borkmann 	 * other program on release.
14524f3446bbSDaniel Borkmann 	 *
14534f3446bbSDaniel Borkmann 	 * At this point, we don't need a deferred release since
14544f3446bbSDaniel Borkmann 	 * clone is guaranteed to not be locked.
14554f3446bbSDaniel Borkmann 	 */
14564f3446bbSDaniel Borkmann 	fp->aux = NULL;
145753f523f3SCong Wang 	fp->stats = NULL;
145853f523f3SCong Wang 	fp->active = NULL;
14594f3446bbSDaniel Borkmann 	__bpf_prog_free(fp);
14604f3446bbSDaniel Borkmann }
14614f3446bbSDaniel Borkmann 
14624f3446bbSDaniel Borkmann void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
14634f3446bbSDaniel Borkmann {
14644f3446bbSDaniel Borkmann 	/* We have to repoint aux->prog to self, as we don't
14654f3446bbSDaniel Borkmann 	 * know whether fp here is the clone or the original.
14664f3446bbSDaniel Borkmann 	 */
14674f3446bbSDaniel Borkmann 	fp->aux->prog = fp;
14684f3446bbSDaniel Borkmann 	bpf_prog_clone_free(fp_other);
14694f3446bbSDaniel Borkmann }
14704f3446bbSDaniel Borkmann 
14714f3446bbSDaniel Borkmann struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
14724f3446bbSDaniel Borkmann {
14734f3446bbSDaniel Borkmann 	struct bpf_insn insn_buff[16], aux[2];
14744f3446bbSDaniel Borkmann 	struct bpf_prog *clone, *tmp;
14754f3446bbSDaniel Borkmann 	int insn_delta, insn_cnt;
14764f3446bbSDaniel Borkmann 	struct bpf_insn *insn;
14774f3446bbSDaniel Borkmann 	int i, rewritten;
14784f3446bbSDaniel Borkmann 
1479d2a3b7c5SHou Tao 	if (!prog->blinding_requested || prog->blinded)
14804f3446bbSDaniel Borkmann 		return prog;
14814f3446bbSDaniel Borkmann 
14824f3446bbSDaniel Borkmann 	clone = bpf_prog_clone_create(prog, GFP_USER);
14834f3446bbSDaniel Borkmann 	if (!clone)
14844f3446bbSDaniel Borkmann 		return ERR_PTR(-ENOMEM);
14854f3446bbSDaniel Borkmann 
14864f3446bbSDaniel Borkmann 	insn_cnt = clone->len;
14874f3446bbSDaniel Borkmann 	insn = clone->insnsi;
14884f3446bbSDaniel Borkmann 
14894f3446bbSDaniel Borkmann 	for (i = 0; i < insn_cnt; i++, insn++) {
14904b6313cfSAlexei Starovoitov 		if (bpf_pseudo_func(insn)) {
14914b6313cfSAlexei Starovoitov 			/* ld_imm64 with an address of bpf subprog is not
14924b6313cfSAlexei Starovoitov 			 * a user controlled constant. Don't randomize it,
14934b6313cfSAlexei Starovoitov 			 * since it will conflict with jit_subprogs() logic.
14944b6313cfSAlexei Starovoitov 			 */
14954b6313cfSAlexei Starovoitov 			insn++;
14964b6313cfSAlexei Starovoitov 			i++;
14974b6313cfSAlexei Starovoitov 			continue;
14984b6313cfSAlexei Starovoitov 		}
14994b6313cfSAlexei Starovoitov 
15004f3446bbSDaniel Borkmann 		/* We temporarily need to hold the original ld64 insn
15014f3446bbSDaniel Borkmann 		 * so that we can still access the first part in the
15024f3446bbSDaniel Borkmann 		 * second blinding run.
15034f3446bbSDaniel Borkmann 		 */
15044f3446bbSDaniel Borkmann 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
15054f3446bbSDaniel Borkmann 		    insn[1].code == 0)
15064f3446bbSDaniel Borkmann 			memcpy(aux, insn, sizeof(aux));
15074f3446bbSDaniel Borkmann 
1508ede7c460SNaveen N. Rao 		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff,
1509ede7c460SNaveen N. Rao 						clone->aux->verifier_zext);
15104f3446bbSDaniel Borkmann 		if (!rewritten)
15114f3446bbSDaniel Borkmann 			continue;
15124f3446bbSDaniel Borkmann 
15134f3446bbSDaniel Borkmann 		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
15144f73379eSAlexei Starovoitov 		if (IS_ERR(tmp)) {
15154f3446bbSDaniel Borkmann 			/* Patching may have repointed aux->prog during
15164f3446bbSDaniel Borkmann 			 * realloc from the original one, so we need to
15174f3446bbSDaniel Borkmann 			 * fix it up here on error.
15184f3446bbSDaniel Borkmann 			 */
15194f3446bbSDaniel Borkmann 			bpf_jit_prog_release_other(prog, clone);
15204f73379eSAlexei Starovoitov 			return tmp;
15214f3446bbSDaniel Borkmann 		}
15224f3446bbSDaniel Borkmann 
15234f3446bbSDaniel Borkmann 		clone = tmp;
15244f3446bbSDaniel Borkmann 		insn_delta = rewritten - 1;
15254f3446bbSDaniel Borkmann 
15264f3446bbSDaniel Borkmann 		/* Walk new program and skip insns we just inserted. */
15274f3446bbSDaniel Borkmann 		insn = clone->insnsi + i + insn_delta;
15284f3446bbSDaniel Borkmann 		insn_cnt += insn_delta;
15294f3446bbSDaniel Borkmann 		i        += insn_delta;
15304f3446bbSDaniel Borkmann 	}
15314f3446bbSDaniel Borkmann 
15321c2a088aSAlexei Starovoitov 	clone->blinded = 1;
15334f3446bbSDaniel Borkmann 	return clone;
15344f3446bbSDaniel Borkmann }
1535b954d834SDaniel Borkmann #endif /* CONFIG_BPF_JIT */
1536738cbe72SDaniel Borkmann 
1537f5bffecdSAlexei Starovoitov /* Base function for offset calculation. Needs to go into .text section,
1538f5bffecdSAlexei Starovoitov  * therefore keeping it non-static as well; will also be used by JITs
15397105e828SDaniel Borkmann  * anyway later on, so do not let the compiler omit it. This also needs
15407105e828SDaniel Borkmann  * to go into kallsyms for correlation from e.g. bpftool, so naming
15417105e828SDaniel Borkmann  * must not change.
1542f5bffecdSAlexei Starovoitov  */
1543f5bffecdSAlexei Starovoitov noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1544f5bffecdSAlexei Starovoitov {
1545f5bffecdSAlexei Starovoitov 	return 0;
1546f5bffecdSAlexei Starovoitov }
15474d9c5c53SAlexei Starovoitov EXPORT_SYMBOL_GPL(__bpf_call_base);
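/* Helper calls encode insn->imm as the helper's offset from
 * __bpf_call_base, so both bpf_jit_get_func_addr() above and the
 * interpreter's JMP_CALL handler below resolve the target as
 * __bpf_call_base + insn->imm.
 */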
1548f5bffecdSAlexei Starovoitov 
15495e581dadSDaniel Borkmann /* All UAPI available opcodes. */
15505e581dadSDaniel Borkmann #define BPF_INSN_MAP(INSN_2, INSN_3)		\
15515e581dadSDaniel Borkmann 	/* 32 bit ALU operations. */		\
15525e581dadSDaniel Borkmann 	/*   Register based. */			\
15535e581dadSDaniel Borkmann 	INSN_3(ALU, ADD,  X),			\
15545e581dadSDaniel Borkmann 	INSN_3(ALU, SUB,  X),			\
15555e581dadSDaniel Borkmann 	INSN_3(ALU, AND,  X),			\
15565e581dadSDaniel Borkmann 	INSN_3(ALU, OR,   X),			\
15575e581dadSDaniel Borkmann 	INSN_3(ALU, LSH,  X),			\
15585e581dadSDaniel Borkmann 	INSN_3(ALU, RSH,  X),			\
15595e581dadSDaniel Borkmann 	INSN_3(ALU, XOR,  X),			\
15605e581dadSDaniel Borkmann 	INSN_3(ALU, MUL,  X),			\
15615e581dadSDaniel Borkmann 	INSN_3(ALU, MOV,  X),			\
15622dc6b100SJiong Wang 	INSN_3(ALU, ARSH, X),			\
15635e581dadSDaniel Borkmann 	INSN_3(ALU, DIV,  X),			\
15645e581dadSDaniel Borkmann 	INSN_3(ALU, MOD,  X),			\
15655e581dadSDaniel Borkmann 	INSN_2(ALU, NEG),			\
15665e581dadSDaniel Borkmann 	INSN_3(ALU, END, TO_BE),		\
15675e581dadSDaniel Borkmann 	INSN_3(ALU, END, TO_LE),		\
15685e581dadSDaniel Borkmann 	/*   Immediate based. */		\
15695e581dadSDaniel Borkmann 	INSN_3(ALU, ADD,  K),			\
15705e581dadSDaniel Borkmann 	INSN_3(ALU, SUB,  K),			\
15715e581dadSDaniel Borkmann 	INSN_3(ALU, AND,  K),			\
15725e581dadSDaniel Borkmann 	INSN_3(ALU, OR,   K),			\
15735e581dadSDaniel Borkmann 	INSN_3(ALU, LSH,  K),			\
15745e581dadSDaniel Borkmann 	INSN_3(ALU, RSH,  K),			\
15755e581dadSDaniel Borkmann 	INSN_3(ALU, XOR,  K),			\
15765e581dadSDaniel Borkmann 	INSN_3(ALU, MUL,  K),			\
15775e581dadSDaniel Borkmann 	INSN_3(ALU, MOV,  K),			\
15782dc6b100SJiong Wang 	INSN_3(ALU, ARSH, K),			\
15795e581dadSDaniel Borkmann 	INSN_3(ALU, DIV,  K),			\
15805e581dadSDaniel Borkmann 	INSN_3(ALU, MOD,  K),			\
15815e581dadSDaniel Borkmann 	/* 64 bit ALU operations. */		\
15825e581dadSDaniel Borkmann 	/*   Register based. */			\
15835e581dadSDaniel Borkmann 	INSN_3(ALU64, ADD,  X),			\
15845e581dadSDaniel Borkmann 	INSN_3(ALU64, SUB,  X),			\
15855e581dadSDaniel Borkmann 	INSN_3(ALU64, AND,  X),			\
15865e581dadSDaniel Borkmann 	INSN_3(ALU64, OR,   X),			\
15875e581dadSDaniel Borkmann 	INSN_3(ALU64, LSH,  X),			\
15885e581dadSDaniel Borkmann 	INSN_3(ALU64, RSH,  X),			\
15895e581dadSDaniel Borkmann 	INSN_3(ALU64, XOR,  X),			\
15905e581dadSDaniel Borkmann 	INSN_3(ALU64, MUL,  X),			\
15915e581dadSDaniel Borkmann 	INSN_3(ALU64, MOV,  X),			\
15925e581dadSDaniel Borkmann 	INSN_3(ALU64, ARSH, X),			\
15935e581dadSDaniel Borkmann 	INSN_3(ALU64, DIV,  X),			\
15945e581dadSDaniel Borkmann 	INSN_3(ALU64, MOD,  X),			\
15955e581dadSDaniel Borkmann 	INSN_2(ALU64, NEG),			\
15960845c3dbSYonghong Song 	INSN_3(ALU64, END, TO_LE),		\
15975e581dadSDaniel Borkmann 	/*   Immediate based. */		\
15985e581dadSDaniel Borkmann 	INSN_3(ALU64, ADD,  K),			\
15995e581dadSDaniel Borkmann 	INSN_3(ALU64, SUB,  K),			\
16005e581dadSDaniel Borkmann 	INSN_3(ALU64, AND,  K),			\
16015e581dadSDaniel Borkmann 	INSN_3(ALU64, OR,   K),			\
16025e581dadSDaniel Borkmann 	INSN_3(ALU64, LSH,  K),			\
16035e581dadSDaniel Borkmann 	INSN_3(ALU64, RSH,  K),			\
16045e581dadSDaniel Borkmann 	INSN_3(ALU64, XOR,  K),			\
16055e581dadSDaniel Borkmann 	INSN_3(ALU64, MUL,  K),			\
16065e581dadSDaniel Borkmann 	INSN_3(ALU64, MOV,  K),			\
16075e581dadSDaniel Borkmann 	INSN_3(ALU64, ARSH, K),			\
16085e581dadSDaniel Borkmann 	INSN_3(ALU64, DIV,  K),			\
16095e581dadSDaniel Borkmann 	INSN_3(ALU64, MOD,  K),			\
16105e581dadSDaniel Borkmann 	/* Call instruction. */			\
16115e581dadSDaniel Borkmann 	INSN_2(JMP, CALL),			\
16125e581dadSDaniel Borkmann 	/* Exit instruction. */			\
16135e581dadSDaniel Borkmann 	INSN_2(JMP, EXIT),			\
1614503a8865SJiong Wang 	/* 32-bit Jump instructions. */		\
1615503a8865SJiong Wang 	/*   Register based. */			\
1616503a8865SJiong Wang 	INSN_3(JMP32, JEQ,  X),			\
1617503a8865SJiong Wang 	INSN_3(JMP32, JNE,  X),			\
1618503a8865SJiong Wang 	INSN_3(JMP32, JGT,  X),			\
1619503a8865SJiong Wang 	INSN_3(JMP32, JLT,  X),			\
1620503a8865SJiong Wang 	INSN_3(JMP32, JGE,  X),			\
1621503a8865SJiong Wang 	INSN_3(JMP32, JLE,  X),			\
1622503a8865SJiong Wang 	INSN_3(JMP32, JSGT, X),			\
1623503a8865SJiong Wang 	INSN_3(JMP32, JSLT, X),			\
1624503a8865SJiong Wang 	INSN_3(JMP32, JSGE, X),			\
1625503a8865SJiong Wang 	INSN_3(JMP32, JSLE, X),			\
1626503a8865SJiong Wang 	INSN_3(JMP32, JSET, X),			\
1627503a8865SJiong Wang 	/*   Immediate based. */		\
1628503a8865SJiong Wang 	INSN_3(JMP32, JEQ,  K),			\
1629503a8865SJiong Wang 	INSN_3(JMP32, JNE,  K),			\
1630503a8865SJiong Wang 	INSN_3(JMP32, JGT,  K),			\
1631503a8865SJiong Wang 	INSN_3(JMP32, JLT,  K),			\
1632503a8865SJiong Wang 	INSN_3(JMP32, JGE,  K),			\
1633503a8865SJiong Wang 	INSN_3(JMP32, JLE,  K),			\
1634503a8865SJiong Wang 	INSN_3(JMP32, JSGT, K),			\
1635503a8865SJiong Wang 	INSN_3(JMP32, JSLT, K),			\
1636503a8865SJiong Wang 	INSN_3(JMP32, JSGE, K),			\
1637503a8865SJiong Wang 	INSN_3(JMP32, JSLE, K),			\
1638503a8865SJiong Wang 	INSN_3(JMP32, JSET, K),			\
16395e581dadSDaniel Borkmann 	/* Jump instructions. */		\
16405e581dadSDaniel Borkmann 	/*   Register based. */			\
16415e581dadSDaniel Borkmann 	INSN_3(JMP, JEQ,  X),			\
16425e581dadSDaniel Borkmann 	INSN_3(JMP, JNE,  X),			\
16435e581dadSDaniel Borkmann 	INSN_3(JMP, JGT,  X),			\
16445e581dadSDaniel Borkmann 	INSN_3(JMP, JLT,  X),			\
16455e581dadSDaniel Borkmann 	INSN_3(JMP, JGE,  X),			\
16465e581dadSDaniel Borkmann 	INSN_3(JMP, JLE,  X),			\
16475e581dadSDaniel Borkmann 	INSN_3(JMP, JSGT, X),			\
16485e581dadSDaniel Borkmann 	INSN_3(JMP, JSLT, X),			\
16495e581dadSDaniel Borkmann 	INSN_3(JMP, JSGE, X),			\
16505e581dadSDaniel Borkmann 	INSN_3(JMP, JSLE, X),			\
16515e581dadSDaniel Borkmann 	INSN_3(JMP, JSET, X),			\
16525e581dadSDaniel Borkmann 	/*   Immediate based. */		\
16535e581dadSDaniel Borkmann 	INSN_3(JMP, JEQ,  K),			\
16545e581dadSDaniel Borkmann 	INSN_3(JMP, JNE,  K),			\
16555e581dadSDaniel Borkmann 	INSN_3(JMP, JGT,  K),			\
16565e581dadSDaniel Borkmann 	INSN_3(JMP, JLT,  K),			\
16575e581dadSDaniel Borkmann 	INSN_3(JMP, JGE,  K),			\
16585e581dadSDaniel Borkmann 	INSN_3(JMP, JLE,  K),			\
16595e581dadSDaniel Borkmann 	INSN_3(JMP, JSGT, K),			\
16605e581dadSDaniel Borkmann 	INSN_3(JMP, JSLT, K),			\
16615e581dadSDaniel Borkmann 	INSN_3(JMP, JSGE, K),			\
16625e581dadSDaniel Borkmann 	INSN_3(JMP, JSLE, K),			\
16635e581dadSDaniel Borkmann 	INSN_3(JMP, JSET, K),			\
16645e581dadSDaniel Borkmann 	INSN_2(JMP, JA),			\
16654cd58e9aSYonghong Song 	INSN_2(JMP32, JA),			\
166688044230SPeilin Ye 	/* Atomic operations. */		\
166788044230SPeilin Ye 	INSN_3(STX, ATOMIC, B),			\
166888044230SPeilin Ye 	INSN_3(STX, ATOMIC, H),			\
166988044230SPeilin Ye 	INSN_3(STX, ATOMIC, W),			\
167088044230SPeilin Ye 	INSN_3(STX, ATOMIC, DW),		\
16715e581dadSDaniel Borkmann 	/* Store instructions. */		\
16725e581dadSDaniel Borkmann 	/*   Register based. */			\
16735e581dadSDaniel Borkmann 	INSN_3(STX, MEM,  B),			\
16745e581dadSDaniel Borkmann 	INSN_3(STX, MEM,  H),			\
16755e581dadSDaniel Borkmann 	INSN_3(STX, MEM,  W),			\
16765e581dadSDaniel Borkmann 	INSN_3(STX, MEM,  DW),			\
16775e581dadSDaniel Borkmann 	/*   Immediate based. */		\
16785e581dadSDaniel Borkmann 	INSN_3(ST, MEM, B),			\
16795e581dadSDaniel Borkmann 	INSN_3(ST, MEM, H),			\
16805e581dadSDaniel Borkmann 	INSN_3(ST, MEM, W),			\
16815e581dadSDaniel Borkmann 	INSN_3(ST, MEM, DW),			\
16825e581dadSDaniel Borkmann 	/* Load instructions. */		\
16835e581dadSDaniel Borkmann 	/*   Register based. */			\
16845e581dadSDaniel Borkmann 	INSN_3(LDX, MEM, B),			\
16855e581dadSDaniel Borkmann 	INSN_3(LDX, MEM, H),			\
16865e581dadSDaniel Borkmann 	INSN_3(LDX, MEM, W),			\
16875e581dadSDaniel Borkmann 	INSN_3(LDX, MEM, DW),			\
16881f9a1ea8SYonghong Song 	INSN_3(LDX, MEMSX, B),			\
16891f9a1ea8SYonghong Song 	INSN_3(LDX, MEMSX, H),			\
16901f9a1ea8SYonghong Song 	INSN_3(LDX, MEMSX, W),			\
16915e581dadSDaniel Borkmann 	/*   Immediate based. */		\
1692e0cea7ceSDaniel Borkmann 	INSN_3(LD, IMM, DW)
16935e581dadSDaniel Borkmann 
16945e581dadSDaniel Borkmann bool bpf_opcode_in_insntable(u8 code)
16955e581dadSDaniel Borkmann {
16965e581dadSDaniel Borkmann #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
16975e581dadSDaniel Borkmann #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
16985e581dadSDaniel Borkmann 	static const bool public_insntable[256] = {
16995e581dadSDaniel Borkmann 		[0 ... 255] = false,
17005e581dadSDaniel Borkmann 		/* Now overwrite non-defaults ... */
17015e581dadSDaniel Borkmann 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
1702e0cea7ceSDaniel Borkmann 		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
1703e0cea7ceSDaniel Borkmann 		[BPF_LD | BPF_ABS | BPF_B] = true,
1704e0cea7ceSDaniel Borkmann 		[BPF_LD | BPF_ABS | BPF_H] = true,
1705e0cea7ceSDaniel Borkmann 		[BPF_LD | BPF_ABS | BPF_W] = true,
1706e0cea7ceSDaniel Borkmann 		[BPF_LD | BPF_IND | BPF_B] = true,
1707e0cea7ceSDaniel Borkmann 		[BPF_LD | BPF_IND | BPF_H] = true,
1708e0cea7ceSDaniel Borkmann 		[BPF_LD | BPF_IND | BPF_W] = true,
1709011832b9SAlexei Starovoitov 		[BPF_JMP | BPF_JCOND] = true,
17105e581dadSDaniel Borkmann 	};
17115e581dadSDaniel Borkmann #undef BPF_INSN_3_TBL
17125e581dadSDaniel Borkmann #undef BPF_INSN_2_TBL
17135e581dadSDaniel Borkmann 	return public_insntable[code];
17145e581dadSDaniel Borkmann }
17155e581dadSDaniel Borkmann 
1716290af866SAlexei Starovoitov #ifndef CONFIG_BPF_JIT_ALWAYS_ON
1717f5bffecdSAlexei Starovoitov /**
1718019d0454SRandy Dunlap  *	___bpf_prog_run - run eBPF program on a given context
1719de1da68dSValdis Kletnieks  *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
17207ae457c1SAlexei Starovoitov  *	@insn: is the array of eBPF instructions
1721f5bffecdSAlexei Starovoitov  *
17227ae457c1SAlexei Starovoitov  * Decode and execute eBPF instructions.
1723019d0454SRandy Dunlap  *
1724019d0454SRandy Dunlap  * Return: whatever value is in %BPF_R0 at program exit
1725f5bffecdSAlexei Starovoitov  */
17262ec9898eSHe Fengqing static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
1727f5bffecdSAlexei Starovoitov {
17285e581dadSDaniel Borkmann #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
17295e581dadSDaniel Borkmann #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
1730e55a7325SJosh Poimboeuf 	static const void * const jumptable[256] __annotate_jump_table = {
1731f5bffecdSAlexei Starovoitov 		[0 ... 255] = &&default_label,
1732f5bffecdSAlexei Starovoitov 		/* Now overwrite non-defaults ... */
17335e581dadSDaniel Borkmann 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
17345e581dadSDaniel Borkmann 		/* Non-UAPI available opcodes. */
17351ea47e01SAlexei Starovoitov 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
173671189fa9SAlexei Starovoitov 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
1737f5e81d11SDaniel Borkmann 		[BPF_ST  | BPF_NOSPEC] = &&ST_NOSPEC,
17382a02759eSAlexei Starovoitov 		[BPF_LDX | BPF_PROBE_MEM | BPF_B] = &&LDX_PROBE_MEM_B,
17392a02759eSAlexei Starovoitov 		[BPF_LDX | BPF_PROBE_MEM | BPF_H] = &&LDX_PROBE_MEM_H,
17402a02759eSAlexei Starovoitov 		[BPF_LDX | BPF_PROBE_MEM | BPF_W] = &&LDX_PROBE_MEM_W,
17412a02759eSAlexei Starovoitov 		[BPF_LDX | BPF_PROBE_MEM | BPF_DW] = &&LDX_PROBE_MEM_DW,
17421f9a1ea8SYonghong Song 		[BPF_LDX | BPF_PROBE_MEMSX | BPF_B] = &&LDX_PROBE_MEMSX_B,
17431f9a1ea8SYonghong Song 		[BPF_LDX | BPF_PROBE_MEMSX | BPF_H] = &&LDX_PROBE_MEMSX_H,
17441f9a1ea8SYonghong Song 		[BPF_LDX | BPF_PROBE_MEMSX | BPF_W] = &&LDX_PROBE_MEMSX_W,
1745f5bffecdSAlexei Starovoitov 	};
17465e581dadSDaniel Borkmann #undef BPF_INSN_3_LBL
17475e581dadSDaniel Borkmann #undef BPF_INSN_2_LBL
174804fd61abSAlexei Starovoitov 	u32 tail_call_cnt = 0;
1749f5bffecdSAlexei Starovoitov 
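	/* The interpreter is direct-threaded: each opcode indexes the
	 * jumptable above and a computed goto transfers control straight
	 * to its handler; CONT/CONT_JMP advance insn and re-dispatch.
	 */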
1750f5bffecdSAlexei Starovoitov #define CONT	 ({ insn++; goto select_insn; })
1751f5bffecdSAlexei Starovoitov #define CONT_JMP ({ insn++; goto select_insn; })
1752f5bffecdSAlexei Starovoitov 
1753f5bffecdSAlexei Starovoitov select_insn:
1754f5bffecdSAlexei Starovoitov 	goto *jumptable[insn->code];
1755f5bffecdSAlexei Starovoitov 
175628131e9dSDaniel Borkmann 	/* Explicitly mask the register-based shift amounts with 63 or 31
175728131e9dSDaniel Borkmann 	 * to avoid undefined behavior. Normally this won't affect the
175828131e9dSDaniel Borkmann 	 * generated code: on native 64-bit archs such as x86-64 or arm64,
175928131e9dSDaniel Borkmann 	 * for example, the compiler optimizes the AND away for
176028131e9dSDaniel Borkmann 	 * the interpreter. In case of JITs, each of the JIT backends compiles
176128131e9dSDaniel Borkmann 	 * the BPF shift operations to machine instructions which produce
176228131e9dSDaniel Borkmann 	 * implementation-defined results in such a case; the resulting
176328131e9dSDaniel Borkmann 	 * contents of the register may be arbitrary, but program behaviour
176428131e9dSDaniel Borkmann 	 * as a whole remains defined. In other words, in case of JIT backends,
176528131e9dSDaniel Borkmann 	 * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation.
176628131e9dSDaniel Borkmann 	 */
176728131e9dSDaniel Borkmann 	/* ALU (shifts) */
176828131e9dSDaniel Borkmann #define SHT(OPCODE, OP)					\
176928131e9dSDaniel Borkmann 	ALU64_##OPCODE##_X:				\
177028131e9dSDaniel Borkmann 		DST = DST OP (SRC & 63);		\
177128131e9dSDaniel Borkmann 		CONT;					\
177228131e9dSDaniel Borkmann 	ALU_##OPCODE##_X:				\
177328131e9dSDaniel Borkmann 		DST = (u32) DST OP ((u32) SRC & 31);	\
177428131e9dSDaniel Borkmann 		CONT;					\
177528131e9dSDaniel Borkmann 	ALU64_##OPCODE##_K:				\
177628131e9dSDaniel Borkmann 		DST = DST OP IMM;			\
177728131e9dSDaniel Borkmann 		CONT;					\
177828131e9dSDaniel Borkmann 	ALU_##OPCODE##_K:				\
177928131e9dSDaniel Borkmann 		DST = (u32) DST OP (u32) IMM;		\
178028131e9dSDaniel Borkmann 		CONT;
178128131e9dSDaniel Borkmann 	/* ALU (rest) */
1782f5bffecdSAlexei Starovoitov #define ALU(OPCODE, OP)					\
1783f5bffecdSAlexei Starovoitov 	ALU64_##OPCODE##_X:				\
1784f5bffecdSAlexei Starovoitov 		DST = DST OP SRC;			\
1785f5bffecdSAlexei Starovoitov 		CONT;					\
1786f5bffecdSAlexei Starovoitov 	ALU_##OPCODE##_X:				\
1787f5bffecdSAlexei Starovoitov 		DST = (u32) DST OP (u32) SRC;		\
1788f5bffecdSAlexei Starovoitov 		CONT;					\
1789f5bffecdSAlexei Starovoitov 	ALU64_##OPCODE##_K:				\
1790f5bffecdSAlexei Starovoitov 		DST = DST OP IMM;			\
1791f5bffecdSAlexei Starovoitov 		CONT;					\
1792f5bffecdSAlexei Starovoitov 	ALU_##OPCODE##_K:				\
1793f5bffecdSAlexei Starovoitov 		DST = (u32) DST OP (u32) IMM;		\
1794f5bffecdSAlexei Starovoitov 		CONT;
1795f5bffecdSAlexei Starovoitov 	ALU(ADD,  +)
1796f5bffecdSAlexei Starovoitov 	ALU(SUB,  -)
1797f5bffecdSAlexei Starovoitov 	ALU(AND,  &)
1798f5bffecdSAlexei Starovoitov 	ALU(OR,   |)
1799f5bffecdSAlexei Starovoitov 	ALU(XOR,  ^)
1800f5bffecdSAlexei Starovoitov 	ALU(MUL,  *)
180128131e9dSDaniel Borkmann 	SHT(LSH, <<)
180228131e9dSDaniel Borkmann 	SHT(RSH, >>)
180328131e9dSDaniel Borkmann #undef SHT
1804f5bffecdSAlexei Starovoitov #undef ALU
1805f5bffecdSAlexei Starovoitov 	ALU_NEG:
1806f5bffecdSAlexei Starovoitov 		DST = (u32) -DST;
1807f5bffecdSAlexei Starovoitov 		CONT;
1808f5bffecdSAlexei Starovoitov 	ALU64_NEG:
1809f5bffecdSAlexei Starovoitov 		DST = -DST;
1810f5bffecdSAlexei Starovoitov 		CONT;
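	/* For MOV, a non-zero insn->off encodes the cpu v4 BPF_MOVSX form:
	 * off gives the width (8/16, plus 32 for the 64-bit variant) of
	 * the source value that is sign-extended into the destination.
	 */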
1811f5bffecdSAlexei Starovoitov 	ALU_MOV_X:
18128100928cSYonghong Song 		switch (OFF) {
18138100928cSYonghong Song 		case 0:
1814f5bffecdSAlexei Starovoitov 			DST = (u32) SRC;
18158100928cSYonghong Song 			break;
18168100928cSYonghong Song 		case 8:
18178100928cSYonghong Song 			DST = (u32)(s8) SRC;
18188100928cSYonghong Song 			break;
18198100928cSYonghong Song 		case 16:
18208100928cSYonghong Song 			DST = (u32)(s16) SRC;
18218100928cSYonghong Song 			break;
18228100928cSYonghong Song 		}
1823f5bffecdSAlexei Starovoitov 		CONT;
1824f5bffecdSAlexei Starovoitov 	ALU_MOV_K:
1825f5bffecdSAlexei Starovoitov 		DST = (u32) IMM;
1826f5bffecdSAlexei Starovoitov 		CONT;
1827f5bffecdSAlexei Starovoitov 	ALU64_MOV_X:
18288100928cSYonghong Song 		switch (OFF) {
18298100928cSYonghong Song 		case 0:
1830f5bffecdSAlexei Starovoitov 			DST = SRC;
18318100928cSYonghong Song 			break;
18328100928cSYonghong Song 		case 8:
18338100928cSYonghong Song 			DST = (s8) SRC;
18348100928cSYonghong Song 			break;
18358100928cSYonghong Song 		case 16:
18368100928cSYonghong Song 			DST = (s16) SRC;
18378100928cSYonghong Song 			break;
18388100928cSYonghong Song 		case 32:
18398100928cSYonghong Song 			DST = (s32) SRC;
18408100928cSYonghong Song 			break;
18418100928cSYonghong Song 		}
1842f5bffecdSAlexei Starovoitov 		CONT;
1843f5bffecdSAlexei Starovoitov 	ALU64_MOV_K:
1844f5bffecdSAlexei Starovoitov 		DST = IMM;
1845f5bffecdSAlexei Starovoitov 		CONT;
184602ab695bSAlexei Starovoitov 	LD_IMM_DW:
184702ab695bSAlexei Starovoitov 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
184802ab695bSAlexei Starovoitov 		insn++;
184902ab695bSAlexei Starovoitov 		CONT;
18502dc6b100SJiong Wang 	ALU_ARSH_X:
185128131e9dSDaniel Borkmann 		DST = (u64) (u32) (((s32) DST) >> (SRC & 31));
18522dc6b100SJiong Wang 		CONT;
18532dc6b100SJiong Wang 	ALU_ARSH_K:
185475672ddaSJiong Wang 		DST = (u64) (u32) (((s32) DST) >> IMM);
18552dc6b100SJiong Wang 		CONT;
1856f5bffecdSAlexei Starovoitov 	ALU64_ARSH_X:
185728131e9dSDaniel Borkmann 		(*(s64 *) &DST) >>= (SRC & 63);
1858f5bffecdSAlexei Starovoitov 		CONT;
1859f5bffecdSAlexei Starovoitov 	ALU64_ARSH_K:
1860f5bffecdSAlexei Starovoitov 		(*(s64 *) &DST) >>= IMM;
1861f5bffecdSAlexei Starovoitov 		CONT;
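	/* For the DIV/MOD handlers below, insn->off selects the flavour:
	 * off == 0 keeps the original unsigned semantics, off == 1 is the
	 * cpu v4 signed variant (BPF_SDIV/BPF_SMOD) with C-style
	 * truncation, so a signed remainder takes the sign of the dividend.
	 */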
1862f5bffecdSAlexei Starovoitov 	ALU64_MOD_X:
1863ec0e2da9SYonghong Song 		switch (OFF) {
1864ec0e2da9SYonghong Song 		case 0:
1865144cd91cSDaniel Borkmann 			div64_u64_rem(DST, SRC, &AX);
1866144cd91cSDaniel Borkmann 			DST = AX;
1867ec0e2da9SYonghong Song 			break;
1868ec0e2da9SYonghong Song 		case 1:
1869ec0e2da9SYonghong Song 			AX = div64_s64(DST, SRC);
1870ec0e2da9SYonghong Song 			DST = DST - AX * SRC;
1871ec0e2da9SYonghong Song 			break;
1872ec0e2da9SYonghong Song 		}
1873f5bffecdSAlexei Starovoitov 		CONT;
1874f5bffecdSAlexei Starovoitov 	ALU_MOD_X:
1875ec0e2da9SYonghong Song 		switch (OFF) {
1876ec0e2da9SYonghong Song 		case 0:
1877144cd91cSDaniel Borkmann 			AX = (u32) DST;
1878144cd91cSDaniel Borkmann 			DST = do_div(AX, (u32) SRC);
1879ec0e2da9SYonghong Song 			break;
1880ec0e2da9SYonghong Song 		case 1:
1881ec0e2da9SYonghong Song 			AX = abs((s32)DST);
1882ec0e2da9SYonghong Song 			AX = do_div(AX, abs((s32)SRC));
1883ec0e2da9SYonghong Song 			if ((s32)DST < 0)
1884ec0e2da9SYonghong Song 				DST = (u32)-AX;
1885ec0e2da9SYonghong Song 			else
1886ec0e2da9SYonghong Song 				DST = (u32)AX;
1887ec0e2da9SYonghong Song 			break;
1888ec0e2da9SYonghong Song 		}
1889f5bffecdSAlexei Starovoitov 		CONT;
1890f5bffecdSAlexei Starovoitov 	ALU64_MOD_K:
1891ec0e2da9SYonghong Song 		switch (OFF) {
1892ec0e2da9SYonghong Song 		case 0:
1893144cd91cSDaniel Borkmann 			div64_u64_rem(DST, IMM, &AX);
1894144cd91cSDaniel Borkmann 			DST = AX;
1895ec0e2da9SYonghong Song 			break;
1896ec0e2da9SYonghong Song 		case 1:
1897ec0e2da9SYonghong Song 			AX = div64_s64(DST, IMM);
1898ec0e2da9SYonghong Song 			DST = DST - AX * IMM;
1899ec0e2da9SYonghong Song 			break;
1900ec0e2da9SYonghong Song 		}
1901f5bffecdSAlexei Starovoitov 		CONT;
1902f5bffecdSAlexei Starovoitov 	ALU_MOD_K:
1903ec0e2da9SYonghong Song 		switch (OFF) {
1904ec0e2da9SYonghong Song 		case 0:
1905144cd91cSDaniel Borkmann 			AX = (u32) DST;
1906144cd91cSDaniel Borkmann 			DST = do_div(AX, (u32) IMM);
1907ec0e2da9SYonghong Song 			break;
1908ec0e2da9SYonghong Song 		case 1:
1909ec0e2da9SYonghong Song 			AX = abs((s32)DST);
1910ec0e2da9SYonghong Song 			AX = do_div(AX, abs((s32)IMM));
1911ec0e2da9SYonghong Song 			if ((s32)DST < 0)
1912ec0e2da9SYonghong Song 				DST = (u32)-AX;
1913ec0e2da9SYonghong Song 			else
1914ec0e2da9SYonghong Song 				DST = (u32)AX;
1915ec0e2da9SYonghong Song 			break;
1916ec0e2da9SYonghong Song 		}
1917f5bffecdSAlexei Starovoitov 		CONT;
1918f5bffecdSAlexei Starovoitov 	ALU64_DIV_X:
1919ec0e2da9SYonghong Song 		switch (OFF) {
1920ec0e2da9SYonghong Song 		case 0:
1921876a7ae6SAlexei Starovoitov 			DST = div64_u64(DST, SRC);
1922ec0e2da9SYonghong Song 			break;
1923ec0e2da9SYonghong Song 		case 1:
1924ec0e2da9SYonghong Song 			DST = div64_s64(DST, SRC);
1925ec0e2da9SYonghong Song 			break;
1926ec0e2da9SYonghong Song 		}
1927f5bffecdSAlexei Starovoitov 		CONT;
1928f5bffecdSAlexei Starovoitov 	ALU_DIV_X:
1929ec0e2da9SYonghong Song 		switch (OFF) {
1930ec0e2da9SYonghong Song 		case 0:
1931144cd91cSDaniel Borkmann 			AX = (u32) DST;
1932144cd91cSDaniel Borkmann 			do_div(AX, (u32) SRC);
1933144cd91cSDaniel Borkmann 			DST = (u32) AX;
1934ec0e2da9SYonghong Song 			break;
1935ec0e2da9SYonghong Song 		case 1:
1936ec0e2da9SYonghong Song 			AX = abs((s32)DST);
1937ec0e2da9SYonghong Song 			do_div(AX, abs((s32)SRC));
193809fedc73SYonghong Song 			if (((s32)DST < 0) == ((s32)SRC < 0))
1939ec0e2da9SYonghong Song 				DST = (u32)AX;
1940ec0e2da9SYonghong Song 			else
1941ec0e2da9SYonghong Song 				DST = (u32)-AX;
1942ec0e2da9SYonghong Song 			break;
1943ec0e2da9SYonghong Song 		}
1944f5bffecdSAlexei Starovoitov 		CONT;
1945f5bffecdSAlexei Starovoitov 	ALU64_DIV_K:
1946ec0e2da9SYonghong Song 		switch (OFF) {
1947ec0e2da9SYonghong Song 		case 0:
1948876a7ae6SAlexei Starovoitov 			DST = div64_u64(DST, IMM);
1949ec0e2da9SYonghong Song 			break;
1950ec0e2da9SYonghong Song 		case 1:
1951ec0e2da9SYonghong Song 			DST = div64_s64(DST, IMM);
1952ec0e2da9SYonghong Song 			break;
1953ec0e2da9SYonghong Song 		}
1954f5bffecdSAlexei Starovoitov 		CONT;
1955f5bffecdSAlexei Starovoitov 	ALU_DIV_K:
1956ec0e2da9SYonghong Song 		switch (OFF) {
1957ec0e2da9SYonghong Song 		case 0:
1958144cd91cSDaniel Borkmann 			AX = (u32) DST;
1959144cd91cSDaniel Borkmann 			do_div(AX, (u32) IMM);
1960144cd91cSDaniel Borkmann 			DST = (u32) AX;
1961ec0e2da9SYonghong Song 			break;
1962ec0e2da9SYonghong Song 		case 1:
1963ec0e2da9SYonghong Song 			AX = abs((s32)DST);
1964ec0e2da9SYonghong Song 			do_div(AX, abs((s32)IMM));
196509fedc73SYonghong Song 			if (((s32)DST < 0) == ((s32)IMM < 0))
1966ec0e2da9SYonghong Song 				DST = (u32)AX;
1967ec0e2da9SYonghong Song 			else
1968ec0e2da9SYonghong Song 				DST = (u32)-AX;
1969ec0e2da9SYonghong Song 			break;
1970ec0e2da9SYonghong Song 		}
1971f5bffecdSAlexei Starovoitov 		CONT;
1972f5bffecdSAlexei Starovoitov 	ALU_END_TO_BE:
1973f5bffecdSAlexei Starovoitov 		switch (IMM) {
1974f5bffecdSAlexei Starovoitov 		case 16:
1975f5bffecdSAlexei Starovoitov 			DST = (__force u16) cpu_to_be16(DST);
1976f5bffecdSAlexei Starovoitov 			break;
1977f5bffecdSAlexei Starovoitov 		case 32:
1978f5bffecdSAlexei Starovoitov 			DST = (__force u32) cpu_to_be32(DST);
1979f5bffecdSAlexei Starovoitov 			break;
1980f5bffecdSAlexei Starovoitov 		case 64:
1981f5bffecdSAlexei Starovoitov 			DST = (__force u64) cpu_to_be64(DST);
1982f5bffecdSAlexei Starovoitov 			break;
1983f5bffecdSAlexei Starovoitov 		}
1984f5bffecdSAlexei Starovoitov 		CONT;
1985f5bffecdSAlexei Starovoitov 	ALU_END_TO_LE:
1986f5bffecdSAlexei Starovoitov 		switch (IMM) {
1987f5bffecdSAlexei Starovoitov 		case 16:
1988f5bffecdSAlexei Starovoitov 			DST = (__force u16) cpu_to_le16(DST);
1989f5bffecdSAlexei Starovoitov 			break;
1990f5bffecdSAlexei Starovoitov 		case 32:
1991f5bffecdSAlexei Starovoitov 			DST = (__force u32) cpu_to_le32(DST);
1992f5bffecdSAlexei Starovoitov 			break;
1993f5bffecdSAlexei Starovoitov 		case 64:
1994f5bffecdSAlexei Starovoitov 			DST = (__force u64) cpu_to_le64(DST);
1995f5bffecdSAlexei Starovoitov 			break;
1996f5bffecdSAlexei Starovoitov 		}
1997f5bffecdSAlexei Starovoitov 		CONT;
19980845c3dbSYonghong Song 	ALU64_END_TO_LE:
19990845c3dbSYonghong Song 		switch (IMM) {
20000845c3dbSYonghong Song 		case 16:
20010845c3dbSYonghong Song 			DST = (__force u16) __swab16(DST);
20020845c3dbSYonghong Song 			break;
20030845c3dbSYonghong Song 		case 32:
20040845c3dbSYonghong Song 			DST = (__force u32) __swab32(DST);
20050845c3dbSYonghong Song 			break;
20060845c3dbSYonghong Song 		case 64:
20070845c3dbSYonghong Song 			DST = (__force u64) __swab64(DST);
20080845c3dbSYonghong Song 			break;
20090845c3dbSYonghong Song 		}
20100845c3dbSYonghong Song 		CONT;
2011f5bffecdSAlexei Starovoitov 
2012f5bffecdSAlexei Starovoitov 	/* CALL */
2013f5bffecdSAlexei Starovoitov 	JMP_CALL:
2014f5bffecdSAlexei Starovoitov 		/* Function call scratches BPF_R1-BPF_R5 registers,
2015f5bffecdSAlexei Starovoitov 		 * preserves BPF_R6-BPF_R9, and stores return value
2016f5bffecdSAlexei Starovoitov 		 * into BPF_R0.
2017f5bffecdSAlexei Starovoitov 		 */
2018f5bffecdSAlexei Starovoitov 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
2019f5bffecdSAlexei Starovoitov 						       BPF_R4, BPF_R5);
2020f5bffecdSAlexei Starovoitov 		CONT;
2021f5bffecdSAlexei Starovoitov 
20221ea47e01SAlexei Starovoitov 	JMP_CALL_ARGS:
20231ea47e01SAlexei Starovoitov 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
20241ea47e01SAlexei Starovoitov 							    BPF_R3, BPF_R4,
20251ea47e01SAlexei Starovoitov 							    BPF_R5,
20261ea47e01SAlexei Starovoitov 							    insn + insn->off + 1);
20271ea47e01SAlexei Starovoitov 		CONT;
20281ea47e01SAlexei Starovoitov 
202904fd61abSAlexei Starovoitov 	JMP_TAIL_CALL: {
203004fd61abSAlexei Starovoitov 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
203104fd61abSAlexei Starovoitov 		struct bpf_array *array = container_of(map, struct bpf_array, map);
203204fd61abSAlexei Starovoitov 		struct bpf_prog *prog;
203390caccddSAlexei Starovoitov 		u32 index = BPF_R3;
203404fd61abSAlexei Starovoitov 
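		/* A tail call never returns to the caller: after the bounds
		 * and MAX_TAIL_CALL_CNT checks below, execution continues at
		 * the first instruction of the target program.
		 */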
203504fd61abSAlexei Starovoitov 		if (unlikely(index >= array->map.max_entries))
203604fd61abSAlexei Starovoitov 			goto out;
2037ebf7f6f0STiezhu Yang 
2038ebf7f6f0STiezhu Yang 		if (unlikely(tail_call_cnt >= MAX_TAIL_CALL_CNT))
203904fd61abSAlexei Starovoitov 			goto out;
204004fd61abSAlexei Starovoitov 
204104fd61abSAlexei Starovoitov 		tail_call_cnt++;
204204fd61abSAlexei Starovoitov 
20432a36f0b9SWang Nan 		prog = READ_ONCE(array->ptrs[index]);
20441ca1cc98SDaniel Borkmann 		if (!prog)
204504fd61abSAlexei Starovoitov 			goto out;
204604fd61abSAlexei Starovoitov 
2047c4675f93SDaniel Borkmann 		/* ARG1 at this point is guaranteed to point to CTX from
2048c4675f93SDaniel Borkmann 		 * the verifier side because the tail call is
20490142dddcSChris Packham 		 * handled like a helper, that is, bpf_tail_call_proto,
2050c4675f93SDaniel Borkmann 		 * where arg1_type is ARG_PTR_TO_CTX.
2051c4675f93SDaniel Borkmann 		 */
205204fd61abSAlexei Starovoitov 		insn = prog->insnsi;
205304fd61abSAlexei Starovoitov 		goto select_insn;
205404fd61abSAlexei Starovoitov out:
205504fd61abSAlexei Starovoitov 		CONT;
205604fd61abSAlexei Starovoitov 	}
2057f5bffecdSAlexei Starovoitov 	JMP_JA:
2058f5bffecdSAlexei Starovoitov 		insn += insn->off;
2059f5bffecdSAlexei Starovoitov 		CONT;
20604cd58e9aSYonghong Song 	JMP32_JA:
20614cd58e9aSYonghong Song 		insn += insn->imm;
20624cd58e9aSYonghong Song 		CONT;
2063f5bffecdSAlexei Starovoitov 	JMP_EXIT:
2064f5bffecdSAlexei Starovoitov 		return BPF_R0;
2065503a8865SJiong Wang 	/* JMP */
2066503a8865SJiong Wang #define COND_JMP(SIGN, OPCODE, CMP_OP)				\
2067503a8865SJiong Wang 	JMP_##OPCODE##_X:					\
2068503a8865SJiong Wang 		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
2069503a8865SJiong Wang 			insn += insn->off;			\
2070503a8865SJiong Wang 			CONT_JMP;				\
2071503a8865SJiong Wang 		}						\
2072503a8865SJiong Wang 		CONT;						\
2073503a8865SJiong Wang 	JMP32_##OPCODE##_X:					\
2074503a8865SJiong Wang 		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
2075503a8865SJiong Wang 			insn += insn->off;			\
2076503a8865SJiong Wang 			CONT_JMP;				\
2077503a8865SJiong Wang 		}						\
2078503a8865SJiong Wang 		CONT;						\
2079503a8865SJiong Wang 	JMP_##OPCODE##_K:					\
2080503a8865SJiong Wang 		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
2081503a8865SJiong Wang 			insn += insn->off;			\
2082503a8865SJiong Wang 			CONT_JMP;				\
2083503a8865SJiong Wang 		}						\
2084503a8865SJiong Wang 		CONT;						\
2085503a8865SJiong Wang 	JMP32_##OPCODE##_K:					\
2086503a8865SJiong Wang 		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
2087503a8865SJiong Wang 			insn += insn->off;			\
2088503a8865SJiong Wang 			CONT_JMP;				\
2089503a8865SJiong Wang 		}						\
2090503a8865SJiong Wang 		CONT;
2091503a8865SJiong Wang 	COND_JMP(u, JEQ, ==)
2092503a8865SJiong Wang 	COND_JMP(u, JNE, !=)
2093503a8865SJiong Wang 	COND_JMP(u, JGT, >)
2094503a8865SJiong Wang 	COND_JMP(u, JLT, <)
2095503a8865SJiong Wang 	COND_JMP(u, JGE, >=)
2096503a8865SJiong Wang 	COND_JMP(u, JLE, <=)
2097503a8865SJiong Wang 	COND_JMP(u, JSET, &)
2098503a8865SJiong Wang 	COND_JMP(s, JSGT, >)
2099503a8865SJiong Wang 	COND_JMP(s, JSLT, <)
2100503a8865SJiong Wang 	COND_JMP(s, JSGE, >=)
2101503a8865SJiong Wang 	COND_JMP(s, JSLE, <=)
2102503a8865SJiong Wang #undef COND_JMP
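	/* Each COND_JMP() line above expands to four labels; e.g.
	 * COND_JMP(u, JGT, >) becomes, in sketch form:
	 *
	 *	JMP_JGT_X:   if ((u64) DST > (u64) SRC) insn += insn->off;
	 *	JMP32_JGT_X: if ((u32) DST > (u32) SRC) insn += insn->off;
	 *	JMP_JGT_K:   if ((u64) DST > (u64) IMM) insn += insn->off;
	 *	JMP32_JGT_K: if ((u32) DST > (u32) IMM) insn += insn->off;
	 *
	 * _X compares against a register, _K against the immediate, and
	 * the JMP32 variants only look at the lower 32 bits.
	 */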
2103f5e81d11SDaniel Borkmann 	/* ST, STX and LDX */
2104f5e81d11SDaniel Borkmann 	ST_NOSPEC:
2105f5e81d11SDaniel Borkmann 		/* Speculation barrier for mitigating Speculative Store Bypass.
2106f5e81d11SDaniel Borkmann 		 * In case of arm64, we rely on the firmware mitigation as
2107f5e81d11SDaniel Borkmann 		 * controlled via the ssbd kernel parameter. Whenever the
2108f5e81d11SDaniel Borkmann 		 * mitigation is enabled, it works for all of the kernel code
2109f5e81d11SDaniel Borkmann 		 * with no need to provide any additional instructions here.
2110f5e81d11SDaniel Borkmann 		 * In case of x86, we use 'lfence' insn for mitigation. We
2111f5e81d11SDaniel Borkmann 		 * reuse preexisting logic from Spectre v1 mitigation that
2112f5e81d11SDaniel Borkmann 		 * happens to produce the required code on x86 for v4 as well.
2113f5e81d11SDaniel Borkmann 		 */
2114f5e81d11SDaniel Borkmann 		barrier_nospec();
2115f5e81d11SDaniel Borkmann 		CONT;
2116f5bffecdSAlexei Starovoitov #define LDST(SIZEOP, SIZE)						\
2117f5bffecdSAlexei Starovoitov 	STX_MEM_##SIZEOP:						\
2118f5bffecdSAlexei Starovoitov 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
2119f5bffecdSAlexei Starovoitov 		CONT;							\
2120f5bffecdSAlexei Starovoitov 	ST_MEM_##SIZEOP:						\
2121f5bffecdSAlexei Starovoitov 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
2122f5bffecdSAlexei Starovoitov 		CONT;							\
2123f5bffecdSAlexei Starovoitov 	LDX_MEM_##SIZEOP:						\
2124f5bffecdSAlexei Starovoitov 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
2125caff1fa4SMenglong Dong 		CONT;							\
2126caff1fa4SMenglong Dong 	LDX_PROBE_MEM_##SIZEOP:						\
21276a5a148aSArnd Bergmann 		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),	\
2128caff1fa4SMenglong Dong 			      (const void *)(long) (SRC + insn->off));	\
2129caff1fa4SMenglong Dong 		DST = *((SIZE *)&DST);					\
2130f5bffecdSAlexei Starovoitov 		CONT;
2131f5bffecdSAlexei Starovoitov 
2132f5bffecdSAlexei Starovoitov 	LDST(B,   u8)
2133f5bffecdSAlexei Starovoitov 	LDST(H,  u16)
2134f5bffecdSAlexei Starovoitov 	LDST(W,  u32)
2135f5bffecdSAlexei Starovoitov 	LDST(DW, u64)
2136f5bffecdSAlexei Starovoitov #undef LDST
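	/* As an example, LDST(W, u32) expands to the 32-bit cases:
	 * STX_MEM_W stores the low 32 bits of SRC at DST + off, ST_MEM_W
	 * stores the immediate, LDX_MEM_W is a plain zero-extending load,
	 * and LDX_PROBE_MEM_W is the fault-tolerant variant that goes
	 * through bpf_probe_read_kernel_common(), so a faulting kernel
	 * address leaves DST zeroed instead of oopsing.
	 */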
21372a02759eSAlexei Starovoitov 
21381f9a1ea8SYonghong Song #define LDSX(SIZEOP, SIZE)						\
21391f9a1ea8SYonghong Song 	LDX_MEMSX_##SIZEOP:						\
21401f9a1ea8SYonghong Song 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
21411f9a1ea8SYonghong Song 		CONT;							\
21421f9a1ea8SYonghong Song 	LDX_PROBE_MEMSX_##SIZEOP:					\
21436a5a148aSArnd Bergmann 		bpf_probe_read_kernel_common(&DST, sizeof(SIZE),		\
21441f9a1ea8SYonghong Song 				      (const void *)(long) (SRC + insn->off));	\
21451f9a1ea8SYonghong Song 		DST = *((SIZE *)&DST);					\
21461f9a1ea8SYonghong Song 		CONT;
21471f9a1ea8SYonghong Song 
21481f9a1ea8SYonghong Song 	LDSX(B,   s8)
21491f9a1ea8SYonghong Song 	LDSX(H,  s16)
21501f9a1ea8SYonghong Song 	LDSX(W,  s32)
21511f9a1ea8SYonghong Song #undef LDSX
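	/* LDSX() mirrors LDST() with signed types, so assigning to the
	 * 64-bit DST register sign-extends: e.g. for LDX_MEMSX_B a loaded
	 * byte of 0x80 ends up as 0xffffffffffffff80 in DST.
	 */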
21521f9a1ea8SYonghong Song 
215346291067SBrendan Jackman #define ATOMIC_ALU_OP(BOP, KOP)						\
215446291067SBrendan Jackman 		case BOP:						\
215546291067SBrendan Jackman 			if (BPF_SIZE(insn->code) == BPF_W)		\
215646291067SBrendan Jackman 				atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
215746291067SBrendan Jackman 					     (DST + insn->off));	\
215888044230SPeilin Ye 			else if (BPF_SIZE(insn->code) == BPF_DW)	\
215946291067SBrendan Jackman 				atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
216046291067SBrendan Jackman 					       (DST + insn->off));	\
216188044230SPeilin Ye 			else						\
216288044230SPeilin Ye 				goto default_label;			\
216346291067SBrendan Jackman 			break;						\
216446291067SBrendan Jackman 		case BOP | BPF_FETCH:					\
216546291067SBrendan Jackman 			if (BPF_SIZE(insn->code) == BPF_W)		\
216646291067SBrendan Jackman 				SRC = (u32) atomic_fetch_##KOP(		\
216746291067SBrendan Jackman 					(u32) SRC,			\
216846291067SBrendan Jackman 					(atomic_t *)(unsigned long) (DST + insn->off)); \
216988044230SPeilin Ye 			else if (BPF_SIZE(insn->code) == BPF_DW)	\
217046291067SBrendan Jackman 				SRC = (u64) atomic64_fetch_##KOP(	\
217146291067SBrendan Jackman 					(u64) SRC,			\
217246291067SBrendan Jackman 					(atomic64_t *)(unsigned long) (DST + insn->off)); \
217388044230SPeilin Ye 			else						\
217488044230SPeilin Ye 				goto default_label;			\
217546291067SBrendan Jackman 			break;
217646291067SBrendan Jackman 
217746291067SBrendan Jackman 	STX_ATOMIC_DW:
217891c960b0SBrendan Jackman 	STX_ATOMIC_W:
217988044230SPeilin Ye 	STX_ATOMIC_H:
218088044230SPeilin Ye 	STX_ATOMIC_B:
218191c960b0SBrendan Jackman 		switch (IMM) {
218288044230SPeilin Ye 		/* Atomic read-modify-write instructions support only W and DW
218388044230SPeilin Ye 		 * size modifiers.
218488044230SPeilin Ye 		 */
218546291067SBrendan Jackman 		ATOMIC_ALU_OP(BPF_ADD, add)
2186981f94c3SBrendan Jackman 		ATOMIC_ALU_OP(BPF_AND, and)
2187981f94c3SBrendan Jackman 		ATOMIC_ALU_OP(BPF_OR, or)
2188981f94c3SBrendan Jackman 		ATOMIC_ALU_OP(BPF_XOR, xor)
218946291067SBrendan Jackman #undef ATOMIC_ALU_OP
219046291067SBrendan Jackman 
21915ffa2550SBrendan Jackman 		case BPF_XCHG:
219246291067SBrendan Jackman 			if (BPF_SIZE(insn->code) == BPF_W)
21935ffa2550SBrendan Jackman 				SRC = (u32) atomic_xchg(
21945ffa2550SBrendan Jackman 					(atomic_t *)(unsigned long) (DST + insn->off),
21955ffa2550SBrendan Jackman 					(u32) SRC);
219688044230SPeilin Ye 			else if (BPF_SIZE(insn->code) == BPF_DW)
21975ffa2550SBrendan Jackman 				SRC = (u64) atomic64_xchg(
21985ffa2550SBrendan Jackman 					(atomic64_t *)(unsigned long) (DST + insn->off),
21995ffa2550SBrendan Jackman 					(u64) SRC);
220088044230SPeilin Ye 			else
220188044230SPeilin Ye 				goto default_label;
22025ffa2550SBrendan Jackman 			break;
22035ffa2550SBrendan Jackman 		case BPF_CMPXCHG:
220446291067SBrendan Jackman 			if (BPF_SIZE(insn->code) == BPF_W)
220546291067SBrendan Jackman 				BPF_R0 = (u32) atomic_cmpxchg(
220646291067SBrendan Jackman 					(atomic_t *)(unsigned long) (DST + insn->off),
220746291067SBrendan Jackman 					(u32) BPF_R0, (u32) SRC);
220888044230SPeilin Ye 			else if (BPF_SIZE(insn->code) == BPF_DW)
22095ffa2550SBrendan Jackman 				BPF_R0 = (u64) atomic64_cmpxchg(
22105ffa2550SBrendan Jackman 					(atomic64_t *)(unsigned long) (DST + insn->off),
22115ffa2550SBrendan Jackman 					(u64) BPF_R0, (u64) SRC);
221288044230SPeilin Ye 			else
221388044230SPeilin Ye 				goto default_label;
221488044230SPeilin Ye 			break;
221588044230SPeilin Ye 		/* Atomic load and store instructions support all size
221688044230SPeilin Ye 		 * modifiers.
221788044230SPeilin Ye 		 */
221888044230SPeilin Ye 		case BPF_LOAD_ACQ:
221988044230SPeilin Ye 			switch (BPF_SIZE(insn->code)) {
222088044230SPeilin Ye #define LOAD_ACQUIRE(SIZEOP, SIZE)				\
222188044230SPeilin Ye 			case BPF_##SIZEOP:			\
222288044230SPeilin Ye 				DST = (SIZE)smp_load_acquire(	\
222388044230SPeilin Ye 					(SIZE *)(unsigned long)(SRC + insn->off));	\
222488044230SPeilin Ye 				break;
222588044230SPeilin Ye 			LOAD_ACQUIRE(B,   u8)
222688044230SPeilin Ye 			LOAD_ACQUIRE(H,  u16)
222788044230SPeilin Ye 			LOAD_ACQUIRE(W,  u32)
222888044230SPeilin Ye #ifdef CONFIG_64BIT
222988044230SPeilin Ye 			LOAD_ACQUIRE(DW, u64)
223088044230SPeilin Ye #endif
223188044230SPeilin Ye #undef LOAD_ACQUIRE
223288044230SPeilin Ye 			default:
223388044230SPeilin Ye 				goto default_label;
223488044230SPeilin Ye 			}
223588044230SPeilin Ye 			break;
223688044230SPeilin Ye 		case BPF_STORE_REL:
223788044230SPeilin Ye 			switch (BPF_SIZE(insn->code)) {
223888044230SPeilin Ye #define STORE_RELEASE(SIZEOP, SIZE)			\
223988044230SPeilin Ye 			case BPF_##SIZEOP:		\
224088044230SPeilin Ye 				smp_store_release(	\
224188044230SPeilin Ye 					(SIZE *)(unsigned long)(DST + insn->off), (SIZE)SRC);	\
224288044230SPeilin Ye 				break;
224388044230SPeilin Ye 			STORE_RELEASE(B,   u8)
224488044230SPeilin Ye 			STORE_RELEASE(H,  u16)
224588044230SPeilin Ye 			STORE_RELEASE(W,  u32)
224688044230SPeilin Ye #ifdef CONFIG_64BIT
224788044230SPeilin Ye 			STORE_RELEASE(DW, u64)
224888044230SPeilin Ye #endif
224988044230SPeilin Ye #undef STORE_RELEASE
225088044230SPeilin Ye 			default:
225188044230SPeilin Ye 				goto default_label;
225288044230SPeilin Ye 			}
22535ffa2550SBrendan Jackman 			break;
225446291067SBrendan Jackman 
225591c960b0SBrendan Jackman 		default:
225691c960b0SBrendan Jackman 			goto default_label;
225791c960b0SBrendan Jackman 		}
2258f5bffecdSAlexei Starovoitov 		CONT;
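	/* For example, a BPF_W atomic with imm == (BPF_ADD | BPF_FETCH)
	 * takes the fetch branch of ATOMIC_ALU_OP above, roughly:
	 *
	 *	SRC = (u32) atomic_fetch_add((u32) SRC,
	 *		(atomic_t *)(unsigned long) (DST + insn->off));
	 *
	 * i.e. the old memory value comes back in the source register,
	 * whereas the non-FETCH forms discard it.  Only BPF_LOAD_ACQ and
	 * BPF_STORE_REL accept the B and H sizes, since they are plain
	 * load-acquire/store-release rather than read-modify-write ops.
	 */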
2259f5bffecdSAlexei Starovoitov 
2260f5bffecdSAlexei Starovoitov 	default_label:
22615e581dadSDaniel Borkmann 		/* If we ever reach this, we have a bug somewhere. Die hard here
22625e581dadSDaniel Borkmann 		 * instead of just returning 0; we could be somewhere in a subprog,
22635e581dadSDaniel Borkmann 		 * so execution could otherwise continue, which we do /not/ want.
22645e581dadSDaniel Borkmann 		 *
22655e581dadSDaniel Borkmann 		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
22665e581dadSDaniel Borkmann 		 */
226791c960b0SBrendan Jackman 		pr_warn("BPF interpreter: unknown opcode %02x (imm: 0x%x)\n",
226891c960b0SBrendan Jackman 			insn->code, insn->imm);
22695e581dadSDaniel Borkmann 		BUG_ON(1);
2270f5bffecdSAlexei Starovoitov 		return 0;
2271f5bffecdSAlexei Starovoitov }
2272f696b8f4SAlexei Starovoitov 
2273b870aa90SAlexei Starovoitov #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
2274b870aa90SAlexei Starovoitov #define DEFINE_BPF_PROG_RUN(stack_size) \
2275b870aa90SAlexei Starovoitov static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
2276b870aa90SAlexei Starovoitov { \
2277b870aa90SAlexei Starovoitov 	u64 stack[stack_size / sizeof(u64)]; \
2278a6a7aabaSAlexander Potapenko 	u64 regs[MAX_BPF_EXT_REG] = {}; \
2279b870aa90SAlexei Starovoitov \
2280e8742081SMartin KaFai Lau 	kmsan_unpoison_memory(stack, sizeof(stack)); \
2281b870aa90SAlexei Starovoitov 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
2282b870aa90SAlexei Starovoitov 	ARG1 = (u64) (unsigned long) ctx; \
22832ec9898eSHe Fengqing 	return ___bpf_prog_run(regs, insn); \
2284f696b8f4SAlexei Starovoitov }
2285f5bffecdSAlexei Starovoitov 
22861ea47e01SAlexei Starovoitov #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
22871ea47e01SAlexei Starovoitov #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
22881ea47e01SAlexei Starovoitov static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
22891ea47e01SAlexei Starovoitov 				      const struct bpf_insn *insn) \
22901ea47e01SAlexei Starovoitov { \
22911ea47e01SAlexei Starovoitov 	u64 stack[stack_size / sizeof(u64)]; \
2292144cd91cSDaniel Borkmann 	u64 regs[MAX_BPF_EXT_REG]; \
22931ea47e01SAlexei Starovoitov \
2294e8742081SMartin KaFai Lau 	kmsan_unpoison_memory(stack, sizeof(stack)); \
22951ea47e01SAlexei Starovoitov 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
22961ea47e01SAlexei Starovoitov 	BPF_R1 = r1; \
22971ea47e01SAlexei Starovoitov 	BPF_R2 = r2; \
22981ea47e01SAlexei Starovoitov 	BPF_R3 = r3; \
22991ea47e01SAlexei Starovoitov 	BPF_R4 = r4; \
23001ea47e01SAlexei Starovoitov 	BPF_R5 = r5; \
23012ec9898eSHe Fengqing 	return ___bpf_prog_run(regs, insn); \
23021ea47e01SAlexei Starovoitov }
23031ea47e01SAlexei Starovoitov 
2304b870aa90SAlexei Starovoitov #define EVAL1(FN, X) FN(X)
2305b870aa90SAlexei Starovoitov #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
2306b870aa90SAlexei Starovoitov #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
2307b870aa90SAlexei Starovoitov #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
2308b870aa90SAlexei Starovoitov #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
2309b870aa90SAlexei Starovoitov #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
2310b870aa90SAlexei Starovoitov 
2311b870aa90SAlexei Starovoitov EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
2312b870aa90SAlexei Starovoitov EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
2313b870aa90SAlexei Starovoitov EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
2314b870aa90SAlexei Starovoitov 
23151ea47e01SAlexei Starovoitov EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
23161ea47e01SAlexei Starovoitov EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
23171ea47e01SAlexei Starovoitov EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
23181ea47e01SAlexei Starovoitov 
2319b870aa90SAlexei Starovoitov #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
2320b870aa90SAlexei Starovoitov 
2321b870aa90SAlexei Starovoitov static unsigned int (*interpreters[])(const void *ctx,
2322b870aa90SAlexei Starovoitov 				      const struct bpf_insn *insn) = {
2323b870aa90SAlexei Starovoitov EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
2324b870aa90SAlexei Starovoitov EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
2325b870aa90SAlexei Starovoitov EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
2326b870aa90SAlexei Starovoitov };
23271ea47e01SAlexei Starovoitov #undef PROG_NAME_LIST
23281ea47e01SAlexei Starovoitov #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
2329ba49f976SArnd Bergmann static __maybe_unused
2330ba49f976SArnd Bergmann u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
23311ea47e01SAlexei Starovoitov 			   const struct bpf_insn *insn) = {
23321ea47e01SAlexei Starovoitov EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
23331ea47e01SAlexei Starovoitov EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
23341ea47e01SAlexei Starovoitov EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
23351ea47e01SAlexei Starovoitov };
23361ea47e01SAlexei Starovoitov #undef PROG_NAME_LIST
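/* Both tables are indexed by the rounded-up stack depth:
 * idx = round_up(stack_depth, 32) / 32 - 1, so e.g. stack_depth == 100
 * rounds up to 128 and selects __bpf_prog_run128() resp.
 * __bpf_prog_run_args128().  interpreters[] provides the (ctx, insn)
 * entry point installed in prog->bpf_func, while interpreters_args[]
 * takes R1-R5 directly and backs interpreted bpf-to-bpf calls patched
 * in by bpf_patch_call_args() below.
 */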
23371ea47e01SAlexei Starovoitov 
2338ba49f976SArnd Bergmann #ifdef CONFIG_BPF_SYSCALL
23391ea47e01SAlexei Starovoitov void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
23401ea47e01SAlexei Starovoitov {
23411ea47e01SAlexei Starovoitov 	stack_depth = max_t(u32, stack_depth, 1);
23421ea47e01SAlexei Starovoitov 	insn->off = (s16) insn->imm;
23431ea47e01SAlexei Starovoitov 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
23441ea47e01SAlexei Starovoitov 		__bpf_call_base_args;
23451ea47e01SAlexei Starovoitov 	insn->code = BPF_JMP | BPF_CALL_ARGS;
23461ea47e01SAlexei Starovoitov }
2347ba49f976SArnd Bergmann #endif
23486ebc5030SJiayuan Chen #endif
23496ebc5030SJiayuan Chen 
2350fa9dd599SDaniel Borkmann static unsigned int __bpf_prog_ret0_warn(const void *ctx,
2351290af866SAlexei Starovoitov 					 const struct bpf_insn *insn)
2352290af866SAlexei Starovoitov {
2353fa9dd599SDaniel Borkmann 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
23546ebc5030SJiayuan Chen 	 * is not working properly, or the interpreter is being used even
23556ebc5030SJiayuan Chen 	 * though prog->jit_requested is not 0, so warn about it!
2356fa9dd599SDaniel Borkmann 	 */
2357fa9dd599SDaniel Borkmann 	WARN_ON_ONCE(1);
2358290af866SAlexei Starovoitov 	return 0;
2359290af866SAlexei Starovoitov }
2360290af866SAlexei Starovoitov 
2361f45d5b6cSToke Hoiland-Jorgensen bool bpf_prog_map_compatible(struct bpf_map *map,
23623324b584SDaniel Borkmann 			     const struct bpf_prog *fp)
2363f5bffecdSAlexei Starovoitov {
23641c123c56SToke Høiland-Jørgensen 	enum bpf_prog_type prog_type = resolve_prog_type(fp);
236554713c85SToke Høiland-Jørgensen 	bool ret;
236628ead3eaSXu Kuohai 	struct bpf_prog_aux *aux = fp->aux;
236754713c85SToke Høiland-Jørgensen 
23689802d865SJosef Bacik 	if (fp->kprobe_override)
23699802d865SJosef Bacik 		return false;
23709802d865SJosef Bacik 
23713d76a4d3SStanislav Fomichev 	/* XDP programs inserted into maps are not guaranteed to run on
23723d76a4d3SStanislav Fomichev 	 * a particular netdev (and can run outside driver context entirely
23733d76a4d3SStanislav Fomichev 	 * in the case of devmap and cpumap). Until device checks
23743d76a4d3SStanislav Fomichev 	 * are implemented, prohibit adding dev-bound programs to program maps.
23753d76a4d3SStanislav Fomichev 	 */
237628ead3eaSXu Kuohai 	if (bpf_prog_is_dev_bound(aux))
23773d76a4d3SStanislav Fomichev 		return false;
23783d76a4d3SStanislav Fomichev 
2379f45d5b6cSToke Hoiland-Jorgensen 	spin_lock(&map->owner.lock);
2380f45d5b6cSToke Hoiland-Jorgensen 	if (!map->owner.type) {
23813324b584SDaniel Borkmann 		/* There's no owner yet where we could check for
23823324b584SDaniel Borkmann 		 * compatibility.
23833324b584SDaniel Borkmann 		 */
23841c123c56SToke Høiland-Jørgensen 		map->owner.type  = prog_type;
2385f45d5b6cSToke Hoiland-Jorgensen 		map->owner.jited = fp->jited;
238628ead3eaSXu Kuohai 		map->owner.xdp_has_frags = aux->xdp_has_frags;
238728ead3eaSXu Kuohai 		map->owner.attach_func_proto = aux->attach_func_proto;
238854713c85SToke Høiland-Jørgensen 		ret = true;
238954713c85SToke Høiland-Jørgensen 	} else {
23901c123c56SToke Høiland-Jørgensen 		ret = map->owner.type  == prog_type &&
2391f45d5b6cSToke Hoiland-Jorgensen 		      map->owner.jited == fp->jited &&
239228ead3eaSXu Kuohai 		      map->owner.xdp_has_frags == aux->xdp_has_frags;
239328ead3eaSXu Kuohai 		if (ret &&
239428ead3eaSXu Kuohai 		    map->owner.attach_func_proto != aux->attach_func_proto) {
239528ead3eaSXu Kuohai 			switch (prog_type) {
239628ead3eaSXu Kuohai 			case BPF_PROG_TYPE_TRACING:
239728ead3eaSXu Kuohai 			case BPF_PROG_TYPE_LSM:
239828ead3eaSXu Kuohai 			case BPF_PROG_TYPE_EXT:
239928ead3eaSXu Kuohai 			case BPF_PROG_TYPE_STRUCT_OPS:
240028ead3eaSXu Kuohai 				ret = false;
240128ead3eaSXu Kuohai 				break;
240228ead3eaSXu Kuohai 			default:
240328ead3eaSXu Kuohai 				break;
240428ead3eaSXu Kuohai 			}
240528ead3eaSXu Kuohai 		}
240604fd61abSAlexei Starovoitov 	}
2407f45d5b6cSToke Hoiland-Jorgensen 	spin_unlock(&map->owner.lock);
2408f45d5b6cSToke Hoiland-Jorgensen 
240954713c85SToke Høiland-Jørgensen 	return ret;
24103324b584SDaniel Borkmann }
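/* The first program inserted into a prog array, devmap or cpumap
 * "claims" the map via the owner fields above; later insertions must
 * match.  Sketch of the caller pattern (see bpf_check_tail_call()
 * below and the fd-array update paths):
 *
 *	if (!bpf_prog_map_compatible(map, prog))
 *		return -EINVAL;
 */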
24113324b584SDaniel Borkmann 
24123324b584SDaniel Borkmann static int bpf_check_tail_call(const struct bpf_prog *fp)
241304fd61abSAlexei Starovoitov {
241404fd61abSAlexei Starovoitov 	struct bpf_prog_aux *aux = fp->aux;
2415984fe94fSYiFei Zhu 	int i, ret = 0;
241604fd61abSAlexei Starovoitov 
2417984fe94fSYiFei Zhu 	mutex_lock(&aux->used_maps_mutex);
241804fd61abSAlexei Starovoitov 	for (i = 0; i < aux->used_map_cnt; i++) {
24193324b584SDaniel Borkmann 		struct bpf_map *map = aux->used_maps[i];
242004fd61abSAlexei Starovoitov 
2421f45d5b6cSToke Hoiland-Jorgensen 		if (!map_type_contains_progs(map))
242204fd61abSAlexei Starovoitov 			continue;
24233324b584SDaniel Borkmann 
2424f45d5b6cSToke Hoiland-Jorgensen 		if (!bpf_prog_map_compatible(map, fp)) {
2425984fe94fSYiFei Zhu 			ret = -EINVAL;
2426984fe94fSYiFei Zhu 			goto out;
2427984fe94fSYiFei Zhu 		}
242804fd61abSAlexei Starovoitov 	}
242904fd61abSAlexei Starovoitov 
2430984fe94fSYiFei Zhu out:
2431984fe94fSYiFei Zhu 	mutex_unlock(&aux->used_maps_mutex);
2432984fe94fSYiFei Zhu 	return ret;
243304fd61abSAlexei Starovoitov }
243404fd61abSAlexei Starovoitov 
24359facc336SDaniel Borkmann static void bpf_prog_select_func(struct bpf_prog *fp)
24369facc336SDaniel Borkmann {
24379facc336SDaniel Borkmann #ifndef CONFIG_BPF_JIT_ALWAYS_ON
24389facc336SDaniel Borkmann 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
24396ebc5030SJiayuan Chen 	u32 idx = (round_up(stack_depth, 32) / 32) - 1;
24409facc336SDaniel Borkmann 
24416ebc5030SJiayuan Chen 	/* may_goto may cause stack size > 512, leading to idx out-of-bounds.
24426ebc5030SJiayuan Chen 	 * But for non-JITed programs, we don't need bpf_func, so no bounds
24436ebc5030SJiayuan Chen 	 * check needed.
24446ebc5030SJiayuan Chen 	 */
24456ebc5030SJiayuan Chen 	if (!fp->jit_requested &&
24466ebc5030SJiayuan Chen 	    !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
24476ebc5030SJiayuan Chen 		fp->bpf_func = interpreters[idx];
24486ebc5030SJiayuan Chen 	} else {
24496ebc5030SJiayuan Chen 		fp->bpf_func = __bpf_prog_ret0_warn;
24506ebc5030SJiayuan Chen 	}
24519facc336SDaniel Borkmann #else
24529facc336SDaniel Borkmann 	fp->bpf_func = __bpf_prog_ret0_warn;
24539facc336SDaniel Borkmann #endif
24549facc336SDaniel Borkmann }
24559facc336SDaniel Borkmann 
2456f5bffecdSAlexei Starovoitov /**
24573324b584SDaniel Borkmann  *	bpf_prog_select_runtime - select exec runtime for BPF program
245806edc59cSChristoph Hellwig  *	@fp: bpf_prog populated with BPF program
2459d1c55ab5SDaniel Borkmann  *	@err: pointer to error variable
2460f5bffecdSAlexei Starovoitov  *
24613324b584SDaniel Borkmann  * Try to JIT eBPF program, if JIT is not available, use interpreter.
2462fb7dd8bcSAndrii Nakryiko  * The BPF program will be executed via bpf_prog_run() function.
2463019d0454SRandy Dunlap  *
2464019d0454SRandy Dunlap  * Return: the &fp argument along with &err set to 0 for success or
2465019d0454SRandy Dunlap  * a negative errno code on failure
2466f5bffecdSAlexei Starovoitov  */
2467d1c55ab5SDaniel Borkmann struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
2468f5bffecdSAlexei Starovoitov {
24699facc336SDaniel Borkmann 	/* In case of BPF to BPF calls, verifier did all the prep
24709facc336SDaniel Borkmann 	 * work with regards to JITing, etc.
24719facc336SDaniel Borkmann 	 */
2472e6ac2450SMartin KaFai Lau 	bool jit_needed = false;
2473e6ac2450SMartin KaFai Lau 
24749facc336SDaniel Borkmann 	if (fp->bpf_func)
24759facc336SDaniel Borkmann 		goto finalize;
24768007e40aSMartin KaFai Lau 
2477e6ac2450SMartin KaFai Lau 	if (IS_ENABLED(CONFIG_BPF_JIT_ALWAYS_ON) ||
2478e6ac2450SMartin KaFai Lau 	    bpf_prog_has_kfunc_call(fp))
2479e6ac2450SMartin KaFai Lau 		jit_needed = true;
2480e6ac2450SMartin KaFai Lau 
24819facc336SDaniel Borkmann 	bpf_prog_select_func(fp);
2482f5bffecdSAlexei Starovoitov 
2483d1c55ab5SDaniel Borkmann 	/* eBPF JITs can rewrite the program in case constant
2484d1c55ab5SDaniel Borkmann 	 * blinding is active. However, in case of error during
2485d1c55ab5SDaniel Borkmann 	 * blinding, bpf_int_jit_compile() must always return a
2486d1c55ab5SDaniel Borkmann 	 * valid program, which in this case would simply not
2487d1c55ab5SDaniel Borkmann 	 * be JITed, but falls back to the interpreter.
2488d1c55ab5SDaniel Borkmann 	 */
24899d03ebc7SStanislav Fomichev 	if (!bpf_prog_is_offloaded(fp->aux)) {
2490c454a46bSMartin KaFai Lau 		*err = bpf_prog_alloc_jited_linfo(fp);
2491c454a46bSMartin KaFai Lau 		if (*err)
2492c454a46bSMartin KaFai Lau 			return fp;
2493c454a46bSMartin KaFai Lau 
2494d1c55ab5SDaniel Borkmann 		fp = bpf_int_jit_compile(fp);
2495e16301fbSMartin KaFai Lau 		bpf_prog_jit_attempt_done(fp);
2496e6ac2450SMartin KaFai Lau 		if (!fp->jited && jit_needed) {
2497290af866SAlexei Starovoitov 			*err = -ENOTSUPP;
2498290af866SAlexei Starovoitov 			return fp;
2499c454a46bSMartin KaFai Lau 		}
2500c454a46bSMartin KaFai Lau 	} else {
2501ab3f0063SJakub Kicinski 		*err = bpf_prog_offload_compile(fp);
2502ab3f0063SJakub Kicinski 		if (*err)
2503ab3f0063SJakub Kicinski 			return fp;
2504ab3f0063SJakub Kicinski 	}
25059facc336SDaniel Borkmann 
25069facc336SDaniel Borkmann finalize:
25077d2cc63eSChristophe Leroy 	*err = bpf_prog_lock_ro(fp);
25087d2cc63eSChristophe Leroy 	if (*err)
25097d2cc63eSChristophe Leroy 		return fp;
251004fd61abSAlexei Starovoitov 
25113324b584SDaniel Borkmann 	/* The tail call compatibility check can only be done at
25123324b584SDaniel Borkmann 	 * this late stage as we need to determine whether we deal
25133324b584SDaniel Borkmann 	 * with JITed or non-JITed program concatenations and not
25143324b584SDaniel Borkmann 	 * all eBPF JITs might immediately support all features.
25153324b584SDaniel Borkmann 	 */
2516d1c55ab5SDaniel Borkmann 	*err = bpf_check_tail_call(fp);
2517d1c55ab5SDaniel Borkmann 
2518d1c55ab5SDaniel Borkmann 	return fp;
2519f5bffecdSAlexei Starovoitov }
25207ae457c1SAlexei Starovoitov EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
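/* Sketch of the usual caller pattern, matching the kernel-doc above:
 *
 *	prog = bpf_prog_select_runtime(prog, &err);
 *	if (err < 0)
 *		goto free_prog;	// prog is still valid and must be freed
 */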
2521f5bffecdSAlexei Starovoitov 
2522e87c6bc3SYonghong Song static unsigned int __bpf_prog_ret1(const void *ctx,
2523e87c6bc3SYonghong Song 				    const struct bpf_insn *insn)
2524e87c6bc3SYonghong Song {
2525e87c6bc3SYonghong Song 	return 1;
2526e87c6bc3SYonghong Song }
2527e87c6bc3SYonghong Song 
2528e87c6bc3SYonghong Song static struct bpf_prog_dummy {
2529e87c6bc3SYonghong Song 	struct bpf_prog prog;
2530e87c6bc3SYonghong Song } dummy_bpf_prog = {
2531e87c6bc3SYonghong Song 	.prog = {
2532e87c6bc3SYonghong Song 		.bpf_func = __bpf_prog_ret1,
2533e87c6bc3SYonghong Song 	},
2534e87c6bc3SYonghong Song };
2535e87c6bc3SYonghong Song 
253646531a30SPavel Begunkov struct bpf_empty_prog_array bpf_empty_prog_array = {
2537324bda9eSAlexei Starovoitov 	.null_prog = NULL,
2538324bda9eSAlexei Starovoitov };
253946531a30SPavel Begunkov EXPORT_SYMBOL(bpf_empty_prog_array);
2540324bda9eSAlexei Starovoitov 
2541d29ab6e1SRoman Gushchin struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
2542324bda9eSAlexei Starovoitov {
2543cb01621bSAndy Shevchenko 	struct bpf_prog_array *p;
2544324bda9eSAlexei Starovoitov 
2545cb01621bSAndy Shevchenko 	if (prog_cnt)
2546cb01621bSAndy Shevchenko 		p = kzalloc(struct_size(p, items, prog_cnt + 1), flags);
2547cb01621bSAndy Shevchenko 	else
2548cb01621bSAndy Shevchenko 		p = &bpf_empty_prog_array.hdr;
2549cb01621bSAndy Shevchenko 
2550cb01621bSAndy Shevchenko 	return p;
2551324bda9eSAlexei Starovoitov }
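/* The array is zeroed on allocation, so the items[] slots past the
 * last program are NULL and serve as the terminator the iteration
 * helpers below rely on.  A prog_cnt of 0 returns the shared
 * &bpf_empty_prog_array.hdr, which bpf_prog_array_free() knows never
 * to free.
 */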
2552324bda9eSAlexei Starovoitov 
255354e9c9d4SStanislav Fomichev void bpf_prog_array_free(struct bpf_prog_array *progs)
2554324bda9eSAlexei Starovoitov {
255546531a30SPavel Begunkov 	if (!progs || progs == &bpf_empty_prog_array.hdr)
2556324bda9eSAlexei Starovoitov 		return;
2557324bda9eSAlexei Starovoitov 	kfree_rcu(progs, rcu);
2558324bda9eSAlexei Starovoitov }
2559324bda9eSAlexei Starovoitov 
25608c7dcb84SDelyan Kratunov static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
25618c7dcb84SDelyan Kratunov {
25628c7dcb84SDelyan Kratunov 	struct bpf_prog_array *progs;
25638c7dcb84SDelyan Kratunov 
25644835f9eeSHou Tao 	/* If the RCU Tasks Trace grace period implies an RCU grace period,
25654835f9eeSHou Tao 	 * there is no need to call kfree_rcu(); just call kfree() directly.
25664835f9eeSHou Tao 	 */
25678c7dcb84SDelyan Kratunov 	progs = container_of(rcu, struct bpf_prog_array, rcu);
25684835f9eeSHou Tao 	if (rcu_trace_implies_rcu_gp())
25694835f9eeSHou Tao 		kfree(progs);
25704835f9eeSHou Tao 	else
25718c7dcb84SDelyan Kratunov 		kfree_rcu(progs, rcu);
25728c7dcb84SDelyan Kratunov }
25738c7dcb84SDelyan Kratunov 
25748c7dcb84SDelyan Kratunov void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
25758c7dcb84SDelyan Kratunov {
25768c7dcb84SDelyan Kratunov 	if (!progs || progs == &bpf_empty_prog_array.hdr)
25778c7dcb84SDelyan Kratunov 		return;
25788c7dcb84SDelyan Kratunov 	call_rcu_tasks_trace(&progs->rcu, __bpf_prog_array_free_sleepable_cb);
25798c7dcb84SDelyan Kratunov }
25808c7dcb84SDelyan Kratunov 
258154e9c9d4SStanislav Fomichev int bpf_prog_array_length(struct bpf_prog_array *array)
2582468e2f64SAlexei Starovoitov {
2583394e40a2SRoman Gushchin 	struct bpf_prog_array_item *item;
2584468e2f64SAlexei Starovoitov 	u32 cnt = 0;
2585468e2f64SAlexei Starovoitov 
258654e9c9d4SStanislav Fomichev 	for (item = array->items; item->prog; item++)
2587394e40a2SRoman Gushchin 		if (item->prog != &dummy_bpf_prog.prog)
2588468e2f64SAlexei Starovoitov 			cnt++;
2589468e2f64SAlexei Starovoitov 	return cnt;
2590468e2f64SAlexei Starovoitov }
2591468e2f64SAlexei Starovoitov 
25920d01da6aSStanislav Fomichev bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
25930d01da6aSStanislav Fomichev {
25940d01da6aSStanislav Fomichev 	struct bpf_prog_array_item *item;
25950d01da6aSStanislav Fomichev 
25960d01da6aSStanislav Fomichev 	for (item = array->items; item->prog; item++)
25970d01da6aSStanislav Fomichev 		if (item->prog != &dummy_bpf_prog.prog)
25980d01da6aSStanislav Fomichev 			return false;
25990d01da6aSStanislav Fomichev 	return true;
26000d01da6aSStanislav Fomichev }
2601394e40a2SRoman Gushchin 
260254e9c9d4SStanislav Fomichev static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
26033a38bb98SYonghong Song 				     u32 *prog_ids,
26043a38bb98SYonghong Song 				     u32 request_cnt)
26053a38bb98SYonghong Song {
2606394e40a2SRoman Gushchin 	struct bpf_prog_array_item *item;
26073a38bb98SYonghong Song 	int i = 0;
26083a38bb98SYonghong Song 
260954e9c9d4SStanislav Fomichev 	for (item = array->items; item->prog; item++) {
2610394e40a2SRoman Gushchin 		if (item->prog == &dummy_bpf_prog.prog)
26113a38bb98SYonghong Song 			continue;
2612394e40a2SRoman Gushchin 		prog_ids[i] = item->prog->aux->id;
26133a38bb98SYonghong Song 		if (++i == request_cnt) {
2614394e40a2SRoman Gushchin 			item++;
26153a38bb98SYonghong Song 			break;
26163a38bb98SYonghong Song 		}
26173a38bb98SYonghong Song 	}
26183a38bb98SYonghong Song 
2619394e40a2SRoman Gushchin 	return !!(item->prog);
26203a38bb98SYonghong Song }
26213a38bb98SYonghong Song 
262254e9c9d4SStanislav Fomichev int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
2623468e2f64SAlexei Starovoitov 				__u32 __user *prog_ids, u32 cnt)
2624468e2f64SAlexei Starovoitov {
26250911287cSAlexei Starovoitov 	unsigned long err = 0;
26260911287cSAlexei Starovoitov 	bool nospc;
26273a38bb98SYonghong Song 	u32 *ids;
2628468e2f64SAlexei Starovoitov 
26290911287cSAlexei Starovoitov 	/* users of this function are doing:
26300911287cSAlexei Starovoitov 	 * cnt = bpf_prog_array_length();
26310911287cSAlexei Starovoitov 	 * if (cnt > 0)
26320911287cSAlexei Starovoitov 	 *     bpf_prog_array_copy_to_user(..., cnt);
263354e9c9d4SStanislav Fomichev 	 * so the kcalloc below doesn't need an extra cnt > 0 check.
26340911287cSAlexei Starovoitov 	 */
26359c481b90SDaniel Borkmann 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
26360911287cSAlexei Starovoitov 	if (!ids)
26370911287cSAlexei Starovoitov 		return -ENOMEM;
2638394e40a2SRoman Gushchin 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
26390911287cSAlexei Starovoitov 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
26400911287cSAlexei Starovoitov 	kfree(ids);
26410911287cSAlexei Starovoitov 	if (err)
26420911287cSAlexei Starovoitov 		return -EFAULT;
26430911287cSAlexei Starovoitov 	if (nospc)
2644468e2f64SAlexei Starovoitov 		return -ENOSPC;
2645468e2f64SAlexei Starovoitov 	return 0;
2646468e2f64SAlexei Starovoitov }
2647468e2f64SAlexei Starovoitov 
264854e9c9d4SStanislav Fomichev void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
2649e87c6bc3SYonghong Song 				struct bpf_prog *old_prog)
2650e87c6bc3SYonghong Song {
265154e9c9d4SStanislav Fomichev 	struct bpf_prog_array_item *item;
2652e87c6bc3SYonghong Song 
265354e9c9d4SStanislav Fomichev 	for (item = array->items; item->prog; item++)
2654394e40a2SRoman Gushchin 		if (item->prog == old_prog) {
2655394e40a2SRoman Gushchin 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
2656e87c6bc3SYonghong Song 			break;
2657e87c6bc3SYonghong Song 		}
2658e87c6bc3SYonghong Song }
2659e87c6bc3SYonghong Song 
2660ce3aa9ccSJakub Sitnicki /**
2661ce3aa9ccSJakub Sitnicki  * bpf_prog_array_delete_safe_at() - Replaces the program at the given
2662ce3aa9ccSJakub Sitnicki  *                                   index into the program array with
2663ce3aa9ccSJakub Sitnicki  *                                   a dummy no-op program.
2664ce3aa9ccSJakub Sitnicki  * @array: a bpf_prog_array
2665ce3aa9ccSJakub Sitnicki  * @index: the index of the program to replace
2666ce3aa9ccSJakub Sitnicki  *
2667ce3aa9ccSJakub Sitnicki  * Skips over dummy programs, by not counting them, when calculating
2668b8c1a309SRandy Dunlap  * the position of the program to replace.
2669ce3aa9ccSJakub Sitnicki  *
2670ce3aa9ccSJakub Sitnicki  * Return:
2671ce3aa9ccSJakub Sitnicki  * * 0		- Success
2672ce3aa9ccSJakub Sitnicki  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2673ce3aa9ccSJakub Sitnicki  * * -ENOENT	- Index out of range
2674ce3aa9ccSJakub Sitnicki  */
2675ce3aa9ccSJakub Sitnicki int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
2676ce3aa9ccSJakub Sitnicki {
2677ce3aa9ccSJakub Sitnicki 	return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
2678ce3aa9ccSJakub Sitnicki }
2679ce3aa9ccSJakub Sitnicki 
2680ce3aa9ccSJakub Sitnicki /**
2681ce3aa9ccSJakub Sitnicki  * bpf_prog_array_update_at() - Updates the program at the given index
2682ce3aa9ccSJakub Sitnicki  *                              into the program array.
2683ce3aa9ccSJakub Sitnicki  * @array: a bpf_prog_array
2684ce3aa9ccSJakub Sitnicki  * @index: the index of the program to update
2685ce3aa9ccSJakub Sitnicki  * @prog: the program to insert into the array
2686ce3aa9ccSJakub Sitnicki  *
2687ce3aa9ccSJakub Sitnicki  * Skips over dummy programs, by not counting them, when calculating
2688ce3aa9ccSJakub Sitnicki  * the position of the program to update.
2689ce3aa9ccSJakub Sitnicki  *
2690ce3aa9ccSJakub Sitnicki  * Return:
2691ce3aa9ccSJakub Sitnicki  * * 0		- Success
2692ce3aa9ccSJakub Sitnicki  * * -EINVAL	- Invalid index value. Must be a non-negative integer.
2693ce3aa9ccSJakub Sitnicki  * * -ENOENT	- Index out of range
2694ce3aa9ccSJakub Sitnicki  */
2695ce3aa9ccSJakub Sitnicki int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
2696ce3aa9ccSJakub Sitnicki 			     struct bpf_prog *prog)
2697ce3aa9ccSJakub Sitnicki {
2698ce3aa9ccSJakub Sitnicki 	struct bpf_prog_array_item *item;
2699ce3aa9ccSJakub Sitnicki 
2700ce3aa9ccSJakub Sitnicki 	if (unlikely(index < 0))
2701ce3aa9ccSJakub Sitnicki 		return -EINVAL;
2702ce3aa9ccSJakub Sitnicki 
2703ce3aa9ccSJakub Sitnicki 	for (item = array->items; item->prog; item++) {
2704ce3aa9ccSJakub Sitnicki 		if (item->prog == &dummy_bpf_prog.prog)
2705ce3aa9ccSJakub Sitnicki 			continue;
2706ce3aa9ccSJakub Sitnicki 		if (!index) {
2707ce3aa9ccSJakub Sitnicki 			WRITE_ONCE(item->prog, prog);
2708ce3aa9ccSJakub Sitnicki 			return 0;
2709ce3aa9ccSJakub Sitnicki 		}
2710ce3aa9ccSJakub Sitnicki 		index--;
2711ce3aa9ccSJakub Sitnicki 	}
2712ce3aa9ccSJakub Sitnicki 	return -ENOENT;
2713ce3aa9ccSJakub Sitnicki }
2714ce3aa9ccSJakub Sitnicki 
271554e9c9d4SStanislav Fomichev int bpf_prog_array_copy(struct bpf_prog_array *old_array,
2716e87c6bc3SYonghong Song 			struct bpf_prog *exclude_prog,
2717e87c6bc3SYonghong Song 			struct bpf_prog *include_prog,
271882e6b1eeSAndrii Nakryiko 			u64 bpf_cookie,
2719e87c6bc3SYonghong Song 			struct bpf_prog_array **new_array)
2720e87c6bc3SYonghong Song {
2721e87c6bc3SYonghong Song 	int new_prog_cnt, carry_prog_cnt = 0;
272282e6b1eeSAndrii Nakryiko 	struct bpf_prog_array_item *existing, *new;
2723e87c6bc3SYonghong Song 	struct bpf_prog_array *array;
2724170a7e3eSSean Young 	bool found_exclude = false;
2725e87c6bc3SYonghong Song 
2726e87c6bc3SYonghong Song 	/* Figure out how many existing progs we need to carry over to
2727e87c6bc3SYonghong Song 	 * the new array.
2728e87c6bc3SYonghong Song 	 */
2729e87c6bc3SYonghong Song 	if (old_array) {
2730394e40a2SRoman Gushchin 		existing = old_array->items;
2731394e40a2SRoman Gushchin 		for (; existing->prog; existing++) {
2732394e40a2SRoman Gushchin 			if (existing->prog == exclude_prog) {
2733170a7e3eSSean Young 				found_exclude = true;
2734170a7e3eSSean Young 				continue;
2735170a7e3eSSean Young 			}
2736394e40a2SRoman Gushchin 			if (existing->prog != &dummy_bpf_prog.prog)
2737e87c6bc3SYonghong Song 				carry_prog_cnt++;
2738394e40a2SRoman Gushchin 			if (existing->prog == include_prog)
2739e87c6bc3SYonghong Song 				return -EEXIST;
2740e87c6bc3SYonghong Song 		}
2741e87c6bc3SYonghong Song 	}
2742e87c6bc3SYonghong Song 
2743170a7e3eSSean Young 	if (exclude_prog && !found_exclude)
2744170a7e3eSSean Young 		return -ENOENT;
2745170a7e3eSSean Young 
2746e87c6bc3SYonghong Song 	/* How many progs (not NULL) will be in the new array? */
2747e87c6bc3SYonghong Song 	new_prog_cnt = carry_prog_cnt;
2748e87c6bc3SYonghong Song 	if (include_prog)
2749e87c6bc3SYonghong Song 		new_prog_cnt += 1;
2750e87c6bc3SYonghong Song 
2751e87c6bc3SYonghong Song 	/* Do we have any prog (not NULL) in the new array? */
2752e87c6bc3SYonghong Song 	if (!new_prog_cnt) {
2753e87c6bc3SYonghong Song 		*new_array = NULL;
2754e87c6bc3SYonghong Song 		return 0;
2755e87c6bc3SYonghong Song 	}
2756e87c6bc3SYonghong Song 
2757e87c6bc3SYonghong Song 	/* +1 as the end of prog_array is marked with NULL */
2758e87c6bc3SYonghong Song 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
2759e87c6bc3SYonghong Song 	if (!array)
2760e87c6bc3SYonghong Song 		return -ENOMEM;
276182e6b1eeSAndrii Nakryiko 	new = array->items;
2762e87c6bc3SYonghong Song 
2763e87c6bc3SYonghong Song 	/* Fill in the new prog array */
2764e87c6bc3SYonghong Song 	if (carry_prog_cnt) {
2765394e40a2SRoman Gushchin 		existing = old_array->items;
276682e6b1eeSAndrii Nakryiko 		for (; existing->prog; existing++) {
276782e6b1eeSAndrii Nakryiko 			if (existing->prog == exclude_prog ||
276882e6b1eeSAndrii Nakryiko 			    existing->prog == &dummy_bpf_prog.prog)
276982e6b1eeSAndrii Nakryiko 				continue;
277082e6b1eeSAndrii Nakryiko 
277182e6b1eeSAndrii Nakryiko 			new->prog = existing->prog;
277282e6b1eeSAndrii Nakryiko 			new->bpf_cookie = existing->bpf_cookie;
277382e6b1eeSAndrii Nakryiko 			new++;
2774394e40a2SRoman Gushchin 		}
2775e87c6bc3SYonghong Song 	}
277682e6b1eeSAndrii Nakryiko 	if (include_prog) {
277782e6b1eeSAndrii Nakryiko 		new->prog = include_prog;
277882e6b1eeSAndrii Nakryiko 		new->bpf_cookie = bpf_cookie;
277982e6b1eeSAndrii Nakryiko 		new++;
278082e6b1eeSAndrii Nakryiko 	}
278182e6b1eeSAndrii Nakryiko 	new->prog = NULL;
2782e87c6bc3SYonghong Song 	*new_array = array;
2783e87c6bc3SYonghong Song 	return 0;
2784e87c6bc3SYonghong Song }
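/* In short, the new array is old_array minus exclude_prog (and any
 * dummy entries) plus include_prog appended at the end.  Sketch:
 *
 *	err = bpf_prog_array_copy(old, NULL, prog, cookie, &new);
 *
 * attaches @prog, while passing it as exclude_prog with a NULL
 * include_prog detaches it; -EEXIST and -ENOENT report a duplicate
 * include_prog or a missing exclude_prog, and *new_array is set to
 * NULL when the result would be empty.
 */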
2785e87c6bc3SYonghong Song 
278654e9c9d4SStanislav Fomichev int bpf_prog_array_copy_info(struct bpf_prog_array *array,
27873a38bb98SYonghong Song 			     u32 *prog_ids, u32 request_cnt,
27883a38bb98SYonghong Song 			     u32 *prog_cnt)
2789f371b304SYonghong Song {
2790f371b304SYonghong Song 	u32 cnt = 0;
2791f371b304SYonghong Song 
2792f371b304SYonghong Song 	if (array)
2793f371b304SYonghong Song 		cnt = bpf_prog_array_length(array);
2794f371b304SYonghong Song 
27953a38bb98SYonghong Song 	*prog_cnt = cnt;
2796f371b304SYonghong Song 
2797f371b304SYonghong Song 	/* return early if user requested only program count or nothing to copy */
2798f371b304SYonghong Song 	if (!request_cnt || !cnt)
2799f371b304SYonghong Song 		return 0;
2800f371b304SYonghong Song 
28013a38bb98SYonghong Song 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
2802394e40a2SRoman Gushchin 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
28033a38bb98SYonghong Song 								     : 0;
2804f371b304SYonghong Song }
2805f371b304SYonghong Song 
2806a2ea0746SDaniel Borkmann void __bpf_free_used_maps(struct bpf_prog_aux *aux,
2807a2ea0746SDaniel Borkmann 			  struct bpf_map **used_maps, u32 len)
28086332be04SDaniel Borkmann {
2809da765a2fSDaniel Borkmann 	struct bpf_map *map;
2810af66bfd3SHou Tao 	bool sleepable;
2811a2ea0746SDaniel Borkmann 	u32 i;
28126332be04SDaniel Borkmann 
281366c84731SAndrii Nakryiko 	sleepable = aux->prog->sleepable;
2814a2ea0746SDaniel Borkmann 	for (i = 0; i < len; i++) {
2815a2ea0746SDaniel Borkmann 		map = used_maps[i];
2816da765a2fSDaniel Borkmann 		if (map->ops->map_poke_untrack)
2817da765a2fSDaniel Borkmann 			map->ops->map_poke_untrack(map, aux);
2818af66bfd3SHou Tao 		if (sleepable)
2819af66bfd3SHou Tao 			atomic64_dec(&map->sleepable_refcnt);
2820da765a2fSDaniel Borkmann 		bpf_map_put(map);
2821da765a2fSDaniel Borkmann 	}
2822a2ea0746SDaniel Borkmann }
2823a2ea0746SDaniel Borkmann 
2824a2ea0746SDaniel Borkmann static void bpf_free_used_maps(struct bpf_prog_aux *aux)
2825a2ea0746SDaniel Borkmann {
2826a2ea0746SDaniel Borkmann 	__bpf_free_used_maps(aux, aux->used_maps, aux->used_map_cnt);
28276332be04SDaniel Borkmann 	kfree(aux->used_maps);
28286332be04SDaniel Borkmann }
28296332be04SDaniel Borkmann 
2830ab224b9eSRafael Passos void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len)
2831541c3badSAndrii Nakryiko {
2832541c3badSAndrii Nakryiko #ifdef CONFIG_BPF_SYSCALL
2833541c3badSAndrii Nakryiko 	struct btf_mod_pair *btf_mod;
2834541c3badSAndrii Nakryiko 	u32 i;
2835541c3badSAndrii Nakryiko 
2836541c3badSAndrii Nakryiko 	for (i = 0; i < len; i++) {
2837541c3badSAndrii Nakryiko 		btf_mod = &used_btfs[i];
2838541c3badSAndrii Nakryiko 		if (btf_mod->module)
2839541c3badSAndrii Nakryiko 			module_put(btf_mod->module);
2840541c3badSAndrii Nakryiko 		btf_put(btf_mod->btf);
2841541c3badSAndrii Nakryiko 	}
2842541c3badSAndrii Nakryiko #endif
2843541c3badSAndrii Nakryiko }
2844541c3badSAndrii Nakryiko 
2845541c3badSAndrii Nakryiko static void bpf_free_used_btfs(struct bpf_prog_aux *aux)
2846541c3badSAndrii Nakryiko {
2847ab224b9eSRafael Passos 	__bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt);
2848541c3badSAndrii Nakryiko 	kfree(aux->used_btfs);
2849541c3badSAndrii Nakryiko }
2850541c3badSAndrii Nakryiko 
285160a3b225SDaniel Borkmann static void bpf_prog_free_deferred(struct work_struct *work)
285260a3b225SDaniel Borkmann {
285309756af4SAlexei Starovoitov 	struct bpf_prog_aux *aux;
28541c2a088aSAlexei Starovoitov 	int i;
285560a3b225SDaniel Borkmann 
285609756af4SAlexei Starovoitov 	aux = container_of(work, struct bpf_prog_aux, work);
28572357672cSKumar Kartikeya Dwivedi #ifdef CONFIG_BPF_SYSCALL
28582357672cSKumar Kartikeya Dwivedi 	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
28592357672cSKumar Kartikeya Dwivedi #endif
2860c0e19f2cSStanislav Fomichev #ifdef CONFIG_CGROUP_BPF
2861c0e19f2cSStanislav Fomichev 	if (aux->cgroup_atype != CGROUP_BPF_ATTACH_TYPE_INVALID)
2862c0e19f2cSStanislav Fomichev 		bpf_cgroup_atype_put(aux->cgroup_atype);
2863c0e19f2cSStanislav Fomichev #endif
28646332be04SDaniel Borkmann 	bpf_free_used_maps(aux);
2865541c3badSAndrii Nakryiko 	bpf_free_used_btfs(aux);
2866ab3f0063SJakub Kicinski 	if (bpf_prog_is_dev_bound(aux))
28672b3486bcSStanislav Fomichev 		bpf_prog_dev_bound_destroy(aux->prog);
2868c195651eSYonghong Song #ifdef CONFIG_PERF_EVENTS
2869c195651eSYonghong Song 	if (aux->prog->has_callchain_buf)
2870c195651eSYonghong Song 		put_callchain_buffers();
2871c195651eSYonghong Song #endif
28723aac1eadSToke Høiland-Jørgensen 	if (aux->dst_trampoline)
28733aac1eadSToke Høiland-Jørgensen 		bpf_trampoline_put(aux->dst_trampoline);
2874335d1c5bSKumar Kartikeya Dwivedi 	for (i = 0; i < aux->real_func_cnt; i++) {
2875f263a814SJohn Fastabend 		/* We can just unlink the subprog poke descriptor table as
2876f263a814SJohn Fastabend 		 * it was originally linked to the main program and is also
2877f263a814SJohn Fastabend 		 * released along with it.
2878f263a814SJohn Fastabend 		 */
2879f263a814SJohn Fastabend 		aux->func[i]->aux->poke_tab = NULL;
28801c2a088aSAlexei Starovoitov 		bpf_jit_free(aux->func[i]);
2881f263a814SJohn Fastabend 	}
2882335d1c5bSKumar Kartikeya Dwivedi 	if (aux->real_func_cnt) {
28831c2a088aSAlexei Starovoitov 		kfree(aux->func);
28841c2a088aSAlexei Starovoitov 		bpf_prog_unlock_free(aux->prog);
28851c2a088aSAlexei Starovoitov 	} else {
288609756af4SAlexei Starovoitov 		bpf_jit_free(aux->prog);
288760a3b225SDaniel Borkmann 	}
28881c2a088aSAlexei Starovoitov }
288960a3b225SDaniel Borkmann 
28907ae457c1SAlexei Starovoitov void bpf_prog_free(struct bpf_prog *fp)
2891f5bffecdSAlexei Starovoitov {
289209756af4SAlexei Starovoitov 	struct bpf_prog_aux *aux = fp->aux;
289360a3b225SDaniel Borkmann 
28943aac1eadSToke Høiland-Jørgensen 	if (aux->dst_prog)
28953aac1eadSToke Høiland-Jørgensen 		bpf_prog_put(aux->dst_prog);
2896caf8f28eSAndrii Nakryiko 	bpf_token_put(aux->token);
289709756af4SAlexei Starovoitov 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
289809756af4SAlexei Starovoitov 	schedule_work(&aux->work);
2899f5bffecdSAlexei Starovoitov }
29007ae457c1SAlexei Starovoitov EXPORT_SYMBOL_GPL(bpf_prog_free);
2901f89b7755SAlexei Starovoitov 
2902a7de265cSRafael Passos /* RNG for unprivileged user space, with state kept separate from prandom_u32(). */
29033ad00405SDaniel Borkmann static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
29043ad00405SDaniel Borkmann 
29053ad00405SDaniel Borkmann void bpf_user_rnd_init_once(void)
29063ad00405SDaniel Borkmann {
29073ad00405SDaniel Borkmann 	prandom_init_once(&bpf_user_rnd_state);
29083ad00405SDaniel Borkmann }
29093ad00405SDaniel Borkmann 
2910f3694e00SDaniel Borkmann BPF_CALL_0(bpf_user_rnd_u32)
29113ad00405SDaniel Borkmann {
29123ad00405SDaniel Borkmann 	/* Should someone ever have the rather unwise idea to use some
29133ad00405SDaniel Borkmann 	 * of the registers passed into this function, then note that
29143ad00405SDaniel Borkmann 	 * this function is called from native eBPF and classic-to-eBPF
29153ad00405SDaniel Borkmann 	 * transformations. Register assignments from both sides are
29163ad00405SDaniel Borkmann 	 * different, f.e. classic always sets fn(ctx, A, X) here.
29173ad00405SDaniel Borkmann 	 */
29183ad00405SDaniel Borkmann 	struct rnd_state *state;
29193ad00405SDaniel Borkmann 	u32 res;
29203ad00405SDaniel Borkmann 
29213ad00405SDaniel Borkmann 	state = &get_cpu_var(bpf_user_rnd_state);
29223ad00405SDaniel Borkmann 	res = prandom_u32_state(state);
2923b761fe22SShaohua Li 	put_cpu_var(bpf_user_rnd_state);
29243ad00405SDaniel Borkmann 
29253ad00405SDaniel Borkmann 	return res;
29263ad00405SDaniel Borkmann }
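/* BPF_CALL_0() wraps the body in the u64 (*)(u64, ..., u64) calling
 * convention that JMP_CALL in the interpreter expects, so R1-R5 are
 * simply ignored here.  This function backs the bpf_get_prandom_u32()
 * helper, with its per-CPU state kept apart from the kernel's own
 * prandom_u32() stream (see bpf_user_rnd_init_once() above).
 */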
29273ad00405SDaniel Borkmann 
29286890896bSStanislav Fomichev BPF_CALL_0(bpf_get_raw_cpu_id)
29296890896bSStanislav Fomichev {
29306890896bSStanislav Fomichev 	return raw_smp_processor_id();
29316890896bSStanislav Fomichev }
29326890896bSStanislav Fomichev 
29333ba67dabSDaniel Borkmann /* Weak definitions of helper functions in case we don't have bpf syscall. */
29343ba67dabSDaniel Borkmann const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
29353ba67dabSDaniel Borkmann const struct bpf_func_proto bpf_map_update_elem_proto __weak;
29363ba67dabSDaniel Borkmann const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
2937f1a2e44aSMauricio Vasquez B const struct bpf_func_proto bpf_map_push_elem_proto __weak;
2938f1a2e44aSMauricio Vasquez B const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
2939f1a2e44aSMauricio Vasquez B const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
294007343110SFeng Zhou const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto __weak;
2941d83525caSAlexei Starovoitov const struct bpf_func_proto bpf_spin_lock_proto __weak;
2942d83525caSAlexei Starovoitov const struct bpf_func_proto bpf_spin_unlock_proto __weak;
29435576b991SMartin KaFai Lau const struct bpf_func_proto bpf_jiffies64_proto __weak;
29443ba67dabSDaniel Borkmann 
294503e69b50SDaniel Borkmann const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
2946c04167ceSDaniel Borkmann const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
29472d0e30c3SDaniel Borkmann const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
294817ca8cbfSDaniel Borkmann const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
294971d19214SMaciej Żenczykowski const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;
2950d0551261SDmitrii Banshchikov const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto __weak;
2951c8996c98SJesper Dangaard Brouer const struct bpf_func_proto bpf_ktime_get_tai_ns_proto __weak;
2952bd570ff9SDaniel Borkmann 
2953ffeedafbSAlexei Starovoitov const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
2954ffeedafbSAlexei Starovoitov const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
2955ffeedafbSAlexei Starovoitov const struct bpf_func_proto bpf_get_current_comm_proto __weak;
2956bf6fa2c8SYonghong Song const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
29570f09abd1SDaniel Borkmann const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
2958cd339431SRoman Gushchin const struct bpf_func_proto bpf_get_local_storage_proto __weak;
2959b4490c5cSCarlos Neira const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
2960c4d0bfb4SAlan Maguire const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
2961eb411377SAlan Maguire const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
296269fd337aSStanislav Fomichev const struct bpf_func_proto bpf_set_retval_proto __weak;
296369fd337aSStanislav Fomichev const struct bpf_func_proto bpf_get_retval_proto __weak;
2964bd570ff9SDaniel Borkmann 
29650756ea3eSAlexei Starovoitov const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
29660756ea3eSAlexei Starovoitov {
29670756ea3eSAlexei Starovoitov 	return NULL;
29680756ea3eSAlexei Starovoitov }
296903e69b50SDaniel Borkmann 
297010aceb62SDave Marchevsky const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
297110aceb62SDave Marchevsky {
297210aceb62SDave Marchevsky 	return NULL;
297310aceb62SDave Marchevsky }
297410aceb62SDave Marchevsky 
2975*ae0a457fSEmil Tsalapatis const struct bpf_func_proto * __weak bpf_get_perf_event_read_value_proto(void)
2976*ae0a457fSEmil Tsalapatis {
2977*ae0a457fSEmil Tsalapatis 	return NULL;
2978*ae0a457fSEmil Tsalapatis }
2979*ae0a457fSEmil Tsalapatis 
2980555c8a86SDaniel Borkmann u64 __weak
2981555c8a86SDaniel Borkmann bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
2982555c8a86SDaniel Borkmann 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
2983bd570ff9SDaniel Borkmann {
2984555c8a86SDaniel Borkmann 	return -ENOTSUPP;
2985bd570ff9SDaniel Borkmann }
29866cb5fb38SJakub Kicinski EXPORT_SYMBOL_GPL(bpf_event_output);
2987bd570ff9SDaniel Borkmann 
29883324b584SDaniel Borkmann /* Always built-in helper functions. */
29893324b584SDaniel Borkmann const struct bpf_func_proto bpf_tail_call_proto = {
29903324b584SDaniel Borkmann 	.func		= NULL,
29913324b584SDaniel Borkmann 	.gpl_only	= false,
29923324b584SDaniel Borkmann 	.ret_type	= RET_VOID,
29933324b584SDaniel Borkmann 	.arg1_type	= ARG_PTR_TO_CTX,
29943324b584SDaniel Borkmann 	.arg2_type	= ARG_CONST_MAP_PTR,
29953324b584SDaniel Borkmann 	.arg3_type	= ARG_ANYTHING,
29963324b584SDaniel Borkmann };
29973324b584SDaniel Borkmann 
29989383191dSDaniel Borkmann /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
29999383191dSDaniel Borkmann  * It is encouraged to implement bpf_int_jit_compile() instead, so that
30009383191dSDaniel Borkmann  * eBPF and implicitly also cBPF can get JITed!
30019383191dSDaniel Borkmann  */
3002d1c55ab5SDaniel Borkmann struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
30033324b584SDaniel Borkmann {
3004d1c55ab5SDaniel Borkmann 	return prog;
30053324b584SDaniel Borkmann }
30063324b584SDaniel Borkmann 
30079383191dSDaniel Borkmann /* Stub for JITs that support eBPF. All cBPF code gets transformed into
30089383191dSDaniel Borkmann  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
30099383191dSDaniel Borkmann  */
30109383191dSDaniel Borkmann void __weak bpf_jit_compile(struct bpf_prog *prog)
30119383191dSDaniel Borkmann {
30129383191dSDaniel Borkmann }
30139383191dSDaniel Borkmann 
3014b238e187SEduard Zingerman bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
3015969bf05eSAlexei Starovoitov {
3016969bf05eSAlexei Starovoitov 	return false;
3017969bf05eSAlexei Starovoitov }
3018969bf05eSAlexei Starovoitov 
3019a4b1d3c1SJiong Wang /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
3020a4b1d3c1SJiong Wang  * analysis code and wants explicit zero extension inserted by verifier.
3021a4b1d3c1SJiong Wang  * Otherwise, return FALSE.
302239491867SBrendan Jackman  *
302339491867SBrendan Jackman  * The verifier inserts an explicit zero extension after BPF_CMPXCHGs even if
302439491867SBrendan Jackman  * you don't override this. JITs that don't want these extra insns can detect
302539491867SBrendan Jackman  * them using insn_is_zext.
3026a4b1d3c1SJiong Wang  */
3027a4b1d3c1SJiong Wang bool __weak bpf_jit_needs_zext(void)
3028a4b1d3c1SJiong Wang {
3029a4b1d3c1SJiong Wang 	return false;
3030a4b1d3c1SJiong Wang }
3031a4b1d3c1SJiong Wang 
30322ddec2c8SPuranjay Mohan /* Return true if the JIT inlines the call to the helper corresponding to
30332ddec2c8SPuranjay Mohan  * the imm.
30342ddec2c8SPuranjay Mohan  *
30352ddec2c8SPuranjay Mohan  * The verifier will not patch the insn->imm for the call to the helper if
30362ddec2c8SPuranjay Mohan  * this returns true.
30372ddec2c8SPuranjay Mohan  */
30382ddec2c8SPuranjay Mohan bool __weak bpf_jit_inlines_helper_call(s32 imm)
30392ddec2c8SPuranjay Mohan {
30402ddec2c8SPuranjay Mohan 	return false;
30412ddec2c8SPuranjay Mohan }
30422ddec2c8SPuranjay Mohan 
304395acd881STony Ambardar /* Return TRUE if the JIT backend supports mixing bpf2bpf and tailcalls. */
304495acd881STony Ambardar bool __weak bpf_jit_supports_subprog_tailcalls(void)
304595acd881STony Ambardar {
304695acd881STony Ambardar 	return false;
304795acd881STony Ambardar }
304895acd881STony Ambardar 
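/* Return TRUE if the JIT backend supports the special instruction that
 * converts a per-CPU offset into an absolute per-CPU address.
 */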
30497bdbf744SAndrii Nakryiko bool __weak bpf_jit_supports_percpu_insn(void)
30507bdbf744SAndrii Nakryiko {
30517bdbf744SAndrii Nakryiko 	return false;
30527bdbf744SAndrii Nakryiko }
30537bdbf744SAndrii Nakryiko 
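/* Return TRUE if the JIT backend supports calling kernel functions (kfuncs)
 * from BPF programs.
 */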
3054e6ac2450SMartin KaFai Lau bool __weak bpf_jit_supports_kfunc_call(void)
3055e6ac2450SMartin KaFai Lau {
3056e6ac2450SMartin KaFai Lau 	return false;
3057e6ac2450SMartin KaFai Lau }
3058e6ac2450SMartin KaFai Lau 
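/* Return TRUE if the JIT backend supports calling kfuncs whose address does
 * not fit into the 32-bit imm field of the call instruction.
 */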
30591cf3bfc6SIlya Leoshkevich bool __weak bpf_jit_supports_far_kfunc_call(void)
30601cf3bfc6SIlya Leoshkevich {
30611cf3bfc6SIlya Leoshkevich 	return false;
30621cf3bfc6SIlya Leoshkevich }
30631cf3bfc6SIlya Leoshkevich 
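/* Return TRUE if the JIT backend supports the BPF arena, i.e. address space
 * cast instructions and loads/stores to arena memory.
 */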
3064142fd4d2SAlexei Starovoitov bool __weak bpf_jit_supports_arena(void)
3065142fd4d2SAlexei Starovoitov {
3066142fd4d2SAlexei Starovoitov 	return false;
3067142fd4d2SAlexei Starovoitov }
3068142fd4d2SAlexei Starovoitov 
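/* Return TRUE if the JIT backend supports the given instruction, with
 * in_arena indicating whether it operates on arena memory.
 */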
3069d503a04fSAlexei Starovoitov bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
3070d503a04fSAlexei Starovoitov {
3071d503a04fSAlexei Starovoitov 	return false;
3072d503a04fSAlexei Starovoitov }
3073d503a04fSAlexei Starovoitov 
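/* Return the upper bound of the user address space, used by the verifier to
 * guard PROBE_MEM loads against user-space pointers. Returning 0 means no
 * such guard instructions are emitted.
 */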
307466e13b61SPuranjay Mohan u64 __weak bpf_arch_uaddress_limit(void)
307566e13b61SPuranjay Mohan {
307666e13b61SPuranjay Mohan #if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
307766e13b61SPuranjay Mohan 	return TASK_SIZE;
307866e13b61SPuranjay Mohan #else
307966e13b61SPuranjay Mohan 	return 0;
308066e13b61SPuranjay Mohan #endif
308166e13b61SPuranjay Mohan }
308266e13b61SPuranjay Mohan 
30837c05e7f3SHou Tao /* Return TRUE if the JIT backend satisfies the following two conditions:
30847c05e7f3SHou Tao  * 1) JIT backend supports atomic_xchg() on pointer-sized words.
30857c05e7f3SHou Tao  * 2) Under the specific arch, the implementation of xchg() is the same
30867c05e7f3SHou Tao  *    as atomic_xchg() on pointer-sized words.
30877c05e7f3SHou Tao  */
30887c05e7f3SHou Tao bool __weak bpf_jit_supports_ptr_xchg(void)
30897c05e7f3SHou Tao {
30907c05e7f3SHou Tao 	return false;
30917c05e7f3SHou Tao }
30927c05e7f3SHou Tao 
3093f89b7755SAlexei Starovoitov /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
3094f89b7755SAlexei Starovoitov  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
3095f89b7755SAlexei Starovoitov  */
3096f89b7755SAlexei Starovoitov int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
3097f89b7755SAlexei Starovoitov 			 int len)
3098f89b7755SAlexei Starovoitov {
3099f89b7755SAlexei Starovoitov 	return -EFAULT;
3100f89b7755SAlexei Starovoitov }
3101a67edbf4SDaniel Borkmann 
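/* Patch the call or jump instruction at @ip from old target @addr1 to new
 * target @addr2 (NULL means no call/jump); used e.g. to attach and detach
 * BPF trampolines.
 */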
31025964b200SAlexei Starovoitov int __weak bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
31035964b200SAlexei Starovoitov 			      void *addr1, void *addr2)
31045964b200SAlexei Starovoitov {
31055964b200SAlexei Starovoitov 	return -ENOTSUPP;
31065964b200SAlexei Starovoitov }
31075964b200SAlexei Starovoitov 
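/* Copy @len bytes from @src into the write-protected text at @dst, returning
 * @dst on success or an ERR_PTR on failure.
 */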
3108ebc1415dSSong Liu void * __weak bpf_arch_text_copy(void *dst, void *src, size_t len)
3109ebc1415dSSong Liu {
3110ebc1415dSSong Liu 	return ERR_PTR(-ENOTSUPP);
3111ebc1415dSSong Liu }
3112ebc1415dSSong Liu 
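/* Overwrite @len bytes of JIT text at @dst with invalid instructions, e.g.
 * when part of a shared bpf_prog_pack image is freed.
 */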
3113fe736565SSong Liu int __weak bpf_arch_text_invalidate(void *dst, size_t len)
3114fe736565SSong Liu {
3115fe736565SSong Liu 	return -ENOTSUPP;
3116fe736565SSong Liu }
3117fe736565SSong Liu 
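/* Return TRUE if the JIT backend supports BPF exceptions, which also requires
 * arch_bpf_stack_walk() to unwind BPF stack frames.
 */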
3118fd5d27b7SKumar Kartikeya Dwivedi bool __weak bpf_jit_supports_exceptions(void)
3119fd5d27b7SKumar Kartikeya Dwivedi {
3120fd5d27b7SKumar Kartikeya Dwivedi 	return false;
3121fd5d27b7SKumar Kartikeya Dwivedi }
3122fd5d27b7SKumar Kartikeya Dwivedi 
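/* Return TRUE if the JIT backend can run BPF programs on a per-program
 * private stack instead of the regular kernel stack.
 */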
3123a76ab573SYonghong Song bool __weak bpf_jit_supports_private_stack(void)
3124a76ab573SYonghong Song {
3125a76ab573SYonghong Song 	return false;
3126a76ab573SYonghong Song }
3127a76ab573SYonghong Song 
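/* Walk the current stack and invoke @consume_fn with @cookie and the ip/sp/bp
 * of each frame, stopping when @consume_fn returns false.
 */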
3128fd5d27b7SKumar Kartikeya Dwivedi void __weak arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie)
3129fd5d27b7SKumar Kartikeya Dwivedi {
3130fd5d27b7SKumar Kartikeya Dwivedi }
3131fd5d27b7SKumar Kartikeya Dwivedi 
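/* Return TRUE if the JIT backend implements arch_bpf_timed_may_goto() and
 * emits calls to it for timed may_goto instructions.
 */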
3132e723608bSKumar Kartikeya Dwivedi bool __weak bpf_jit_supports_timed_may_goto(void)
3133e723608bSKumar Kartikeya Dwivedi {
3134e723608bSKumar Kartikeya Dwivedi 	return false;
3135e723608bSKumar Kartikeya Dwivedi }
3136e723608bSKumar Kartikeya Dwivedi 
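/* Arch-specific entry point for timed may_goto; expected to preserve the BPF
 * caller's registers and forward to bpf_check_timed_may_goto().
 */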
3137e723608bSKumar Kartikeya Dwivedi u64 __weak arch_bpf_timed_may_goto(void)
3138e723608bSKumar Kartikeya Dwivedi {
3139e723608bSKumar Kartikeya Dwivedi 	return 0;
3140e723608bSKumar Kartikeya Dwivedi }
3141e723608bSKumar Kartikeya Dwivedi 
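/* Refresh the remaining loop count for a timed may_goto: grant
 * BPF_MAX_TIMED_LOOPS iterations while this stack frame is within its 250ms
 * time slice, and 0 once the slice is exhausted.
 */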
3142e723608bSKumar Kartikeya Dwivedi u64 bpf_check_timed_may_goto(struct bpf_timed_may_goto *p)
3143e723608bSKumar Kartikeya Dwivedi {
3144e723608bSKumar Kartikeya Dwivedi 	u64 time = ktime_get_mono_fast_ns();
3145e723608bSKumar Kartikeya Dwivedi 
3146e723608bSKumar Kartikeya Dwivedi 	/* Populate the timestamp for this stack frame, and refresh count. */
3147e723608bSKumar Kartikeya Dwivedi 	if (!p->timestamp) {
3148e723608bSKumar Kartikeya Dwivedi 		p->timestamp = time;
3149e723608bSKumar Kartikeya Dwivedi 		return BPF_MAX_TIMED_LOOPS;
3150e723608bSKumar Kartikeya Dwivedi 	}
3151e723608bSKumar Kartikeya Dwivedi 	/* Check if we've exhausted our time slice, and zero count. */
3152e723608bSKumar Kartikeya Dwivedi 	if (time - p->timestamp >= (NSEC_PER_SEC / 4))
3153e723608bSKumar Kartikeya Dwivedi 		return 0;
3154e723608bSKumar Kartikeya Dwivedi 	/* Refresh the count for the stack frame. */
3155e723608bSKumar Kartikeya Dwivedi 	return BPF_MAX_TIMED_LOOPS;
3156e723608bSKumar Kartikeya Dwivedi }
3157e723608bSKumar Kartikeya Dwivedi 
315931746031SAlexei Starovoitov /* Stubs for configs without MMU or for 32-bit archs. */
315931746031SAlexei Starovoitov __weak const struct bpf_map_ops arena_map_ops;
316031746031SAlexei Starovoitov __weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
316131746031SAlexei Starovoitov {
316231746031SAlexei Starovoitov 	return 0;
316331746031SAlexei Starovoitov }
316431746031SAlexei Starovoitov __weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
316531746031SAlexei Starovoitov {
316631746031SAlexei Starovoitov 	return 0;
316731746031SAlexei Starovoitov }
316831746031SAlexei Starovoitov 
3169958cf2e2SKumar Kartikeya Dwivedi #ifdef CONFIG_BPF_SYSCALL
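/* Initialize the global BPF memory allocator (bpf_global_ma), used for
 * allocating BPF objects at runtime.
 */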
3170958cf2e2SKumar Kartikeya Dwivedi static int __init bpf_global_ma_init(void)
3171958cf2e2SKumar Kartikeya Dwivedi {
3172958cf2e2SKumar Kartikeya Dwivedi 	int ret;
3173958cf2e2SKumar Kartikeya Dwivedi 
3174958cf2e2SKumar Kartikeya Dwivedi 	ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
3175958cf2e2SKumar Kartikeya Dwivedi 	bpf_global_ma_set = !ret;
31761fda5bb6SYonghong Song 	return ret;
3177958cf2e2SKumar Kartikeya Dwivedi }
3178958cf2e2SKumar Kartikeya Dwivedi late_initcall(bpf_global_ma_init);
3179958cf2e2SKumar Kartikeya Dwivedi #endif
3180958cf2e2SKumar Kartikeya Dwivedi 
3181492ecee8SAlexei Starovoitov DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
3182492ecee8SAlexei Starovoitov EXPORT_SYMBOL(bpf_stats_enabled_key);
3183492ecee8SAlexei Starovoitov 
3184a67edbf4SDaniel Borkmann /* All definitions of tracepoints related to BPF. */
3185a67edbf4SDaniel Borkmann #define CREATE_TRACE_POINTS
3186a67edbf4SDaniel Borkmann #include <linux/bpf_trace.h>
3187a67edbf4SDaniel Borkmann 
3188a67edbf4SDaniel Borkmann EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
3189e7d47989SToshiaki Makita EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
3190