/* linux-6.15/kernel/bpf/arraymap.c (revision 792caccc) */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS)

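/* Each element of a per-cpu array is an individual per-cpu allocation,
 * so allocation and free walk the elements one by one; cond_resched()
 * keeps very large maps from monopolizing the CPU during setup and
 * teardown.
 */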
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & BPF_F_MMAPABLE)
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 cost, array_size, mask64;
	struct bpf_map_memory mem;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32-bit archs, roundup_pow_of_two() with a max_entries that has
	 * the uppermost bit set in u32 space is undefined behavior due to
	 * the resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (percpu)
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data) {
			bpf_map_charge_finish(&mem);
			return ERR_PTR(-ENOMEM);
		}
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	bpf_map_charge_move(&array->map.memory, &mem);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_charge_finish(&array->map.memory);
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

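/* Direct value access for single-element arrays that back global data
 * (.data/.bss/.rodata): the verifier uses these two helpers to fold a
 * map value address into a load immediate, and to translate such an
 * address back into a map plus offset.
 */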
static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
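
/* Rough C equivalent of the instruction sequence emitted above
 * (illustrative only):
 *
 *	base = map_ptr + offsetof(struct bpf_array, value);
 *	index = *(u32 *)key;
 *	if (index >= max_entries)
 *		return NULL;
 *	index &= index_mask;	// only with Spectre v1 mitigation enabled
 *	return base + index * elem_size;
 */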

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

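/* Key iteration for BPF_MAP_GET_NEXT_KEY: a NULL or out-of-range key
 * restarts the walk at index 0; -ENOENT signals the end of the array.
 */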
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* user space will provide round_up(value_size, 8) bytes that
	 * will be copied into the per-cpu area. bpf programs can only
	 * access value_size of it. During lookup the same extra bytes
	 * will be returned, or zeros which were zero-filled by
	 * percpu_alloc, so no kernel data leaks are possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

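/* For BPF_F_MMAPABLE arrays the struct bpf_array header is placed so
 * that array->value starts exactly on a page boundary; the start of
 * the underlying vmalloc area is therefore the containing address
 * rounded down to a page.
 */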
static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
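
/* Illustrative userspace usage sketch (not part of this file; assumes a
 * libbpf recent enough to provide bpf_map_create()): an array created
 * with BPF_F_MMAPABLE can be mapped directly, e.g.
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .map_flags = BPF_F_MMAPABLE);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "m", sizeof(__u32),
 *				8, 1024, &opts);
 *	void *base = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * The offset/length check in array_map_mmap() above guarantees such a
 * mapping cannot reach past the page-aligned value area.
 */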

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	return meta0->max_entries == meta1->max_entries &&
		bpf_map_meta_equal(meta0, meta1);
}
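
/* Unlike most inner map types, arrays used in a map-in-map must match
 * the meta map's max_entries exactly (checked above): array lookups may
 * be inlined with a constant max_entries and index_mask, so an inner
 * array of a different size could not be swapped in safely.
 */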

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};
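
/* The seq ops below implement the bpf_map_elem iterator for array maps:
 * they walk indices 0..max_entries-1 and, for per-cpu arrays, gather
 * each element's per-CPU values into percpu_value_buf before handing
 * the element to the iterator program.
 */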

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = round_up(map->value_size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(info->percpu_value_buf + off,
						per_cpu_ptr(pptr, cpu),
						size);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start	= bpf_array_map_seq_start,
	.next	= bpf_array_map_seq_next,
	.stop	= bpf_array_map_seq_stop,
	.show	= bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_array_map_seq_ops,
	.init_seq_private	= bpf_iter_init_array_map,
	.fini_seq_private	= bpf_iter_fini_array_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
};

static int array_map_btf_id;
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &percpu_array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

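/* Updates and deletions on fd arrays go through xchg() so readers always
 * see either the old or the new pointer; for tail-call (prog) arrays the
 * swap additionally runs map_poke_run() under poke_mutex to patch any
 * JITed tail-call sites that target this map slot.
 */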
/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

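/* Bookkeeping for programs whose JITed images contain tail-call poke
 * descriptors targeting this map; prog_array_map_poke_run() walks this
 * list to patch the affected call sites on element updates.
 */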
struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	u8 *old_addr, *new_addr, *old_bypass_addr;
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    the RCU grace period already passed, we simply skip them.
			 * 4) Also, programs reaching a refcount of zero while patching
			 *    is in progress are okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed. When we're still in the middle of
			 *    patching and suddenly the kallsyms entry of the program
			 *    gets evicted, we just skip the rest, which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			old_bypass_addr = old ? NULL : poke->bypass_addr;
			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;

			if (new) {
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, new_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				if (!old) {
					ret = bpf_arch_text_poke(poke->tailcall_bypass,
								 BPF_MOD_JUMP,
								 poke->bypass_addr,
								 NULL);
					BUG_ON(ret < 0 && ret != -EINVAL);
				}
			} else {
				ret = bpf_arch_text_poke(poke->tailcall_bypass,
							 BPF_MOD_JUMP,
							 old_bypass_addr,
							 poke->bypass_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				/* let other CPUs finish executing the program
				 * so that they cannot be exposed to an invalid
				 * nop, stack unwind, or nop state
				 */
				if (!ret)
					synchronize_rcu();
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, NULL);
				BUG_ON(ret < 0 && ret != -EINVAL);
			}
		}
	}
}

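/* Clearing a prog array patches tail-call sites under poke_mutex, which
 * may sleep, so map_release_uref defers the work to process context;
 * the reference taken in prog_array_map_clear() keeps the map alive
 * until the work item has run.
 */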
995da765a2fSDaniel Borkmann static void prog_array_map_clear_deferred(struct work_struct *work)
996da765a2fSDaniel Borkmann {
997da765a2fSDaniel Borkmann 	struct bpf_map *map = container_of(work, struct bpf_array_aux,
998da765a2fSDaniel Borkmann 					   work)->map;
999da765a2fSDaniel Borkmann 	bpf_fd_array_map_clear(map);
1000da765a2fSDaniel Borkmann 	bpf_map_put(map);
1001da765a2fSDaniel Borkmann }
1002da765a2fSDaniel Borkmann 
1003da765a2fSDaniel Borkmann static void prog_array_map_clear(struct bpf_map *map)
1004da765a2fSDaniel Borkmann {
1005da765a2fSDaniel Borkmann 	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1006da765a2fSDaniel Borkmann 						 map)->aux;
1007da765a2fSDaniel Borkmann 	bpf_map_inc(map);
1008da765a2fSDaniel Borkmann 	schedule_work(&aux->work);
1009da765a2fSDaniel Borkmann }
1010da765a2fSDaniel Borkmann 
10112beee5f5SDaniel Borkmann static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
10122beee5f5SDaniel Borkmann {
10132beee5f5SDaniel Borkmann 	struct bpf_array_aux *aux;
10142beee5f5SDaniel Borkmann 	struct bpf_map *map;
10152beee5f5SDaniel Borkmann 
10162beee5f5SDaniel Borkmann 	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
10172beee5f5SDaniel Borkmann 	if (!aux)
10182beee5f5SDaniel Borkmann 		return ERR_PTR(-ENOMEM);
10192beee5f5SDaniel Borkmann 
1020da765a2fSDaniel Borkmann 	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1021da765a2fSDaniel Borkmann 	INIT_LIST_HEAD(&aux->poke_progs);
1022da765a2fSDaniel Borkmann 	mutex_init(&aux->poke_mutex);
1023da765a2fSDaniel Borkmann 
10242beee5f5SDaniel Borkmann 	map = array_map_alloc(attr);
10252beee5f5SDaniel Borkmann 	if (IS_ERR(map)) {
10262beee5f5SDaniel Borkmann 		kfree(aux);
10272beee5f5SDaniel Borkmann 		return map;
10282beee5f5SDaniel Borkmann 	}
10292beee5f5SDaniel Borkmann 
10302beee5f5SDaniel Borkmann 	container_of(map, struct bpf_array, map)->aux = aux;
1031da765a2fSDaniel Borkmann 	aux->map = map;
1032da765a2fSDaniel Borkmann 
10332beee5f5SDaniel Borkmann 	return map;
10342beee5f5SDaniel Borkmann }
10352beee5f5SDaniel Borkmann 
10362beee5f5SDaniel Borkmann static void prog_array_map_free(struct bpf_map *map)
10372beee5f5SDaniel Borkmann {
1038da765a2fSDaniel Borkmann 	struct prog_poke_elem *elem, *tmp;
10392beee5f5SDaniel Borkmann 	struct bpf_array_aux *aux;
10402beee5f5SDaniel Borkmann 
10412beee5f5SDaniel Borkmann 	aux = container_of(map, struct bpf_array, map)->aux;
1042da765a2fSDaniel Borkmann 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1043da765a2fSDaniel Borkmann 		list_del_init(&elem->list);
1044da765a2fSDaniel Borkmann 		kfree(elem);
1045da765a2fSDaniel Borkmann 	}
10462beee5f5SDaniel Borkmann 	kfree(aux);
10472beee5f5SDaniel Borkmann 	fd_array_map_free(map);
10482beee5f5SDaniel Borkmann }
10492beee5f5SDaniel Borkmann 
1050f4d05259SMartin KaFai Lau /* prog_array->aux->{type,jited} is a runtime binding.
1051f4d05259SMartin KaFai Lau  * Doing a static check alone in the verifier is not enough.
1052f4d05259SMartin KaFai Lau  * Thus, prog_array_map cannot be used as an inner_map
1053f4d05259SMartin KaFai Lau  * and map_meta_equal is not implemented.
1054f4d05259SMartin KaFai Lau  */
10552872e9acSAndrey Ignatov static int prog_array_map_btf_id;
105640077e0cSJohannes Berg const struct bpf_map_ops prog_array_map_ops = {
1057ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
10582beee5f5SDaniel Borkmann 	.map_alloc = prog_array_map_alloc,
10592beee5f5SDaniel Borkmann 	.map_free = prog_array_map_free,
1060da765a2fSDaniel Borkmann 	.map_poke_track = prog_array_map_poke_track,
1061da765a2fSDaniel Borkmann 	.map_poke_untrack = prog_array_map_poke_untrack,
1062da765a2fSDaniel Borkmann 	.map_poke_run = prog_array_map_poke_run,
106304fd61abSAlexei Starovoitov 	.map_get_next_key = array_map_get_next_key,
10642a36f0b9SWang Nan 	.map_lookup_elem = fd_array_map_lookup_elem,
10652a36f0b9SWang Nan 	.map_delete_elem = fd_array_map_delete_elem,
10662a36f0b9SWang Nan 	.map_fd_get_ptr = prog_fd_array_get_ptr,
10672a36f0b9SWang Nan 	.map_fd_put_ptr = prog_fd_array_put_ptr,
106814dc6f04SMartin KaFai Lau 	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1069da765a2fSDaniel Borkmann 	.map_release_uref = prog_array_map_clear,
1070a7c19db3SYonghong Song 	.map_seq_show_elem = prog_array_map_seq_show_elem,
10712872e9acSAndrey Ignatov 	.map_btf_name = "bpf_array",
10722872e9acSAndrey Ignatov 	.map_btf_id = &prog_array_map_btf_id,
107304fd61abSAlexei Starovoitov };
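
/* Usage sketch (illustrative only, not part of this file): a prog array is
 * filled from user space with program fds and consumed in BPF via the
 * bpf_tail_call() helper. All names below are made up:
 *
 *	// BPF side (libbpf BTF-style map definition)
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 8);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	bpf_tail_call(ctx, &jmp_table, slot);	// no return on success
 *
 *	// user space side: install prog_fd at index 'slot'
 *	bpf_map_update_elem(map_fd, &slot, &prog_fd, BPF_ANY);
 */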
107404fd61abSAlexei Starovoitov 
10753b1efb19SDaniel Borkmann static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
10763b1efb19SDaniel Borkmann 						   struct file *map_file)
1077ea317b26SKaixu Xia {
10783b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
10793b1efb19SDaniel Borkmann 
1080858d68f1SDaniel Borkmann 	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
10813b1efb19SDaniel Borkmann 	if (ee) {
10823b1efb19SDaniel Borkmann 		ee->event = perf_file->private_data;
10833b1efb19SDaniel Borkmann 		ee->perf_file = perf_file;
10843b1efb19SDaniel Borkmann 		ee->map_file = map_file;
10853b1efb19SDaniel Borkmann 	}
10863b1efb19SDaniel Borkmann 
10873b1efb19SDaniel Borkmann 	return ee;
10883b1efb19SDaniel Borkmann }
10893b1efb19SDaniel Borkmann 
10903b1efb19SDaniel Borkmann static void __bpf_event_entry_free(struct rcu_head *rcu)
10913b1efb19SDaniel Borkmann {
10923b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
10933b1efb19SDaniel Borkmann 
10943b1efb19SDaniel Borkmann 	ee = container_of(rcu, struct bpf_event_entry, rcu);
10953b1efb19SDaniel Borkmann 	fput(ee->perf_file);
10963b1efb19SDaniel Borkmann 	kfree(ee);
10973b1efb19SDaniel Borkmann }
10983b1efb19SDaniel Borkmann 
10993b1efb19SDaniel Borkmann static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
11003b1efb19SDaniel Borkmann {
11013b1efb19SDaniel Borkmann 	call_rcu(&ee->rcu, __bpf_event_entry_free);
1102ea317b26SKaixu Xia }
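
/* The perf file reference is dropped via call_rcu() above because BPF
 * programs dereference array->ptrs[] under rcu_read_lock(); a reader may
 * still hold the entry when it is replaced or deleted, so the underlying
 * file must stay valid for a full grace period.
 */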
1103ea317b26SKaixu Xia 
1104d056a788SDaniel Borkmann static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1105d056a788SDaniel Borkmann 					 struct file *map_file, int fd)
1106ea317b26SKaixu Xia {
11073b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
11083b1efb19SDaniel Borkmann 	struct perf_event *event;
11093b1efb19SDaniel Borkmann 	struct file *perf_file;
1110f91840a3SAlexei Starovoitov 	u64 value;
1111ea317b26SKaixu Xia 
11123b1efb19SDaniel Borkmann 	perf_file = perf_event_get(fd);
11133b1efb19SDaniel Borkmann 	if (IS_ERR(perf_file))
11143b1efb19SDaniel Borkmann 		return perf_file;
1115e03e7ee3SAlexei Starovoitov 
1116f91840a3SAlexei Starovoitov 	ee = ERR_PTR(-EOPNOTSUPP);
11173b1efb19SDaniel Borkmann 	event = perf_file->private_data;
111897562633SYonghong Song 	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
11193b1efb19SDaniel Borkmann 		goto err_out;
1120ea317b26SKaixu Xia 
11213b1efb19SDaniel Borkmann 	ee = bpf_event_entry_gen(perf_file, map_file);
11223b1efb19SDaniel Borkmann 	if (ee)
11233b1efb19SDaniel Borkmann 		return ee;
11243b1efb19SDaniel Borkmann 	ee = ERR_PTR(-ENOMEM);
11253b1efb19SDaniel Borkmann err_out:
11263b1efb19SDaniel Borkmann 	fput(perf_file);
11273b1efb19SDaniel Borkmann 	return ee;
1128ea317b26SKaixu Xia }
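
/* Usage sketch (illustrative only): user space typically stores one perf
 * event fd per CPU, and BPF programs emit samples through the map. Names
 * below are made up:
 *
 *	// user space: fd from perf_event_open(), one slot per cpu
 *	bpf_map_update_elem(map_fd, &cpu, &perf_fd, BPF_ANY);
 *
 *	// BPF side: write a sample to the current CPU's event
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *			      &data, sizeof(data));
 */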
1129ea317b26SKaixu Xia 
1130ea317b26SKaixu Xia static void perf_event_fd_array_put_ptr(void *ptr)
1131ea317b26SKaixu Xia {
11323b1efb19SDaniel Borkmann 	bpf_event_entry_free_rcu(ptr);
11333b1efb19SDaniel Borkmann }
11343b1efb19SDaniel Borkmann 
11353b1efb19SDaniel Borkmann static void perf_event_fd_array_release(struct bpf_map *map,
11363b1efb19SDaniel Borkmann 					struct file *map_file)
11373b1efb19SDaniel Borkmann {
11383b1efb19SDaniel Borkmann 	struct bpf_array *array = container_of(map, struct bpf_array, map);
11393b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
11403b1efb19SDaniel Borkmann 	int i;
11413b1efb19SDaniel Borkmann 
1142*792cacccSSong Liu 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1143*792cacccSSong Liu 		return;
1144*792cacccSSong Liu 
11453b1efb19SDaniel Borkmann 	rcu_read_lock();
11463b1efb19SDaniel Borkmann 	for (i = 0; i < array->map.max_entries; i++) {
11473b1efb19SDaniel Borkmann 		ee = READ_ONCE(array->ptrs[i]);
11483b1efb19SDaniel Borkmann 		if (ee && ee->map_file == map_file)
11493b1efb19SDaniel Borkmann 			fd_array_map_delete_elem(map, &i);
11503b1efb19SDaniel Borkmann 	}
11513b1efb19SDaniel Borkmann 	rcu_read_unlock();
1152ea317b26SKaixu Xia }
1153ea317b26SKaixu Xia 
1154*792cacccSSong Liu static void perf_event_fd_array_map_free(struct bpf_map *map)
1155*792cacccSSong Liu {
1156*792cacccSSong Liu 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1157*792cacccSSong Liu 		bpf_fd_array_map_clear(map);
1158*792cacccSSong Liu 	fd_array_map_free(map);
1159*792cacccSSong Liu }
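
/* Usage sketch (illustrative only): with BPF_F_PRESERVE_ELEMS set at
 * creation time, installed entries survive the release of the creating map
 * file and are only dropped here at map destruction:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *		.key_size    = 4,
 *		.value_size  = 4,
 *		.max_entries = nr_cpus,	// made-up variable
 *		.map_flags   = BPF_F_PRESERVE_ELEMS,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */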
1160*792cacccSSong Liu 
11612872e9acSAndrey Ignatov static int perf_event_array_map_btf_id;
116240077e0cSJohannes Berg const struct bpf_map_ops perf_event_array_map_ops = {
1163f4d05259SMartin KaFai Lau 	.map_meta_equal = bpf_map_meta_equal,
1164ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
1165ad46061fSJakub Kicinski 	.map_alloc = array_map_alloc,
1166*792cacccSSong Liu 	.map_free = perf_event_fd_array_map_free,
1167ea317b26SKaixu Xia 	.map_get_next_key = array_map_get_next_key,
1168ea317b26SKaixu Xia 	.map_lookup_elem = fd_array_map_lookup_elem,
1169ea317b26SKaixu Xia 	.map_delete_elem = fd_array_map_delete_elem,
1170ea317b26SKaixu Xia 	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1171ea317b26SKaixu Xia 	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
11723b1efb19SDaniel Borkmann 	.map_release = perf_event_fd_array_release,
1173e8d2bec0SDaniel Borkmann 	.map_check_btf = map_check_no_btf,
11742872e9acSAndrey Ignatov 	.map_btf_name = "bpf_array",
11752872e9acSAndrey Ignatov 	.map_btf_id = &perf_event_array_map_btf_id,
1176ea317b26SKaixu Xia };
1177ea317b26SKaixu Xia 
117860d20f91SSargun Dhillon #ifdef CONFIG_CGROUPS
11794ed8ec52SMartin KaFai Lau static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
11804ed8ec52SMartin KaFai Lau 				     struct file *map_file /* not used */,
11814ed8ec52SMartin KaFai Lau 				     int fd)
11824ed8ec52SMartin KaFai Lau {
11834ed8ec52SMartin KaFai Lau 	return cgroup_get_from_fd(fd);
11844ed8ec52SMartin KaFai Lau }
11854ed8ec52SMartin KaFai Lau 
11864ed8ec52SMartin KaFai Lau static void cgroup_fd_array_put_ptr(void *ptr)
11874ed8ec52SMartin KaFai Lau {
11884ed8ec52SMartin KaFai Lau 	/* cgroup_put() frees cgrp after an RCU grace period */
11894ed8ec52SMartin KaFai Lau 	cgroup_put(ptr);
11904ed8ec52SMartin KaFai Lau }
11914ed8ec52SMartin KaFai Lau 
11924ed8ec52SMartin KaFai Lau static void cgroup_fd_array_free(struct bpf_map *map)
11934ed8ec52SMartin KaFai Lau {
11944ed8ec52SMartin KaFai Lau 	bpf_fd_array_map_clear(map);
11954ed8ec52SMartin KaFai Lau 	fd_array_map_free(map);
11964ed8ec52SMartin KaFai Lau }
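
/* Usage sketch (illustrative only): a cgroup array usually holds a handful
 * of cgroup fds and is queried from BPF with helpers such as
 * bpf_skb_under_cgroup() or bpf_current_task_under_cgroup(). Names and
 * paths below are made up:
 *
 *	// user space: pin a cgroup into slot 0
 *	int cg_fd = open("/sys/fs/cgroup/mygroup", O_RDONLY);
 *	__u32 idx = 0;
 *	bpf_map_update_elem(map_fd, &idx, &cg_fd, BPF_ANY);
 *
 *	// BPF side: test whether the skb's socket is in that cgroup
 *	if (bpf_skb_under_cgroup(skb, &cgrp_map, 0) == 1)
 *		...
 */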
11974ed8ec52SMartin KaFai Lau 
11982872e9acSAndrey Ignatov static int cgroup_array_map_btf_id;
119940077e0cSJohannes Berg const struct bpf_map_ops cgroup_array_map_ops = {
1200f4d05259SMartin KaFai Lau 	.map_meta_equal = bpf_map_meta_equal,
1201ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
1202ad46061fSJakub Kicinski 	.map_alloc = array_map_alloc,
12034ed8ec52SMartin KaFai Lau 	.map_free = cgroup_fd_array_free,
12044ed8ec52SMartin KaFai Lau 	.map_get_next_key = array_map_get_next_key,
12054ed8ec52SMartin KaFai Lau 	.map_lookup_elem = fd_array_map_lookup_elem,
12064ed8ec52SMartin KaFai Lau 	.map_delete_elem = fd_array_map_delete_elem,
12074ed8ec52SMartin KaFai Lau 	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
12084ed8ec52SMartin KaFai Lau 	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
1209e8d2bec0SDaniel Borkmann 	.map_check_btf = map_check_no_btf,
12102872e9acSAndrey Ignatov 	.map_btf_name = "bpf_array",
12112872e9acSAndrey Ignatov 	.map_btf_id = &cgroup_array_map_btf_id,
12124ed8ec52SMartin KaFai Lau };
12134ed8ec52SMartin KaFai Lau #endif
121456f668dfSMartin KaFai Lau 
121556f668dfSMartin KaFai Lau static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
121656f668dfSMartin KaFai Lau {
121756f668dfSMartin KaFai Lau 	struct bpf_map *map, *inner_map_meta;
121856f668dfSMartin KaFai Lau 
121956f668dfSMartin KaFai Lau 	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
122056f668dfSMartin KaFai Lau 	if (IS_ERR(inner_map_meta))
122156f668dfSMartin KaFai Lau 		return inner_map_meta;
122256f668dfSMartin KaFai Lau 
1223ad46061fSJakub Kicinski 	map = array_map_alloc(attr);
122456f668dfSMartin KaFai Lau 	if (IS_ERR(map)) {
122556f668dfSMartin KaFai Lau 		bpf_map_meta_free(inner_map_meta);
122656f668dfSMartin KaFai Lau 		return map;
122756f668dfSMartin KaFai Lau 	}
122856f668dfSMartin KaFai Lau 
122956f668dfSMartin KaFai Lau 	map->inner_map_meta = inner_map_meta;
123056f668dfSMartin KaFai Lau 
123156f668dfSMartin KaFai Lau 	return map;
123256f668dfSMartin KaFai Lau }
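
/* Usage sketch (illustrative only): creating an array-of-maps requires the
 * fd of a template inner map; its attributes, captured by
 * bpf_map_meta_alloc() above, constrain every map later inserted:
 *
 *	union bpf_attr attr = {
 *		.map_type     = BPF_MAP_TYPE_ARRAY_OF_MAPS,
 *		.key_size     = 4,
 *		.value_size   = 4,	// value is an inner map fd
 *		.max_entries  = 16,
 *		.inner_map_fd = tmpl_fd,	// made-up template fd
 *	};
 */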
123356f668dfSMartin KaFai Lau 
123456f668dfSMartin KaFai Lau static void array_of_map_free(struct bpf_map *map)
123556f668dfSMartin KaFai Lau {
123656f668dfSMartin KaFai Lau 	/* map->inner_map_meta is only accessed by the syscall path,
123756f668dfSMartin KaFai Lau 	 * which is protected by fdget/fdput.
123856f668dfSMartin KaFai Lau 	 */
123956f668dfSMartin KaFai Lau 	bpf_map_meta_free(map->inner_map_meta);
124056f668dfSMartin KaFai Lau 	bpf_fd_array_map_clear(map);
124156f668dfSMartin KaFai Lau 	fd_array_map_free(map);
124256f668dfSMartin KaFai Lau }
124356f668dfSMartin KaFai Lau 
124456f668dfSMartin KaFai Lau static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
124556f668dfSMartin KaFai Lau {
124656f668dfSMartin KaFai Lau 	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
124756f668dfSMartin KaFai Lau 
124856f668dfSMartin KaFai Lau 	if (!inner_map)
124956f668dfSMartin KaFai Lau 		return NULL;
125056f668dfSMartin KaFai Lau 
125156f668dfSMartin KaFai Lau 	return READ_ONCE(*inner_map);
125256f668dfSMartin KaFai Lau }
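
/* Usage sketch (illustrative only): from a BPF program the outer lookup
 * returns an inner map pointer that feeds a second lookup. Names are
 * made up:
 *
 *	void *inner = bpf_map_lookup_elem(&outer_map, &idx);
 *	if (inner) {
 *		long *val = bpf_map_lookup_elem(inner, &key);
 *		...
 *	}
 */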
125356f668dfSMartin KaFai Lau 
12547b0c2a05SDaniel Borkmann static u32 array_of_map_gen_lookup(struct bpf_map *map,
12557b0c2a05SDaniel Borkmann 				   struct bpf_insn *insn_buf)
12567b0c2a05SDaniel Borkmann {
1257b2157399SAlexei Starovoitov 	struct bpf_array *array = container_of(map, struct bpf_array, map);
12587b0c2a05SDaniel Borkmann 	u32 elem_size = round_up(map->value_size, 8);
12597b0c2a05SDaniel Borkmann 	struct bpf_insn *insn = insn_buf;
12607b0c2a05SDaniel Borkmann 	const int ret = BPF_REG_0;
12617b0c2a05SDaniel Borkmann 	const int map_ptr = BPF_REG_1;
12627b0c2a05SDaniel Borkmann 	const int index = BPF_REG_2;
12637b0c2a05SDaniel Borkmann 
12647b0c2a05SDaniel Borkmann 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
12657b0c2a05SDaniel Borkmann 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
12662c78ee89SAlexei Starovoitov 	if (!map->bypass_spec_v1) {
1267b2157399SAlexei Starovoitov 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1268b2157399SAlexei Starovoitov 		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1269b2157399SAlexei Starovoitov 	} else {
12707b0c2a05SDaniel Borkmann 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1271b2157399SAlexei Starovoitov 	}
12727b0c2a05SDaniel Borkmann 	if (is_power_of_2(elem_size))
12737b0c2a05SDaniel Borkmann 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
12747b0c2a05SDaniel Borkmann 	else
12757b0c2a05SDaniel Borkmann 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
12767b0c2a05SDaniel Borkmann 	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
12777b0c2a05SDaniel Borkmann 	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
12787b0c2a05SDaniel Borkmann 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
12797b0c2a05SDaniel Borkmann 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
12807b0c2a05SDaniel Borkmann 	*insn++ = BPF_MOV64_IMM(ret, 0);
12817b0c2a05SDaniel Borkmann 
12827b0c2a05SDaniel Borkmann 	return insn - insn_buf;
12837b0c2a05SDaniel Borkmann }
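
/* Rough C equivalent of the instruction sequence emitted above (the index
 * mask is only applied when Spectre v1 mitigation is in effect, i.e.
 * !map->bypass_spec_v1):
 *
 *	u32 index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;
 *	return *(void **)(array->value + index * elem_size);
 */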
12847b0c2a05SDaniel Borkmann 
12852872e9acSAndrey Ignatov static int array_of_maps_map_btf_id;
128640077e0cSJohannes Berg const struct bpf_map_ops array_of_maps_map_ops = {
1287ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
128856f668dfSMartin KaFai Lau 	.map_alloc = array_of_map_alloc,
128956f668dfSMartin KaFai Lau 	.map_free = array_of_map_free,
129056f668dfSMartin KaFai Lau 	.map_get_next_key = array_map_get_next_key,
129156f668dfSMartin KaFai Lau 	.map_lookup_elem = array_of_map_lookup_elem,
129256f668dfSMartin KaFai Lau 	.map_delete_elem = fd_array_map_delete_elem,
129356f668dfSMartin KaFai Lau 	.map_fd_get_ptr = bpf_map_fd_get_ptr,
129456f668dfSMartin KaFai Lau 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
129514dc6f04SMartin KaFai Lau 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
12967b0c2a05SDaniel Borkmann 	.map_gen_lookup = array_of_map_gen_lookup,
1297e8d2bec0SDaniel Borkmann 	.map_check_btf = map_check_no_btf,
12982872e9acSAndrey Ignatov 	.map_btf_name = "bpf_array",
12992872e9acSAndrey Ignatov 	.map_btf_id = &array_of_maps_map_btf_id,
130056f668dfSMartin KaFai Lau };
1301