xref: /linux-6.15/kernel/bpf/arraymap.c (revision d6083f04)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	/* avoid overflow on round_up(map->value_size) */
	if (attr->value_size > INT_MAX)
		return -E2BIG;
	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
		return -E2BIG;

	return 0;
}
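
/* Illustrative sketch (not part of this file): how the checks above look
 * from user space via libbpf. Array maps require key_size == 4; the map
 * name and sizes below are arbitrary example values.
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "example_arr",
 *				sizeof(__u32), sizeof(__u64), 256, NULL);
 *
 * fd is negative with errno == EINVAL if, e.g., key_size != 4, or a flag
 * outside ARRAY_CREATE_FLAG_MASK is set, or BPF_F_MMAPABLE is requested
 * for a per-cpu array.
 */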

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}
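
	/* Worked example of the mask math above (illustrative numbers):
	 * attr->max_entries = 5 gives fls_long(4) = 3, so
	 * mask64 = (1ULL << 3) - 1 = 7 and index_mask = 7. Without the
	 * Spectre v1 bypass, max_entries is rounded up to
	 * index_mask + 1 = 8, so a speculated index still stays inside
	 * the allocation after "index & index_mask".
	 */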

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

static void *array_map_elem_ptr(struct bpf_array *array, u32 index)
{
	return array->value + (u64)array->elem_size * index;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + (u64)array->elem_size * (index & array->index_mask);
}
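
/* Illustrative sketch (not part of this file): the same lookup as seen from
 * a BPF program. "my_arr" is a hypothetical map declared with libbpf's
 * BTF-style syntax; the bpf_map_lookup_elem() helper call ends up in the
 * function above, or in its inlined form from array_map_gen_lookup() below.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 256);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} my_arr SEC(".maps");
 *
 *	__u32 key = 3;
 *	__u64 *val = bpf_map_lookup_elem(&my_arr, &key);
 *	if (val)
 *		(*val)++;
 */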

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}
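
/* Illustrative sketch (not part of this file): direct value access is what
 * backs global variables in BPF programs. A global like
 *
 *	__u64 counter;
 *
 *	SEC("tracepoint/syscalls/sys_enter_write")
 *	int count(void *ctx)
 *	{
 *		counter++;
 *		return 0;
 *	}
 *
 * is placed by libbpf into a single-entry (max_entries == 1) array map for
 * the .bss section; the verifier then uses the two callbacks above to turn
 * loads and stores of "counter" into direct accesses at array->value plus
 * an offset, instead of full map lookups.
 */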

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = array->elem_size;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
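
/* For reference, an illustrative C rendering of the sequence emitted above
 * (mirroring the !bypass_spec_v1 path; r0-r2 are the BPF registers used):
 *
 *	r1 = &map->value;		r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)key;		r0  = *(u32 *)(r2 + 0)
 *	if (r0 >= max_entries)		jump over the address math
 *		return NULL;		r0 = 0
 *	r0 &= index_mask;		Spectre v1 masking
 *	return r1 + r0 * elem_size;	shift (power of 2) or multiply
 */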

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

/* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;

	if (!bpf_jit_supports_percpu_insn())
		return -EOPNOTSUPP;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));

	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
	}

	*insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
	*insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
	return insn - insn_buf;
}

static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
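
/* Illustrative sketch (not part of this file): the user-space counterpart of
 * bpf_percpu_array_copy(). A lookup on a per-cpu array returns one value per
 * possible CPU, each slot round_up(value_size, 8) bytes wide (which for a
 * __u64 value is exactly 8). "map_fd" and the value type are example
 * assumptions; libbpf_num_possible_cpus() supplies the CPU count.
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpus, sizeof(__u64));
 *	__u32 key = 0;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, vals)) {
 *		for (int cpu = 0; cpu < ncpus; cpu++)
 *			printf("cpu%d: %llu\n", cpu, vals[cpu]);
 *	}
 *	free(vals);
 */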

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
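
/* Illustrative sketch (not part of this file): user-space key iteration
 * driven by the function above. Passing NULL (or an out-of-range key) as
 * the current key restarts from index 0; -ENOENT marks the end.
 *
 *	__u32 key, next;
 *	void *cur = NULL;
 *
 *	while (!bpf_map_get_next_key(map_fd, cur, &next)) {
 *		key = next;
 *		cur = &key;
 *		(process key here)
 *	}
 */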

/* Called from syscall or from eBPF program */
static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
				  u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
		copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	} else {
		val = array->value +
			(u64)array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	}
	return 0;
}
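
/* Illustrative sketch (not part of this file): the BPF_F_LOCK path above
 * requires the value type to embed a struct bpf_spin_lock, e.g. in a BPF
 * program (hypothetical value layout):
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		__u64 data;
 *	};
 *
 * and from user space:
 *
 *	struct val v = { .data = 42 };
 *	__u32 key = 0;
 *	bpf_map_update_elem(map_fd, &key, &v, BPF_F_LOCK);
 *
 * which copies the value under the element's spin lock via
 * copy_map_value_locked() instead of a plain copy.
 */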

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers_wq(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free fields other than timer and workqueue
	 * on uref dropping to zero.
	 */
	if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE)) {
		for (i = 0; i < array->map.max_entries; i++) {
			if (btf_record_has_field(map->record, BPF_TIMER))
				bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
			if (btf_record_has_field(map->record, BPF_WORKQUEUE))
				bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (!IS_ERR_OR_NULL(map->record)) {
		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
			for (i = 0; i < array->map.max_entries; i++) {
				void __percpu *pptr = array->pptrs[i & array->index_mask];
				int cpu;

				for_each_possible_cpu(cpu) {
					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
					cond_resched();
				}
			}
		} else {
			for (i = 0; i < array->map.max_entries; i++)
				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
		}
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_putc(m, '\n');

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_putc(m, '\n');
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
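
/* Illustrative sketch (not part of this file): mapping a BPF_F_MMAPABLE
 * array into user space, which ends up in array_map_mmap() above. The map
 * must have been created with BPF_F_MMAPABLE; afterwards updates are plain
 * stores, no bpf(2) syscalls needed.
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmap_arr",
 *				sizeof(__u32), sizeof(__u64), 1024, &opts);
 *	__u64 *vals = mmap(NULL, 1024 * sizeof(__u64),
 *			   PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	if (vals != MAP_FAILED)
 *		vals[7] = 42;
 *
 * The store updates element 7 directly and is visible to BPF programs
 * using the map.
 */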

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}
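
/* Illustrative sketch (not part of this file): by default an inner map of an
 * array-of-maps must match the declared template's max_entries exactly;
 * creating inner arrays with BPF_F_INNER_MAP relaxes that check (see the
 * map_flags test above), at the cost of the inlined lookup (the
 * *_gen_lookup() callbacks return -EOPNOTSUPP for such maps, so each inner
 * map's own max_entries and index_mask are honored at run time). Whether
 * map_flags may be set on the inner template this way is a libbpf detail
 * assumed here:
 *
 *	struct inner {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(map_flags, BPF_F_INNER_MAP);
 *		__uint(max_entries, 1);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 8);
 *		__type(key, __u32);
 *		__array(values, struct inner);
 *	} outer SEC(".maps");
 */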

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return (void *)(uintptr_t)array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return (void *)(uintptr_t)array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu *pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = (void __percpu *)(uintptr_t)v;
			size = array->elem_size;
			for_each_possible_cpu(cpu) {
				copy_map_value_long(map, info->percpu_value_buf + off,
						    per_cpu_ptr(pptr, cpu));
				check_and_init_map_value(map, info->percpu_value_buf + off);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = array->elem_size * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
	 * released before or in the middle of iterating map elements, so
	 * acquire an extra map uref for iterator.
	 */
	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start	= bpf_array_map_seq_start,
	.next	= bpf_array_map_seq_next,
	.stop	= bpf_array_map_seq_stop,
	.show	= bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_array_map_seq_ops,
	.init_seq_private	= bpf_iter_init_array_map,
	.fini_seq_private	= bpf_iter_fini_array_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
};
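
/* Illustrative sketch (not part of this file): the seq_ops above back the
 * "bpf_map_elem" iterator. A BPF program and its loader pairing might look
 * like this (names hypothetical, error handling elided):
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		__u32 *key = ctx->key;
 *		__u64 *val = ctx->value;
 *
 *		if (key && val)
 *			BPF_SEQ_PRINTF(ctx->meta->seq, "%u: %llu\n",
 *				       *key, *val);
 *		return 0;
 *	}
 *
 * and from user space, libbpf attaches it to a specific map:
 *
 *	union bpf_iter_link_info linfo = { .map.map_fd = map_fd };
 *	LIBBPF_OPTS(bpf_iter_attach_opts, opts,
 *		    .link_info = &linfo, .link_info_len = sizeof(linfo));
 *	link = bpf_program__attach_iter(skel->progs.dump_elem, &opts);
 */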

static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				    void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array_map_elem_ptr(array, i);
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}
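
/* Illustrative sketch (not part of this file): the BPF-program view of the
 * function above, via the bpf_for_each_map_elem() helper. "my_arr" is the
 * hypothetical array map from the earlier sketches.
 *
 *	struct cb_ctx { __u64 sum; };
 *
 *	static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val,
 *			   struct cb_ctx *ctx)
 *	{
 *		ctx->sum += *val;
 *		return 0;	(0 = continue, 1 = stop early)
 *	}
 *
 *	struct cb_ctx c = {};
 *	long n = bpf_for_each_map_elem(&my_arr, sum_cb, &c, 0);
 */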

static u64 array_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	u32 elem_size = array->elem_size;
	u64 entries = map->max_entries;
	u64 usage = sizeof(*array);

	if (percpu) {
		usage += entries * sizeof(void *);
		usage += entries * elem_size * num_possible_cpus();
	} else {
		if (map->map_flags & BPF_F_MMAPABLE) {
			usage = PAGE_ALIGN(usage);
			usage += PAGE_ALIGN(entries * elem_size);
		} else {
			usage += entries * elem_size;
		}
	}
	return usage;
}
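
/* Worked example of the accounting above (illustrative numbers, 4K pages):
 * a plain array with max_entries = 1000 and value_size = 12 has
 * elem_size = round_up(12, 8) = 16, so usage = sizeof(*array) + 1000 * 16
 * = sizeof(*array) + 16000 bytes. The same map created with BPF_F_MMAPABLE
 * is page-granular instead: PAGE_ALIGN(sizeof(*array)) + PAGE_ALIGN(16000)
 * = 4096 + 16384 bytes.
 */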

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers_wq,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_gen_lookup = percpu_array_map_gen_lookup,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(map, old_ptr, true);
	return 0;
}
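
/* Illustrative sketch (not part of this file): the most common user of the
 * fd-array update path above is a BPF_MAP_TYPE_PROG_ARRAY driving tail
 * calls. In a BPF program (hypothetical map and program names):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 4);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} jmp_table SEC(".maps");
 *
 *	bpf_tail_call(ctx, &jmp_table, 1);
 *	(execution falls through here only if slot 1 is empty or the
 *	 tail call fails)
 *
 * User space populates slot 1 with a program fd, which lands in
 * bpf_fd_array_map_update_elem() -> prog_fd_array_get_ptr() below:
 *
 *	__u32 key = 1, prog_fd = bpf_program__fd(skel->progs.callee);
 *	bpf_map_update_elem(bpf_map__fd(skel->maps.jmp_table), &key,
 *			    &prog_fd, BPF_ANY);
 */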
91404fd61abSAlexei Starovoitov 
91579d93b3cSHou Tao static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
91604fd61abSAlexei Starovoitov {
91704fd61abSAlexei Starovoitov 	struct bpf_array *array = container_of(map, struct bpf_array, map);
9182a36f0b9SWang Nan 	void *old_ptr;
91904fd61abSAlexei Starovoitov 	u32 index = *(u32 *)key;
92004fd61abSAlexei Starovoitov 
92104fd61abSAlexei Starovoitov 	if (index >= array->map.max_entries)
92204fd61abSAlexei Starovoitov 		return -E2BIG;
92304fd61abSAlexei Starovoitov 
924da765a2fSDaniel Borkmann 	if (map->ops->map_poke_run) {
925da765a2fSDaniel Borkmann 		mutex_lock(&array->aux->poke_mutex);
9262a36f0b9SWang Nan 		old_ptr = xchg(array->ptrs + index, NULL);
927da765a2fSDaniel Borkmann 		map->ops->map_poke_run(map, index, old_ptr, NULL);
928da765a2fSDaniel Borkmann 		mutex_unlock(&array->aux->poke_mutex);
929da765a2fSDaniel Borkmann 	} else {
930da765a2fSDaniel Borkmann 		old_ptr = xchg(array->ptrs + index, NULL);
931da765a2fSDaniel Borkmann 	}
932da765a2fSDaniel Borkmann 
9332a36f0b9SWang Nan 	if (old_ptr) {
93479d93b3cSHou Tao 		map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
93504fd61abSAlexei Starovoitov 		return 0;
93604fd61abSAlexei Starovoitov 	} else {
93704fd61abSAlexei Starovoitov 		return -ENOENT;
93804fd61abSAlexei Starovoitov 	}
93904fd61abSAlexei Starovoitov }
94004fd61abSAlexei Starovoitov 
94179d93b3cSHou Tao static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
94279d93b3cSHou Tao {
94379d93b3cSHou Tao 	return __fd_array_map_delete_elem(map, key, true);
94479d93b3cSHou Tao }
94579d93b3cSHou Tao 
946d056a788SDaniel Borkmann static void *prog_fd_array_get_ptr(struct bpf_map *map,
947d056a788SDaniel Borkmann 				   struct file *map_file, int fd)
9482a36f0b9SWang Nan {
9492a36f0b9SWang Nan 	struct bpf_prog *prog = bpf_prog_get(fd);
950*d6083f04SLeon Hwang 	bool is_extended;
951d056a788SDaniel Borkmann 
9522a36f0b9SWang Nan 	if (IS_ERR(prog))
9532a36f0b9SWang Nan 		return prog;
9542a36f0b9SWang Nan 
955*d6083f04SLeon Hwang 	if (prog->type == BPF_PROG_TYPE_EXT ||
956*d6083f04SLeon Hwang 	    !bpf_prog_map_compatible(map, prog)) {
9572a36f0b9SWang Nan 		bpf_prog_put(prog);
9582a36f0b9SWang Nan 		return ERR_PTR(-EINVAL);
9592a36f0b9SWang Nan 	}
960d056a788SDaniel Borkmann 
961*d6083f04SLeon Hwang 	mutex_lock(&prog->aux->ext_mutex);
962*d6083f04SLeon Hwang 	is_extended = prog->aux->is_extended;
963*d6083f04SLeon Hwang 	if (!is_extended)
964*d6083f04SLeon Hwang 		prog->aux->prog_array_member_cnt++;
965*d6083f04SLeon Hwang 	mutex_unlock(&prog->aux->ext_mutex);
966*d6083f04SLeon Hwang 	if (is_extended) {
967*d6083f04SLeon Hwang 		/* Extended prog can not be tail callee. It's to prevent a
968*d6083f04SLeon Hwang 		 * potential infinite loop like:
969*d6083f04SLeon Hwang 		 * tail callee prog entry -> tail callee prog subprog ->
970*d6083f04SLeon Hwang 		 * freplace prog entry --tailcall-> tail callee prog entry.
971*d6083f04SLeon Hwang 		 */
972*d6083f04SLeon Hwang 		bpf_prog_put(prog);
973*d6083f04SLeon Hwang 		return ERR_PTR(-EBUSY);
974*d6083f04SLeon Hwang 	}
975*d6083f04SLeon Hwang 
9762a36f0b9SWang Nan 	return prog;
9772a36f0b9SWang Nan }
9782a36f0b9SWang Nan 
97920c20bd1SHou Tao static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
9802a36f0b9SWang Nan {
981*d6083f04SLeon Hwang 	struct bpf_prog *prog = ptr;
982*d6083f04SLeon Hwang 
983*d6083f04SLeon Hwang 	mutex_lock(&prog->aux->ext_mutex);
984*d6083f04SLeon Hwang 	prog->aux->prog_array_member_cnt--;
985*d6083f04SLeon Hwang 	mutex_unlock(&prog->aux->ext_mutex);
98620c20bd1SHou Tao 	/* bpf_prog is freed after one RCU or tasks trace grace period */
987*d6083f04SLeon Hwang 	bpf_prog_put(prog);
9882a36f0b9SWang Nan }
9892a36f0b9SWang Nan 
99014dc6f04SMartin KaFai Lau static u32 prog_fd_array_sys_lookup_elem(void *ptr)
99114dc6f04SMartin KaFai Lau {
99214dc6f04SMartin KaFai Lau 	return ((struct bpf_prog *)ptr)->aux->id;
99314dc6f04SMartin KaFai Lau }
99414dc6f04SMartin KaFai Lau 
99504fd61abSAlexei Starovoitov /* decrement refcnt of all bpf_progs that are stored in this map */
99679d93b3cSHou Tao static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
99704fd61abSAlexei Starovoitov {
99804fd61abSAlexei Starovoitov 	struct bpf_array *array = container_of(map, struct bpf_array, map);
99904fd61abSAlexei Starovoitov 	int i;
100004fd61abSAlexei Starovoitov 
100104fd61abSAlexei Starovoitov 	for (i = 0; i < array->map.max_entries; i++)
100279d93b3cSHou Tao 		__fd_array_map_delete_elem(map, &i, need_defer);
100304fd61abSAlexei Starovoitov }
100404fd61abSAlexei Starovoitov 
1005a7c19db3SYonghong Song static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
1006a7c19db3SYonghong Song 					 struct seq_file *m)
1007a7c19db3SYonghong Song {
1008a7c19db3SYonghong Song 	void **elem, *ptr;
1009a7c19db3SYonghong Song 	u32 prog_id;
1010a7c19db3SYonghong Song 
1011a7c19db3SYonghong Song 	rcu_read_lock();
1012a7c19db3SYonghong Song 
1013a7c19db3SYonghong Song 	elem = array_map_lookup_elem(map, key);
1014a7c19db3SYonghong Song 	if (elem) {
1015a7c19db3SYonghong Song 		ptr = READ_ONCE(*elem);
1016a7c19db3SYonghong Song 		if (ptr) {
1017a7c19db3SYonghong Song 			seq_printf(m, "%u: ", *(u32 *)key);
1018a7c19db3SYonghong Song 			prog_id = prog_fd_array_sys_lookup_elem(ptr);
1019a7c19db3SYonghong Song 			btf_type_seq_show(map->btf, map->btf_value_type_id,
1020a7c19db3SYonghong Song 					  &prog_id, m);
1021df862de4SMarkus Elfring 			seq_putc(m, '\n');
1022a7c19db3SYonghong Song 		}
1023a7c19db3SYonghong Song 	}
1024a7c19db3SYonghong Song 
1025a7c19db3SYonghong Song 	rcu_read_unlock();
1026a7c19db3SYonghong Song }
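
/* Illustration (hypothetical pin path and values): when the map carries
 * BTF, reading a pinned prog_array through bpffs runs the seq_show
 * callback above and emits one "index: prog_id" line per populated
 * slot, e.g.:
 *
 *	# cat /sys/fs/bpf/my_prog_array
 *	0: 42
 *	3: 57
 */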
1027a7c19db3SYonghong Song 
1028da765a2fSDaniel Borkmann struct prog_poke_elem {
1029da765a2fSDaniel Borkmann 	struct list_head list;
1030da765a2fSDaniel Borkmann 	struct bpf_prog_aux *aux;
1031da765a2fSDaniel Borkmann };
1032da765a2fSDaniel Borkmann 
1033da765a2fSDaniel Borkmann static int prog_array_map_poke_track(struct bpf_map *map,
1034da765a2fSDaniel Borkmann 				     struct bpf_prog_aux *prog_aux)
1035da765a2fSDaniel Borkmann {
1036da765a2fSDaniel Borkmann 	struct prog_poke_elem *elem;
1037da765a2fSDaniel Borkmann 	struct bpf_array_aux *aux;
1038da765a2fSDaniel Borkmann 	int ret = 0;
1039da765a2fSDaniel Borkmann 
1040da765a2fSDaniel Borkmann 	aux = container_of(map, struct bpf_array, map)->aux;
1041da765a2fSDaniel Borkmann 	mutex_lock(&aux->poke_mutex);
1042da765a2fSDaniel Borkmann 	list_for_each_entry(elem, &aux->poke_progs, list) {
1043da765a2fSDaniel Borkmann 		if (elem->aux == prog_aux)
1044da765a2fSDaniel Borkmann 			goto out;
1045da765a2fSDaniel Borkmann 	}
1046da765a2fSDaniel Borkmann 
1047da765a2fSDaniel Borkmann 	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
1048da765a2fSDaniel Borkmann 	if (!elem) {
1049da765a2fSDaniel Borkmann 		ret = -ENOMEM;
1050da765a2fSDaniel Borkmann 		goto out;
1051da765a2fSDaniel Borkmann 	}
1052da765a2fSDaniel Borkmann 
1053da765a2fSDaniel Borkmann 	INIT_LIST_HEAD(&elem->list);
1054da765a2fSDaniel Borkmann 	/* We must track the program's aux info at this point in time
1055da765a2fSDaniel Borkmann 	 * since the program pointer itself may not be stable yet, see
1056da765a2fSDaniel Borkmann 	 * also comment in prog_array_map_poke_run().
1057da765a2fSDaniel Borkmann 	 */
1058da765a2fSDaniel Borkmann 	elem->aux = prog_aux;
1059da765a2fSDaniel Borkmann 
1060da765a2fSDaniel Borkmann 	list_add_tail(&elem->list, &aux->poke_progs);
1061da765a2fSDaniel Borkmann out:
1062da765a2fSDaniel Borkmann 	mutex_unlock(&aux->poke_mutex);
1063da765a2fSDaniel Borkmann 	return ret;
1064da765a2fSDaniel Borkmann }
1065da765a2fSDaniel Borkmann 
1066da765a2fSDaniel Borkmann static void prog_array_map_poke_untrack(struct bpf_map *map,
1067da765a2fSDaniel Borkmann 					struct bpf_prog_aux *prog_aux)
1068da765a2fSDaniel Borkmann {
1069da765a2fSDaniel Borkmann 	struct prog_poke_elem *elem, *tmp;
1070da765a2fSDaniel Borkmann 	struct bpf_array_aux *aux;
1071da765a2fSDaniel Borkmann 
1072da765a2fSDaniel Borkmann 	aux = container_of(map, struct bpf_array, map)->aux;
1073da765a2fSDaniel Borkmann 	mutex_lock(&aux->poke_mutex);
1074da765a2fSDaniel Borkmann 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1075da765a2fSDaniel Borkmann 		if (elem->aux == prog_aux) {
1076da765a2fSDaniel Borkmann 			list_del_init(&elem->list);
1077da765a2fSDaniel Borkmann 			kfree(elem);
1078da765a2fSDaniel Borkmann 			break;
1079da765a2fSDaniel Borkmann 		}
1080da765a2fSDaniel Borkmann 	}
1081da765a2fSDaniel Borkmann 	mutex_unlock(&aux->poke_mutex);
1082da765a2fSDaniel Borkmann }
1083da765a2fSDaniel Borkmann 
10844b7de801SJiri Olsa void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
10854b7de801SJiri Olsa 				      struct bpf_prog *new, struct bpf_prog *old)
10864b7de801SJiri Olsa {
10874b7de801SJiri Olsa 	WARN_ON_ONCE(1);
10884b7de801SJiri Olsa }
10894b7de801SJiri Olsa 
1090da765a2fSDaniel Borkmann static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
1091da765a2fSDaniel Borkmann 				    struct bpf_prog *old,
1092da765a2fSDaniel Borkmann 				    struct bpf_prog *new)
1093da765a2fSDaniel Borkmann {
1094da765a2fSDaniel Borkmann 	struct prog_poke_elem *elem;
1095da765a2fSDaniel Borkmann 	struct bpf_array_aux *aux;
1096da765a2fSDaniel Borkmann 
1097da765a2fSDaniel Borkmann 	aux = container_of(map, struct bpf_array, map)->aux;
1098da765a2fSDaniel Borkmann 	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
1099da765a2fSDaniel Borkmann 
1100da765a2fSDaniel Borkmann 	list_for_each_entry(elem, &aux->poke_progs, list) {
1101da765a2fSDaniel Borkmann 		struct bpf_jit_poke_descriptor *poke;
11024b7de801SJiri Olsa 		int i;
1103da765a2fSDaniel Borkmann 
1104da765a2fSDaniel Borkmann 		for (i = 0; i < elem->aux->size_poke_tab; i++) {
1105da765a2fSDaniel Borkmann 			poke = &elem->aux->poke_tab[i];
1106da765a2fSDaniel Borkmann 
1107da765a2fSDaniel Borkmann 			/* Few things to be aware of:
1108da765a2fSDaniel Borkmann 			 *
1109da765a2fSDaniel Borkmann 			 * 1) We can only ever access aux in this context, but
1110da765a2fSDaniel Borkmann 			 *    not aux->prog since it might not be stable yet and
1111da765a2fSDaniel Borkmann 			 *    there could be danger of use after free otherwise.
1112da765a2fSDaniel Borkmann 			 * 2) Initially when we start tracking aux, the program
1113da765a2fSDaniel Borkmann 			 *    is not JITed yet and also does not have a kallsyms
1114cf71b174SMaciej Fijalkowski 			 *    entry. We skip these as poke->tailcall_target_stable
1115cf71b174SMaciej Fijalkowski 			 *    is not active yet. The JIT will do the final fixup
1116cf71b174SMaciej Fijalkowski 			 *    before setting it stable. The various
1117cf71b174SMaciej Fijalkowski 			 *    poke->tailcall_target_stable are successively
1118cf71b174SMaciej Fijalkowski 			 *    activated, so tail call updates can arrive from here
1119cf71b174SMaciej Fijalkowski 			 *    while JIT is still finishing its final fixup for
1120cf71b174SMaciej Fijalkowski 			 *    non-activated poke entries.
11214b7de801SJiri Olsa 			 * 3) Programs reaching a refcount of zero while patching
1122da765a2fSDaniel Borkmann 			 *    is in progress are okay since we're protected under
1123da765a2fSDaniel Borkmann 			 *    poke_mutex and untrack the programs before the JIT
11244b7de801SJiri Olsa 			 *    buffer is freed.
1125da765a2fSDaniel Borkmann 			 */
1126cf71b174SMaciej Fijalkowski 			if (!READ_ONCE(poke->tailcall_target_stable))
1127da765a2fSDaniel Borkmann 				continue;
1128da765a2fSDaniel Borkmann 			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
1129da765a2fSDaniel Borkmann 				continue;
1130da765a2fSDaniel Borkmann 			if (poke->tail_call.map != map ||
1131da765a2fSDaniel Borkmann 			    poke->tail_call.key != key)
1132da765a2fSDaniel Borkmann 				continue;
1133da765a2fSDaniel Borkmann 
11344b7de801SJiri Olsa 			bpf_arch_poke_desc_update(poke, new, old);
1135da765a2fSDaniel Borkmann 		}
1136da765a2fSDaniel Borkmann 	}
1137da765a2fSDaniel Borkmann }
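
/* Sketch of what an arch override of bpf_arch_poke_desc_update() is
 * expected to do (pseudocode gloss; the real implementation lives in
 * the arch JIT, e.g. x86-64):
 *
 *	if (!new)
 *		turn the jump at poke->tailcall_target into a nop and
 *		re-enable poke->tailcall_bypass
 *	else
 *		point the jump at new's JITed image and disable the bypass
 */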
1138da765a2fSDaniel Borkmann 
1139da765a2fSDaniel Borkmann static void prog_array_map_clear_deferred(struct work_struct *work)
1140da765a2fSDaniel Borkmann {
1141da765a2fSDaniel Borkmann 	struct bpf_map *map = container_of(work, struct bpf_array_aux,
1142da765a2fSDaniel Borkmann 					   work)->map;
114379d93b3cSHou Tao 	bpf_fd_array_map_clear(map, true);
1144da765a2fSDaniel Borkmann 	bpf_map_put(map);
1145da765a2fSDaniel Borkmann }
1146da765a2fSDaniel Borkmann 
1147da765a2fSDaniel Borkmann static void prog_array_map_clear(struct bpf_map *map)
1148da765a2fSDaniel Borkmann {
1149da765a2fSDaniel Borkmann 	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1150da765a2fSDaniel Borkmann 						 map)->aux;
1151da765a2fSDaniel Borkmann 	bpf_map_inc(map);
1152da765a2fSDaniel Borkmann 	schedule_work(&aux->work);
1153da765a2fSDaniel Borkmann }
1154da765a2fSDaniel Borkmann 
11552beee5f5SDaniel Borkmann static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
11562beee5f5SDaniel Borkmann {
11572beee5f5SDaniel Borkmann 	struct bpf_array_aux *aux;
11582beee5f5SDaniel Borkmann 	struct bpf_map *map;
11592beee5f5SDaniel Borkmann 
11606d192c79SRoman Gushchin 	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
11612beee5f5SDaniel Borkmann 	if (!aux)
11622beee5f5SDaniel Borkmann 		return ERR_PTR(-ENOMEM);
11632beee5f5SDaniel Borkmann 
1164da765a2fSDaniel Borkmann 	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1165da765a2fSDaniel Borkmann 	INIT_LIST_HEAD(&aux->poke_progs);
1166da765a2fSDaniel Borkmann 	mutex_init(&aux->poke_mutex);
1167da765a2fSDaniel Borkmann 
11682beee5f5SDaniel Borkmann 	map = array_map_alloc(attr);
11692beee5f5SDaniel Borkmann 	if (IS_ERR(map)) {
11702beee5f5SDaniel Borkmann 		kfree(aux);
11712beee5f5SDaniel Borkmann 		return map;
11722beee5f5SDaniel Borkmann 	}
11732beee5f5SDaniel Borkmann 
11742beee5f5SDaniel Borkmann 	container_of(map, struct bpf_array, map)->aux = aux;
1175da765a2fSDaniel Borkmann 	aux->map = map;
1176da765a2fSDaniel Borkmann 
11772beee5f5SDaniel Borkmann 	return map;
11782beee5f5SDaniel Borkmann }
11792beee5f5SDaniel Borkmann 
11802beee5f5SDaniel Borkmann static void prog_array_map_free(struct bpf_map *map)
11812beee5f5SDaniel Borkmann {
1182da765a2fSDaniel Borkmann 	struct prog_poke_elem *elem, *tmp;
11832beee5f5SDaniel Borkmann 	struct bpf_array_aux *aux;
11842beee5f5SDaniel Borkmann 
11852beee5f5SDaniel Borkmann 	aux = container_of(map, struct bpf_array, map)->aux;
1186da765a2fSDaniel Borkmann 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1187da765a2fSDaniel Borkmann 		list_del_init(&elem->list);
1188da765a2fSDaniel Borkmann 		kfree(elem);
1189da765a2fSDaniel Borkmann 	}
11902beee5f5SDaniel Borkmann 	kfree(aux);
11912beee5f5SDaniel Borkmann 	fd_array_map_free(map);
11922beee5f5SDaniel Borkmann }
11932beee5f5SDaniel Borkmann 
1194f4d05259SMartin KaFai Lau /* prog_array->aux->{type,jited} is a runtime binding.
1195f4d05259SMartin KaFai Lau  * Doing static check alone in the verifier is not enough.
1196f4d05259SMartin KaFai Lau  * Thus, prog_array_map cannot be used as an inner_map
1197f4d05259SMartin KaFai Lau  * and map_meta_equal is not implemented.
1198f4d05259SMartin KaFai Lau  */
119940077e0cSJohannes Berg const struct bpf_map_ops prog_array_map_ops = {
1200ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
12012beee5f5SDaniel Borkmann 	.map_alloc = prog_array_map_alloc,
12022beee5f5SDaniel Borkmann 	.map_free = prog_array_map_free,
1203da765a2fSDaniel Borkmann 	.map_poke_track = prog_array_map_poke_track,
1204da765a2fSDaniel Borkmann 	.map_poke_untrack = prog_array_map_poke_untrack,
1205da765a2fSDaniel Borkmann 	.map_poke_run = prog_array_map_poke_run,
120604fd61abSAlexei Starovoitov 	.map_get_next_key = array_map_get_next_key,
12072a36f0b9SWang Nan 	.map_lookup_elem = fd_array_map_lookup_elem,
12082a36f0b9SWang Nan 	.map_delete_elem = fd_array_map_delete_elem,
12092a36f0b9SWang Nan 	.map_fd_get_ptr = prog_fd_array_get_ptr,
12102a36f0b9SWang Nan 	.map_fd_put_ptr = prog_fd_array_put_ptr,
121114dc6f04SMartin KaFai Lau 	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1212da765a2fSDaniel Borkmann 	.map_release_uref = prog_array_map_clear,
1213a7c19db3SYonghong Song 	.map_seq_show_elem = prog_array_map_seq_show_elem,
12141746d055SYafang Shao 	.map_mem_usage = array_map_mem_usage,
1215c317ab71SMenglong Dong 	.map_btf_id = &array_map_btf_ids[0],
121604fd61abSAlexei Starovoitov };
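
/* Illustration of the restriction documented above (assumes libbpf >=
 * 0.7; prog_array_fd is a hypothetical prog_array descriptor): with no
 * map_meta_equal callback, the kernel refuses a prog_array as the
 * inner-map template of a map-in-map:
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .inner_map_fd = prog_array_fd);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
 *				sizeof(__u32), sizeof(__u32), 1, &opts);
 *	(expected: fd < 0)
 */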
121704fd61abSAlexei Starovoitov 
12183b1efb19SDaniel Borkmann static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
12193b1efb19SDaniel Borkmann 						   struct file *map_file)
1220ea317b26SKaixu Xia {
12213b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
12223b1efb19SDaniel Borkmann 
1223dc685409SHou Tao 	ee = kzalloc(sizeof(*ee), GFP_KERNEL);
12243b1efb19SDaniel Borkmann 	if (ee) {
12253b1efb19SDaniel Borkmann 		ee->event = perf_file->private_data;
12263b1efb19SDaniel Borkmann 		ee->perf_file = perf_file;
12273b1efb19SDaniel Borkmann 		ee->map_file = map_file;
12283b1efb19SDaniel Borkmann 	}
12293b1efb19SDaniel Borkmann 
12303b1efb19SDaniel Borkmann 	return ee;
12313b1efb19SDaniel Borkmann }
12323b1efb19SDaniel Borkmann 
12333b1efb19SDaniel Borkmann static void __bpf_event_entry_free(struct rcu_head *rcu)
12343b1efb19SDaniel Borkmann {
12353b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
12363b1efb19SDaniel Borkmann 
12373b1efb19SDaniel Borkmann 	ee = container_of(rcu, struct bpf_event_entry, rcu);
12383b1efb19SDaniel Borkmann 	fput(ee->perf_file);
12393b1efb19SDaniel Borkmann 	kfree(ee);
12403b1efb19SDaniel Borkmann }
12413b1efb19SDaniel Borkmann 
12423b1efb19SDaniel Borkmann static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
12433b1efb19SDaniel Borkmann {
12443b1efb19SDaniel Borkmann 	call_rcu(&ee->rcu, __bpf_event_entry_free);
1245ea317b26SKaixu Xia }
1246ea317b26SKaixu Xia 
1247d056a788SDaniel Borkmann static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1248d056a788SDaniel Borkmann 					 struct file *map_file, int fd)
1249ea317b26SKaixu Xia {
12503b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
12513b1efb19SDaniel Borkmann 	struct perf_event *event;
12523b1efb19SDaniel Borkmann 	struct file *perf_file;
1253f91840a3SAlexei Starovoitov 	u64 value;
1254ea317b26SKaixu Xia 
12553b1efb19SDaniel Borkmann 	perf_file = perf_event_get(fd);
12563b1efb19SDaniel Borkmann 	if (IS_ERR(perf_file))
12573b1efb19SDaniel Borkmann 		return perf_file;
1258e03e7ee3SAlexei Starovoitov 
1259f91840a3SAlexei Starovoitov 	ee = ERR_PTR(-EOPNOTSUPP);
12603b1efb19SDaniel Borkmann 	event = perf_file->private_data;
126197562633SYonghong Song 	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
12623b1efb19SDaniel Borkmann 		goto err_out;
1263ea317b26SKaixu Xia 
12643b1efb19SDaniel Borkmann 	ee = bpf_event_entry_gen(perf_file, map_file);
12653b1efb19SDaniel Borkmann 	if (ee)
12663b1efb19SDaniel Borkmann 		return ee;
12673b1efb19SDaniel Borkmann 	ee = ERR_PTR(-ENOMEM);
12683b1efb19SDaniel Borkmann err_out:
12693b1efb19SDaniel Borkmann 	fput(perf_file);
12703b1efb19SDaniel Borkmann 	return ee;
1271ea317b26SKaixu Xia }
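
/* Typical user-space population pattern (illustrative; map_fd is a
 * hypothetical BPF_MAP_TYPE_PERF_EVENT_ARRAY descriptor): one
 * PERF_COUNT_SW_BPF_OUTPUT event per CPU, so programs can stream
 * records through bpf_perf_event_output():
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_BPF_OUTPUT,
 *		.sample_type = PERF_SAMPLE_RAW,
 *	};
 *	int pfd = syscall(__NR_perf_event_open, &attr,
 *			  -1, cpu, -1, 0);   (pid, cpu, group_fd, flags)
 *
 *	bpf_map_update_elem(map_fd, &cpu, &pfd, BPF_ANY);
 */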
1272ea317b26SKaixu Xia 
127320c20bd1SHou Tao static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
1274ea317b26SKaixu Xia {
127520c20bd1SHou Tao 	/* the bpf_event_entry is freed after one RCU grace period */
12763b1efb19SDaniel Borkmann 	bpf_event_entry_free_rcu(ptr);
12773b1efb19SDaniel Borkmann }
12783b1efb19SDaniel Borkmann 
12793b1efb19SDaniel Borkmann static void perf_event_fd_array_release(struct bpf_map *map,
12803b1efb19SDaniel Borkmann 					struct file *map_file)
12813b1efb19SDaniel Borkmann {
12823b1efb19SDaniel Borkmann 	struct bpf_array *array = container_of(map, struct bpf_array, map);
12833b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
12843b1efb19SDaniel Borkmann 	int i;
12853b1efb19SDaniel Borkmann 
1286792cacccSSong Liu 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1287792cacccSSong Liu 		return;
1288792cacccSSong Liu 
12893b1efb19SDaniel Borkmann 	rcu_read_lock();
12903b1efb19SDaniel Borkmann 	for (i = 0; i < array->map.max_entries; i++) {
12913b1efb19SDaniel Borkmann 		ee = READ_ONCE(array->ptrs[i]);
12923b1efb19SDaniel Borkmann 		if (ee && ee->map_file == map_file)
129379d93b3cSHou Tao 			__fd_array_map_delete_elem(map, &i, true);
12943b1efb19SDaniel Borkmann 	}
12953b1efb19SDaniel Borkmann 	rcu_read_unlock();
1296ea317b26SKaixu Xia }
1297ea317b26SKaixu Xia 
1298792cacccSSong Liu static void perf_event_fd_array_map_free(struct bpf_map *map)
1299792cacccSSong Liu {
1300792cacccSSong Liu 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
130179d93b3cSHou Tao 		bpf_fd_array_map_clear(map, false);
1302792cacccSSong Liu 	fd_array_map_free(map);
1303792cacccSSong Liu }
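
/* Illustration (assumes libbpf >= 0.7): creating the array with
 * BPF_F_PRESERVE_ELEMS keeps the perf entries across the release of
 * the original map file, so the path above only clears them once the
 * map itself is freed:
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .map_flags = BPF_F_PRESERVE_ELEMS);
 *	int fd = bpf_map_create(BPF_MAP_TYPE_PERF_EVENT_ARRAY, NULL,
 *				sizeof(int), sizeof(int), 64, &opts);
 */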
1304792cacccSSong Liu 
130540077e0cSJohannes Berg const struct bpf_map_ops perf_event_array_map_ops = {
1306f4d05259SMartin KaFai Lau 	.map_meta_equal = bpf_map_meta_equal,
1307ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
1308ad46061fSJakub Kicinski 	.map_alloc = array_map_alloc,
1309792cacccSSong Liu 	.map_free = perf_event_fd_array_map_free,
1310ea317b26SKaixu Xia 	.map_get_next_key = array_map_get_next_key,
1311ea317b26SKaixu Xia 	.map_lookup_elem = fd_array_map_lookup_elem,
1312ea317b26SKaixu Xia 	.map_delete_elem = fd_array_map_delete_elem,
1313ea317b26SKaixu Xia 	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1314ea317b26SKaixu Xia 	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
13153b1efb19SDaniel Borkmann 	.map_release = perf_event_fd_array_release,
1316e8d2bec0SDaniel Borkmann 	.map_check_btf = map_check_no_btf,
13171746d055SYafang Shao 	.map_mem_usage = array_map_mem_usage,
1318c317ab71SMenglong Dong 	.map_btf_id = &array_map_btf_ids[0],
1319ea317b26SKaixu Xia };
1320ea317b26SKaixu Xia 
132160d20f91SSargun Dhillon #ifdef CONFIG_CGROUPS
13224ed8ec52SMartin KaFai Lau static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
13234ed8ec52SMartin KaFai Lau 				     struct file *map_file /* not used */,
13244ed8ec52SMartin KaFai Lau 				     int fd)
13254ed8ec52SMartin KaFai Lau {
13264ed8ec52SMartin KaFai Lau 	return cgroup_get_from_fd(fd);
13274ed8ec52SMartin KaFai Lau }
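
/* Illustration (hypothetical cgroup path and fds): user space fills a
 * BPF_MAP_TYPE_CGROUP_ARRAY with cgroup fds, which programs can then
 * test against, e.g. via bpf_current_task_under_cgroup():
 *
 *	int cg_fd = open("/sys/fs/cgroup/my.slice", O_RDONLY);
 *	__u32 idx = 0;
 *
 *	bpf_map_update_elem(map_fd, &idx, &cg_fd, BPF_ANY);
 */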
13284ed8ec52SMartin KaFai Lau 
132920c20bd1SHou Tao static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
13304ed8ec52SMartin KaFai Lau {
13314ed8ec52SMartin KaFai Lau 	/* cgroup_put() frees the cgrp after an RCU grace period */
13324ed8ec52SMartin KaFai Lau 	cgroup_put(ptr);
13334ed8ec52SMartin KaFai Lau }
13344ed8ec52SMartin KaFai Lau 
13354ed8ec52SMartin KaFai Lau static void cgroup_fd_array_free(struct bpf_map *map)
13364ed8ec52SMartin KaFai Lau {
133779d93b3cSHou Tao 	bpf_fd_array_map_clear(map, false);
13384ed8ec52SMartin KaFai Lau 	fd_array_map_free(map);
13394ed8ec52SMartin KaFai Lau }
13404ed8ec52SMartin KaFai Lau 
134140077e0cSJohannes Berg const struct bpf_map_ops cgroup_array_map_ops = {
1342f4d05259SMartin KaFai Lau 	.map_meta_equal = bpf_map_meta_equal,
1343ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
1344ad46061fSJakub Kicinski 	.map_alloc = array_map_alloc,
13454ed8ec52SMartin KaFai Lau 	.map_free = cgroup_fd_array_free,
13464ed8ec52SMartin KaFai Lau 	.map_get_next_key = array_map_get_next_key,
13474ed8ec52SMartin KaFai Lau 	.map_lookup_elem = fd_array_map_lookup_elem,
13484ed8ec52SMartin KaFai Lau 	.map_delete_elem = fd_array_map_delete_elem,
13494ed8ec52SMartin KaFai Lau 	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
13504ed8ec52SMartin KaFai Lau 	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
1351e8d2bec0SDaniel Borkmann 	.map_check_btf = map_check_no_btf,
13521746d055SYafang Shao 	.map_mem_usage = array_map_mem_usage,
1353c317ab71SMenglong Dong 	.map_btf_id = &array_map_btf_ids[0],
13544ed8ec52SMartin KaFai Lau };
13554ed8ec52SMartin KaFai Lau #endif
135656f668dfSMartin KaFai Lau 
135756f668dfSMartin KaFai Lau static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
135856f668dfSMartin KaFai Lau {
135956f668dfSMartin KaFai Lau 	struct bpf_map *map, *inner_map_meta;
136056f668dfSMartin KaFai Lau 
136156f668dfSMartin KaFai Lau 	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
136256f668dfSMartin KaFai Lau 	if (IS_ERR(inner_map_meta))
136356f668dfSMartin KaFai Lau 		return inner_map_meta;
136456f668dfSMartin KaFai Lau 
1365ad46061fSJakub Kicinski 	map = array_map_alloc(attr);
136656f668dfSMartin KaFai Lau 	if (IS_ERR(map)) {
136756f668dfSMartin KaFai Lau 		bpf_map_meta_free(inner_map_meta);
136856f668dfSMartin KaFai Lau 		return map;
136956f668dfSMartin KaFai Lau 	}
137056f668dfSMartin KaFai Lau 
137156f668dfSMartin KaFai Lau 	map->inner_map_meta = inner_map_meta;
137256f668dfSMartin KaFai Lau 
137356f668dfSMartin KaFai Lau 	return map;
137456f668dfSMartin KaFai Lau }
137556f668dfSMartin KaFai Lau 
137656f668dfSMartin KaFai Lau static void array_of_map_free(struct bpf_map *map)
137756f668dfSMartin KaFai Lau {
137856f668dfSMartin KaFai Lau 	/* map->inner_map_meta is only accessed from the syscall path,
137956f668dfSMartin KaFai Lau 	 * which is protected by fdget/fdput.
138056f668dfSMartin KaFai Lau 	 */
138156f668dfSMartin KaFai Lau 	bpf_map_meta_free(map->inner_map_meta);
138279d93b3cSHou Tao 	bpf_fd_array_map_clear(map, false);
138356f668dfSMartin KaFai Lau 	fd_array_map_free(map);
138456f668dfSMartin KaFai Lau }
138556f668dfSMartin KaFai Lau 
138656f668dfSMartin KaFai Lau static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
138756f668dfSMartin KaFai Lau {
138856f668dfSMartin KaFai Lau 	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
138956f668dfSMartin KaFai Lau 
139056f668dfSMartin KaFai Lau 	if (!inner_map)
139156f668dfSMartin KaFai Lau 		return NULL;
139256f668dfSMartin KaFai Lau 
139356f668dfSMartin KaFai Lau 	return READ_ONCE(*inner_map);
139456f668dfSMartin KaFai Lau }
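
/* The two-level lookup as seen from a BPF program (sketch; outer_map
 * is a hypothetical BPF_MAP_TYPE_ARRAY_OF_MAPS with __u64 values in
 * its inner arrays):
 *
 *	__u32 okey = 0, ikey = 0;
 *	void *inner = bpf_map_lookup_elem(&outer_map, &okey);
 *
 *	if (inner) {
 *		__u64 *val = bpf_map_lookup_elem(inner, &ikey);
 *		...
 *	}
 */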
139556f668dfSMartin KaFai Lau 
13964a8f87e6SDaniel Borkmann static int array_of_map_gen_lookup(struct bpf_map *map,
13977b0c2a05SDaniel Borkmann 				   struct bpf_insn *insn_buf)
13987b0c2a05SDaniel Borkmann {
1399b2157399SAlexei Starovoitov 	struct bpf_array *array = container_of(map, struct bpf_array, map);
1400d937bc34SAndrii Nakryiko 	u32 elem_size = array->elem_size;
14017b0c2a05SDaniel Borkmann 	struct bpf_insn *insn = insn_buf;
14027b0c2a05SDaniel Borkmann 	const int ret = BPF_REG_0;
14037b0c2a05SDaniel Borkmann 	const int map_ptr = BPF_REG_1;
14047b0c2a05SDaniel Borkmann 	const int index = BPF_REG_2;
14057b0c2a05SDaniel Borkmann 
14067b0c2a05SDaniel Borkmann 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
14077b0c2a05SDaniel Borkmann 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
14082c78ee89SAlexei Starovoitov 	if (!map->bypass_spec_v1) {
1409b2157399SAlexei Starovoitov 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1410b2157399SAlexei Starovoitov 		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1411b2157399SAlexei Starovoitov 	} else {
14127b0c2a05SDaniel Borkmann 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1413b2157399SAlexei Starovoitov 	}
14147b0c2a05SDaniel Borkmann 	if (is_power_of_2(elem_size))
14157b0c2a05SDaniel Borkmann 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
14167b0c2a05SDaniel Borkmann 	else
14177b0c2a05SDaniel Borkmann 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
14187b0c2a05SDaniel Borkmann 	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
14197b0c2a05SDaniel Borkmann 	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
14207b0c2a05SDaniel Borkmann 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
14217b0c2a05SDaniel Borkmann 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
14227b0c2a05SDaniel Borkmann 	*insn++ = BPF_MOV64_IMM(ret, 0);
14237b0c2a05SDaniel Borkmann 
14247b0c2a05SDaniel Borkmann 	return insn - insn_buf;
14257b0c2a05SDaniel Borkmann }
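
/* Rough C equivalent of the instruction sequence emitted above (a
 * gloss, not the generated code):
 *
 *	idx = *(u32 *)key;
 *	if (idx >= map->max_entries)
 *		return NULL;
 *	idx &= array->index_mask;	(only when !bypass_spec_v1)
 *	return *(void **)(array->value + idx * elem_size);
 */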
14267b0c2a05SDaniel Borkmann 
142740077e0cSJohannes Berg const struct bpf_map_ops array_of_maps_map_ops = {
1428ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
142956f668dfSMartin KaFai Lau 	.map_alloc = array_of_map_alloc,
143056f668dfSMartin KaFai Lau 	.map_free = array_of_map_free,
143156f668dfSMartin KaFai Lau 	.map_get_next_key = array_map_get_next_key,
143256f668dfSMartin KaFai Lau 	.map_lookup_elem = array_of_map_lookup_elem,
143356f668dfSMartin KaFai Lau 	.map_delete_elem = fd_array_map_delete_elem,
143456f668dfSMartin KaFai Lau 	.map_fd_get_ptr = bpf_map_fd_get_ptr,
143556f668dfSMartin KaFai Lau 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
143614dc6f04SMartin KaFai Lau 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
14377b0c2a05SDaniel Borkmann 	.map_gen_lookup = array_of_map_gen_lookup,
14389263dddcSTakshak Chahande 	.map_lookup_batch = generic_map_lookup_batch,
14399263dddcSTakshak Chahande 	.map_update_batch = generic_map_update_batch,
1440e8d2bec0SDaniel Borkmann 	.map_check_btf = map_check_no_btf,
14411746d055SYafang Shao 	.map_mem_usage = array_map_mem_usage,
1442c317ab71SMenglong Dong 	.map_btf_id = &array_map_btf_ids[0],
144356f668dfSMartin KaFai Lau };
1444