/* xref: /linux-6.15/kernel/bpf/arraymap.c (revision d937bc34) */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
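
/* A minimal userspace sketch (assuming libbpf's bpf_map_create()) of creating
 * an array map; the attribute values correspond one-to-one to the checks
 * above (key_size must be 4, value_size and max_entries non-zero). All names
 * here are illustrative:
 *
 *	#include <bpf/bpf.h>
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "example_arr",
 *				sizeof(__u32),	// key_size, must be 4
 *				sizeof(long),	// value_size
 *				256,		// max_entries
 *				NULL);		// opts: flags, numa_node, ...
 *	// fd < 0 with errno EINVAL corresponds to the -EINVAL paths above
 */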

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * the uppermost bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
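	/* Worked example: for attr->max_entries == 5,
	 * fls_long(5 - 1) == 3, so mask64 == (1ULL << 3) - 1 == 7 and
	 * index_mask == 7; with the Spectre v1 mitigation active below,
	 * max_entries is rounded up to index_mask + 1 == 8.
	 */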
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

static void *array_map_elem_ptr(struct bpf_array *array, u32 index)
{
	return array->value + (u64)array->elem_size * index;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + (u64)array->elem_size * (index & array->index_mask);
}
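
/* A BPF-program-side sketch of this lookup (assuming libbpf's BTF-defined
 * map syntax; the map name is illustrative):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 256);
 *		__type(key, __u32);
 *		__type(value, long);
 *	} example_arr SEC(".maps");
 *
 *	__u32 key = 42;
 *	long *val = bpf_map_lookup_elem(&example_arr, &key);
 *	if (val)	// NULL when key >= max_entries, as above
 *		__sync_fetch_and_add(val, 1);
 */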

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}
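
/* These two callbacks implement direct value access, which is what backs BPF
 * global variables: libbpf places globals into single-entry array maps
 * (.data/.bss/.rodata) and the verifier rewrites loads/stores to use the
 * address resolved above. A hedged BPF C illustration:
 *
 *	long counter = 0;	// one element of a max_entries == 1 array map
 *
 *	SEC("tracepoint/syscalls/sys_enter_getpid")
 *	int count(void *ctx)
 *	{
 *		__sync_fetch_and_add(&counter, 1);	// no map lookup emitted
 *		return 0;
 *	}
 */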

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = array->elem_size;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
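
/* Roughly, with R1 holding the map pointer and R2 a pointer to the u32 key,
 * the instruction sequence emitted above inlines:
 *
 *	index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;	// only when !bypass_spec_v1
 *	return &array->value[0] + index * elem_size;	// LSH when elem_size is 2^n
 */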

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
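
/* Userspace sketch of the syscall path into bpf_percpu_array_copy() (assuming
 * libbpf); the value buffer must provide round_up(value_size, 8) bytes per
 * possible CPU:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u32 key = 0;
 *	__u64 *vals = calloc(ncpus, sizeof(__u64));	// value_size <= 8 here
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, vals))
 *		for (int cpu = 0; cpu < ncpus; cpu++)
 *			printf("cpu%d: %llu\n", cpu, vals[cpu]);
 */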

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
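
/* Userspace iteration sketch built on the semantics above: a NULL (or
 * out-of-range) key restarts at index 0, and -ENOENT marks the end:
 *
 *	__u32 key, next;
 *	long value;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
 *	     key = next, err = bpf_map_get_next_key(map_fd, &key, &next)) {
 *		bpf_map_lookup_elem(map_fd, &next, &value);
 *		// next walks 0, 1, ..., max_entries - 1
 *	}
 */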

static void check_and_free_fields(struct bpf_array *arr, void *val)
{
	if (map_value_has_timer(&arr->map))
		bpf_timer_cancel_and_free(val + arr->map.timer_off);
	if (map_value_has_kptrs(&arr->map))
		bpf_map_free_kptrs(&arr->map, val);
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			(u64)array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		check_and_free_fields(array, val);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into the per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks are possible
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free kptr on uref dropping to zero. */
	if (!map_value_has_timer(map))
		return;

	for (i = 0; i < array->map.max_entries; i++)
		bpf_timer_cancel_and_free(array_map_elem_ptr(array, i) + map->timer_off);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (map_value_has_kptrs(map)) {
		for (i = 0; i < array->map.max_entries; i++)
			bpf_map_free_kptrs(map, array_map_elem_ptr(array, i));
		bpf_map_free_kptr_off_tab(map);
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}
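
/* The seq_show callbacks back text reads of maps pinned in bpffs; a hedged
 * example of the resulting output for a u32 -> long array pinned at an
 * illustrative path:
 *
 *	# cat /sys/fs/bpf/example_arr
 *	0: 17
 *	1: 0
 *	...
 */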

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
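
/* Userspace sketch of consuming BPF_F_MMAPABLE (assuming a map created with
 * that flag and value_size == 8): the mapping starts at array->value, so
 * element i sits at byte offset i * round_up(value_size, 8):
 *
 *	size_t len = max_entries * sizeof(__u64);
 *	__u64 *vals = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, map_fd, 0);
 *	if (vals != MAP_FAILED)
 *		vals[3] = 42;	// direct access, no bpf() syscall per element
 */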

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = array->elem_size;
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(info->percpu_value_buf + off,
						per_cpu_ptr(pptr, cpu),
						size);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = array->elem_size * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start	= bpf_array_map_seq_start,
	.next	= bpf_array_map_seq_next,
	.stop	= bpf_array_map_seq_stop,
	.show	= bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_array_map_seq_ops,
	.init_seq_private	= bpf_iter_init_array_map,
	.fini_seq_private	= bpf_iter_fini_array_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
};

static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				   void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array_map_elem_ptr(array, i);
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}
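
/* BPF-program-side sketch of driving bpf_for_each_array_elem() through the
 * bpf_for_each_map_elem() helper (map name from the earlier sketch; all
 * names illustrative):
 *
 *	static long sum_cb(struct bpf_map *map, __u32 *key, long *val, void *ctx)
 *	{
 *		*(long *)ctx += *val;
 *		return 0;	// 0 - continue, 1 - stop, as documented above
 *	}
 *
 *	long sum = 0;
 *	bpf_for_each_map_elem(&example_arr, sum_cb, &sum, 0);
 */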

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}
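
/* Userspace sketch of populating a prog array for tail calls through the
 * update path above; only BPF_ANY is accepted, matching the check there
 * (names illustrative):
 *
 *	__u32 idx = 0;
 *	int prog_fd = bpf_program__fd(prog);	// libbpf program handle
 *
 *	err = bpf_map_update_elem(jmp_table_fd, &idx, &prog_fd, BPF_ANY);
 *	// BPF side then reaches it via: bpf_tail_call(ctx, &jmp_table, 0);
 */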
83904fd61abSAlexei Starovoitov 
8402a36f0b9SWang Nan static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
84104fd61abSAlexei Starovoitov {
84204fd61abSAlexei Starovoitov 	struct bpf_array *array = container_of(map, struct bpf_array, map);
8432a36f0b9SWang Nan 	void *old_ptr;
84404fd61abSAlexei Starovoitov 	u32 index = *(u32 *)key;
84504fd61abSAlexei Starovoitov 
84604fd61abSAlexei Starovoitov 	if (index >= array->map.max_entries)
84704fd61abSAlexei Starovoitov 		return -E2BIG;
84804fd61abSAlexei Starovoitov 
849da765a2fSDaniel Borkmann 	if (map->ops->map_poke_run) {
850da765a2fSDaniel Borkmann 		mutex_lock(&array->aux->poke_mutex);
8512a36f0b9SWang Nan 		old_ptr = xchg(array->ptrs + index, NULL);
852da765a2fSDaniel Borkmann 		map->ops->map_poke_run(map, index, old_ptr, NULL);
853da765a2fSDaniel Borkmann 		mutex_unlock(&array->aux->poke_mutex);
854da765a2fSDaniel Borkmann 	} else {
855da765a2fSDaniel Borkmann 		old_ptr = xchg(array->ptrs + index, NULL);
856da765a2fSDaniel Borkmann 	}
857da765a2fSDaniel Borkmann 
8582a36f0b9SWang Nan 	if (old_ptr) {
8592a36f0b9SWang Nan 		map->ops->map_fd_put_ptr(old_ptr);
86004fd61abSAlexei Starovoitov 		return 0;
86104fd61abSAlexei Starovoitov 	} else {
86204fd61abSAlexei Starovoitov 		return -ENOENT;
86304fd61abSAlexei Starovoitov 	}
86404fd61abSAlexei Starovoitov }
86504fd61abSAlexei Starovoitov 
866d056a788SDaniel Borkmann static void *prog_fd_array_get_ptr(struct bpf_map *map,
867d056a788SDaniel Borkmann 				   struct file *map_file, int fd)
8682a36f0b9SWang Nan {
8692a36f0b9SWang Nan 	struct bpf_prog *prog = bpf_prog_get(fd);
870d056a788SDaniel Borkmann 
8712a36f0b9SWang Nan 	if (IS_ERR(prog))
8722a36f0b9SWang Nan 		return prog;
8732a36f0b9SWang Nan 
874f45d5b6cSToke Hoiland-Jorgensen 	if (!bpf_prog_map_compatible(map, prog)) {
8752a36f0b9SWang Nan 		bpf_prog_put(prog);
8762a36f0b9SWang Nan 		return ERR_PTR(-EINVAL);
8772a36f0b9SWang Nan 	}
878d056a788SDaniel Borkmann 
8792a36f0b9SWang Nan 	return prog;
8802a36f0b9SWang Nan }
8812a36f0b9SWang Nan 
8822a36f0b9SWang Nan static void prog_fd_array_put_ptr(void *ptr)
8832a36f0b9SWang Nan {
8841aacde3dSDaniel Borkmann 	bpf_prog_put(ptr);
8852a36f0b9SWang Nan }
8862a36f0b9SWang Nan 
88714dc6f04SMartin KaFai Lau static u32 prog_fd_array_sys_lookup_elem(void *ptr)
88814dc6f04SMartin KaFai Lau {
88914dc6f04SMartin KaFai Lau 	return ((struct bpf_prog *)ptr)->aux->id;
89014dc6f04SMartin KaFai Lau }
89114dc6f04SMartin KaFai Lau 
89204fd61abSAlexei Starovoitov /* decrement refcnt of all bpf_progs that are stored in this map */
893ba6b8de4SJohn Fastabend static void bpf_fd_array_map_clear(struct bpf_map *map)
89404fd61abSAlexei Starovoitov {
89504fd61abSAlexei Starovoitov 	struct bpf_array *array = container_of(map, struct bpf_array, map);
89604fd61abSAlexei Starovoitov 	int i;
89704fd61abSAlexei Starovoitov 
89804fd61abSAlexei Starovoitov 	for (i = 0; i < array->map.max_entries; i++)
8992a36f0b9SWang Nan 		fd_array_map_delete_elem(map, &i);
90004fd61abSAlexei Starovoitov }
90104fd61abSAlexei Starovoitov 
902a7c19db3SYonghong Song static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
903a7c19db3SYonghong Song 					 struct seq_file *m)
904a7c19db3SYonghong Song {
905a7c19db3SYonghong Song 	void **elem, *ptr;
906a7c19db3SYonghong Song 	u32 prog_id;
907a7c19db3SYonghong Song 
908a7c19db3SYonghong Song 	rcu_read_lock();
909a7c19db3SYonghong Song 
910a7c19db3SYonghong Song 	elem = array_map_lookup_elem(map, key);
911a7c19db3SYonghong Song 	if (elem) {
912a7c19db3SYonghong Song 		ptr = READ_ONCE(*elem);
913a7c19db3SYonghong Song 		if (ptr) {
914a7c19db3SYonghong Song 			seq_printf(m, "%u: ", *(u32 *)key);
915a7c19db3SYonghong Song 			prog_id = prog_fd_array_sys_lookup_elem(ptr);
916a7c19db3SYonghong Song 			btf_type_seq_show(map->btf, map->btf_value_type_id,
917a7c19db3SYonghong Song 					  &prog_id, m);
918a7c19db3SYonghong Song 			seq_puts(m, "\n");
919a7c19db3SYonghong Song 		}
920a7c19db3SYonghong Song 	}
921a7c19db3SYonghong Song 
922a7c19db3SYonghong Song 	rcu_read_unlock();
923a7c19db3SYonghong Song }
924a7c19db3SYonghong Song 
925da765a2fSDaniel Borkmann struct prog_poke_elem {
926da765a2fSDaniel Borkmann 	struct list_head list;
927da765a2fSDaniel Borkmann 	struct bpf_prog_aux *aux;
928da765a2fSDaniel Borkmann };
929da765a2fSDaniel Borkmann 
930da765a2fSDaniel Borkmann static int prog_array_map_poke_track(struct bpf_map *map,
931da765a2fSDaniel Borkmann 				     struct bpf_prog_aux *prog_aux)
932da765a2fSDaniel Borkmann {
933da765a2fSDaniel Borkmann 	struct prog_poke_elem *elem;
934da765a2fSDaniel Borkmann 	struct bpf_array_aux *aux;
935da765a2fSDaniel Borkmann 	int ret = 0;
936da765a2fSDaniel Borkmann 
937da765a2fSDaniel Borkmann 	aux = container_of(map, struct bpf_array, map)->aux;
938da765a2fSDaniel Borkmann 	mutex_lock(&aux->poke_mutex);
939da765a2fSDaniel Borkmann 	list_for_each_entry(elem, &aux->poke_progs, list) {
940da765a2fSDaniel Borkmann 		if (elem->aux == prog_aux)
941da765a2fSDaniel Borkmann 			goto out;
942da765a2fSDaniel Borkmann 	}
943da765a2fSDaniel Borkmann 
944da765a2fSDaniel Borkmann 	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
945da765a2fSDaniel Borkmann 	if (!elem) {
946da765a2fSDaniel Borkmann 		ret = -ENOMEM;
947da765a2fSDaniel Borkmann 		goto out;
948da765a2fSDaniel Borkmann 	}
949da765a2fSDaniel Borkmann 
950da765a2fSDaniel Borkmann 	INIT_LIST_HEAD(&elem->list);
951da765a2fSDaniel Borkmann 	/* We must track the program's aux info at this point in time
952da765a2fSDaniel Borkmann 	 * since the program pointer itself may not be stable yet, see
953da765a2fSDaniel Borkmann 	 * also comment in prog_array_map_poke_run().
954da765a2fSDaniel Borkmann 	 */
955da765a2fSDaniel Borkmann 	elem->aux = prog_aux;
956da765a2fSDaniel Borkmann 
957da765a2fSDaniel Borkmann 	list_add_tail(&elem->list, &aux->poke_progs);
958da765a2fSDaniel Borkmann out:
959da765a2fSDaniel Borkmann 	mutex_unlock(&aux->poke_mutex);
960da765a2fSDaniel Borkmann 	return ret;
961da765a2fSDaniel Borkmann }
962da765a2fSDaniel Borkmann 
963da765a2fSDaniel Borkmann static void prog_array_map_poke_untrack(struct bpf_map *map,
964da765a2fSDaniel Borkmann 					struct bpf_prog_aux *prog_aux)
965da765a2fSDaniel Borkmann {
966da765a2fSDaniel Borkmann 	struct prog_poke_elem *elem, *tmp;
967da765a2fSDaniel Borkmann 	struct bpf_array_aux *aux;
968da765a2fSDaniel Borkmann 
969da765a2fSDaniel Borkmann 	aux = container_of(map, struct bpf_array, map)->aux;
970da765a2fSDaniel Borkmann 	mutex_lock(&aux->poke_mutex);
971da765a2fSDaniel Borkmann 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
972da765a2fSDaniel Borkmann 		if (elem->aux == prog_aux) {
973da765a2fSDaniel Borkmann 			list_del_init(&elem->list);
974da765a2fSDaniel Borkmann 			kfree(elem);
975da765a2fSDaniel Borkmann 			break;
976da765a2fSDaniel Borkmann 		}
977da765a2fSDaniel Borkmann 	}
978da765a2fSDaniel Borkmann 	mutex_unlock(&aux->poke_mutex);
979da765a2fSDaniel Borkmann }
980da765a2fSDaniel Borkmann 
981da765a2fSDaniel Borkmann static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
982da765a2fSDaniel Borkmann 				    struct bpf_prog *old,
983da765a2fSDaniel Borkmann 				    struct bpf_prog *new)
984da765a2fSDaniel Borkmann {
985ebf7d1f5SMaciej Fijalkowski 	u8 *old_addr, *new_addr, *old_bypass_addr;
986da765a2fSDaniel Borkmann 	struct prog_poke_elem *elem;
987da765a2fSDaniel Borkmann 	struct bpf_array_aux *aux;
988da765a2fSDaniel Borkmann 
989da765a2fSDaniel Borkmann 	aux = container_of(map, struct bpf_array, map)->aux;
990da765a2fSDaniel Borkmann 	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
991da765a2fSDaniel Borkmann 
992da765a2fSDaniel Borkmann 	list_for_each_entry(elem, &aux->poke_progs, list) {
993da765a2fSDaniel Borkmann 		struct bpf_jit_poke_descriptor *poke;
994da765a2fSDaniel Borkmann 		int i, ret;
995da765a2fSDaniel Borkmann 
996da765a2fSDaniel Borkmann 		for (i = 0; i < elem->aux->size_poke_tab; i++) {
997da765a2fSDaniel Borkmann 			poke = &elem->aux->poke_tab[i];
998da765a2fSDaniel Borkmann 
999da765a2fSDaniel Borkmann 			/* Few things to be aware of:
1000da765a2fSDaniel Borkmann 			 *
1001da765a2fSDaniel Borkmann 			 * 1) We can only ever access aux in this context, but
1002da765a2fSDaniel Borkmann 			 *    not aux->prog since it might not be stable yet and
1003da765a2fSDaniel Borkmann 			 *    there could be danger of use after free otherwise.
1004da765a2fSDaniel Borkmann 			 * 2) Initially when we start tracking aux, the program
1005da765a2fSDaniel Borkmann 			 *    is not JITed yet and also does not have a kallsyms
1006cf71b174SMaciej Fijalkowski 			 *    entry. We skip these as poke->tailcall_target_stable
1007cf71b174SMaciej Fijalkowski 			 *    is not active yet. The JIT will do the final fixup
1008cf71b174SMaciej Fijalkowski 			 *    before setting it stable. The various
1009cf71b174SMaciej Fijalkowski 			 *    poke->tailcall_target_stable are successively
1010cf71b174SMaciej Fijalkowski 			 *    activated, so tail call updates can arrive from here
1011cf71b174SMaciej Fijalkowski 			 *    while JIT is still finishing its final fixup for
1012cf71b174SMaciej Fijalkowski 			 *    non-activated poke entries.
1013da765a2fSDaniel Borkmann 			 * 3) On program teardown, the program's kallsym entry gets
1014da765a2fSDaniel Borkmann 			 *    removed out of RCU callback, but we can only untrack
1015da765a2fSDaniel Borkmann 			 *    from sleepable context, therefore bpf_arch_text_poke()
1016da765a2fSDaniel Borkmann 			 *    might not see that this is in BPF text section and
1017da765a2fSDaniel Borkmann 			 *    bails out with -EINVAL. As these are unreachable since
1018da765a2fSDaniel Borkmann 			 *    RCU grace period already passed, we simply skip them.
1019da765a2fSDaniel Borkmann 			 * 4) Also programs reaching refcount of zero while patching
1020da765a2fSDaniel Borkmann 			 *    is in progress is okay since we're protected under
1021da765a2fSDaniel Borkmann 			 *    poke_mutex and untrack the programs before the JIT
1022da765a2fSDaniel Borkmann 			 *    buffer is freed. When we're still in the middle of
1023da765a2fSDaniel Borkmann 			 *    patching and suddenly kallsyms entry of the program
1024da765a2fSDaniel Borkmann 			 *    gets evicted, we just skip the rest which is fine due
1025da765a2fSDaniel Borkmann 			 *    to point 3).
1026da765a2fSDaniel Borkmann 			 * 5) Any other error happening below from bpf_arch_text_poke()
1027da765a2fSDaniel Borkmann 			 *    is a unexpected bug.
1028da765a2fSDaniel Borkmann 			 */
1029cf71b174SMaciej Fijalkowski 			if (!READ_ONCE(poke->tailcall_target_stable))
1030da765a2fSDaniel Borkmann 				continue;
1031da765a2fSDaniel Borkmann 			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
1032da765a2fSDaniel Borkmann 				continue;
1033da765a2fSDaniel Borkmann 			if (poke->tail_call.map != map ||
1034da765a2fSDaniel Borkmann 			    poke->tail_call.key != key)
1035da765a2fSDaniel Borkmann 				continue;
1036da765a2fSDaniel Borkmann 
1037ebf7d1f5SMaciej Fijalkowski 			old_bypass_addr = old ? NULL : poke->bypass_addr;
1038ebf7d1f5SMaciej Fijalkowski 			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
1039ebf7d1f5SMaciej Fijalkowski 			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
1040ebf7d1f5SMaciej Fijalkowski 
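			/* Illustrative sketch (the exact layout is arch-specific)
			 * of the two poke sites the JIT leaves behind for each
			 * tail call site:
			 *
			 *	tailcall_bypass: jmp <skip>  ; active while target is NULL
			 *	tailcall_target: jmp <prog>  ; patched to the map[key] program
			 *	<skip>:          ...         ; tail call falls through
			 *
			 * Installing a program patches tailcall_target first and
			 * then nops out the bypass; removal re-installs the bypass
			 * jump, waits an RCU grace period, and only then clears
			 * tailcall_target.
			 */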
1041ebf7d1f5SMaciej Fijalkowski 			if (new) {
1042ebf7d1f5SMaciej Fijalkowski 				ret = bpf_arch_text_poke(poke->tailcall_target,
1043ebf7d1f5SMaciej Fijalkowski 							 BPF_MOD_JUMP,
1044ebf7d1f5SMaciej Fijalkowski 							 old_addr, new_addr);
1045da765a2fSDaniel Borkmann 				BUG_ON(ret < 0 && ret != -EINVAL);
1046ebf7d1f5SMaciej Fijalkowski 				if (!old) {
1047ebf7d1f5SMaciej Fijalkowski 					ret = bpf_arch_text_poke(poke->tailcall_bypass,
1048ebf7d1f5SMaciej Fijalkowski 								 BPF_MOD_JUMP,
1049ebf7d1f5SMaciej Fijalkowski 								 poke->bypass_addr,
1050ebf7d1f5SMaciej Fijalkowski 								 NULL);
1051ebf7d1f5SMaciej Fijalkowski 					BUG_ON(ret < 0 && ret != -EINVAL);
1052ebf7d1f5SMaciej Fijalkowski 				}
1053ebf7d1f5SMaciej Fijalkowski 			} else {
1054ebf7d1f5SMaciej Fijalkowski 				ret = bpf_arch_text_poke(poke->tailcall_bypass,
1055ebf7d1f5SMaciej Fijalkowski 							 BPF_MOD_JUMP,
1056ebf7d1f5SMaciej Fijalkowski 							 old_bypass_addr,
1057ebf7d1f5SMaciej Fijalkowski 							 poke->bypass_addr);
1058ebf7d1f5SMaciej Fijalkowski 				BUG_ON(ret < 0 && ret != -EINVAL);
1059ebf7d1f5SMaciej Fijalkowski 				/* Let other CPUs finish executing the program
1060ebf7d1f5SMaciej Fijalkowski 				 * so that they cannot be exposed to an invalid
1061ebf7d1f5SMaciej Fijalkowski 				 * nop, stack unwind or nop state.
1062ebf7d1f5SMaciej Fijalkowski 				 */
1063ebf7d1f5SMaciej Fijalkowski 				if (!ret)
1064ebf7d1f5SMaciej Fijalkowski 					synchronize_rcu();
1065ebf7d1f5SMaciej Fijalkowski 				ret = bpf_arch_text_poke(poke->tailcall_target,
1066ebf7d1f5SMaciej Fijalkowski 							 BPF_MOD_JUMP,
1067ebf7d1f5SMaciej Fijalkowski 							 old_addr, NULL);
1068ebf7d1f5SMaciej Fijalkowski 				BUG_ON(ret < 0 && ret != -EINVAL);
1069ebf7d1f5SMaciej Fijalkowski 			}
1070da765a2fSDaniel Borkmann 		}
1071da765a2fSDaniel Borkmann 	}
1072da765a2fSDaniel Borkmann }
1073da765a2fSDaniel Borkmann 
1074da765a2fSDaniel Borkmann static void prog_array_map_clear_deferred(struct work_struct *work)
1075da765a2fSDaniel Borkmann {
1076da765a2fSDaniel Borkmann 	struct bpf_map *map = container_of(work, struct bpf_array_aux,
1077da765a2fSDaniel Borkmann 					   work)->map;
1078da765a2fSDaniel Borkmann 	bpf_fd_array_map_clear(map);
1079da765a2fSDaniel Borkmann 	bpf_map_put(map);
1080da765a2fSDaniel Borkmann }
1081da765a2fSDaniel Borkmann 
1082da765a2fSDaniel Borkmann static void prog_array_map_clear(struct bpf_map *map)
1083da765a2fSDaniel Borkmann {
1084da765a2fSDaniel Borkmann 	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1085da765a2fSDaniel Borkmann 						 map)->aux;
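
	/* Hold a map reference across the deferred work; the matching
	 * bpf_map_put() runs in prog_array_map_clear_deferred().
	 */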
1086da765a2fSDaniel Borkmann 	bpf_map_inc(map);
1087da765a2fSDaniel Borkmann 	schedule_work(&aux->work);
1088da765a2fSDaniel Borkmann }
1089da765a2fSDaniel Borkmann 
10902beee5f5SDaniel Borkmann static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
10912beee5f5SDaniel Borkmann {
10922beee5f5SDaniel Borkmann 	struct bpf_array_aux *aux;
10932beee5f5SDaniel Borkmann 	struct bpf_map *map;
10942beee5f5SDaniel Borkmann 
10956d192c79SRoman Gushchin 	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
10962beee5f5SDaniel Borkmann 	if (!aux)
10972beee5f5SDaniel Borkmann 		return ERR_PTR(-ENOMEM);
10982beee5f5SDaniel Borkmann 
1099da765a2fSDaniel Borkmann 	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1100da765a2fSDaniel Borkmann 	INIT_LIST_HEAD(&aux->poke_progs);
1101da765a2fSDaniel Borkmann 	mutex_init(&aux->poke_mutex);
1102da765a2fSDaniel Borkmann 
11032beee5f5SDaniel Borkmann 	map = array_map_alloc(attr);
11042beee5f5SDaniel Borkmann 	if (IS_ERR(map)) {
11052beee5f5SDaniel Borkmann 		kfree(aux);
11062beee5f5SDaniel Borkmann 		return map;
11072beee5f5SDaniel Borkmann 	}
11082beee5f5SDaniel Borkmann 
11092beee5f5SDaniel Borkmann 	container_of(map, struct bpf_array, map)->aux = aux;
1110da765a2fSDaniel Borkmann 	aux->map = map;
1111da765a2fSDaniel Borkmann 
11122beee5f5SDaniel Borkmann 	return map;
11132beee5f5SDaniel Borkmann }
11142beee5f5SDaniel Borkmann 
11152beee5f5SDaniel Borkmann static void prog_array_map_free(struct bpf_map *map)
11162beee5f5SDaniel Borkmann {
1117da765a2fSDaniel Borkmann 	struct prog_poke_elem *elem, *tmp;
11182beee5f5SDaniel Borkmann 	struct bpf_array_aux *aux;
11192beee5f5SDaniel Borkmann 
11202beee5f5SDaniel Borkmann 	aux = container_of(map, struct bpf_array, map)->aux;
1121da765a2fSDaniel Borkmann 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1122da765a2fSDaniel Borkmann 		list_del_init(&elem->list);
1123da765a2fSDaniel Borkmann 		kfree(elem);
1124da765a2fSDaniel Borkmann 	}
11252beee5f5SDaniel Borkmann 	kfree(aux);
11262beee5f5SDaniel Borkmann 	fd_array_map_free(map);
11272beee5f5SDaniel Borkmann }
11282beee5f5SDaniel Borkmann 
1129f4d05259SMartin KaFai Lau /* prog_array->aux->{type,jited} is a runtime binding.
1130f4d05259SMartin KaFai Lau  * A static check in the verifier alone is not enough.
1131f4d05259SMartin KaFai Lau  * Thus, prog_array_map cannot be used as an inner_map
1132f4d05259SMartin KaFai Lau  * and map_meta_equal is not implemented.
1133f4d05259SMartin KaFai Lau  */
113440077e0cSJohannes Berg const struct bpf_map_ops prog_array_map_ops = {
1135ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
11362beee5f5SDaniel Borkmann 	.map_alloc = prog_array_map_alloc,
11372beee5f5SDaniel Borkmann 	.map_free = prog_array_map_free,
1138da765a2fSDaniel Borkmann 	.map_poke_track = prog_array_map_poke_track,
1139da765a2fSDaniel Borkmann 	.map_poke_untrack = prog_array_map_poke_untrack,
1140da765a2fSDaniel Borkmann 	.map_poke_run = prog_array_map_poke_run,
114104fd61abSAlexei Starovoitov 	.map_get_next_key = array_map_get_next_key,
11422a36f0b9SWang Nan 	.map_lookup_elem = fd_array_map_lookup_elem,
11432a36f0b9SWang Nan 	.map_delete_elem = fd_array_map_delete_elem,
11442a36f0b9SWang Nan 	.map_fd_get_ptr = prog_fd_array_get_ptr,
11452a36f0b9SWang Nan 	.map_fd_put_ptr = prog_fd_array_put_ptr,
114614dc6f04SMartin KaFai Lau 	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1147da765a2fSDaniel Borkmann 	.map_release_uref = prog_array_map_clear,
1148a7c19db3SYonghong Song 	.map_seq_show_elem = prog_array_map_seq_show_elem,
1149c317ab71SMenglong Dong 	.map_btf_id = &array_map_btf_ids[0],
115004fd61abSAlexei Starovoitov };
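
/* Illustrative BPF-side sketch (not part of this file) of how a prog
 * array is typically consumed; names are made up, and it assumes a
 * separate object built with clang -target bpf against libbpf's
 * bpf_helpers.h:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 4);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} jmp_table SEC(".maps");
 *
 *	SEC("xdp")
 *	int dispatcher(struct xdp_md *ctx)
 *	{
 *		bpf_tail_call(ctx, &jmp_table, 0);
 *		return XDP_PASS;	// reached only if slot 0 is empty
 *	}
 */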
115104fd61abSAlexei Starovoitov 
11523b1efb19SDaniel Borkmann static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
11533b1efb19SDaniel Borkmann 						   struct file *map_file)
1154ea317b26SKaixu Xia {
11553b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
11563b1efb19SDaniel Borkmann 
1157858d68f1SDaniel Borkmann 	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
11583b1efb19SDaniel Borkmann 	if (ee) {
11593b1efb19SDaniel Borkmann 		ee->event = perf_file->private_data;
11603b1efb19SDaniel Borkmann 		ee->perf_file = perf_file;
11613b1efb19SDaniel Borkmann 		ee->map_file = map_file;
11623b1efb19SDaniel Borkmann 	}
11633b1efb19SDaniel Borkmann 
11643b1efb19SDaniel Borkmann 	return ee;
11653b1efb19SDaniel Borkmann }
11663b1efb19SDaniel Borkmann 
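/* Lookups return the element pointer under rcu_read_lock(), so the
 * perf file reference may only be dropped after a grace period has
 * elapsed.
 */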
11673b1efb19SDaniel Borkmann static void __bpf_event_entry_free(struct rcu_head *rcu)
11683b1efb19SDaniel Borkmann {
11693b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
11703b1efb19SDaniel Borkmann 
11713b1efb19SDaniel Borkmann 	ee = container_of(rcu, struct bpf_event_entry, rcu);
11723b1efb19SDaniel Borkmann 	fput(ee->perf_file);
11733b1efb19SDaniel Borkmann 	kfree(ee);
11743b1efb19SDaniel Borkmann }
11753b1efb19SDaniel Borkmann 
11763b1efb19SDaniel Borkmann static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
11773b1efb19SDaniel Borkmann {
11783b1efb19SDaniel Borkmann 	call_rcu(&ee->rcu, __bpf_event_entry_free);
1179ea317b26SKaixu Xia }
1180ea317b26SKaixu Xia 
1181d056a788SDaniel Borkmann static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1182d056a788SDaniel Borkmann 					 struct file *map_file, int fd)
1183ea317b26SKaixu Xia {
11843b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
11853b1efb19SDaniel Borkmann 	struct perf_event *event;
11863b1efb19SDaniel Borkmann 	struct file *perf_file;
1187f91840a3SAlexei Starovoitov 	u64 value;
1188ea317b26SKaixu Xia 
11893b1efb19SDaniel Borkmann 	perf_file = perf_event_get(fd);
11903b1efb19SDaniel Borkmann 	if (IS_ERR(perf_file))
11913b1efb19SDaniel Borkmann 		return perf_file;
1192e03e7ee3SAlexei Starovoitov 
1193f91840a3SAlexei Starovoitov 	ee = ERR_PTR(-EOPNOTSUPP);
11943b1efb19SDaniel Borkmann 	event = perf_file->private_data;
119597562633SYonghong Song 	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
11963b1efb19SDaniel Borkmann 		goto err_out;
1197ea317b26SKaixu Xia 
11983b1efb19SDaniel Borkmann 	ee = bpf_event_entry_gen(perf_file, map_file);
11993b1efb19SDaniel Borkmann 	if (ee)
12003b1efb19SDaniel Borkmann 		return ee;
12013b1efb19SDaniel Borkmann 	ee = ERR_PTR(-ENOMEM);
12023b1efb19SDaniel Borkmann err_out:
12033b1efb19SDaniel Borkmann 	fput(perf_file);
12043b1efb19SDaniel Borkmann 	return ee;
1205ea317b26SKaixu Xia }
1206ea317b26SKaixu Xia 
1207ea317b26SKaixu Xia static void perf_event_fd_array_put_ptr(void *ptr)
1208ea317b26SKaixu Xia {
12093b1efb19SDaniel Borkmann 	bpf_event_entry_free_rcu(ptr);
12103b1efb19SDaniel Borkmann }
12113b1efb19SDaniel Borkmann 
12123b1efb19SDaniel Borkmann static void perf_event_fd_array_release(struct bpf_map *map,
12133b1efb19SDaniel Borkmann 					struct file *map_file)
12143b1efb19SDaniel Borkmann {
12153b1efb19SDaniel Borkmann 	struct bpf_array *array = container_of(map, struct bpf_array, map);
12163b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
12173b1efb19SDaniel Borkmann 	int i;
12183b1efb19SDaniel Borkmann 
1219792cacccSSong Liu 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1220792cacccSSong Liu 		return;
1221792cacccSSong Liu 
12223b1efb19SDaniel Borkmann 	rcu_read_lock();
12233b1efb19SDaniel Borkmann 	for (i = 0; i < array->map.max_entries; i++) {
12243b1efb19SDaniel Borkmann 		ee = READ_ONCE(array->ptrs[i]);
12253b1efb19SDaniel Borkmann 		if (ee && ee->map_file == map_file)
12263b1efb19SDaniel Borkmann 			fd_array_map_delete_elem(map, &i);
12273b1efb19SDaniel Borkmann 	}
12283b1efb19SDaniel Borkmann 	rcu_read_unlock();
1229ea317b26SKaixu Xia }
1230ea317b26SKaixu Xia 
1231792cacccSSong Liu static void perf_event_fd_array_map_free(struct bpf_map *map)
1232792cacccSSong Liu {
1233792cacccSSong Liu 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1234792cacccSSong Liu 		bpf_fd_array_map_clear(map);
1235792cacccSSong Liu 	fd_array_map_free(map);
1236792cacccSSong Liu }
1237792cacccSSong Liu 
123840077e0cSJohannes Berg const struct bpf_map_ops perf_event_array_map_ops = {
1239f4d05259SMartin KaFai Lau 	.map_meta_equal = bpf_map_meta_equal,
1240ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
1241ad46061fSJakub Kicinski 	.map_alloc = array_map_alloc,
1242792cacccSSong Liu 	.map_free = perf_event_fd_array_map_free,
1243ea317b26SKaixu Xia 	.map_get_next_key = array_map_get_next_key,
1244ea317b26SKaixu Xia 	.map_lookup_elem = fd_array_map_lookup_elem,
1245ea317b26SKaixu Xia 	.map_delete_elem = fd_array_map_delete_elem,
1246ea317b26SKaixu Xia 	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1247ea317b26SKaixu Xia 	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
12483b1efb19SDaniel Borkmann 	.map_release = perf_event_fd_array_release,
1249e8d2bec0SDaniel Borkmann 	.map_check_btf = map_check_no_btf,
1250c317ab71SMenglong Dong 	.map_btf_id = &array_map_btf_ids[0],
1251ea317b26SKaixu Xia };
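
/* Userspace sketch (illustrative; error handling omitted, map_fd and
 * cpu are assumed to exist in the caller) of wiring a per-CPU perf
 * event into this map so that bpf_perf_event_output() can write to it:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_SOFTWARE,
 *		.config		= PERF_COUNT_SW_BPF_OUTPUT,
 *		.sample_type	= PERF_SAMPLE_RAW,
 *	};
 *	int pfd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 *
 *	bpf_map_update_elem(map_fd, &cpu, &pfd, BPF_ANY);
 */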
1252ea317b26SKaixu Xia 
125360d20f91SSargun Dhillon #ifdef CONFIG_CGROUPS
12544ed8ec52SMartin KaFai Lau static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
12554ed8ec52SMartin KaFai Lau 				     struct file *map_file /* not used */,
12564ed8ec52SMartin KaFai Lau 				     int fd)
12574ed8ec52SMartin KaFai Lau {
12584ed8ec52SMartin KaFai Lau 	return cgroup_get_from_fd(fd);
12594ed8ec52SMartin KaFai Lau }
12604ed8ec52SMartin KaFai Lau 
12614ed8ec52SMartin KaFai Lau static void cgroup_fd_array_put_ptr(void *ptr)
12624ed8ec52SMartin KaFai Lau {
12634ed8ec52SMartin KaFai Lau 	/* cgroup_put() frees the cgrp after an RCU grace period */
12644ed8ec52SMartin KaFai Lau 	cgroup_put(ptr);
12654ed8ec52SMartin KaFai Lau }
12664ed8ec52SMartin KaFai Lau 
12674ed8ec52SMartin KaFai Lau static void cgroup_fd_array_free(struct bpf_map *map)
12684ed8ec52SMartin KaFai Lau {
12694ed8ec52SMartin KaFai Lau 	bpf_fd_array_map_clear(map);
12704ed8ec52SMartin KaFai Lau 	fd_array_map_free(map);
12714ed8ec52SMartin KaFai Lau }
12724ed8ec52SMartin KaFai Lau 
127340077e0cSJohannes Berg const struct bpf_map_ops cgroup_array_map_ops = {
1274f4d05259SMartin KaFai Lau 	.map_meta_equal = bpf_map_meta_equal,
1275ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
1276ad46061fSJakub Kicinski 	.map_alloc = array_map_alloc,
12774ed8ec52SMartin KaFai Lau 	.map_free = cgroup_fd_array_free,
12784ed8ec52SMartin KaFai Lau 	.map_get_next_key = array_map_get_next_key,
12794ed8ec52SMartin KaFai Lau 	.map_lookup_elem = fd_array_map_lookup_elem,
12804ed8ec52SMartin KaFai Lau 	.map_delete_elem = fd_array_map_delete_elem,
12814ed8ec52SMartin KaFai Lau 	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
12824ed8ec52SMartin KaFai Lau 	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
1283e8d2bec0SDaniel Borkmann 	.map_check_btf = map_check_no_btf,
1284c317ab71SMenglong Dong 	.map_btf_id = &array_map_btf_ids[0],
12854ed8ec52SMartin KaFai Lau };
12864ed8ec52SMartin KaFai Lau #endif
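
/* BPF-side sketch (illustrative) of consuming a cgroup array, e.g. to
 * test whether the current task runs below a stored cgroup:
 *
 *	if (bpf_current_task_under_cgroup(&cgrp_map, 0))
 *		bpf_printk("task is in the watched cgroup\n");
 *
 * Userspace fills slot 0 with a cgroup directory fd beforehand via
 * bpf_map_update_elem(map_fd, &zero, &cgroup_fd, BPF_ANY).
 */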
128756f668dfSMartin KaFai Lau 
128856f668dfSMartin KaFai Lau static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
128956f668dfSMartin KaFai Lau {
129056f668dfSMartin KaFai Lau 	struct bpf_map *map, *inner_map_meta;
129156f668dfSMartin KaFai Lau 
129256f668dfSMartin KaFai Lau 	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
129356f668dfSMartin KaFai Lau 	if (IS_ERR(inner_map_meta))
129456f668dfSMartin KaFai Lau 		return inner_map_meta;
129556f668dfSMartin KaFai Lau 
1296ad46061fSJakub Kicinski 	map = array_map_alloc(attr);
129756f668dfSMartin KaFai Lau 	if (IS_ERR(map)) {
129856f668dfSMartin KaFai Lau 		bpf_map_meta_free(inner_map_meta);
129956f668dfSMartin KaFai Lau 		return map;
130056f668dfSMartin KaFai Lau 	}
130156f668dfSMartin KaFai Lau 
130256f668dfSMartin KaFai Lau 	map->inner_map_meta = inner_map_meta;
130356f668dfSMartin KaFai Lau 
130456f668dfSMartin KaFai Lau 	return map;
130556f668dfSMartin KaFai Lau }
130656f668dfSMartin KaFai Lau 
130756f668dfSMartin KaFai Lau static void array_of_map_free(struct bpf_map *map)
130856f668dfSMartin KaFai Lau {
130956f668dfSMartin KaFai Lau 	/* map->inner_map_meta is only accessed from syscall context,
131056f668dfSMartin KaFai Lau 	 * which is protected by fdget/fdput.
131156f668dfSMartin KaFai Lau 	 */
131256f668dfSMartin KaFai Lau 	bpf_map_meta_free(map->inner_map_meta);
131356f668dfSMartin KaFai Lau 	bpf_fd_array_map_clear(map);
131456f668dfSMartin KaFai Lau 	fd_array_map_free(map);
131556f668dfSMartin KaFai Lau }
131656f668dfSMartin KaFai Lau 
131756f668dfSMartin KaFai Lau static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
131856f668dfSMartin KaFai Lau {
131956f668dfSMartin KaFai Lau 	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
132056f668dfSMartin KaFai Lau 
132156f668dfSMartin KaFai Lau 	if (!inner_map)
132256f668dfSMartin KaFai Lau 		return NULL;
132356f668dfSMartin KaFai Lau 
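	/* Pairs with the updater side, which publishes a new inner map
	 * into the slot with xchg(); the value read here stays valid
	 * for the duration of the RCU read-side critical section.
	 */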
132456f668dfSMartin KaFai Lau 	return READ_ONCE(*inner_map);
132556f668dfSMartin KaFai Lau }
132656f668dfSMartin KaFai Lau 
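/* Rough shape of the sequence emitted below (Spectre-hardened variant),
 * with r0 = return value, r1 = map pointer, r2 = pointer to key:
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)		// index
 *	if r0 >= max_entries goto miss
 *	r0 &= array->index_mask		// bound speculative access
 *	r0 <<= ilog2(elem_size)		// or r0 *= elem_size
 *	r0 += r1
 *	r0 = *(u64 *)(r0 + 0)		// inner map pointer
 *	if r0 == 0 goto miss
 *	goto out
 * miss:
 *	r0 = 0
 * out:
 */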
13274a8f87e6SDaniel Borkmann static int array_of_map_gen_lookup(struct bpf_map *map,
13287b0c2a05SDaniel Borkmann 				   struct bpf_insn *insn_buf)
13297b0c2a05SDaniel Borkmann {
1330b2157399SAlexei Starovoitov 	struct bpf_array *array = container_of(map, struct bpf_array, map);
1331*d937bc34SAndrii Nakryiko 	u32 elem_size = array->elem_size;
13327b0c2a05SDaniel Borkmann 	struct bpf_insn *insn = insn_buf;
13337b0c2a05SDaniel Borkmann 	const int ret = BPF_REG_0;
13347b0c2a05SDaniel Borkmann 	const int map_ptr = BPF_REG_1;
13357b0c2a05SDaniel Borkmann 	const int index = BPF_REG_2;
13367b0c2a05SDaniel Borkmann 
13377b0c2a05SDaniel Borkmann 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
13387b0c2a05SDaniel Borkmann 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
13392c78ee89SAlexei Starovoitov 	if (!map->bypass_spec_v1) {
1340b2157399SAlexei Starovoitov 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1341b2157399SAlexei Starovoitov 		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1342b2157399SAlexei Starovoitov 	} else {
13437b0c2a05SDaniel Borkmann 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1344b2157399SAlexei Starovoitov 	}
13457b0c2a05SDaniel Borkmann 	if (is_power_of_2(elem_size))
13467b0c2a05SDaniel Borkmann 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
13477b0c2a05SDaniel Borkmann 	else
13487b0c2a05SDaniel Borkmann 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
13497b0c2a05SDaniel Borkmann 	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
13507b0c2a05SDaniel Borkmann 	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
13517b0c2a05SDaniel Borkmann 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
13527b0c2a05SDaniel Borkmann 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
13537b0c2a05SDaniel Borkmann 	*insn++ = BPF_MOV64_IMM(ret, 0);
13547b0c2a05SDaniel Borkmann 
13557b0c2a05SDaniel Borkmann 	return insn - insn_buf;
13567b0c2a05SDaniel Borkmann }
13577b0c2a05SDaniel Borkmann 
135840077e0cSJohannes Berg const struct bpf_map_ops array_of_maps_map_ops = {
1359ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
136056f668dfSMartin KaFai Lau 	.map_alloc = array_of_map_alloc,
136156f668dfSMartin KaFai Lau 	.map_free = array_of_map_free,
136256f668dfSMartin KaFai Lau 	.map_get_next_key = array_map_get_next_key,
136356f668dfSMartin KaFai Lau 	.map_lookup_elem = array_of_map_lookup_elem,
136456f668dfSMartin KaFai Lau 	.map_delete_elem = fd_array_map_delete_elem,
136556f668dfSMartin KaFai Lau 	.map_fd_get_ptr = bpf_map_fd_get_ptr,
136656f668dfSMartin KaFai Lau 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
136714dc6f04SMartin KaFai Lau 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
13687b0c2a05SDaniel Borkmann 	.map_gen_lookup = array_of_map_gen_lookup,
13699263dddcSTakshak Chahande 	.map_lookup_batch = generic_map_lookup_batch,
13709263dddcSTakshak Chahande 	.map_update_batch = generic_map_update_batch,
1371e8d2bec0SDaniel Borkmann 	.map_check_btf = map_check_no_btf,
1372c317ab71SMenglong Dong 	.map_btf_id = &array_map_btf_ids[0],
137356f668dfSMartin KaFai Lau };
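
/* Userspace sketch (illustrative; assumes libbpf >= 0.7 for
 * bpf_map_create() and LIBBPF_OPTS()) of creating an array-of-maps;
 * the outer map's value size is the size of an fd (u32):
 *
 *	int inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
 *				      sizeof(__u32), sizeof(__u64), 1, NULL);
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner_fd);
 *	int outer_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer",
 *				      sizeof(__u32), sizeof(__u32), 8, &opts);
 *
 *	bpf_map_update_elem(outer_fd, &slot, &inner_fd, BPF_ANY);
 */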
1374