// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

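/* Quick reference for the flags accepted above (summary only, see the
 * flag definitions for the authoritative semantics):
 *	BPF_F_NUMA_NODE      - honor attr->numa_node for the allocation
 *	BPF_F_MMAPABLE       - allow user space to mmap() the value area
 *	BPF_F_ACCESS_MASK    - read-only/write-only access restrictions
 *	BPF_F_PRESERVE_ELEMS - keep perf event array elements on map release
 *	BPF_F_INNER_MAP      - usable as inner map with differing max_entries
 */
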
static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}
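
/* Note: freeing and populating the per-cpu slots walks max_entries
 * elements, which can be a lot; the cond_resched() calls in both loops
 * above keep huge arrays from hogging the CPU and triggering soft
 * lockups.
 */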

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	/* avoid overflow on round_up(map->value_size) */
	if (attr->value_size > INT_MAX)
		return -E2BIG;
	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
		return -E2BIG;

	return 0;
}
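
/* Example (illustrative only, using the libbpf API from user space) of
 * an attribute set that passes the checks above:
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "example",
 *				sizeof(__u32), sizeof(__u64), 256, NULL);
 *
 * The key size must be exactly 4 bytes (a u32 index) and max_entries
 * must be non-zero, otherwise map creation fails with -EINVAL.
 */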

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}
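
/* Layout of a BPF_F_MMAPABLE array (sketch derived from the pointer
 * arithmetic above): the vmalloc'ed area starts page-aligned at 'data'
 * and 'array' is shifted so that array->value lands exactly on the
 * next page boundary:
 *
 *	data ---------> +--------------------------+  (page-aligned)
 *	                | padding                  |
 *	array --------> | struct bpf_array header  |
 *	array->value -> +--------------------------+  (page-aligned)
 *	                | elem 0 .. max_entries-1  |
 *	                +--------------------------+
 *
 * That way user space can mmap() the element area without ever seeing
 * the struct bpf_array header itself.
 */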

static void *array_map_elem_ptr(struct bpf_array *array, u32 index)
{
	return array->value + (u64)array->elem_size * index;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + (u64)array->elem_size * (index & array->index_mask);
}
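
/* The AND with index_mask after the architectural bounds check keeps a
 * speculatively executed out-of-bounds load inside the array as well
 * (Spectre v1 hardening). When bypass_spec_v1 is set for privileged
 * users, max_entries was not rounded up to a power of two and the mask
 * is effectively a no-op for valid indices.
 */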

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = array->elem_size;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
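
/* The inlined sequence emitted above corresponds to (illustrative C):
 *
 *	elem = &array->value;
 *	index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;	// only if !bypass_spec_v1
 *	return elem + index * elem_size;
 *
 * with the multiply strength-reduced to a shift when elem_size is a
 * power of two.
 */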

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

/* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;

	if (!bpf_jit_supports_percpu_insn())
		return -EOPNOTSUPP;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));

	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
	}

	*insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
	*insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
	return insn - insn_buf;
}
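
/* The shift by 3 above indexes the pptrs[] table of 8-byte per-cpu
 * pointers; BPF_MOV64_PERCPU_REG then converts the loaded per-cpu
 * address into this CPU's address, i.e. an inlined this_cpu_ptr().
 * Only JITs implementing the per-cpu move instruction can use this
 * path, hence the bpf_jit_supports_percpu_insn() check.
 */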

static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
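
/* Layout of the user-supplied 'value' buffer filled above: one
 * round_up(value_size, 8) sized copy per possible CPU, laid out back
 * to back, i.e. the caller must provide at least
 * num_possible_cpus() * elem_size bytes.
 */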

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
				  u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
		copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	} else {
		val = array->value +
			(u64)array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	}
	return 0;
}
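
/* Example (illustrative) syscall-side update through libbpf:
 *
 *	__u32 key = 0;
 *	__u64 val = 42;
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *
 * BPF_NOEXIST always fails with -EEXIST here since every slot of an
 * array pre-exists; BPF_F_LOCK additionally requires a bpf_spin_lock
 * field in the value's BTF.
 */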

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers_wq(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free fields other than timer and workqueue
	 * on uref dropping to zero.
	 */
	if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE)) {
		for (i = 0; i < array->map.max_entries; i++) {
			if (btf_record_has_field(map->record, BPF_TIMER))
				bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
			if (btf_record_has_field(map->record, BPF_WORKQUEUE))
				bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
		}
	}
}
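
/* Bound to .map_release_uref, so this runs once the last user-space
 * reference (fd or mmap) to the map goes away; the timer/workqueue
 * code refuses to arm new callbacks once usercnt is zero, so nothing
 * re-arms behind this cleanup. Element memory itself is only freed
 * later in array_map_free().
 */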

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (!IS_ERR_OR_NULL(map->record)) {
		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
			for (i = 0; i < array->map.max_entries; i++) {
				void __percpu *pptr = array->pptrs[i & array->index_mask];
				int cpu;

				for_each_possible_cpu(cpu) {
					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
					cond_resched();
				}
			}
		} else {
			for (i = 0; i < array->map.max_entries; i++)
				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
		}
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_putc(m, '\n');

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_putc(m, '\n');
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
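
/* Example (illustrative) user-space mapping of a BPF_F_MMAPABLE array:
 *
 *	void *mem = mmap(NULL, array_sz, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, map_fd, 0);
 *
 * Offset 0 of the mapping is the first element (array->value); the
 * struct bpf_array header is never exposed since pgoff skips past it.
 */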

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return (void *)(uintptr_t)array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return (void *)(uintptr_t)array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu *pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = (void __percpu *)(uintptr_t)v;
			size = array->elem_size;
			for_each_possible_cpu(cpu) {
				copy_map_value_long(map, info->percpu_value_buf + off,
						    per_cpu_ptr(pptr, cpu));
				check_and_init_map_value(map, info->percpu_value_buf + off);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = array->elem_size * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
	 * released before or in the middle of iterating map elements, so
	 * acquire an extra map uref for iterator.
	 */
	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start	= bpf_array_map_seq_start,
	.next	= bpf_array_map_seq_next,
	.stop	= bpf_array_map_seq_stop,
	.show	= bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_array_map_seq_ops,
	.init_seq_private	= bpf_iter_init_array_map,
	.fini_seq_private	= bpf_iter_fini_array_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
};

static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				    void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	cant_migrate();

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array_map_elem_ptr(array, i);
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	return num_elems;
}
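
/* This backs the bpf_for_each_map_elem() helper. A program-side use
 * could look like (illustrative):
 *
 *	static long cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
 *	{
 *		return 0;	// 0 = continue, non-zero = stop
 *	}
 *
 *	num = bpf_for_each_map_elem(&my_array, cb, &my_ctx, 0);
 */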

static u64 array_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	u32 elem_size = array->elem_size;
	u64 entries = map->max_entries;
	u64 usage = sizeof(*array);

	if (percpu) {
		usage += entries * sizeof(void *);
		usage += entries * elem_size * num_possible_cpus();
	} else {
		if (map->map_flags & BPF_F_MMAPABLE) {
			usage = PAGE_ALIGN(usage);
			usage += PAGE_ALIGN(entries * elem_size);
		} else {
			usage += entries * elem_size;
		}
	}
	return usage;
}

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers_wq,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_gen_lookup = percpu_array_map_gen_lookup,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(map, old_ptr, true);
	return 0;
}
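
/* For fd arrays that have a poke handler (prog arrays used as tail
 * call maps by JITed programs), the xchg() plus map_poke_run() under
 * poke_mutex updates the JITed direct-jump sites in lockstep with the
 * table slot, so a tail call never dispatches to a stale program.
 */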
91204fd61abSAlexei Starovoitov
__fd_array_map_delete_elem(struct bpf_map * map,void * key,bool need_defer)91379d93b3cSHou Tao static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
91404fd61abSAlexei Starovoitov {
91504fd61abSAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map);
9162a36f0b9SWang Nan void *old_ptr;
91704fd61abSAlexei Starovoitov u32 index = *(u32 *)key;
91804fd61abSAlexei Starovoitov
91904fd61abSAlexei Starovoitov if (index >= array->map.max_entries)
92004fd61abSAlexei Starovoitov return -E2BIG;
92104fd61abSAlexei Starovoitov
922da765a2fSDaniel Borkmann if (map->ops->map_poke_run) {
923da765a2fSDaniel Borkmann mutex_lock(&array->aux->poke_mutex);
9242a36f0b9SWang Nan old_ptr = xchg(array->ptrs + index, NULL);
925da765a2fSDaniel Borkmann map->ops->map_poke_run(map, index, old_ptr, NULL);
926da765a2fSDaniel Borkmann mutex_unlock(&array->aux->poke_mutex);
927da765a2fSDaniel Borkmann } else {
928da765a2fSDaniel Borkmann old_ptr = xchg(array->ptrs + index, NULL);
929da765a2fSDaniel Borkmann }
930da765a2fSDaniel Borkmann
9312a36f0b9SWang Nan if (old_ptr) {
93279d93b3cSHou Tao map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
93304fd61abSAlexei Starovoitov return 0;
93404fd61abSAlexei Starovoitov } else {
93504fd61abSAlexei Starovoitov return -ENOENT;
93604fd61abSAlexei Starovoitov }
93704fd61abSAlexei Starovoitov }
93804fd61abSAlexei Starovoitov
fd_array_map_delete_elem(struct bpf_map * map,void * key)93979d93b3cSHou Tao static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
94079d93b3cSHou Tao {
94179d93b3cSHou Tao return __fd_array_map_delete_elem(map, key, true);
94279d93b3cSHou Tao }
94379d93b3cSHou Tao
prog_fd_array_get_ptr(struct bpf_map * map,struct file * map_file,int fd)944d056a788SDaniel Borkmann static void *prog_fd_array_get_ptr(struct bpf_map *map,
945d056a788SDaniel Borkmann struct file *map_file, int fd)
9462a36f0b9SWang Nan {
9472a36f0b9SWang Nan struct bpf_prog *prog = bpf_prog_get(fd);
948d6083f04SLeon Hwang bool is_extended;
949d056a788SDaniel Borkmann
9502a36f0b9SWang Nan if (IS_ERR(prog))
9512a36f0b9SWang Nan return prog;
9522a36f0b9SWang Nan
953d6083f04SLeon Hwang if (prog->type == BPF_PROG_TYPE_EXT ||
954d6083f04SLeon Hwang !bpf_prog_map_compatible(map, prog)) {
9552a36f0b9SWang Nan bpf_prog_put(prog);
9562a36f0b9SWang Nan return ERR_PTR(-EINVAL);
9572a36f0b9SWang Nan }
958d056a788SDaniel Borkmann
959d6083f04SLeon Hwang mutex_lock(&prog->aux->ext_mutex);
960d6083f04SLeon Hwang is_extended = prog->aux->is_extended;
961d6083f04SLeon Hwang if (!is_extended)
962d6083f04SLeon Hwang prog->aux->prog_array_member_cnt++;
963d6083f04SLeon Hwang mutex_unlock(&prog->aux->ext_mutex);
964d6083f04SLeon Hwang if (is_extended) {
965d6083f04SLeon Hwang /* Extended prog can not be tail callee. It's to prevent a
966d6083f04SLeon Hwang * potential infinite loop like:
967d6083f04SLeon Hwang * tail callee prog entry -> tail callee prog subprog ->
968d6083f04SLeon Hwang * freplace prog entry --tailcall-> tail callee prog entry.
969d6083f04SLeon Hwang */
970d6083f04SLeon Hwang bpf_prog_put(prog);
971d6083f04SLeon Hwang return ERR_PTR(-EBUSY);
972d6083f04SLeon Hwang }
973d6083f04SLeon Hwang
9742a36f0b9SWang Nan return prog;
9752a36f0b9SWang Nan }
9762a36f0b9SWang Nan
prog_fd_array_put_ptr(struct bpf_map * map,void * ptr,bool need_defer)97720c20bd1SHou Tao static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
9782a36f0b9SWang Nan {
979d6083f04SLeon Hwang struct bpf_prog *prog = ptr;
980d6083f04SLeon Hwang
981d6083f04SLeon Hwang mutex_lock(&prog->aux->ext_mutex);
982d6083f04SLeon Hwang prog->aux->prog_array_member_cnt--;
983d6083f04SLeon Hwang mutex_unlock(&prog->aux->ext_mutex);
98420c20bd1SHou Tao /* bpf_prog is freed after one RCU or tasks trace grace period */
985d6083f04SLeon Hwang bpf_prog_put(prog);
9862a36f0b9SWang Nan }
9872a36f0b9SWang Nan
prog_fd_array_sys_lookup_elem(void * ptr)98814dc6f04SMartin KaFai Lau static u32 prog_fd_array_sys_lookup_elem(void *ptr)
98914dc6f04SMartin KaFai Lau {
99014dc6f04SMartin KaFai Lau return ((struct bpf_prog *)ptr)->aux->id;
99114dc6f04SMartin KaFai Lau }
99214dc6f04SMartin KaFai Lau
99304fd61abSAlexei Starovoitov /* decrement refcnt of all bpf_progs that are stored in this map */
bpf_fd_array_map_clear(struct bpf_map * map,bool need_defer)99479d93b3cSHou Tao static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
99504fd61abSAlexei Starovoitov {
99604fd61abSAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map);
99704fd61abSAlexei Starovoitov int i;
99804fd61abSAlexei Starovoitov
99904fd61abSAlexei Starovoitov for (i = 0; i < array->map.max_entries; i++)
100079d93b3cSHou Tao __fd_array_map_delete_elem(map, &i, need_defer);
100104fd61abSAlexei Starovoitov }
100204fd61abSAlexei Starovoitov
1003a7c19db3SYonghong Song static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
1004a7c19db3SYonghong Song struct seq_file *m)
1005a7c19db3SYonghong Song {
1006a7c19db3SYonghong Song void **elem, *ptr;
1007a7c19db3SYonghong Song u32 prog_id;
1008a7c19db3SYonghong Song
1009a7c19db3SYonghong Song rcu_read_lock();
1010a7c19db3SYonghong Song
1011a7c19db3SYonghong Song elem = array_map_lookup_elem(map, key);
1012a7c19db3SYonghong Song if (elem) {
1013a7c19db3SYonghong Song ptr = READ_ONCE(*elem);
1014a7c19db3SYonghong Song if (ptr) {
1015a7c19db3SYonghong Song seq_printf(m, "%u: ", *(u32 *)key);
1016a7c19db3SYonghong Song prog_id = prog_fd_array_sys_lookup_elem(ptr);
1017a7c19db3SYonghong Song btf_type_seq_show(map->btf, map->btf_value_type_id,
1018a7c19db3SYonghong Song &prog_id, m);
1019df862de4SMarkus Elfring seq_putc(m, '\n');
1020a7c19db3SYonghong Song }
1021a7c19db3SYonghong Song }
1022a7c19db3SYonghong Song
1023a7c19db3SYonghong Song rcu_read_unlock();
1024a7c19db3SYonghong Song }
1025a7c19db3SYonghong Song
1026da765a2fSDaniel Borkmann struct prog_poke_elem {
1027da765a2fSDaniel Borkmann struct list_head list;
1028da765a2fSDaniel Borkmann struct bpf_prog_aux *aux;
1029da765a2fSDaniel Borkmann };
1030da765a2fSDaniel Borkmann
1031da765a2fSDaniel Borkmann static int prog_array_map_poke_track(struct bpf_map *map,
1032da765a2fSDaniel Borkmann struct bpf_prog_aux *prog_aux)
1033da765a2fSDaniel Borkmann {
1034da765a2fSDaniel Borkmann struct prog_poke_elem *elem;
1035da765a2fSDaniel Borkmann struct bpf_array_aux *aux;
1036da765a2fSDaniel Borkmann int ret = 0;
1037da765a2fSDaniel Borkmann
1038da765a2fSDaniel Borkmann aux = container_of(map, struct bpf_array, map)->aux;
1039da765a2fSDaniel Borkmann mutex_lock(&aux->poke_mutex);
1040da765a2fSDaniel Borkmann list_for_each_entry(elem, &aux->poke_progs, list) {
1041da765a2fSDaniel Borkmann if (elem->aux == prog_aux)
1042da765a2fSDaniel Borkmann goto out;
1043da765a2fSDaniel Borkmann }
1044da765a2fSDaniel Borkmann
1045da765a2fSDaniel Borkmann elem = kmalloc(sizeof(*elem), GFP_KERNEL);
1046da765a2fSDaniel Borkmann if (!elem) {
1047da765a2fSDaniel Borkmann ret = -ENOMEM;
1048da765a2fSDaniel Borkmann goto out;
1049da765a2fSDaniel Borkmann }
1050da765a2fSDaniel Borkmann
1051da765a2fSDaniel Borkmann INIT_LIST_HEAD(&elem->list);
1052da765a2fSDaniel Borkmann /* We must track the program's aux info at this point in time
1053da765a2fSDaniel Borkmann * since the program pointer itself may not be stable yet, see
1054da765a2fSDaniel Borkmann * also comment in prog_array_map_poke_run().
1055da765a2fSDaniel Borkmann */
1056da765a2fSDaniel Borkmann elem->aux = prog_aux;
1057da765a2fSDaniel Borkmann
1058da765a2fSDaniel Borkmann list_add_tail(&elem->list, &aux->poke_progs);
1059da765a2fSDaniel Borkmann out:
1060da765a2fSDaniel Borkmann mutex_unlock(&aux->poke_mutex);
1061da765a2fSDaniel Borkmann return ret;
1062da765a2fSDaniel Borkmann }
1063da765a2fSDaniel Borkmann
1064da765a2fSDaniel Borkmann static void prog_array_map_poke_untrack(struct bpf_map *map,
1065da765a2fSDaniel Borkmann struct bpf_prog_aux *prog_aux)
1066da765a2fSDaniel Borkmann {
1067da765a2fSDaniel Borkmann struct prog_poke_elem *elem, *tmp;
1068da765a2fSDaniel Borkmann struct bpf_array_aux *aux;
1069da765a2fSDaniel Borkmann
1070da765a2fSDaniel Borkmann aux = container_of(map, struct bpf_array, map)->aux;
1071da765a2fSDaniel Borkmann mutex_lock(&aux->poke_mutex);
1072da765a2fSDaniel Borkmann list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1073da765a2fSDaniel Borkmann if (elem->aux == prog_aux) {
1074da765a2fSDaniel Borkmann list_del_init(&elem->list);
1075da765a2fSDaniel Borkmann kfree(elem);
1076da765a2fSDaniel Borkmann break;
1077da765a2fSDaniel Borkmann }
1078da765a2fSDaniel Borkmann }
1079da765a2fSDaniel Borkmann mutex_unlock(&aux->poke_mutex);
1080da765a2fSDaniel Borkmann }
1081da765a2fSDaniel Borkmann
10824b7de801SJiri Olsa void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
10834b7de801SJiri Olsa struct bpf_prog *new, struct bpf_prog *old)
10844b7de801SJiri Olsa {
10854b7de801SJiri Olsa WARN_ON_ONCE(1);
10864b7de801SJiri Olsa }
10874b7de801SJiri Olsa
1088da765a2fSDaniel Borkmann static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
1089da765a2fSDaniel Borkmann struct bpf_prog *old,
1090da765a2fSDaniel Borkmann struct bpf_prog *new)
1091da765a2fSDaniel Borkmann {
1092da765a2fSDaniel Borkmann struct prog_poke_elem *elem;
1093da765a2fSDaniel Borkmann struct bpf_array_aux *aux;
1094da765a2fSDaniel Borkmann
1095da765a2fSDaniel Borkmann aux = container_of(map, struct bpf_array, map)->aux;
1096da765a2fSDaniel Borkmann WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
1097da765a2fSDaniel Borkmann
1098da765a2fSDaniel Borkmann list_for_each_entry(elem, &aux->poke_progs, list) {
1099da765a2fSDaniel Borkmann struct bpf_jit_poke_descriptor *poke;
11004b7de801SJiri Olsa int i;
1101da765a2fSDaniel Borkmann
1102da765a2fSDaniel Borkmann for (i = 0; i < elem->aux->size_poke_tab; i++) {
1103da765a2fSDaniel Borkmann poke = &elem->aux->poke_tab[i];
1104da765a2fSDaniel Borkmann
1105da765a2fSDaniel Borkmann /* A few things to be aware of:
1106da765a2fSDaniel Borkmann *
1107da765a2fSDaniel Borkmann * 1) We can only ever access aux in this context, but
1108da765a2fSDaniel Borkmann * not aux->prog since it might not be stable yet and
1109da765a2fSDaniel Borkmann * we would risk a use-after-free otherwise.
1110da765a2fSDaniel Borkmann * 2) Initially when we start tracking aux, the program
1111da765a2fSDaniel Borkmann * is not JITed yet and also does not have a kallsyms
1112cf71b174SMaciej Fijalkowski * entry. We skip these as poke->tailcall_target_stable
1113cf71b174SMaciej Fijalkowski * is not active yet. The JIT will do the final fixup
1114cf71b174SMaciej Fijalkowski * before setting it stable. The various
1115cf71b174SMaciej Fijalkowski * poke->tailcall_target_stable are successively
1116cf71b174SMaciej Fijalkowski * activated, so tail call updates can arrive from here
1117cf71b174SMaciej Fijalkowski * while JIT is still finishing its final fixup for
1118cf71b174SMaciej Fijalkowski * non-activated poke entries.
11194b7de801SJiri Olsa * 3) Programs reaching a refcount of zero while patching
1120da765a2fSDaniel Borkmann * is in progress are also okay, since we're protected under
1121da765a2fSDaniel Borkmann * poke_mutex and untrack the programs before the JIT
11224b7de801SJiri Olsa * buffer is freed.
1123da765a2fSDaniel Borkmann */
1124cf71b174SMaciej Fijalkowski if (!READ_ONCE(poke->tailcall_target_stable))
1125da765a2fSDaniel Borkmann continue;
1126da765a2fSDaniel Borkmann if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
1127da765a2fSDaniel Borkmann continue;
1128da765a2fSDaniel Borkmann if (poke->tail_call.map != map ||
1129da765a2fSDaniel Borkmann poke->tail_call.key != key)
1130da765a2fSDaniel Borkmann continue;
1131da765a2fSDaniel Borkmann
11324b7de801SJiri Olsa bpf_arch_poke_desc_update(poke, new, old);
1133da765a2fSDaniel Borkmann }
1134da765a2fSDaniel Borkmann }
1135da765a2fSDaniel Borkmann }
1136da765a2fSDaniel Borkmann
1137da765a2fSDaniel Borkmann static void prog_array_map_clear_deferred(struct work_struct *work)
1138da765a2fSDaniel Borkmann {
1139da765a2fSDaniel Borkmann struct bpf_map *map = container_of(work, struct bpf_array_aux,
1140da765a2fSDaniel Borkmann work)->map;
114179d93b3cSHou Tao bpf_fd_array_map_clear(map, true);
1142da765a2fSDaniel Borkmann bpf_map_put(map);
1143da765a2fSDaniel Borkmann }
1144da765a2fSDaniel Borkmann
1145da765a2fSDaniel Borkmann static void prog_array_map_clear(struct bpf_map *map)
1146da765a2fSDaniel Borkmann {
1147da765a2fSDaniel Borkmann struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1148da765a2fSDaniel Borkmann map)->aux;
1149da765a2fSDaniel Borkmann bpf_map_inc(map);
1150da765a2fSDaniel Borkmann schedule_work(&aux->work);
1151da765a2fSDaniel Borkmann }
1152da765a2fSDaniel Borkmann
11532beee5f5SDaniel Borkmann static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
11542beee5f5SDaniel Borkmann {
11552beee5f5SDaniel Borkmann struct bpf_array_aux *aux;
11562beee5f5SDaniel Borkmann struct bpf_map *map;
11572beee5f5SDaniel Borkmann
11586d192c79SRoman Gushchin aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
11592beee5f5SDaniel Borkmann if (!aux)
11602beee5f5SDaniel Borkmann return ERR_PTR(-ENOMEM);
11612beee5f5SDaniel Borkmann
1162da765a2fSDaniel Borkmann INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1163da765a2fSDaniel Borkmann INIT_LIST_HEAD(&aux->poke_progs);
1164da765a2fSDaniel Borkmann mutex_init(&aux->poke_mutex);
1165da765a2fSDaniel Borkmann
11662beee5f5SDaniel Borkmann map = array_map_alloc(attr);
11672beee5f5SDaniel Borkmann if (IS_ERR(map)) {
11682beee5f5SDaniel Borkmann kfree(aux);
11692beee5f5SDaniel Borkmann return map;
11702beee5f5SDaniel Borkmann }
11712beee5f5SDaniel Borkmann
11722beee5f5SDaniel Borkmann container_of(map, struct bpf_array, map)->aux = aux;
1173da765a2fSDaniel Borkmann aux->map = map;
1174da765a2fSDaniel Borkmann
11752beee5f5SDaniel Borkmann return map;
11762beee5f5SDaniel Borkmann }
11772beee5f5SDaniel Borkmann
11782beee5f5SDaniel Borkmann static void prog_array_map_free(struct bpf_map *map)
11792beee5f5SDaniel Borkmann {
1180da765a2fSDaniel Borkmann struct prog_poke_elem *elem, *tmp;
11812beee5f5SDaniel Borkmann struct bpf_array_aux *aux;
11822beee5f5SDaniel Borkmann
11832beee5f5SDaniel Borkmann aux = container_of(map, struct bpf_array, map)->aux;
1184da765a2fSDaniel Borkmann list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1185da765a2fSDaniel Borkmann list_del_init(&elem->list);
1186da765a2fSDaniel Borkmann kfree(elem);
1187da765a2fSDaniel Borkmann }
11882beee5f5SDaniel Borkmann kfree(aux);
11892beee5f5SDaniel Borkmann fd_array_map_free(map);
11902beee5f5SDaniel Borkmann }
11912beee5f5SDaniel Borkmann
1192f4d05259SMartin KaFai Lau /* prog_array->aux->{type,jited} is a runtime binding.
1193f4d05259SMartin KaFai Lau * A static check alone in the verifier is not enough.
1194f4d05259SMartin KaFai Lau * Thus, prog_array_map cannot be used as an inner_map
1195f4d05259SMartin KaFai Lau * and map_meta_equal is not implemented.
1196f4d05259SMartin KaFai Lau */
119740077e0cSJohannes Berg const struct bpf_map_ops prog_array_map_ops = {
1198ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check,
11992beee5f5SDaniel Borkmann .map_alloc = prog_array_map_alloc,
12002beee5f5SDaniel Borkmann .map_free = prog_array_map_free,
1201da765a2fSDaniel Borkmann .map_poke_track = prog_array_map_poke_track,
1202da765a2fSDaniel Borkmann .map_poke_untrack = prog_array_map_poke_untrack,
1203da765a2fSDaniel Borkmann .map_poke_run = prog_array_map_poke_run,
120404fd61abSAlexei Starovoitov .map_get_next_key = array_map_get_next_key,
12052a36f0b9SWang Nan .map_lookup_elem = fd_array_map_lookup_elem,
12062a36f0b9SWang Nan .map_delete_elem = fd_array_map_delete_elem,
12072a36f0b9SWang Nan .map_fd_get_ptr = prog_fd_array_get_ptr,
12082a36f0b9SWang Nan .map_fd_put_ptr = prog_fd_array_put_ptr,
120914dc6f04SMartin KaFai Lau .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1210da765a2fSDaniel Borkmann .map_release_uref = prog_array_map_clear,
1211a7c19db3SYonghong Song .map_seq_show_elem = prog_array_map_seq_show_elem,
12121746d055SYafang Shao .map_mem_usage = array_map_mem_usage,
1213c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0],
121404fd61abSAlexei Starovoitov };
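
/* BPF-program-side usage sketch for prog arrays (hypothetical map and
 * section names, assuming libbpf's BTF-defined map syntax and the
 * bpf_tail_call() helper):
 *
 *   #include <linux/bpf.h>
 *   #include <bpf/bpf_helpers.h>
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *       __uint(max_entries, 4);
 *       __type(key, __u32);
 *       __type(value, __u32);
 *   } jmp_table SEC(".maps");
 *
 *   SEC("xdp")
 *   int dispatcher(struct xdp_md *ctx)
 *   {
 *       // Does not return on success; an empty slot falls through.
 *       bpf_tail_call(ctx, &jmp_table, 0);
 *       return XDP_PASS;
 *   }
 *
 *   char LICENSE[] SEC("license") = "GPL";
 */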
121504fd61abSAlexei Starovoitov
12163b1efb19SDaniel Borkmann static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
12173b1efb19SDaniel Borkmann struct file *map_file)
1218ea317b26SKaixu Xia {
12193b1efb19SDaniel Borkmann struct bpf_event_entry *ee;
12203b1efb19SDaniel Borkmann
1221dc685409SHou Tao ee = kzalloc(sizeof(*ee), GFP_KERNEL);
12223b1efb19SDaniel Borkmann if (ee) {
12233b1efb19SDaniel Borkmann ee->event = perf_file->private_data;
12243b1efb19SDaniel Borkmann ee->perf_file = perf_file;
12253b1efb19SDaniel Borkmann ee->map_file = map_file;
12263b1efb19SDaniel Borkmann }
12273b1efb19SDaniel Borkmann
12283b1efb19SDaniel Borkmann return ee;
12293b1efb19SDaniel Borkmann }
12303b1efb19SDaniel Borkmann
12313b1efb19SDaniel Borkmann static void __bpf_event_entry_free(struct rcu_head *rcu)
12323b1efb19SDaniel Borkmann {
12333b1efb19SDaniel Borkmann struct bpf_event_entry *ee;
12343b1efb19SDaniel Borkmann
12353b1efb19SDaniel Borkmann ee = container_of(rcu, struct bpf_event_entry, rcu);
12363b1efb19SDaniel Borkmann fput(ee->perf_file);
12373b1efb19SDaniel Borkmann kfree(ee);
12383b1efb19SDaniel Borkmann }
12393b1efb19SDaniel Borkmann
12403b1efb19SDaniel Borkmann static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
12413b1efb19SDaniel Borkmann {
12423b1efb19SDaniel Borkmann call_rcu(&ee->rcu, __bpf_event_entry_free);
1243ea317b26SKaixu Xia }
1244ea317b26SKaixu Xia
1245d056a788SDaniel Borkmann static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1246d056a788SDaniel Borkmann struct file *map_file, int fd)
1247ea317b26SKaixu Xia {
12483b1efb19SDaniel Borkmann struct bpf_event_entry *ee;
12493b1efb19SDaniel Borkmann struct perf_event *event;
12503b1efb19SDaniel Borkmann struct file *perf_file;
1251f91840a3SAlexei Starovoitov u64 value;
1252ea317b26SKaixu Xia
12533b1efb19SDaniel Borkmann perf_file = perf_event_get(fd);
12543b1efb19SDaniel Borkmann if (IS_ERR(perf_file))
12553b1efb19SDaniel Borkmann return perf_file;
1256e03e7ee3SAlexei Starovoitov
1257f91840a3SAlexei Starovoitov ee = ERR_PTR(-EOPNOTSUPP);
12583b1efb19SDaniel Borkmann event = perf_file->private_data;
125997562633SYonghong Song if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
12603b1efb19SDaniel Borkmann goto err_out;
1261ea317b26SKaixu Xia
12623b1efb19SDaniel Borkmann ee = bpf_event_entry_gen(perf_file, map_file);
12633b1efb19SDaniel Borkmann if (ee)
12643b1efb19SDaniel Borkmann return ee;
12653b1efb19SDaniel Borkmann ee = ERR_PTR(-ENOMEM);
12663b1efb19SDaniel Borkmann err_out:
12673b1efb19SDaniel Borkmann fput(perf_file);
12683b1efb19SDaniel Borkmann return ee;
1269ea317b26SKaixu Xia }
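
/* Userspace sketch for populating a BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * (illustrative; perf_event_open() has no glibc wrapper, and map_fd
 * is assumed to be a valid perf event array fd):
 *
 *   #include <unistd.h>
 *   #include <sys/syscall.h>
 *   #include <linux/perf_event.h>
 *   #include <bpf/bpf.h>
 *
 *   int add_perf_event(int map_fd, __u32 cpu)
 *   {
 *       struct perf_event_attr attr = {
 *           .type = PERF_TYPE_HARDWARE,
 *           .size = sizeof(attr),
 *           .config = PERF_COUNT_HW_CPU_CYCLES,
 *       };
 *       int pefd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 *
 *       if (pefd < 0)
 *           return -1;
 *       // Ends up in perf_event_fd_array_get_ptr() above, which
 *       // rejects events that do not support local reads.
 *       return bpf_map_update_elem(map_fd, &cpu, &pefd, BPF_ANY);
 *   }
 */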
1270ea317b26SKaixu Xia
127120c20bd1SHou Tao static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
1272ea317b26SKaixu Xia {
127320c20bd1SHou Tao /* the bpf_event_entry is freed after one RCU grace period */
12743b1efb19SDaniel Borkmann bpf_event_entry_free_rcu(ptr);
12753b1efb19SDaniel Borkmann }
12763b1efb19SDaniel Borkmann
12773b1efb19SDaniel Borkmann static void perf_event_fd_array_release(struct bpf_map *map,
12783b1efb19SDaniel Borkmann struct file *map_file)
12793b1efb19SDaniel Borkmann {
12803b1efb19SDaniel Borkmann struct bpf_array *array = container_of(map, struct bpf_array, map);
12813b1efb19SDaniel Borkmann struct bpf_event_entry *ee;
12823b1efb19SDaniel Borkmann int i;
12833b1efb19SDaniel Borkmann
1284792cacccSSong Liu if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1285792cacccSSong Liu return;
1286792cacccSSong Liu
12873b1efb19SDaniel Borkmann rcu_read_lock();
12883b1efb19SDaniel Borkmann for (i = 0; i < array->map.max_entries; i++) {
12893b1efb19SDaniel Borkmann ee = READ_ONCE(array->ptrs[i]);
12903b1efb19SDaniel Borkmann if (ee && ee->map_file == map_file)
129179d93b3cSHou Tao __fd_array_map_delete_elem(map, &i, true);
12923b1efb19SDaniel Borkmann }
12933b1efb19SDaniel Borkmann rcu_read_unlock();
1294ea317b26SKaixu Xia }
1295ea317b26SKaixu Xia
1296792cacccSSong Liu static void perf_event_fd_array_map_free(struct bpf_map *map)
1297792cacccSSong Liu {
1298792cacccSSong Liu if (map->map_flags & BPF_F_PRESERVE_ELEMS)
129979d93b3cSHou Tao bpf_fd_array_map_clear(map, false);
1300792cacccSSong Liu fd_array_map_free(map);
1301792cacccSSong Liu }
1302792cacccSSong Liu
130340077e0cSJohannes Berg const struct bpf_map_ops perf_event_array_map_ops = {
1304f4d05259SMartin KaFai Lau .map_meta_equal = bpf_map_meta_equal,
1305ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check,
1306ad46061fSJakub Kicinski .map_alloc = array_map_alloc,
1307792cacccSSong Liu .map_free = perf_event_fd_array_map_free,
1308ea317b26SKaixu Xia .map_get_next_key = array_map_get_next_key,
1309ea317b26SKaixu Xia .map_lookup_elem = fd_array_map_lookup_elem,
1310ea317b26SKaixu Xia .map_delete_elem = fd_array_map_delete_elem,
1311ea317b26SKaixu Xia .map_fd_get_ptr = perf_event_fd_array_get_ptr,
1312ea317b26SKaixu Xia .map_fd_put_ptr = perf_event_fd_array_put_ptr,
13133b1efb19SDaniel Borkmann .map_release = perf_event_fd_array_release,
1314e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf,
13151746d055SYafang Shao .map_mem_usage = array_map_mem_usage,
1316c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0],
1317ea317b26SKaixu Xia };
1318ea317b26SKaixu Xia
131960d20f91SSargun Dhillon #ifdef CONFIG_CGROUPS
13204ed8ec52SMartin KaFai Lau static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
13214ed8ec52SMartin KaFai Lau struct file *map_file /* not used */,
13224ed8ec52SMartin KaFai Lau int fd)
13234ed8ec52SMartin KaFai Lau {
13244ed8ec52SMartin KaFai Lau return cgroup_get_from_fd(fd);
13254ed8ec52SMartin KaFai Lau }
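
/* Userspace sketch for a BPF_MAP_TYPE_CGROUP_ARRAY slot (hypothetical
 * cgroup path; the stored value is a cgroup directory fd, which
 * cgroup_get_from_fd() above resolves to a cgroup reference):
 *
 *   #include <fcntl.h>
 *   #include <bpf/bpf.h>
 *
 *   int add_cgroup(int map_fd, __u32 key)
 *   {
 *       int cg_fd = open("/sys/fs/cgroup/mygroup", O_RDONLY);
 *
 *       if (cg_fd < 0)
 *           return -1;
 *       return bpf_map_update_elem(map_fd, &key, &cg_fd, BPF_ANY);
 *   }
 */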
13264ed8ec52SMartin KaFai Lau
132720c20bd1SHou Tao static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
13284ed8ec52SMartin KaFai Lau {
13294ed8ec52SMartin KaFai Lau /* cgroup_put() frees the cgrp after an RCU grace period */
13304ed8ec52SMartin KaFai Lau cgroup_put(ptr);
13314ed8ec52SMartin KaFai Lau }
13324ed8ec52SMartin KaFai Lau
13334ed8ec52SMartin KaFai Lau static void cgroup_fd_array_free(struct bpf_map *map)
13344ed8ec52SMartin KaFai Lau {
133579d93b3cSHou Tao bpf_fd_array_map_clear(map, false);
13364ed8ec52SMartin KaFai Lau fd_array_map_free(map);
13374ed8ec52SMartin KaFai Lau }
13384ed8ec52SMartin KaFai Lau
133940077e0cSJohannes Berg const struct bpf_map_ops cgroup_array_map_ops = {
1340f4d05259SMartin KaFai Lau .map_meta_equal = bpf_map_meta_equal,
1341ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check,
1342ad46061fSJakub Kicinski .map_alloc = array_map_alloc,
13434ed8ec52SMartin KaFai Lau .map_free = cgroup_fd_array_free,
13444ed8ec52SMartin KaFai Lau .map_get_next_key = array_map_get_next_key,
13454ed8ec52SMartin KaFai Lau .map_lookup_elem = fd_array_map_lookup_elem,
13464ed8ec52SMartin KaFai Lau .map_delete_elem = fd_array_map_delete_elem,
13474ed8ec52SMartin KaFai Lau .map_fd_get_ptr = cgroup_fd_array_get_ptr,
13484ed8ec52SMartin KaFai Lau .map_fd_put_ptr = cgroup_fd_array_put_ptr,
1349e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf,
13501746d055SYafang Shao .map_mem_usage = array_map_mem_usage,
1351c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0],
13524ed8ec52SMartin KaFai Lau };
13534ed8ec52SMartin KaFai Lau #endif
135456f668dfSMartin KaFai Lau
135556f668dfSMartin KaFai Lau static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
135656f668dfSMartin KaFai Lau {
135756f668dfSMartin KaFai Lau struct bpf_map *map, *inner_map_meta;
135856f668dfSMartin KaFai Lau
135956f668dfSMartin KaFai Lau inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
136056f668dfSMartin KaFai Lau if (IS_ERR(inner_map_meta))
136156f668dfSMartin KaFai Lau return inner_map_meta;
136256f668dfSMartin KaFai Lau
1363ad46061fSJakub Kicinski map = array_map_alloc(attr);
136456f668dfSMartin KaFai Lau if (IS_ERR(map)) {
136556f668dfSMartin KaFai Lau bpf_map_meta_free(inner_map_meta);
136656f668dfSMartin KaFai Lau return map;
136756f668dfSMartin KaFai Lau }
136856f668dfSMartin KaFai Lau
136956f668dfSMartin KaFai Lau map->inner_map_meta = inner_map_meta;
137056f668dfSMartin KaFai Lau
137156f668dfSMartin KaFai Lau return map;
137256f668dfSMartin KaFai Lau }
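
/* Userspace sketch for creating an ARRAY_OF_MAPS (illustrative;
 * assumes libbpf >= 0.7 for bpf_map_create(); the inner map fd only
 * serves as a template and can be closed after creation):
 *
 *   #include <bpf/bpf.h>
 *
 *   int create_array_of_maps(int inner_fd)
 *   {
 *       LIBBPF_OPTS(bpf_map_create_opts, opts,
 *                   .inner_map_fd = inner_fd);
 *
 *       // attr->inner_map_fd is what bpf_map_meta_alloc() above reads.
 *       return bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer",
 *                             sizeof(__u32), sizeof(__u32), 8, &opts);
 *   }
 */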
137356f668dfSMartin KaFai Lau
137456f668dfSMartin KaFai Lau static void array_of_map_free(struct bpf_map *map)
137556f668dfSMartin KaFai Lau {
137656f668dfSMartin KaFai Lau /* map->inner_map_meta is only accessed from the syscall path,
137756f668dfSMartin KaFai Lau * which is protected by fdget/fdput.
137856f668dfSMartin KaFai Lau */
137956f668dfSMartin KaFai Lau bpf_map_meta_free(map->inner_map_meta);
138079d93b3cSHou Tao bpf_fd_array_map_clear(map, false);
138156f668dfSMartin KaFai Lau fd_array_map_free(map);
138256f668dfSMartin KaFai Lau }
138356f668dfSMartin KaFai Lau
138456f668dfSMartin KaFai Lau static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
138556f668dfSMartin KaFai Lau {
138656f668dfSMartin KaFai Lau struct bpf_map **inner_map = array_map_lookup_elem(map, key);
138756f668dfSMartin KaFai Lau
138856f668dfSMartin KaFai Lau if (!inner_map)
138956f668dfSMartin KaFai Lau return NULL;
139056f668dfSMartin KaFai Lau
139156f668dfSMartin KaFai Lau return READ_ONCE(*inner_map);
139256f668dfSMartin KaFai Lau }
139356f668dfSMartin KaFai Lau
13944a8f87e6SDaniel Borkmann static int array_of_map_gen_lookup(struct bpf_map *map,
13957b0c2a05SDaniel Borkmann struct bpf_insn *insn_buf)
13967b0c2a05SDaniel Borkmann {
1397b2157399SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map);
1398d937bc34SAndrii Nakryiko u32 elem_size = array->elem_size;
13997b0c2a05SDaniel Borkmann struct bpf_insn *insn = insn_buf;
14007b0c2a05SDaniel Borkmann const int ret = BPF_REG_0;
14017b0c2a05SDaniel Borkmann const int map_ptr = BPF_REG_1;
14027b0c2a05SDaniel Borkmann const int index = BPF_REG_2;
14037b0c2a05SDaniel Borkmann
14047b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
14057b0c2a05SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
14062c78ee89SAlexei Starovoitov if (!map->bypass_spec_v1) {
1407b2157399SAlexei Starovoitov *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1408b2157399SAlexei Starovoitov *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1409b2157399SAlexei Starovoitov } else {
14107b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1411b2157399SAlexei Starovoitov }
14127b0c2a05SDaniel Borkmann if (is_power_of_2(elem_size))
14137b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
14147b0c2a05SDaniel Borkmann else
14157b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
14167b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
14177b0c2a05SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
14187b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
14197b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
14207b0c2a05SDaniel Borkmann *insn++ = BPF_MOV64_IMM(ret, 0);
14217b0c2a05SDaniel Borkmann
14227b0c2a05SDaniel Borkmann return insn - insn_buf;
14237b0c2a05SDaniel Borkmann }
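
/* The instruction sequence emitted above corresponds roughly to the
 * following C sketch (for readability only; no such C function is
 * actually generated, the lookup is inlined as BPF instructions):
 *
 *   static void *inlined_lookup(struct bpf_array *array, void *key)
 *   {
 *       u32 index = *(u32 *)key;
 *
 *       if (index >= array->map.max_entries)
 *           return NULL;
 *       if (!array->map.bypass_spec_v1)
 *           index &= array->index_mask;   // Spectre v1 masking
 *       // Each slot holds an inner map pointer; NULL means empty.
 *       return *(void **)(array->value + (u64)index * array->elem_size);
 *   }
 */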
14247b0c2a05SDaniel Borkmann
142540077e0cSJohannes Berg const struct bpf_map_ops array_of_maps_map_ops = {
1426ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check,
142756f668dfSMartin KaFai Lau .map_alloc = array_of_map_alloc,
142856f668dfSMartin KaFai Lau .map_free = array_of_map_free,
142956f668dfSMartin KaFai Lau .map_get_next_key = array_map_get_next_key,
143056f668dfSMartin KaFai Lau .map_lookup_elem = array_of_map_lookup_elem,
143156f668dfSMartin KaFai Lau .map_delete_elem = fd_array_map_delete_elem,
143256f668dfSMartin KaFai Lau .map_fd_get_ptr = bpf_map_fd_get_ptr,
143356f668dfSMartin KaFai Lau .map_fd_put_ptr = bpf_map_fd_put_ptr,
143414dc6f04SMartin KaFai Lau .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
14357b0c2a05SDaniel Borkmann .map_gen_lookup = array_of_map_gen_lookup,
14369263dddcSTakshak Chahande .map_lookup_batch = generic_map_lookup_batch,
14379263dddcSTakshak Chahande .map_update_batch = generic_map_update_batch,
1438e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf,
14391746d055SYafang Shao .map_mem_usage = array_map_mem_usage,
1440c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0],
144156f668dfSMartin KaFai Lau };
1442