// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & BPF_F_MMAPABLE)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 cost, array_size, mask64;
	struct bpf_map_memory mem;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (percpu)
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data) {
			bpf_map_charge_finish(&mem);
			return ERR_PTR(-ENOMEM);
		}
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	bpf_map_charge_move(&array->map.memory, &mem);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_charge_finish(&array->map.memory);
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}
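
/* Element layout produced above: for plain and per-cpu arrays the element
 * area (or the per-cpu pointer table) sits directly behind struct bpf_array
 * in one allocation; for BPF_F_MMAPABLE arrays the element area instead
 * starts at the first page boundary after the header, so array->value is
 * page-aligned and can be handed to user space by array_map_mmap() below.
 */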

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
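
/* Roughly the C equivalent of the sequence emitted above (it mirrors
 * array_map_lookup_elem(), with the index masked unless Spectre v1
 * mitigation is bypassed):
 *
 *	index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;
 *	return array->value + elem_size * index;
 */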

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
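
/* Worked example of the user-visible buffer layout (illustrative numbers):
 * with value_size == 12 and 4 possible CPUs, the caller must supply a
 * 4 * round_up(12, 8) == 64 byte buffer; the loop above fills one 16-byte
 * slot per possible CPU, in for_each_possible_cpu() order.
 */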

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}
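
/* For BPF_F_LOCK updates copy_map_value_locked() holds the element's
 * embedded bpf_spin_lock while copying; copy_map_value() skips the lock
 * field when the value contains one, so the lock word itself is never
 * overwritten with caller-supplied data.
 */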

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
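
/* Illustrative user-space counterpart (not part of this file): a map
 * created with BPF_F_MMAPABLE can be mapped directly through its fd, e.g.
 *
 *	len = PAGE_ALIGN((__u64)max_entries * round_up(value_size, 8));
 *	ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
 *
 * which lands in array_map_mmap() above with vm_pgoff == 0 and passes the
 * length check against the page-aligned value area.
 */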

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = round_up(map->value_size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(info->percpu_value_buf + off,
						per_cpu_ptr(pptr, cpu),
						size);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start = bpf_array_map_seq_start,
	.next = bpf_array_map_seq_next,
	.stop = bpf_array_map_seq_stop,
	.show = bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_array_map_seq_ops,
	.init_seq_private = bpf_iter_init_array_map,
	.fini_seq_private = bpf_iter_fini_array_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};

static int array_map_btf_id;
const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &percpu_array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};
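
/* The remaining map types in this file (prog array, perf event array,
 * cgroup array, array of maps) do not store plain values; each element is
 * a kernel pointer derived from a file descriptor supplied by user space,
 * hence the fd_array_* helpers below.
 */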

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->ip_stable is not
			 *    active yet. The JIT will do the final fixup before
			 *    setting it stable. The various poke->ip_stable are
			 *    successively activated, so tail call updates can
			 *    arrive from here while JIT is still finishing its
			 *    final fixup for non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    RCU grace period already passed, we simply skip them.
			 * 4) Programs reaching a refcount of zero while patching
			 *    is in progress are okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed. When we're still in the middle of
			 *    patching and suddenly the kallsyms entry of the program
			 *    gets evicted, we just skip the rest, which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->ip_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
						 old ? (u8 *)old->bpf_func +
						 poke->adj_off : NULL,
						 new ? (u8 *)new->bpf_func +
						 poke->adj_off : NULL);
			BUG_ON(ret < 0 && ret != -EINVAL);
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

static int prog_array_map_btf_id;
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &prog_array_map_btf_id,
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
111860d20f91SSargun Dhillon #ifdef CONFIG_CGROUPS
11194ed8ec52SMartin KaFai Lau static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
11204ed8ec52SMartin KaFai Lau struct file *map_file /* not used */,
11214ed8ec52SMartin KaFai Lau int fd)
11224ed8ec52SMartin KaFai Lau {
11234ed8ec52SMartin KaFai Lau return cgroup_get_from_fd(fd);
11244ed8ec52SMartin KaFai Lau }
11254ed8ec52SMartin KaFai Lau 
11264ed8ec52SMartin KaFai Lau static void cgroup_fd_array_put_ptr(void *ptr)
11274ed8ec52SMartin KaFai Lau {
11284ed8ec52SMartin KaFai Lau /* cgroup_put() frees the cgroup after an RCU grace period */
11294ed8ec52SMartin KaFai Lau cgroup_put(ptr);
11304ed8ec52SMartin KaFai Lau }
11314ed8ec52SMartin KaFai Lau 
11324ed8ec52SMartin KaFai Lau static void cgroup_fd_array_free(struct bpf_map *map)
11334ed8ec52SMartin KaFai Lau {
11344ed8ec52SMartin KaFai Lau bpf_fd_array_map_clear(map);
11354ed8ec52SMartin KaFai Lau fd_array_map_free(map);
11364ed8ec52SMartin KaFai Lau }
11374ed8ec52SMartin KaFai Lau 
11382872e9acSAndrey Ignatov static int cgroup_array_map_btf_id;
113940077e0cSJohannes Berg const struct bpf_map_ops cgroup_array_map_ops = {
1140ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check,
1141ad46061fSJakub Kicinski .map_alloc = array_map_alloc,
11424ed8ec52SMartin KaFai Lau .map_free = cgroup_fd_array_free,
11434ed8ec52SMartin KaFai Lau .map_get_next_key = array_map_get_next_key,
11444ed8ec52SMartin KaFai Lau .map_lookup_elem = fd_array_map_lookup_elem,
11454ed8ec52SMartin KaFai Lau .map_delete_elem = fd_array_map_delete_elem,
11464ed8ec52SMartin KaFai Lau .map_fd_get_ptr = cgroup_fd_array_get_ptr,
11474ed8ec52SMartin KaFai Lau .map_fd_put_ptr = cgroup_fd_array_put_ptr,
1148e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf,
11492872e9acSAndrey Ignatov .map_btf_name = "bpf_array",
11502872e9acSAndrey Ignatov .map_btf_id = &cgroup_array_map_btf_id,
11514ed8ec52SMartin KaFai Lau };
11524ed8ec52SMartin KaFai Lau #endif
115356f668dfSMartin KaFai Lau 
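/*
 * Illustrative sketch, not part of this file: a BPF_MAP_TYPE_CGROUP_ARRAY
 * holds cgroup references, installed from user space with a cgroup directory
 * fd (resolved by cgroup_fd_array_get_ptr() above), that programs can test
 * against, e.g. with bpf_skb_under_cgroup(). Names below are made up; the
 * helper and BTF-map syntax are assumed from libbpf.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
 *		__uint(max_entries, 1);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} cgrp_arr SEC(".maps");
 *
 *	SEC("socket")
 *	int cgrp_filter(struct __sk_buff *skb)
 *	{
 *		// keep packets only for sockets inside the cgroup at slot 0
 *		if (bpf_skb_under_cgroup(skb, &cgrp_arr, 0) == 1)
 *			return skb->len;
 *		return 0;
 *	}
 */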
115456f668dfSMartin KaFai Lau static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
115556f668dfSMartin KaFai Lau {
115656f668dfSMartin KaFai Lau struct bpf_map *map, *inner_map_meta;
115756f668dfSMartin KaFai Lau 
115856f668dfSMartin KaFai Lau inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
115956f668dfSMartin KaFai Lau if (IS_ERR(inner_map_meta))
116056f668dfSMartin KaFai Lau return inner_map_meta;
116156f668dfSMartin KaFai Lau 
1162ad46061fSJakub Kicinski map = array_map_alloc(attr);
116356f668dfSMartin KaFai Lau if (IS_ERR(map)) {
116456f668dfSMartin KaFai Lau bpf_map_meta_free(inner_map_meta);
116556f668dfSMartin KaFai Lau return map;
116656f668dfSMartin KaFai Lau }
116756f668dfSMartin KaFai Lau 
116856f668dfSMartin KaFai Lau map->inner_map_meta = inner_map_meta;
116956f668dfSMartin KaFai Lau 
117056f668dfSMartin KaFai Lau return map;
117156f668dfSMartin KaFai Lau }
117256f668dfSMartin KaFai Lau 
117356f668dfSMartin KaFai Lau static void array_of_map_free(struct bpf_map *map)
117456f668dfSMartin KaFai Lau {
117556f668dfSMartin KaFai Lau /* map->inner_map_meta is only accessed from the syscall path, which
117656f668dfSMartin KaFai Lau * is protected by fdget/fdput.
117756f668dfSMartin KaFai Lau */
117856f668dfSMartin KaFai Lau bpf_map_meta_free(map->inner_map_meta);
117956f668dfSMartin KaFai Lau bpf_fd_array_map_clear(map);
118056f668dfSMartin KaFai Lau fd_array_map_free(map);
118156f668dfSMartin KaFai Lau }
118256f668dfSMartin KaFai Lau 
118356f668dfSMartin KaFai Lau static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
118456f668dfSMartin KaFai Lau {
118556f668dfSMartin KaFai Lau struct bpf_map **inner_map = array_map_lookup_elem(map, key);
118656f668dfSMartin KaFai Lau 
118756f668dfSMartin KaFai Lau if (!inner_map)
118856f668dfSMartin KaFai Lau return NULL;
118956f668dfSMartin KaFai Lau 
119056f668dfSMartin KaFai Lau return READ_ONCE(*inner_map);
119156f668dfSMartin KaFai Lau }
119256f668dfSMartin KaFai Lau 
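/*
 * Descriptive note (added comment): array_of_map_gen_lookup() below lets the
 * verifier inline the inner-map lookup instead of emitting a helper call.
 * Roughly, the emitted sequence: advance r1 (the map pointer) to
 * array->value, load the u32 index from the key pointer in r2, bound it
 * against max_entries (and mask it with index_mask unless Spectre v1 checks
 * are bypassed), scale it by the rounded-up element size, add the array base
 * and load the inner map pointer stored in that slot; an empty slot makes
 * the lookup return NULL (r0 = 0).
 */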
11937b0c2a05SDaniel Borkmann static u32 array_of_map_gen_lookup(struct bpf_map *map,
11947b0c2a05SDaniel Borkmann struct bpf_insn *insn_buf)
11957b0c2a05SDaniel Borkmann {
1196b2157399SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map);
11977b0c2a05SDaniel Borkmann u32 elem_size = round_up(map->value_size, 8);
11987b0c2a05SDaniel Borkmann struct bpf_insn *insn = insn_buf;
11997b0c2a05SDaniel Borkmann const int ret = BPF_REG_0;
12007b0c2a05SDaniel Borkmann const int map_ptr = BPF_REG_1;
12017b0c2a05SDaniel Borkmann const int index = BPF_REG_2;
12027b0c2a05SDaniel Borkmann 
12037b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
12047b0c2a05SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
12052c78ee89SAlexei Starovoitov if (!map->bypass_spec_v1) {
1206b2157399SAlexei Starovoitov *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1207b2157399SAlexei Starovoitov *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1208b2157399SAlexei Starovoitov } else {
12097b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1210b2157399SAlexei Starovoitov }
12117b0c2a05SDaniel Borkmann if (is_power_of_2(elem_size))
12127b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
12137b0c2a05SDaniel Borkmann else
12147b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
12157b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
12167b0c2a05SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
12177b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
12187b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
12197b0c2a05SDaniel Borkmann *insn++ = BPF_MOV64_IMM(ret, 0);
12207b0c2a05SDaniel Borkmann 
12217b0c2a05SDaniel Borkmann return insn - insn_buf;
12227b0c2a05SDaniel Borkmann }
12237b0c2a05SDaniel Borkmann 
12242872e9acSAndrey Ignatov static int array_of_maps_map_btf_id;
122540077e0cSJohannes Berg const struct bpf_map_ops array_of_maps_map_ops = {
1226ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check,
122756f668dfSMartin KaFai Lau .map_alloc = array_of_map_alloc,
122856f668dfSMartin KaFai Lau .map_free = array_of_map_free,
122956f668dfSMartin KaFai Lau .map_get_next_key = array_map_get_next_key,
123056f668dfSMartin KaFai Lau .map_lookup_elem = array_of_map_lookup_elem,
123156f668dfSMartin KaFai Lau .map_delete_elem = fd_array_map_delete_elem,
123256f668dfSMartin KaFai Lau .map_fd_get_ptr = bpf_map_fd_get_ptr,
123356f668dfSMartin KaFai Lau .map_fd_put_ptr = bpf_map_fd_put_ptr,
123414dc6f04SMartin KaFai Lau .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
12357b0c2a05SDaniel Borkmann .map_gen_lookup = array_of_map_gen_lookup,
1236e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf,
12372872e9acSAndrey Ignatov .map_btf_name = "bpf_array",
12382872e9acSAndrey Ignatov .map_btf_id = &array_of_maps_map_btf_id,
123956f668dfSMartin KaFai Lau };
1240
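/*
 * Illustrative sketch, not part of this file: declaring and using a
 * BPF_MAP_TYPE_ARRAY_OF_MAPS. The outer map's slots hold inner map fds
 * (resolved by bpf_map_fd_get_ptr() above), and attr->inner_map_fd supplies
 * the prototype consumed by bpf_map_meta_alloc() in array_of_map_alloc().
 * Names below are made up and the BTF map-in-map declaration syntax is
 * assumed from libbpf.
 *
 *	struct inner {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} inner_a SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 4);
 *		__uint(key_size, sizeof(__u32));
 *		__array(values, struct inner);
 *	} outer SEC(".maps") = {
 *		.values = { [0] = &inner_a },
 *	};
 *
 *	SEC("xdp")
 *	int use_inner(struct xdp_md *ctx)
 *	{
 *		__u32 outer_key = 0, inner_key = 0;
 *		void *inner_map;
 *		__u64 *val;
 *
 *		// first lookup returns the inner map, second looks up in it
 *		inner_map = bpf_map_lookup_elem(&outer, &outer_key);
 *		if (!inner_map)
 *			return XDP_PASS;
 *		val = bpf_map_lookup_elem(inner_map, &inner_key);
 *		if (val)
 *			__sync_fetch_and_add(val, 1);
 *		return XDP_PASS;
 *	}
 */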