// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & BPF_F_MMAPABLE)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
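
/* A single allocation backs the whole map: struct bpf_array is followed
 * either by an array of per-cpu pointers (percpu arrays) or by the flat
 * value area. For BPF_F_MMAPABLE maps the value area is placed on its own
 * page-aligned vmalloc region so it can later be mapped into user space.
 */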
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 cost, array_size, mask64;
	struct bpf_map_memory mem;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (percpu)
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data) {
			bpf_map_charge_finish(&mem);
			return ERR_PTR(-ENOMEM);
		}
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	bpf_map_charge_move(&array->map.memory, &mem);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_charge_finish(&array->map.memory);
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}
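
/* Lookups mask the index with index_mask so that, even under speculative
 * execution, loads cannot reach beyond the rounded-up array size. The
 * verifier may also inline this helper via array_map_gen_lookup() below.
 */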
/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}
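
/* Roughly, the sequence emitted below is (jump offsets omitted):
 *
 *   r1 += offsetof(struct bpf_array, value)
 *   r0 = *(u32 *)(r2 + 0)
 *   if r0 >= max_entries goto miss
 *   r0 &= index_mask            // only when Spectre v1 mitigation is on
 *   r0 <<= ilog2(elem_size)     // or r0 *= elem_size
 *   r0 += r1
 *   goto out
 * miss:
 *   r0 = 0
 * out:
 */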
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
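
/* Iteration helper for the syscall: a NULL or out-of-range key restarts at
 * index 0, the last valid index reports -ENOENT, anything else yields key + 1.
 */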
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}
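
/* Syscall-side update for percpu arrays: the caller supplies one value image
 * per possible CPU, laid out back to back in round_up(value_size, 8) strides.
 */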
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}
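
/* Pretty-printers used when map contents are dumped through a seq_file:
 * keys and values are rendered via their BTF type information.
 */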
static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}
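
/* mmap() of a BPF_F_MMAPABLE array maps only the value area; pgoff skips the
 * page-aligned struct bpf_array header that precedes it in the vmalloc
 * region. A typical (hypothetical) user-space call would look roughly like
 * mmap(NULL, value_area_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0).
 */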
static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	return meta0->max_entries == meta1->max_entries &&
	       bpf_map_meta_equal(meta0, meta1);
}
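
/* bpf_iter support: the seq_file callbacks below walk the array one index at
 * a time; for percpu arrays the per-CPU values are first gathered into a
 * contiguous buffer allocated in bpf_iter_init_array_map().
 */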
struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = round_up(map->value_size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(info->percpu_value_buf + off,
						per_cpu_ptr(pptr, cpu),
						size);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start	= bpf_array_map_seq_start,
	.next	= bpf_array_map_seq_next,
	.stop	= bpf_array_map_seq_stop,
	.show	= bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_array_map_seq_ops,
	.init_seq_private	= bpf_iter_init_array_map,
	.fini_seq_private	= bpf_iter_fini_array_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
};

static int array_map_btf_id;
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &percpu_array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};
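
/* The remaining map types are "fd arrays": user space stores file descriptors
 * in them and the kernel converts each fd into the underlying object pointer
 * (bpf_prog, perf event, cgroup or inner map) via ->map_fd_get_ptr().
 * Generic element lookup from programs is not supported for these maps.
 */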
static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}
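
/* Tail-call poke tracking: each program that emits a direct tail call into
 * this map registers its aux info here, so that updates to a map slot can
 * re-patch the JITed call sites via bpf_arch_text_poke().
 */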
struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    RCU grace period already passed, we simply skip them.
			 * 4) Also programs reaching refcount of zero while patching
			 *    is in progress is okay since we're protected under
			 *    poke_mutex and untrack the programs before the JIT
			 *    buffer is freed. When we're still in the middle of
			 *    patching and suddenly kallsyms entry of the program
			 *    gets evicted, we just skip the rest which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			ret = bpf_arch_text_poke(poke->tailcall_target, BPF_MOD_JUMP,
						 old ? (u8 *)old->bpf_func +
						 poke->adj_off : NULL,
						 new ? (u8 *)new->bpf_func +
						 poke->adj_off : NULL);
			BUG_ON(ret < 0 && ret != -EINVAL);
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
static int prog_array_map_btf_id;
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &prog_array_map_btf_id,
};
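
/* Perf event arrays do not store the perf event itself but a bpf_event_entry
 * wrapper that pins the perf file; entries are freed after an RCU grace
 * period so that programs still running can safely dereference them.
 */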
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static int perf_event_array_map_btf_id;
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &perf_event_array_map_btf_id,
};
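
/* Cgroup arrays store struct cgroup pointers resolved from fds via
 * cgroup_get_from_fd(); on map free all entries are cleared first so the
 * cgroup references are dropped before the array itself is released.
 */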
-EOPNOTSUPP) 10863b1efb19SDaniel Borkmann goto err_out; 1087ea317b26SKaixu Xia 10883b1efb19SDaniel Borkmann ee = bpf_event_entry_gen(perf_file, map_file); 10893b1efb19SDaniel Borkmann if (ee) 10903b1efb19SDaniel Borkmann return ee; 10913b1efb19SDaniel Borkmann ee = ERR_PTR(-ENOMEM); 10923b1efb19SDaniel Borkmann err_out: 10933b1efb19SDaniel Borkmann fput(perf_file); 10943b1efb19SDaniel Borkmann return ee; 1095ea317b26SKaixu Xia } 1096ea317b26SKaixu Xia 1097ea317b26SKaixu Xia static void perf_event_fd_array_put_ptr(void *ptr) 1098ea317b26SKaixu Xia { 10993b1efb19SDaniel Borkmann bpf_event_entry_free_rcu(ptr); 11003b1efb19SDaniel Borkmann } 11013b1efb19SDaniel Borkmann 11023b1efb19SDaniel Borkmann static void perf_event_fd_array_release(struct bpf_map *map, 11033b1efb19SDaniel Borkmann struct file *map_file) 11043b1efb19SDaniel Borkmann { 11053b1efb19SDaniel Borkmann struct bpf_array *array = container_of(map, struct bpf_array, map); 11063b1efb19SDaniel Borkmann struct bpf_event_entry *ee; 11073b1efb19SDaniel Borkmann int i; 11083b1efb19SDaniel Borkmann 11093b1efb19SDaniel Borkmann rcu_read_lock(); 11103b1efb19SDaniel Borkmann for (i = 0; i < array->map.max_entries; i++) { 11113b1efb19SDaniel Borkmann ee = READ_ONCE(array->ptrs[i]); 11123b1efb19SDaniel Borkmann if (ee && ee->map_file == map_file) 11133b1efb19SDaniel Borkmann fd_array_map_delete_elem(map, &i); 11143b1efb19SDaniel Borkmann } 11153b1efb19SDaniel Borkmann rcu_read_unlock(); 1116ea317b26SKaixu Xia } 1117ea317b26SKaixu Xia 11182872e9acSAndrey Ignatov static int perf_event_array_map_btf_id; 111940077e0cSJohannes Berg const struct bpf_map_ops perf_event_array_map_ops = { 1120f4d05259SMartin KaFai Lau .map_meta_equal = bpf_map_meta_equal, 1121ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check, 1122ad46061fSJakub Kicinski .map_alloc = array_map_alloc, 11233b1efb19SDaniel Borkmann .map_free = fd_array_map_free, 1124ea317b26SKaixu Xia .map_get_next_key = array_map_get_next_key, 1125ea317b26SKaixu Xia .map_lookup_elem = fd_array_map_lookup_elem, 1126ea317b26SKaixu Xia .map_delete_elem = fd_array_map_delete_elem, 1127ea317b26SKaixu Xia .map_fd_get_ptr = perf_event_fd_array_get_ptr, 1128ea317b26SKaixu Xia .map_fd_put_ptr = perf_event_fd_array_put_ptr, 11293b1efb19SDaniel Borkmann .map_release = perf_event_fd_array_release, 1130e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf, 11312872e9acSAndrey Ignatov .map_btf_name = "bpf_array", 11322872e9acSAndrey Ignatov .map_btf_id = &perf_event_array_map_btf_id, 1133ea317b26SKaixu Xia }; 1134ea317b26SKaixu Xia 113560d20f91SSargun Dhillon #ifdef CONFIG_CGROUPS 11364ed8ec52SMartin KaFai Lau static void *cgroup_fd_array_get_ptr(struct bpf_map *map, 11374ed8ec52SMartin KaFai Lau struct file *map_file /* not used */, 11384ed8ec52SMartin KaFai Lau int fd) 11394ed8ec52SMartin KaFai Lau { 11404ed8ec52SMartin KaFai Lau return cgroup_get_from_fd(fd); 11414ed8ec52SMartin KaFai Lau } 11424ed8ec52SMartin KaFai Lau 11434ed8ec52SMartin KaFai Lau static void cgroup_fd_array_put_ptr(void *ptr) 11444ed8ec52SMartin KaFai Lau { 11454ed8ec52SMartin KaFai Lau /* cgroup_put free cgrp after a rcu grace period */ 11464ed8ec52SMartin KaFai Lau cgroup_put(ptr); 11474ed8ec52SMartin KaFai Lau } 11484ed8ec52SMartin KaFai Lau 11494ed8ec52SMartin KaFai Lau static void cgroup_fd_array_free(struct bpf_map *map) 11504ed8ec52SMartin KaFai Lau { 11514ed8ec52SMartin KaFai Lau bpf_fd_array_map_clear(map); 11524ed8ec52SMartin KaFai Lau fd_array_map_free(map); 11534ed8ec52SMartin KaFai Lau } 11544ed8ec52SMartin 
KaFai Lau 11552872e9acSAndrey Ignatov static int cgroup_array_map_btf_id; 115640077e0cSJohannes Berg const struct bpf_map_ops cgroup_array_map_ops = { 1157f4d05259SMartin KaFai Lau .map_meta_equal = bpf_map_meta_equal, 1158ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check, 1159ad46061fSJakub Kicinski .map_alloc = array_map_alloc, 11604ed8ec52SMartin KaFai Lau .map_free = cgroup_fd_array_free, 11614ed8ec52SMartin KaFai Lau .map_get_next_key = array_map_get_next_key, 11624ed8ec52SMartin KaFai Lau .map_lookup_elem = fd_array_map_lookup_elem, 11634ed8ec52SMartin KaFai Lau .map_delete_elem = fd_array_map_delete_elem, 11644ed8ec52SMartin KaFai Lau .map_fd_get_ptr = cgroup_fd_array_get_ptr, 11654ed8ec52SMartin KaFai Lau .map_fd_put_ptr = cgroup_fd_array_put_ptr, 1166e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf, 11672872e9acSAndrey Ignatov .map_btf_name = "bpf_array", 11682872e9acSAndrey Ignatov .map_btf_id = &cgroup_array_map_btf_id, 11694ed8ec52SMartin KaFai Lau }; 11704ed8ec52SMartin KaFai Lau #endif 117156f668dfSMartin KaFai Lau 117256f668dfSMartin KaFai Lau static struct bpf_map *array_of_map_alloc(union bpf_attr *attr) 117356f668dfSMartin KaFai Lau { 117456f668dfSMartin KaFai Lau struct bpf_map *map, *inner_map_meta; 117556f668dfSMartin KaFai Lau 117656f668dfSMartin KaFai Lau inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); 117756f668dfSMartin KaFai Lau if (IS_ERR(inner_map_meta)) 117856f668dfSMartin KaFai Lau return inner_map_meta; 117956f668dfSMartin KaFai Lau 1180ad46061fSJakub Kicinski map = array_map_alloc(attr); 118156f668dfSMartin KaFai Lau if (IS_ERR(map)) { 118256f668dfSMartin KaFai Lau bpf_map_meta_free(inner_map_meta); 118356f668dfSMartin KaFai Lau return map; 118456f668dfSMartin KaFai Lau } 118556f668dfSMartin KaFai Lau 118656f668dfSMartin KaFai Lau map->inner_map_meta = inner_map_meta; 118756f668dfSMartin KaFai Lau 118856f668dfSMartin KaFai Lau return map; 118956f668dfSMartin KaFai Lau } 119056f668dfSMartin KaFai Lau 119156f668dfSMartin KaFai Lau static void array_of_map_free(struct bpf_map *map) 119256f668dfSMartin KaFai Lau { 119356f668dfSMartin KaFai Lau /* map->inner_map_meta is only accessed by syscall which 119456f668dfSMartin KaFai Lau * is protected by fdget/fdput. 
119556f668dfSMartin KaFai Lau */ 119656f668dfSMartin KaFai Lau bpf_map_meta_free(map->inner_map_meta); 119756f668dfSMartin KaFai Lau bpf_fd_array_map_clear(map); 119856f668dfSMartin KaFai Lau fd_array_map_free(map); 119956f668dfSMartin KaFai Lau } 120056f668dfSMartin KaFai Lau 120156f668dfSMartin KaFai Lau static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) 120256f668dfSMartin KaFai Lau { 120356f668dfSMartin KaFai Lau struct bpf_map **inner_map = array_map_lookup_elem(map, key); 120456f668dfSMartin KaFai Lau 120556f668dfSMartin KaFai Lau if (!inner_map) 120656f668dfSMartin KaFai Lau return NULL; 120756f668dfSMartin KaFai Lau 120856f668dfSMartin KaFai Lau return READ_ONCE(*inner_map); 120956f668dfSMartin KaFai Lau } 121056f668dfSMartin KaFai Lau 12117b0c2a05SDaniel Borkmann static u32 array_of_map_gen_lookup(struct bpf_map *map, 12127b0c2a05SDaniel Borkmann struct bpf_insn *insn_buf) 12137b0c2a05SDaniel Borkmann { 1214b2157399SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 12157b0c2a05SDaniel Borkmann u32 elem_size = round_up(map->value_size, 8); 12167b0c2a05SDaniel Borkmann struct bpf_insn *insn = insn_buf; 12177b0c2a05SDaniel Borkmann const int ret = BPF_REG_0; 12187b0c2a05SDaniel Borkmann const int map_ptr = BPF_REG_1; 12197b0c2a05SDaniel Borkmann const int index = BPF_REG_2; 12207b0c2a05SDaniel Borkmann 12217b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 12227b0c2a05SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 12232c78ee89SAlexei Starovoitov if (!map->bypass_spec_v1) { 1224b2157399SAlexei Starovoitov *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6); 1225b2157399SAlexei Starovoitov *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); 1226b2157399SAlexei Starovoitov } else { 12277b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); 1228b2157399SAlexei Starovoitov } 12297b0c2a05SDaniel Borkmann if (is_power_of_2(elem_size)) 12307b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 12317b0c2a05SDaniel Borkmann else 12327b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size); 12337b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr); 12347b0c2a05SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0); 12357b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); 12367b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); 12377b0c2a05SDaniel Borkmann *insn++ = BPF_MOV64_IMM(ret, 0); 12387b0c2a05SDaniel Borkmann 12397b0c2a05SDaniel Borkmann return insn - insn_buf; 12407b0c2a05SDaniel Borkmann } 12417b0c2a05SDaniel Borkmann 12422872e9acSAndrey Ignatov static int array_of_maps_map_btf_id; 124340077e0cSJohannes Berg const struct bpf_map_ops array_of_maps_map_ops = { 1244ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check, 124556f668dfSMartin KaFai Lau .map_alloc = array_of_map_alloc, 124656f668dfSMartin KaFai Lau .map_free = array_of_map_free, 124756f668dfSMartin KaFai Lau .map_get_next_key = array_map_get_next_key, 124856f668dfSMartin KaFai Lau .map_lookup_elem = array_of_map_lookup_elem, 124956f668dfSMartin KaFai Lau .map_delete_elem = fd_array_map_delete_elem, 125056f668dfSMartin KaFai Lau .map_fd_get_ptr = bpf_map_fd_get_ptr, 125156f668dfSMartin KaFai Lau .map_fd_put_ptr = bpf_map_fd_put_ptr, 125214dc6f04SMartin KaFai Lau .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, 12537b0c2a05SDaniel Borkmann .map_gen_lookup = 
array_of_map_gen_lookup, 1254e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf, 12552872e9acSAndrey Ignatov .map_btf_name = "bpf_array", 12562872e9acSAndrey Ignatov .map_btf_id = &array_of_maps_map_btf_id, 125756f668dfSMartin KaFai Lau }; 1258
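/*
 * Illustrative sketch, not part of this file: the prog_array map handled by
 * prog_array_map_ops above is normally declared in a BPF program and consumed
 * with bpf_tail_call(); userspace fills it with program fds, which flow
 * through prog_fd_array_get_ptr() and the poke tracking shown earlier. The
 * map name "jmp_table" and the XDP attach point are hypothetical.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int dispatcher(struct xdp_md *ctx)
{
	/* Jump to the program stored at index 0; falls through on a miss. */
	bpf_tail_call(ctx, &jmp_table, 0);
	return XDP_PASS;
}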
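/*
 * Illustrative sketch, not part of this file: the userspace side of a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY. The fd stored per CPU is resolved by
 * perf_event_fd_array_get_ptr() above into a struct bpf_event_entry. The
 * helper name attach_perf_event() is hypothetical; map_fd is assumed to be
 * an already-created perf event array.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <bpf/bpf.h>

static int attach_perf_event(int map_fd, int cpu)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
		.size = sizeof(attr),
	};
	int pfd;

	/* perf_event_open(&attr, pid, cpu, group_fd, flags) */
	pfd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
	if (pfd < 0)
		return -1;

	/* The update lands in perf_event_fd_array_get_ptr() in the kernel. */
	return bpf_map_update_elem(map_fd, &cpu, &pfd, BPF_ANY);
}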
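/*
 * Illustrative sketch, not part of this file: a BPF_MAP_TYPE_CGROUP_ARRAY is
 * filled from userspace with a cgroup directory fd (resolved by
 * cgroup_fd_array_get_ptr() above) and queried from a tracing program with
 * bpf_current_task_under_cgroup(). The map name and kprobe attach point are
 * hypothetical.
 */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} cgrp_map SEC(".maps");

SEC("kprobe/do_sys_open")	/* hypothetical attach point */
int trace_open(struct pt_regs *ctx)
{
	if (bpf_current_task_under_cgroup(&cgrp_map, 0) == 1)
		bpf_printk("open() from the target cgroup");
	return 0;
}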
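/*
 * Illustrative sketch, not part of this file: creating a
 * BPF_MAP_TYPE_ARRAY_OF_MAPS with the raw bpf(2) syscall. The inner_map_fd
 * supplied here is what array_of_map_alloc() above turns into
 * inner_map_meta; only maps matching that template can be inserted later.
 * The helper name create_array_of_maps() is hypothetical.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int create_array_of_maps(int inner_map_fd, unsigned int max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_ARRAY_OF_MAPS;
	attr.key_size = 4;		/* array maps require 4-byte keys */
	attr.value_size = 4;		/* value written by userspace is an inner map fd */
	attr.max_entries = max_entries;
	attr.inner_map_fd = inner_map_fd;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}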
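/*
 * Rough C equivalent, for illustration only, of the instruction sequence
 * emitted by array_of_map_gen_lookup() above: load the u32 index, bounds
 * check it, mask it unless Spectre v1 mitigations are bypassed, then load
 * the inner map pointer stored in the element. This function is a sketch
 * and is not compiled as part of this file.
 */
static void *array_of_map_lookup_sketch(struct bpf_array *array, u32 index)
{
	u32 elem_size = round_up(array->map.value_size, 8);
	void *elem;

	if (index >= array->map.max_entries)
		return NULL;
	if (!array->map.bypass_spec_v1)
		index &= array->index_mask;
	elem = array->value + (u64)index * elem_size;
	return *(void **)elem;	/* READ_ONCE() in array_of_map_lookup_elem() */
}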