/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

static void bpf_array_free_percpu(struct bpf_array *array)
{
        int i;

        for (i = 0; i < array->map.max_entries; i++) {
                free_percpu(array->pptrs[i]);
                cond_resched();
        }
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
        void __percpu *ptr;
        int i;

        for (i = 0; i < array->map.max_entries; i++) {
                ptr = __alloc_percpu_gfp(array->elem_size, 8,
                                         GFP_USER | __GFP_NOWARN);
                if (!ptr) {
                        bpf_array_free_percpu(array);
                        return -ENOMEM;
                }
                array->pptrs[i] = ptr;
                cond_resched();
        }

        return 0;
}
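/* Illustrative note: for a BPF_MAP_TYPE_PERCPU_ARRAY with value_size == 20,
 * elem_size is round_up(20, 8) == 24, so each slot gets a 24-byte,
 * 8-byte-aligned per-cpu allocation from the helper above; the
 * cond_resched() calls keep maps with millions of entries from hogging
 * the CPU during setup and teardown.
 */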
/* Called from syscall */
static int array_map_alloc_check(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int numa_node = bpf_map_attr_numa_node(attr);

        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size == 0 ||
            attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
            (percpu && numa_node != NUMA_NO_NODE))
                return -EINVAL;

        if (attr->value_size > KMALLOC_MAX_SIZE)
                /* if value_size is bigger, the user space won't be able to
                 * access the elements.
                 */
                return -E2BIG;

        return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
        bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        int ret, numa_node = bpf_map_attr_numa_node(attr);
        u32 elem_size, index_mask, max_entries;
        bool unpriv = !capable(CAP_SYS_ADMIN);
        u64 cost, array_size, mask64;
        struct bpf_array *array;

        elem_size = round_up(attr->value_size, 8);

        max_entries = attr->max_entries;

        /* On 32 bit archs roundup_pow_of_two() with max_entries that has
         * upper most bit set in u32 space is undefined behavior due to
         * resulting 1U << 32, so do it manually here in u64 space.
         */
        mask64 = fls_long(max_entries - 1);
        mask64 = 1ULL << mask64;
        mask64 -= 1;

        index_mask = mask64;
        if (unpriv) {
                /* round up array size to nearest power of 2,
                 * since cpu will speculate within index_mask limits
                 */
                max_entries = index_mask + 1;
                /* Check for overflows. */
                if (max_entries < attr->max_entries)
                        return ERR_PTR(-E2BIG);
        }
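        /* Worked example (illustrative): for attr->max_entries == 5,
         * fls_long(4) == 3, so mask64 == (1ULL << 3) - 1 == 7 and an
         * unprivileged map is rounded up to max_entries == 8; every
         * lookup index is then ANDed with 7, which keeps even
         * speculatively executed loads inside the allocation.
         */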
        array_size = sizeof(*array);
        if (percpu)
                array_size += (u64) max_entries * sizeof(void *);
        else
                array_size += (u64) max_entries * elem_size;

        /* make sure there is no u32 overflow later in round_up() */
        cost = array_size;
        if (cost >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);
        if (percpu) {
                cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
                if (cost >= U32_MAX - PAGE_SIZE)
                        return ERR_PTR(-ENOMEM);
        }
        cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

        ret = bpf_map_precharge_memlock(cost);
        if (ret < 0)
                return ERR_PTR(ret);

        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(array_size, numa_node);
        if (!array)
                return ERR_PTR(-ENOMEM);
        array->index_mask = index_mask;
        array->map.unpriv_array = unpriv;

        /* copy mandatory map attributes */
        bpf_map_init_from_attr(&array->map, attr);
        array->map.pages = cost;
        array->elem_size = elem_size;

        if (percpu && bpf_array_alloc_percpu(array)) {
                bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }

        return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return array->value + array->elem_size * (index & array->index_mask);
}
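/* Note: the index above is already bounds-checked, but the AND with
 * index_mask also clamps the access when the branch is speculatively
 * mispredicted (Spectre v1); index_mask always spans at least
 * max_entries, so the mask never alters a valid index.
 */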
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_insn *insn = insn_buf;
        u32 elem_size = round_up(map->value_size, 8);
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;

        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        if (map->unpriv_array) {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
                *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
        } else {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
        }

        if (is_power_of_2(elem_size)) {
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        } else {
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        }
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);
        return insn - insn_buf;
}
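/* Illustrative C equivalent of the instruction sequence emitted above
 * (the AND is only emitted for unprivileged maps):
 *
 *      void *elem = NULL;
 *      if (index < map->max_entries) {
 *              index &= array->index_mask;     // unpriv only
 *              elem = array->value + elem_size * index;
 *      }
 *      return elem;
 *
 * The JGE offsets (4 vs 3) simply skip the different number of remaining
 * instructions in the two variants.
 */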
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(index >= array->map.max_entries))
                return -ENOENT;

        /* per_cpu areas are zero-filled and bpf programs can only
         * access 'value_size' of them, so copying rounded areas
         * will not leak any kernel data
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = (u32 *)next_key;

        if (index >= array->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == array->map.max_entries - 1)
                return -ENOENT;

        *next = index + 1;
        return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
                                 u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
                       value, map->value_size);
        else
                memcpy(array->value +
                       array->elem_size * (index & array->index_mask),
                       value, map->value_size);
        return 0;
}
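/* Because every slot exists from creation time, BPF_NOEXIST can never
 * succeed on an array and BPF_EXIST always can; the flag checks above
 * (and below) encode exactly that.
 */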
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
                            u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
        void __percpu *pptr;
        int cpu, off = 0;
        u32 size;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;

        if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;

        /* user space will provide round_up(value_size, 8) bytes that
         * will be copied into the per-cpu area. bpf programs can only
         * access value_size of it. During lookup the same extra bytes
         * will be returned, or zeros which were zero-filled by
         * percpu_alloc, so no kernel data leaks are possible.
         */
        size = round_up(map->value_size, 8);
        rcu_read_lock();
        pptr = array->pptrs[index & array->index_mask];
        for_each_possible_cpu(cpu) {
                bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
                off += size;
        }
        rcu_read_unlock();
        return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
        return -EINVAL;
}
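/* Deletion is unsupported by design: array slots are preallocated and
 * always live, so there is nothing to unlink; callers get -EINVAL.
 */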
/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);

        /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
         * so the programs (there can be more than one using this map) have
         * been disconnected from events. Wait for outstanding programs to
         * complete and free the array.
         */
        synchronize_rcu();

        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                bpf_array_free_percpu(array);

        bpf_map_area_free(array);
}

const struct bpf_map_ops array_map_ops = {
        .map_alloc_check = array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
        .map_gen_lookup = array_map_gen_lookup,
};

const struct bpf_map_ops percpu_array_map_ops = {
        .map_alloc_check = array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = percpu_array_map_lookup_elem,
        .map_update_elem = array_map_update_elem,
        .map_delete_elem = array_map_delete_elem,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
        /* only file descriptors can be stored in this type of map */
        if (attr->value_size != sizeof(u32))
                return -EINVAL;
        return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        synchronize_rcu();

        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
                BUG_ON(array->ptrs[i] != NULL);

        bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
        return NULL;
}
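/* fd-array values are raw kernel pointers (progs, perf events, cgroups,
 * inner maps) that must never leak to eBPF programs, hence the
 * unconditional NULL above; the syscall path below instead translates
 * the stored pointer into a user-visible value such as a prog id.
 */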
/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
        void **elem, *ptr;
        int ret = 0;

        if (!map->ops->map_fd_sys_lookup_elem)
                return -ENOTSUPP;

        rcu_read_lock();
        elem = array_map_lookup_elem(map, key);
        if (elem && (ptr = READ_ONCE(*elem)))
                *value = map->ops->map_fd_sys_lookup_elem(ptr);
        else
                ret = -ENOENT;
        rcu_read_unlock();

        return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *new_ptr, *old_ptr;
        u32 index = *(u32 *)key, ufd;

        if (map_flags != BPF_ANY)
                return -EINVAL;

        if (index >= array->map.max_entries)
                return -E2BIG;

        ufd = *(u32 *)value;
        new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
        if (IS_ERR(new_ptr))
                return PTR_ERR(new_ptr);

        old_ptr = xchg(array->ptrs + index, new_ptr);
        if (old_ptr)
                map->ops->map_fd_put_ptr(old_ptr);

        return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        void *old_ptr;
        u32 index = *(u32 *)key;

        if (index >= array->map.max_entries)
                return -E2BIG;

        old_ptr = xchg(array->ptrs + index, NULL);
        if (old_ptr) {
                map->ops->map_fd_put_ptr(old_ptr);
                return 0;
        } else {
                return -ENOENT;
        }
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
                                   struct file *map_file, int fd)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_prog *prog = bpf_prog_get(fd);

        if (IS_ERR(prog))
                return prog;

        if (!bpf_prog_array_compatible(array, prog)) {
                bpf_prog_put(prog);
                return ERR_PTR(-EINVAL);
        }

        return prog;
}
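/* bpf_prog_array_compatible() above rejects programs whose type differs
 * from what this prog_array has already committed to, so a tail call can
 * never land in a program expecting a different context.
 */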
static void prog_fd_array_put_ptr(void *ptr)
{
        bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
        return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;

        for (i = 0; i < array->map.max_entries; i++)
                fd_array_map_delete_elem(map, &i);
}

const struct bpf_map_ops prog_array_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = prog_fd_array_get_ptr,
        .map_fd_put_ptr = prog_fd_array_put_ptr,
        .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
                                                   struct file *map_file)
{
        struct bpf_event_entry *ee;

        ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
        if (ee) {
                ee->event = perf_file->private_data;
                ee->perf_file = perf_file;
                ee->map_file = map_file;
        }

        return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
        struct bpf_event_entry *ee;

        ee = container_of(rcu, struct bpf_event_entry, rcu);
        fput(ee->perf_file);
        kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
        call_rcu(&ee->rcu, __bpf_event_entry_free);
}
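/* The entry is freed via call_rcu() because a bpf program running under
 * rcu_read_lock() may still be dereferencing it; the perf file reference
 * is only dropped (fput) once the grace period has elapsed.
 */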
static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
                                         struct file *map_file, int fd)
{
        struct bpf_event_entry *ee;
        struct perf_event *event;
        struct file *perf_file;
        u64 value;

        perf_file = perf_event_get(fd);
        if (IS_ERR(perf_file))
                return perf_file;

        ee = ERR_PTR(-EOPNOTSUPP);
        event = perf_file->private_data;
        if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
                goto err_out;

        ee = bpf_event_entry_gen(perf_file, map_file);
        if (ee)
                return ee;
        ee = ERR_PTR(-ENOMEM);
err_out:
        fput(perf_file);
        return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
        bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
                                        struct file *map_file)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct bpf_event_entry *ee;
        int i;

        rcu_read_lock();
        for (i = 0; i < array->map.max_entries; i++) {
                ee = READ_ONCE(array->ptrs[i]);
                if (ee && ee->map_file == map_file)
                        fd_array_map_delete_elem(map, &i);
        }
        rcu_read_unlock();
}

const struct bpf_map_ops perf_event_array_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = fd_array_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = perf_event_fd_array_get_ptr,
        .map_fd_put_ptr = perf_event_fd_array_put_ptr,
        .map_release = perf_event_fd_array_release,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
                                     struct file *map_file /* not used */,
                                     int fd)
{
        return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
        /* cgroup_put() frees cgrp after an RCU grace period */
        cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}
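/* Unlike plain fd arrays, the cgroup flavor must drop its cgroup
 * references at teardown, so its map_free clears every slot via
 * bpf_fd_array_map_clear() before freeing the array itself.
 */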
const struct bpf_map_ops cgroup_array_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_map_alloc,
        .map_free = cgroup_fd_array_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = fd_array_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = cgroup_fd_array_get_ptr,
        .map_fd_put_ptr = cgroup_fd_array_put_ptr,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
        struct bpf_map *map, *inner_map_meta;

        inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
        if (IS_ERR(inner_map_meta))
                return inner_map_meta;

        map = array_map_alloc(attr);
        if (IS_ERR(map)) {
                bpf_map_meta_free(inner_map_meta);
                return map;
        }

        map->inner_map_meta = inner_map_meta;

        return map;
}
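/* inner_map_meta holds a duplicate of the inner map's attributes (type,
 * key/value size, max_entries) so the verifier can type-check lookups
 * into whichever map later occupies a slot, without keeping the original
 * fd alive.
 */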
static void array_of_map_free(struct bpf_map *map)
{
        /* map->inner_map_meta is only accessed by syscall which
         * is protected by fdget/fdput.
         */
        bpf_map_meta_free(map->inner_map_meta);
        bpf_fd_array_map_clear(map);
        fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_map **inner_map = array_map_lookup_elem(map, key);

        if (!inner_map)
                return NULL;

        return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
                                   struct bpf_insn *insn_buf)
{
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 elem_size = round_up(map->value_size, 8);
        struct bpf_insn *insn = insn_buf;
        const int ret = BPF_REG_0;
        const int map_ptr = BPF_REG_1;
        const int index = BPF_REG_2;

        *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        if (map->unpriv_array) {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
                *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
        } else {
                *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
        }
        if (is_power_of_2(elem_size))
                *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
        else
                *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
        *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
        *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);

        return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_of_map_alloc,
        .map_free = array_of_map_free,
        .map_get_next_key = array_map_get_next_key,
        .map_lookup_elem = array_of_map_lookup_elem,
        .map_delete_elem = fd_array_map_delete_elem,
        .map_fd_get_ptr = bpf_map_fd_get_ptr,
        .map_fd_put_ptr = bpf_map_fd_put_ptr,
        .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
        .map_gen_lookup = array_of_map_gen_lookup,
};
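/* Compared with array_map_gen_lookup(), the inlined sequence in
 * array_of_map_gen_lookup() adds one BPF_LDX_MEM(BPF_DW, ...) to
 * dereference the slot and fetch the inner map pointer, plus a JEQ so an
 * empty slot yields NULL; that is why the JGE offsets grow from 4/3 to
 * 6/5.
 */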