// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	/* avoid overflow on round_up(map->value_size) */
	if (attr->value_size > INT_MAX)
		return -E2BIG;

	return 0;
}

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}
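	/* For example (illustrative): attr->max_entries == 1000 gives
	 * fls_long(999) == 10 and index_mask == 1023; with the Spectre v1
	 * mitigation active, max_entries is then rounded up to 1024 so that
	 * any masked index stays inside the allocated region.
	 */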

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
{
	return array->value + (u64)array->elem_size * index;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + (u64)array->elem_size * (index & array->index_mask);
}
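
/* Usage sketch (illustrative, not part of this file): a BPF program looks up
 * an ARRAY map element with the bpf_map_lookup_elem() helper, e.g.:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} counters SEC(".maps");
 *
 *	__u32 key = 0;
 *	__u64 *val = bpf_map_lookup_elem(&counters, &key);
 *
 *	if (val)
 *		__sync_fetch_and_add(val, 1);
 *
 * The NULL check is mandatory: the verifier treats the return value as
 * possibly-NULL until it has been tested.
 */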

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = array->elem_size;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
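
/* The sequence emitted above is, roughly, the inlined equivalent of
 * (illustrative pseudo-C, not part of the original source):
 *
 *	index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;	(only emitted when Spectre v1
 *					 mitigation is enabled)
 *	return array->value + elem_size * index;
 *
 * with the multiplication strength-reduced to a shift when elem_size is a
 * power of two.
 */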

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

/* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;

	if (!bpf_jit_supports_percpu_insn())
		return -EOPNOTSUPP;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));

	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
	}

	*insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
	*insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
	return insn - insn_buf;
}

static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
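
/* Usage sketch (illustrative, not part of this file): from user space a
 * BPF_MAP_TYPE_PERCPU_ARRAY lookup returns one value per possible CPU, so
 * the buffer must hold round_up(value_size, 8) bytes per CPU, e.g. with
 * libbpf:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpus, sizeof(__u64));
 *	__u64 total = 0;
 *	__u32 key = 0;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, vals)) {
 *		for (int i = 0; i < ncpus; i++)
 *			total += vals[i];
 *	}
 *
 * map_fd is a placeholder for the caller's map file descriptor.
 */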

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
				  u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
		copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	} else {
		val = array->value +
			(u64)array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	}
	return 0;
}
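
/* Usage sketch (illustrative, not part of this file): because every slot of
 * an array map exists from the moment the map is created, the update flags
 * behave as follows from user space:
 *
 *	__u32 key = 3;
 *	__u64 val = 42;
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);      succeeds
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);    succeeds
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);  fails with EEXIST
 *
 * map_fd is a placeholder for a BPF_MAP_TYPE_ARRAY map with at least four
 * entries.
 */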

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers_wq(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free fields other than timer and workqueue
	 * on uref dropping to zero.
	 */
	if (btf_record_has_field(map->record, BPF_TIMER))
		for (i = 0; i < array->map.max_entries; i++)
			bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));

	if (btf_record_has_field(map->record, BPF_WORKQUEUE))
		for (i = 0; i < array->map.max_entries; i++)
			bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (!IS_ERR_OR_NULL(map->record)) {
		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
			for (i = 0; i < array->map.max_entries; i++) {
				void __percpu *pptr = array->pptrs[i & array->index_mask];
				int cpu;

				for_each_possible_cpu(cpu) {
					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
					cond_resched();
				}
			}
		} else {
			for (i = 0; i < array->map.max_entries; i++)
				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
		}
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
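
/* Usage sketch (illustrative, not part of this file): a map created with
 * BPF_F_MMAPABLE can be mapped directly into user space, avoiding a syscall
 * per element access:
 *
 *	size_t len = ...;	page-aligned size of the value area
 *	__u64 *vals = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   map_fd, 0);
 *
 *	if (vals != MAP_FAILED)
 *		vals[5] = 1;	(writes element 5 without a bpf(2) syscall)
 *
 * map_fd and len are placeholders; libbpf performs the same mapping
 * internally for global data (.data/.bss/.rodata) maps.
 */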

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = array->elem_size;
			for_each_possible_cpu(cpu) {
				copy_map_value_long(map, info->percpu_value_buf + off,
						    per_cpu_ptr(pptr, cpu));
				check_and_init_map_value(map, info->percpu_value_buf + off);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = array->elem_size * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
	 * released before or in the middle of iterating map elements, so
	 * acquire an extra map uref for iterator.
	 */
	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start = bpf_array_map_seq_start,
	.next = bpf_array_map_seq_next,
	.stop = bpf_array_map_seq_stop,
	.show = bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_array_map_seq_ops,
	.init_seq_private = bpf_iter_init_array_map,
	.fini_seq_private = bpf_iter_fini_array_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};

static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				    void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array_map_elem_ptr(array, i);
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}
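
/* Usage sketch (illustrative, not part of this file): a BPF program drives
 * the loop above through the bpf_for_each_map_elem() helper, e.g.:
 *
 *	static __u64 sum_elem(struct bpf_map *map, __u32 *key, __u64 *val,
 *			      void *ctx)
 *	{
 *		*(__u64 *)ctx += *val;
 *		return 0;	(0 = continue, 1 = stop iterating)
 *	}
 *
 *	__u64 total = 0;
 *	bpf_for_each_map_elem(&counters, sum_elem, &total, 0);
 *
 * counters is a placeholder array map; flags must currently be zero, as
 * enforced at the top of bpf_for_each_array_elem().
 */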

static u64 array_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	u32 elem_size = array->elem_size;
	u64 entries = map->max_entries;
	u64 usage = sizeof(*array);

	if (percpu) {
		usage += entries * sizeof(void *);
		usage += entries * elem_size * num_possible_cpus();
	} else {
		if (map->map_flags & BPF_F_MMAPABLE) {
			usage = PAGE_ALIGN(usage);
			usage += PAGE_ALIGN(entries * elem_size);
		} else {
			usage += entries * elem_size;
		}
	}
	return usage;
}

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers_wq,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_gen_lookup = percpu_array_map_gen_lookup,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(map, old_ptr, true);
	return 0;
}

static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
		return 0;
	} else {
		return -ENOENT;
	}
}

static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	return __fd_array_map_delete_elem(map, key, true);
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	/* bpf_prog is freed after one RCU or tasks trace grace period */
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		__fd_array_map_delete_elem(map, &i, need_defer);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}
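
/* Usage sketch (illustrative, not part of this file): BPF_MAP_TYPE_PROG_ARRAY
 * is the map type behind bpf_tail_call(). User space stores program fds into
 * the array (handled by bpf_fd_array_map_update_elem() above), and a BPF
 * program then jumps through a slot:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 8);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	bpf_tail_call(ctx, &jmp_table, 2);
 *	(execution falls through here only if slot 2 is empty or the call fails)
 *
 * The poke_* machinery below lets the JIT patch such tail-call sites directly
 * when entries in the prog array change.
 */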

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
				      struct bpf_prog *new, struct bpf_prog *old)
{
	WARN_ON_ONCE(1);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
1064da765a2fSDaniel Borkmann static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
1065da765a2fSDaniel Borkmann 				    struct bpf_prog *old,
1066da765a2fSDaniel Borkmann 				    struct bpf_prog *new)
1067da765a2fSDaniel Borkmann {
1068da765a2fSDaniel Borkmann 	struct prog_poke_elem *elem;
1069da765a2fSDaniel Borkmann 	struct bpf_array_aux *aux;
1070da765a2fSDaniel Borkmann 
1071da765a2fSDaniel Borkmann 	aux = container_of(map, struct bpf_array, map)->aux;
1072da765a2fSDaniel Borkmann 	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));
1073da765a2fSDaniel Borkmann 
1074da765a2fSDaniel Borkmann 	list_for_each_entry(elem, &aux->poke_progs, list) {
1075da765a2fSDaniel Borkmann 		struct bpf_jit_poke_descriptor *poke;
10764b7de801SJiri Olsa 		int i;
1077da765a2fSDaniel Borkmann 
1078da765a2fSDaniel Borkmann 		for (i = 0; i < elem->aux->size_poke_tab; i++) {
1079da765a2fSDaniel Borkmann 			poke = &elem->aux->poke_tab[i];
1080da765a2fSDaniel Borkmann 
1081da765a2fSDaniel Borkmann 			/* Few things to be aware of:
1082da765a2fSDaniel Borkmann 			 *
1083da765a2fSDaniel Borkmann 			 * 1) We can only ever access aux in this context, but
1084da765a2fSDaniel Borkmann 			 *    not aux->prog since it might not be stable yet and
1085da765a2fSDaniel Borkmann 			 *    there could be danger of use after free otherwise.
1086da765a2fSDaniel Borkmann 			 * 2) Initially when we start tracking aux, the program
1087da765a2fSDaniel Borkmann 			 *    is not JITed yet and also does not have a kallsyms
1088cf71b174SMaciej Fijalkowski 			 *    entry. We skip these as poke->tailcall_target_stable
1089cf71b174SMaciej Fijalkowski 			 *    is not active yet. The JIT will do the final fixup
1090cf71b174SMaciej Fijalkowski 			 *    before setting it stable. The various
1091cf71b174SMaciej Fijalkowski 			 *    poke->tailcall_target_stable are successively
1092cf71b174SMaciej Fijalkowski 			 *    activated, so tail call updates can arrive from here
1093cf71b174SMaciej Fijalkowski 			 *    while JIT is still finishing its final fixup for
1094cf71b174SMaciej Fijalkowski 			 *    non-activated poke entries.
10954b7de801SJiri Olsa 			 * 3) Also, programs reaching a refcount of zero while
1096da765a2fSDaniel Borkmann 			 *    patching is in progress are okay since we're protected
1097da765a2fSDaniel Borkmann 			 *    under poke_mutex and untrack the programs before the
10984b7de801SJiri Olsa 			 *    JIT buffer is freed.
1099da765a2fSDaniel Borkmann 			 */
1100cf71b174SMaciej Fijalkowski 			if (!READ_ONCE(poke->tailcall_target_stable))
1101da765a2fSDaniel Borkmann 				continue;
1102da765a2fSDaniel Borkmann 			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
1103da765a2fSDaniel Borkmann 				continue;
1104da765a2fSDaniel Borkmann 			if (poke->tail_call.map != map ||
1105da765a2fSDaniel Borkmann 			    poke->tail_call.key != key)
1106da765a2fSDaniel Borkmann 				continue;
1107da765a2fSDaniel Borkmann 
11084b7de801SJiri Olsa 			bpf_arch_poke_desc_update(poke, new, old);
1109da765a2fSDaniel Borkmann 		}
1110da765a2fSDaniel Borkmann 	}
1111da765a2fSDaniel Borkmann }
1112da765a2fSDaniel Borkmann 
1113da765a2fSDaniel Borkmann static void prog_array_map_clear_deferred(struct work_struct *work)
1114da765a2fSDaniel Borkmann {
1115da765a2fSDaniel Borkmann 	struct bpf_map *map = container_of(work, struct bpf_array_aux,
1116da765a2fSDaniel Borkmann 					   work)->map;
111779d93b3cSHou Tao 	bpf_fd_array_map_clear(map, true);
1118da765a2fSDaniel Borkmann 	bpf_map_put(map);
1119da765a2fSDaniel Borkmann }
1120da765a2fSDaniel Borkmann 
1121da765a2fSDaniel Borkmann static void prog_array_map_clear(struct bpf_map *map)
1122da765a2fSDaniel Borkmann {
1123da765a2fSDaniel Borkmann 	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
1124da765a2fSDaniel Borkmann 						 map)->aux;
1125da765a2fSDaniel Borkmann 	bpf_map_inc(map);
1126da765a2fSDaniel Borkmann 	schedule_work(&aux->work);
1127da765a2fSDaniel Borkmann }
1128da765a2fSDaniel Borkmann 
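Editor's note: from userspace, installing a program into a prog array slot goes through the ordinary map-update path; the value is a prog fd, which prog_fd_array_get_ptr() converts into a bpf_prog reference, and the poke machinery above then rewrites any JITed tail-call sites. A hedged libbpf sketch, assuming "map_fd" and "prog_fd" were obtained elsewhere:

	/* Userspace sketch (libbpf): install a tail-call target. */
	#include <bpf/bpf.h>

	static int install_tail_call(int map_fd, int prog_fd)
	{
		__u32 key = 0;

		/* the kernel takes its own reference on the program */
		return bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
	}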
11292beee5f5SDaniel Borkmann static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
11302beee5f5SDaniel Borkmann {
11312beee5f5SDaniel Borkmann 	struct bpf_array_aux *aux;
11322beee5f5SDaniel Borkmann 	struct bpf_map *map;
11332beee5f5SDaniel Borkmann 
11346d192c79SRoman Gushchin 	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
11352beee5f5SDaniel Borkmann 	if (!aux)
11362beee5f5SDaniel Borkmann 		return ERR_PTR(-ENOMEM);
11372beee5f5SDaniel Borkmann 
1138da765a2fSDaniel Borkmann 	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
1139da765a2fSDaniel Borkmann 	INIT_LIST_HEAD(&aux->poke_progs);
1140da765a2fSDaniel Borkmann 	mutex_init(&aux->poke_mutex);
1141da765a2fSDaniel Borkmann 
11422beee5f5SDaniel Borkmann 	map = array_map_alloc(attr);
11432beee5f5SDaniel Borkmann 	if (IS_ERR(map)) {
11442beee5f5SDaniel Borkmann 		kfree(aux);
11452beee5f5SDaniel Borkmann 		return map;
11462beee5f5SDaniel Borkmann 	}
11472beee5f5SDaniel Borkmann 
11482beee5f5SDaniel Borkmann 	container_of(map, struct bpf_array, map)->aux = aux;
1149da765a2fSDaniel Borkmann 	aux->map = map;
1150da765a2fSDaniel Borkmann 
11512beee5f5SDaniel Borkmann 	return map;
11522beee5f5SDaniel Borkmann }
11532beee5f5SDaniel Borkmann 
11542beee5f5SDaniel Borkmann static void prog_array_map_free(struct bpf_map *map)
11552beee5f5SDaniel Borkmann {
1156da765a2fSDaniel Borkmann 	struct prog_poke_elem *elem, *tmp;
11572beee5f5SDaniel Borkmann 	struct bpf_array_aux *aux;
11582beee5f5SDaniel Borkmann 
11592beee5f5SDaniel Borkmann 	aux = container_of(map, struct bpf_array, map)->aux;
1160da765a2fSDaniel Borkmann 	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
1161da765a2fSDaniel Borkmann 		list_del_init(&elem->list);
1162da765a2fSDaniel Borkmann 		kfree(elem);
1163da765a2fSDaniel Borkmann 	}
11642beee5f5SDaniel Borkmann 	kfree(aux);
11652beee5f5SDaniel Borkmann 	fd_array_map_free(map);
11662beee5f5SDaniel Borkmann }
11672beee5f5SDaniel Borkmann 
1168f4d05259SMartin KaFai Lau /* prog_array->aux->{type,jited} is a runtime binding.
1169f4d05259SMartin KaFai Lau  * Doing a static check alone in the verifier is not enough.
1170f4d05259SMartin KaFai Lau  * Thus, prog_array_map cannot be used as an inner_map
1171f4d05259SMartin KaFai Lau  * and map_meta_equal is not implemented.
1172f4d05259SMartin KaFai Lau  */
117340077e0cSJohannes Berg const struct bpf_map_ops prog_array_map_ops = {
1174ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
11752beee5f5SDaniel Borkmann 	.map_alloc = prog_array_map_alloc,
11762beee5f5SDaniel Borkmann 	.map_free = prog_array_map_free,
1177da765a2fSDaniel Borkmann 	.map_poke_track = prog_array_map_poke_track,
1178da765a2fSDaniel Borkmann 	.map_poke_untrack = prog_array_map_poke_untrack,
1179da765a2fSDaniel Borkmann 	.map_poke_run = prog_array_map_poke_run,
118004fd61abSAlexei Starovoitov 	.map_get_next_key = array_map_get_next_key,
11812a36f0b9SWang Nan 	.map_lookup_elem = fd_array_map_lookup_elem,
11822a36f0b9SWang Nan 	.map_delete_elem = fd_array_map_delete_elem,
11832a36f0b9SWang Nan 	.map_fd_get_ptr = prog_fd_array_get_ptr,
11842a36f0b9SWang Nan 	.map_fd_put_ptr = prog_fd_array_put_ptr,
118514dc6f04SMartin KaFai Lau 	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
1186da765a2fSDaniel Borkmann 	.map_release_uref = prog_array_map_clear,
1187a7c19db3SYonghong Song 	.map_seq_show_elem = prog_array_map_seq_show_elem,
11881746d055SYafang Shao 	.map_mem_usage = array_map_mem_usage,
1189c317ab71SMenglong Dong 	.map_btf_id = &array_map_btf_ids[0],
119004fd61abSAlexei Starovoitov };
119104fd61abSAlexei Starovoitov 
11923b1efb19SDaniel Borkmann static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
11933b1efb19SDaniel Borkmann 						   struct file *map_file)
1194ea317b26SKaixu Xia {
11953b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
11963b1efb19SDaniel Borkmann 
1197dc685409SHou Tao 	ee = kzalloc(sizeof(*ee), GFP_KERNEL);
11983b1efb19SDaniel Borkmann 	if (ee) {
11993b1efb19SDaniel Borkmann 		ee->event = perf_file->private_data;
12003b1efb19SDaniel Borkmann 		ee->perf_file = perf_file;
12013b1efb19SDaniel Borkmann 		ee->map_file = map_file;
12023b1efb19SDaniel Borkmann 	}
12033b1efb19SDaniel Borkmann 
12043b1efb19SDaniel Borkmann 	return ee;
12053b1efb19SDaniel Borkmann }
12063b1efb19SDaniel Borkmann 
12073b1efb19SDaniel Borkmann static void __bpf_event_entry_free(struct rcu_head *rcu)
12083b1efb19SDaniel Borkmann {
12093b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
12103b1efb19SDaniel Borkmann 
12113b1efb19SDaniel Borkmann 	ee = container_of(rcu, struct bpf_event_entry, rcu);
12123b1efb19SDaniel Borkmann 	fput(ee->perf_file);
12133b1efb19SDaniel Borkmann 	kfree(ee);
12143b1efb19SDaniel Borkmann }
12153b1efb19SDaniel Borkmann 
12163b1efb19SDaniel Borkmann static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
12173b1efb19SDaniel Borkmann {
12183b1efb19SDaniel Borkmann 	call_rcu(&ee->rcu, __bpf_event_entry_free);
1219ea317b26SKaixu Xia }
1220ea317b26SKaixu Xia 
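Editor's note: the bpf_event_entry helpers above manage the per-slot perf event references (freed via RCU so in-flight lookups stay safe). The usual BPF-side consumer of such a map is bpf_perf_event_output(); a sketch assuming libbpf headers, with illustrative map/section names:

	/* Hypothetical BPF program emitting events through a
	 * BPF_MAP_TYPE_PERF_EVENT_ARRAY.
	 */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
		__uint(key_size, sizeof(__u32));
		__uint(value_size, sizeof(__u32));
	} events SEC(".maps");

	struct event { __u32 pid; };

	SEC("tracepoint/syscalls/sys_enter_execve")
	int trace_execve(void *ctx)
	{
		struct event e = { .pid = bpf_get_current_pid_tgid() >> 32 };

		/* BPF_F_CURRENT_CPU selects this CPU's slot in the array */
		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
				      &e, sizeof(e));
		return 0;
	}

	char _license[] SEC("license") = "GPL";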
1221d056a788SDaniel Borkmann static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
1222d056a788SDaniel Borkmann 					 struct file *map_file, int fd)
1223ea317b26SKaixu Xia {
12243b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
12253b1efb19SDaniel Borkmann 	struct perf_event *event;
12263b1efb19SDaniel Borkmann 	struct file *perf_file;
1227f91840a3SAlexei Starovoitov 	u64 value;
1228ea317b26SKaixu Xia 
12293b1efb19SDaniel Borkmann 	perf_file = perf_event_get(fd);
12303b1efb19SDaniel Borkmann 	if (IS_ERR(perf_file))
12313b1efb19SDaniel Borkmann 		return perf_file;
1232e03e7ee3SAlexei Starovoitov 
1233f91840a3SAlexei Starovoitov 	ee = ERR_PTR(-EOPNOTSUPP);
12343b1efb19SDaniel Borkmann 	event = perf_file->private_data;
123597562633SYonghong Song 	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
12363b1efb19SDaniel Borkmann 		goto err_out;
1237ea317b26SKaixu Xia 
12383b1efb19SDaniel Borkmann 	ee = bpf_event_entry_gen(perf_file, map_file);
12393b1efb19SDaniel Borkmann 	if (ee)
12403b1efb19SDaniel Borkmann 		return ee;
12413b1efb19SDaniel Borkmann 	ee = ERR_PTR(-ENOMEM);
12423b1efb19SDaniel Borkmann err_out:
12433b1efb19SDaniel Borkmann 	fput(perf_file);
12443b1efb19SDaniel Borkmann 	return ee;
1245ea317b26SKaixu Xia }
1246ea317b26SKaixu Xia 
124720c20bd1SHou Tao static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
1248ea317b26SKaixu Xia {
124920c20bd1SHou Tao 	/* bpf_perf_event is freed after one RCU grace period */
12503b1efb19SDaniel Borkmann 	bpf_event_entry_free_rcu(ptr);
12513b1efb19SDaniel Borkmann }
12523b1efb19SDaniel Borkmann 
12533b1efb19SDaniel Borkmann static void perf_event_fd_array_release(struct bpf_map *map,
12543b1efb19SDaniel Borkmann 					struct file *map_file)
12553b1efb19SDaniel Borkmann {
12563b1efb19SDaniel Borkmann 	struct bpf_array *array = container_of(map, struct bpf_array, map);
12573b1efb19SDaniel Borkmann 	struct bpf_event_entry *ee;
12583b1efb19SDaniel Borkmann 	int i;
12593b1efb19SDaniel Borkmann 
1260792cacccSSong Liu 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
1261792cacccSSong Liu 		return;
1262792cacccSSong Liu 
12633b1efb19SDaniel Borkmann 	rcu_read_lock();
12643b1efb19SDaniel Borkmann 	for (i = 0; i < array->map.max_entries; i++) {
12653b1efb19SDaniel Borkmann 		ee = READ_ONCE(array->ptrs[i]);
12663b1efb19SDaniel Borkmann 		if (ee && ee->map_file == map_file)
126779d93b3cSHou Tao 			__fd_array_map_delete_elem(map, &i, true);
12683b1efb19SDaniel Borkmann 	}
12693b1efb19SDaniel Borkmann 	rcu_read_unlock();
1270ea317b26SKaixu Xia }
1271ea317b26SKaixu Xia 
1272792cacccSSong Liu static void perf_event_fd_array_map_free(struct bpf_map *map)
1273792cacccSSong Liu {
1274792cacccSSong Liu 	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
127579d93b3cSHou Tao 		bpf_fd_array_map_clear(map, false);
1276792cacccSSong Liu 	fd_array_map_free(map);
1277792cacccSSong Liu }
1278792cacccSSong Liu 
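Editor's note: perf_event_fd_array_release() above skips the clearing when BPF_F_PRESERVE_ELEMS is set, so the elements survive until the map itself is freed. A hedged userspace sketch of creating such a map with that flag, assuming libbpf's bpf_map_create():

	/* Userspace sketch: perf event array whose elements outlive
	 * the creating fd (names are illustrative).
	 */
	#include <bpf/bpf.h>

	static int create_preserved_event_array(int nr_cpus)
	{
		LIBBPF_OPTS(bpf_map_create_opts, opts,
			    .map_flags = BPF_F_PRESERVE_ELEMS);

		return bpf_map_create(BPF_MAP_TYPE_PERF_EVENT_ARRAY, "events",
				      sizeof(__u32), sizeof(__u32),
				      nr_cpus, &opts);
	}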
127940077e0cSJohannes Berg const struct bpf_map_ops perf_event_array_map_ops = {
1280f4d05259SMartin KaFai Lau 	.map_meta_equal = bpf_map_meta_equal,
1281ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
1282ad46061fSJakub Kicinski 	.map_alloc = array_map_alloc,
1283792cacccSSong Liu 	.map_free = perf_event_fd_array_map_free,
1284ea317b26SKaixu Xia 	.map_get_next_key = array_map_get_next_key,
1285ea317b26SKaixu Xia 	.map_lookup_elem = fd_array_map_lookup_elem,
1286ea317b26SKaixu Xia 	.map_delete_elem = fd_array_map_delete_elem,
1287ea317b26SKaixu Xia 	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
1288ea317b26SKaixu Xia 	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
12893b1efb19SDaniel Borkmann 	.map_release = perf_event_fd_array_release,
1290e8d2bec0SDaniel Borkmann 	.map_check_btf = map_check_no_btf,
12911746d055SYafang Shao 	.map_mem_usage = array_map_mem_usage,
1292c317ab71SMenglong Dong 	.map_btf_id = &array_map_btf_ids[0],
1293ea317b26SKaixu Xia };
1294ea317b26SKaixu Xia 
129560d20f91SSargun Dhillon #ifdef CONFIG_CGROUPS
12964ed8ec52SMartin KaFai Lau static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
12974ed8ec52SMartin KaFai Lau 				     struct file *map_file /* not used */,
12984ed8ec52SMartin KaFai Lau 				     int fd)
12994ed8ec52SMartin KaFai Lau {
13004ed8ec52SMartin KaFai Lau 	return cgroup_get_from_fd(fd);
13014ed8ec52SMartin KaFai Lau }
13024ed8ec52SMartin KaFai Lau 
130320c20bd1SHou Tao static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
13044ed8ec52SMartin KaFai Lau {
13054ed8ec52SMartin KaFai Lau 	/* cgroup_put() frees the cgrp after an RCU grace period */
13064ed8ec52SMartin KaFai Lau 	cgroup_put(ptr);
13074ed8ec52SMartin KaFai Lau }
13084ed8ec52SMartin KaFai Lau 
13094ed8ec52SMartin KaFai Lau static void cgroup_fd_array_free(struct bpf_map *map)
13104ed8ec52SMartin KaFai Lau {
131179d93b3cSHou Tao 	bpf_fd_array_map_clear(map, false);
13124ed8ec52SMartin KaFai Lau 	fd_array_map_free(map);
13134ed8ec52SMartin KaFai Lau }
13144ed8ec52SMartin KaFai Lau 
131540077e0cSJohannes Berg const struct bpf_map_ops cgroup_array_map_ops = {
1316f4d05259SMartin KaFai Lau 	.map_meta_equal = bpf_map_meta_equal,
1317ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
1318ad46061fSJakub Kicinski 	.map_alloc = array_map_alloc,
13194ed8ec52SMartin KaFai Lau 	.map_free = cgroup_fd_array_free,
13204ed8ec52SMartin KaFai Lau 	.map_get_next_key = array_map_get_next_key,
13214ed8ec52SMartin KaFai Lau 	.map_lookup_elem = fd_array_map_lookup_elem,
13224ed8ec52SMartin KaFai Lau 	.map_delete_elem = fd_array_map_delete_elem,
13234ed8ec52SMartin KaFai Lau 	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
13244ed8ec52SMartin KaFai Lau 	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
1325e8d2bec0SDaniel Borkmann 	.map_check_btf = map_check_no_btf,
13261746d055SYafang Shao 	.map_mem_usage = array_map_mem_usage,
1327c317ab71SMenglong Dong 	.map_btf_id = &array_map_btf_ids[0],
13284ed8ec52SMartin KaFai Lau };
13294ed8ec52SMartin KaFai Lau #endif
133056f668dfSMartin KaFai Lau 
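Editor's note: cgroup arrays store cgroup references (converted from fds by cgroup_fd_array_get_ptr() above) that helpers use for membership tests. A sketch of the BPF side, assuming libbpf headers, with illustrative names:

	/* Hypothetical BPF program gating on a BPF_MAP_TYPE_CGROUP_ARRAY. */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
		__uint(max_entries, 1);
		__uint(key_size, sizeof(__u32));
		__uint(value_size, sizeof(__u32));
	} cgrp_map SEC(".maps");

	SEC("tracepoint/syscalls/sys_enter_openat")
	int filter_by_cgroup(void *ctx)
	{
		/* returns 1 if current task is in the cgroup at index 0 */
		if (bpf_current_task_under_cgroup(&cgrp_map, 0) != 1)
			return 0;
		/* ... task is under the target cgroup ... */
		return 0;
	}

	char _license[] SEC("license") = "GPL";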
133156f668dfSMartin KaFai Lau static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
133256f668dfSMartin KaFai Lau {
133356f668dfSMartin KaFai Lau 	struct bpf_map *map, *inner_map_meta;
133456f668dfSMartin KaFai Lau 
133556f668dfSMartin KaFai Lau 	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
133656f668dfSMartin KaFai Lau 	if (IS_ERR(inner_map_meta))
133756f668dfSMartin KaFai Lau 		return inner_map_meta;
133856f668dfSMartin KaFai Lau 
1339ad46061fSJakub Kicinski 	map = array_map_alloc(attr);
134056f668dfSMartin KaFai Lau 	if (IS_ERR(map)) {
134156f668dfSMartin KaFai Lau 		bpf_map_meta_free(inner_map_meta);
134256f668dfSMartin KaFai Lau 		return map;
134356f668dfSMartin KaFai Lau 	}
134456f668dfSMartin KaFai Lau 
134556f668dfSMartin KaFai Lau 	map->inner_map_meta = inner_map_meta;
134656f668dfSMartin KaFai Lau 
134756f668dfSMartin KaFai Lau 	return map;
134856f668dfSMartin KaFai Lau }
134956f668dfSMartin KaFai Lau 
135056f668dfSMartin KaFai Lau static void array_of_map_free(struct bpf_map *map)
135156f668dfSMartin KaFai Lau {
135256f668dfSMartin KaFai Lau 	/* map->inner_map_meta is only accessed by the syscall path,
135356f668dfSMartin KaFai Lau 	 * which is protected by fdget/fdput.
135456f668dfSMartin KaFai Lau 	 */
135556f668dfSMartin KaFai Lau 	bpf_map_meta_free(map->inner_map_meta);
135679d93b3cSHou Tao 	bpf_fd_array_map_clear(map, false);
135756f668dfSMartin KaFai Lau 	fd_array_map_free(map);
135856f668dfSMartin KaFai Lau }
135956f668dfSMartin KaFai Lau 
136056f668dfSMartin KaFai Lau static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
136156f668dfSMartin KaFai Lau {
136256f668dfSMartin KaFai Lau 	struct bpf_map **inner_map = array_map_lookup_elem(map, key);
136356f668dfSMartin KaFai Lau 
136456f668dfSMartin KaFai Lau 	if (!inner_map)
136556f668dfSMartin KaFai Lau 		return NULL;
136656f668dfSMartin KaFai Lau 
136756f668dfSMartin KaFai Lau 	return READ_ONCE(*inner_map);
136856f668dfSMartin KaFai Lau }
136956f668dfSMartin KaFai Lau 
13704a8f87e6SDaniel Borkmann static int array_of_map_gen_lookup(struct bpf_map *map,
13717b0c2a05SDaniel Borkmann 				   struct bpf_insn *insn_buf)
13727b0c2a05SDaniel Borkmann {
1373b2157399SAlexei Starovoitov 	struct bpf_array *array = container_of(map, struct bpf_array, map);
1374d937bc34SAndrii Nakryiko 	u32 elem_size = array->elem_size;
13757b0c2a05SDaniel Borkmann 	struct bpf_insn *insn = insn_buf;
13767b0c2a05SDaniel Borkmann 	const int ret = BPF_REG_0;
13777b0c2a05SDaniel Borkmann 	const int map_ptr = BPF_REG_1;
13787b0c2a05SDaniel Borkmann 	const int index = BPF_REG_2;
13797b0c2a05SDaniel Borkmann 
13807b0c2a05SDaniel Borkmann 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
13817b0c2a05SDaniel Borkmann 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
13822c78ee89SAlexei Starovoitov 	if (!map->bypass_spec_v1) {
1383b2157399SAlexei Starovoitov 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
1384b2157399SAlexei Starovoitov 		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
1385b2157399SAlexei Starovoitov 	} else {
13867b0c2a05SDaniel Borkmann 		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
1387b2157399SAlexei Starovoitov 	}
13887b0c2a05SDaniel Borkmann 	if (is_power_of_2(elem_size))
13897b0c2a05SDaniel Borkmann 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
13907b0c2a05SDaniel Borkmann 	else
13917b0c2a05SDaniel Borkmann 		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
13927b0c2a05SDaniel Borkmann 	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
13937b0c2a05SDaniel Borkmann 	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
13947b0c2a05SDaniel Borkmann 	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
13957b0c2a05SDaniel Borkmann 	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
13967b0c2a05SDaniel Borkmann 	*insn++ = BPF_MOV64_IMM(ret, 0);
13977b0c2a05SDaniel Borkmann 
13987b0c2a05SDaniel Borkmann 	return insn - insn_buf;
13997b0c2a05SDaniel Borkmann }
14007b0c2a05SDaniel Borkmann 
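Editor's note: a rough C equivalent of the instruction sequence emitted by array_of_map_gen_lookup() above, for illustration only (it is not compiled kernel code): bounds-check the index, apply the Spectre v1 mask when enabled, scale by the element size, and dereference the stored inner-map pointer.

	static void *array_of_map_lookup_unrolled(struct bpf_array *array,
						  u32 index)
	{
		void *elem;

		if (index >= array->map.max_entries)
			return NULL;				/* ret = 0 */
		if (!array->map.bypass_spec_v1)
			index &= array->index_mask;	/* speculation clamp */
		elem = array->value + (u64)array->elem_size * index;
		return *(void **)elem;		/* inner bpf_map pointer */
	}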
140140077e0cSJohannes Berg const struct bpf_map_ops array_of_maps_map_ops = {
1402ad46061fSJakub Kicinski 	.map_alloc_check = fd_array_map_alloc_check,
140356f668dfSMartin KaFai Lau 	.map_alloc = array_of_map_alloc,
140456f668dfSMartin KaFai Lau 	.map_free = array_of_map_free,
140556f668dfSMartin KaFai Lau 	.map_get_next_key = array_map_get_next_key,
140656f668dfSMartin KaFai Lau 	.map_lookup_elem = array_of_map_lookup_elem,
140756f668dfSMartin KaFai Lau 	.map_delete_elem = fd_array_map_delete_elem,
140856f668dfSMartin KaFai Lau 	.map_fd_get_ptr = bpf_map_fd_get_ptr,
140956f668dfSMartin KaFai Lau 	.map_fd_put_ptr = bpf_map_fd_put_ptr,
141014dc6f04SMartin KaFai Lau 	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
14117b0c2a05SDaniel Borkmann 	.map_gen_lookup = array_of_map_gen_lookup,
14129263dddcSTakshak Chahande 	.map_lookup_batch = generic_map_lookup_batch,
14139263dddcSTakshak Chahande 	.map_update_batch = generic_map_update_batch,
1414e8d2bec0SDaniel Borkmann 	.map_check_btf = map_check_no_btf,
14151746d055SYafang Shao 	.map_mem_usage = array_map_mem_usage,
1416c317ab71SMenglong Dong 	.map_btf_id = &array_map_btf_ids[0],
141756f668dfSMartin KaFai Lau };
1418
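Editor's note: from userspace, the inner map fd given at creation only donates metadata (via bpf_map_meta_alloc() above); actual inner maps are installed per slot afterwards. A hedged libbpf sketch, with illustrative names:

	/* Userspace sketch: wiring up a BPF_MAP_TYPE_ARRAY_OF_MAPS. */
	#include <bpf/bpf.h>

	static int create_outer_array(void)
	{
		LIBBPF_OPTS(bpf_map_create_opts, opts);
		int inner_fd, outer_fd;
		__u32 key = 0;

		inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
					  sizeof(__u32), sizeof(__u64), 1, NULL);
		if (inner_fd < 0)
			return inner_fd;

		/* template map: defines the inner-map metadata */
		opts.inner_map_fd = inner_fd;
		outer_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer",
					  sizeof(__u32), sizeof(__u32), 4, &opts);
		if (outer_fd < 0)
			return outer_fd;

		/* the update value is the inner map's fd */
		bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
		return outer_fd;
	}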