15b497af4SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 228fbcfa0SAlexei Starovoitov /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 381ed18abSAlexei Starovoitov * Copyright (c) 2016,2017 Facebook 428fbcfa0SAlexei Starovoitov */ 528fbcfa0SAlexei Starovoitov #include <linux/bpf.h> 6a26ca7c9SMartin KaFai Lau #include <linux/btf.h> 728fbcfa0SAlexei Starovoitov #include <linux/err.h> 828fbcfa0SAlexei Starovoitov #include <linux/slab.h> 928fbcfa0SAlexei Starovoitov #include <linux/mm.h> 1004fd61abSAlexei Starovoitov #include <linux/filter.h> 110cdf5640SDaniel Borkmann #include <linux/perf_event.h> 12a26ca7c9SMartin KaFai Lau #include <uapi/linux/btf.h> 131e6c62a8SAlexei Starovoitov #include <linux/rcupdate_trace.h> 14c317ab71SMenglong Dong #include <linux/btf_ids.h> 1528fbcfa0SAlexei Starovoitov 1656f668dfSMartin KaFai Lau #include "map_in_map.h" 1756f668dfSMartin KaFai Lau 186e71b04aSChenbo Feng #define ARRAY_CREATE_FLAG_MASK \ 19792cacccSSong Liu (BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \ 204a8f87e6SDaniel Borkmann BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP) 216e71b04aSChenbo Feng 22a10423b8SAlexei Starovoitov static void bpf_array_free_percpu(struct bpf_array *array) 23a10423b8SAlexei Starovoitov { 24a10423b8SAlexei Starovoitov int i; 25a10423b8SAlexei Starovoitov 2632fff239SEric Dumazet for (i = 0; i < array->map.max_entries; i++) { 27a10423b8SAlexei Starovoitov free_percpu(array->pptrs[i]); 2832fff239SEric Dumazet cond_resched(); 2932fff239SEric Dumazet } 30a10423b8SAlexei Starovoitov } 31a10423b8SAlexei Starovoitov 32a10423b8SAlexei Starovoitov static int bpf_array_alloc_percpu(struct bpf_array *array) 33a10423b8SAlexei Starovoitov { 34a10423b8SAlexei Starovoitov void __percpu *ptr; 35a10423b8SAlexei Starovoitov int i; 36a10423b8SAlexei Starovoitov 37a10423b8SAlexei Starovoitov for (i = 0; i < array->map.max_entries; i++) { 386d192c79SRoman Gushchin ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8, 39a10423b8SAlexei Starovoitov GFP_USER | __GFP_NOWARN); 40a10423b8SAlexei Starovoitov if (!ptr) { 41a10423b8SAlexei Starovoitov bpf_array_free_percpu(array); 42a10423b8SAlexei Starovoitov return -ENOMEM; 43a10423b8SAlexei Starovoitov } 44a10423b8SAlexei Starovoitov array->pptrs[i] = ptr; 4532fff239SEric Dumazet cond_resched(); 46a10423b8SAlexei Starovoitov } 47a10423b8SAlexei Starovoitov 48a10423b8SAlexei Starovoitov return 0; 49a10423b8SAlexei Starovoitov } 50a10423b8SAlexei Starovoitov 5128fbcfa0SAlexei Starovoitov /* Called from syscall */ 525dc4c4b7SMartin KaFai Lau int array_map_alloc_check(union bpf_attr *attr) 53ad46061fSJakub Kicinski { 54ad46061fSJakub Kicinski bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 55ad46061fSJakub Kicinski int numa_node = bpf_map_attr_numa_node(attr); 56ad46061fSJakub Kicinski 57ad46061fSJakub Kicinski /* check sanity of attributes */ 58ad46061fSJakub Kicinski if (attr->max_entries == 0 || attr->key_size != 4 || 59ad46061fSJakub Kicinski attr->value_size == 0 || 60ad46061fSJakub Kicinski attr->map_flags & ~ARRAY_CREATE_FLAG_MASK || 61591fe988SDaniel Borkmann !bpf_map_flags_access_ok(attr->map_flags) || 62ad46061fSJakub Kicinski (percpu && numa_node != NUMA_NO_NODE)) 63ad46061fSJakub Kicinski return -EINVAL; 64ad46061fSJakub Kicinski 65fc970227SAndrii Nakryiko if (attr->map_type != BPF_MAP_TYPE_ARRAY && 664a8f87e6SDaniel Borkmann attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP)) 67fc970227SAndrii Nakryiko return -EINVAL; 68fc970227SAndrii Nakryiko 69792cacccSSong Liu if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY && 70792cacccSSong Liu attr->map_flags & BPF_F_PRESERVE_ELEMS) 71792cacccSSong Liu return -EINVAL; 72792cacccSSong Liu 7363b8ce77SAndrii Nakryiko /* avoid overflow on round_up(map->value_size) */ 7463b8ce77SAndrii Nakryiko if (attr->value_size > INT_MAX) 75ad46061fSJakub Kicinski return -E2BIG; 76ad46061fSJakub Kicinski 77ad46061fSJakub Kicinski return 0; 78ad46061fSJakub Kicinski } 79ad46061fSJakub Kicinski 8028fbcfa0SAlexei Starovoitov static struct bpf_map *array_map_alloc(union bpf_attr *attr) 8128fbcfa0SAlexei Starovoitov { 82a10423b8SAlexei Starovoitov bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 831bc59756SRoman Gushchin int numa_node = bpf_map_attr_numa_node(attr); 84b2157399SAlexei Starovoitov u32 elem_size, index_mask, max_entries; 858062fb12SAndrii Nakryiko bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL); 861bc59756SRoman Gushchin u64 array_size, mask64; 8728fbcfa0SAlexei Starovoitov struct bpf_array *array; 8828fbcfa0SAlexei Starovoitov 8928fbcfa0SAlexei Starovoitov elem_size = round_up(attr->value_size, 8); 9028fbcfa0SAlexei Starovoitov 91b2157399SAlexei Starovoitov max_entries = attr->max_entries; 92b2157399SAlexei Starovoitov 93bbeb6e43SDaniel Borkmann /* On 32 bit archs roundup_pow_of_two() with max_entries that has 94bbeb6e43SDaniel Borkmann * upper most bit set in u32 space is undefined behavior due to 95bbeb6e43SDaniel Borkmann * resulting 1U << 32, so do it manually here in u64 space. 96bbeb6e43SDaniel Borkmann */ 97bbeb6e43SDaniel Borkmann mask64 = fls_long(max_entries - 1); 98bbeb6e43SDaniel Borkmann mask64 = 1ULL << mask64; 99bbeb6e43SDaniel Borkmann mask64 -= 1; 100bbeb6e43SDaniel Borkmann 101bbeb6e43SDaniel Borkmann index_mask = mask64; 1022c78ee89SAlexei Starovoitov if (!bypass_spec_v1) { 103b2157399SAlexei Starovoitov /* round up array size to nearest power of 2, 104b2157399SAlexei Starovoitov * since cpu will speculate within index_mask limits 105b2157399SAlexei Starovoitov */ 106b2157399SAlexei Starovoitov max_entries = index_mask + 1; 107bbeb6e43SDaniel Borkmann /* Check for overflows. */ 108bbeb6e43SDaniel Borkmann if (max_entries < attr->max_entries) 109bbeb6e43SDaniel Borkmann return ERR_PTR(-E2BIG); 110bbeb6e43SDaniel Borkmann } 111b2157399SAlexei Starovoitov 112a10423b8SAlexei Starovoitov array_size = sizeof(*array); 113fc970227SAndrii Nakryiko if (percpu) { 114b2157399SAlexei Starovoitov array_size += (u64) max_entries * sizeof(void *); 115fc970227SAndrii Nakryiko } else { 116fc970227SAndrii Nakryiko /* rely on vmalloc() to return page-aligned memory and 117fc970227SAndrii Nakryiko * ensure array->value is exactly page-aligned 118fc970227SAndrii Nakryiko */ 119fc970227SAndrii Nakryiko if (attr->map_flags & BPF_F_MMAPABLE) { 120fc970227SAndrii Nakryiko array_size = PAGE_ALIGN(array_size); 121fc970227SAndrii Nakryiko array_size += PAGE_ALIGN((u64) max_entries * elem_size); 122fc970227SAndrii Nakryiko } else { 123b2157399SAlexei Starovoitov array_size += (u64) max_entries * elem_size; 124fc970227SAndrii Nakryiko } 125fc970227SAndrii Nakryiko } 126a10423b8SAlexei Starovoitov 12728fbcfa0SAlexei Starovoitov /* allocate all map elements and zero-initialize them */ 128fc970227SAndrii Nakryiko if (attr->map_flags & BPF_F_MMAPABLE) { 129fc970227SAndrii Nakryiko void *data; 130fc970227SAndrii Nakryiko 131fc970227SAndrii Nakryiko /* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */ 132fc970227SAndrii Nakryiko data = bpf_map_area_mmapable_alloc(array_size, numa_node); 1331bc59756SRoman Gushchin if (!data) 134fc970227SAndrii Nakryiko return ERR_PTR(-ENOMEM); 135fc970227SAndrii Nakryiko array = data + PAGE_ALIGN(sizeof(struct bpf_array)) 136fc970227SAndrii Nakryiko - offsetof(struct bpf_array, value); 137fc970227SAndrii Nakryiko } else { 13896eabe7aSMartin KaFai Lau array = bpf_map_area_alloc(array_size, numa_node); 139fc970227SAndrii Nakryiko } 1401bc59756SRoman Gushchin if (!array) 14128fbcfa0SAlexei Starovoitov return ERR_PTR(-ENOMEM); 142b2157399SAlexei Starovoitov array->index_mask = index_mask; 1432c78ee89SAlexei Starovoitov array->map.bypass_spec_v1 = bypass_spec_v1; 14428fbcfa0SAlexei Starovoitov 14528fbcfa0SAlexei Starovoitov /* copy mandatory map attributes */ 14632852649SJakub Kicinski bpf_map_init_from_attr(&array->map, attr); 14728fbcfa0SAlexei Starovoitov array->elem_size = elem_size; 14828fbcfa0SAlexei Starovoitov 1499c2d63b8SDaniel Borkmann if (percpu && bpf_array_alloc_percpu(array)) { 150d407bd25SDaniel Borkmann bpf_map_area_free(array); 151a10423b8SAlexei Starovoitov return ERR_PTR(-ENOMEM); 152a10423b8SAlexei Starovoitov } 153a10423b8SAlexei Starovoitov 15428fbcfa0SAlexei Starovoitov return &array->map; 15528fbcfa0SAlexei Starovoitov } 15628fbcfa0SAlexei Starovoitov 15787ac0d60SAndrii Nakryiko static void *array_map_elem_ptr(struct bpf_array* array, u32 index) 15887ac0d60SAndrii Nakryiko { 15987ac0d60SAndrii Nakryiko return array->value + (u64)array->elem_size * index; 16087ac0d60SAndrii Nakryiko } 16187ac0d60SAndrii Nakryiko 16228fbcfa0SAlexei Starovoitov /* Called from syscall or from eBPF program */ 16328fbcfa0SAlexei Starovoitov static void *array_map_lookup_elem(struct bpf_map *map, void *key) 16428fbcfa0SAlexei Starovoitov { 16528fbcfa0SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 16628fbcfa0SAlexei Starovoitov u32 index = *(u32 *)key; 16728fbcfa0SAlexei Starovoitov 168a10423b8SAlexei Starovoitov if (unlikely(index >= array->map.max_entries)) 16928fbcfa0SAlexei Starovoitov return NULL; 17028fbcfa0SAlexei Starovoitov 17187ac0d60SAndrii Nakryiko return array->value + (u64)array->elem_size * (index & array->index_mask); 17228fbcfa0SAlexei Starovoitov } 17328fbcfa0SAlexei Starovoitov 174d8eca5bbSDaniel Borkmann static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, 175d8eca5bbSDaniel Borkmann u32 off) 176d8eca5bbSDaniel Borkmann { 177d8eca5bbSDaniel Borkmann struct bpf_array *array = container_of(map, struct bpf_array, map); 178d8eca5bbSDaniel Borkmann 179d8eca5bbSDaniel Borkmann if (map->max_entries != 1) 180d8eca5bbSDaniel Borkmann return -ENOTSUPP; 181d8eca5bbSDaniel Borkmann if (off >= map->value_size) 182d8eca5bbSDaniel Borkmann return -EINVAL; 183d8eca5bbSDaniel Borkmann 184d8eca5bbSDaniel Borkmann *imm = (unsigned long)array->value; 185d8eca5bbSDaniel Borkmann return 0; 186d8eca5bbSDaniel Borkmann } 187d8eca5bbSDaniel Borkmann 188d8eca5bbSDaniel Borkmann static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm, 189d8eca5bbSDaniel Borkmann u32 *off) 190d8eca5bbSDaniel Borkmann { 191d8eca5bbSDaniel Borkmann struct bpf_array *array = container_of(map, struct bpf_array, map); 192d8eca5bbSDaniel Borkmann u64 base = (unsigned long)array->value; 193d8eca5bbSDaniel Borkmann u64 range = array->elem_size; 194d8eca5bbSDaniel Borkmann 195d8eca5bbSDaniel Borkmann if (map->max_entries != 1) 196d8eca5bbSDaniel Borkmann return -ENOTSUPP; 197d8eca5bbSDaniel Borkmann if (imm < base || imm >= base + range) 198d8eca5bbSDaniel Borkmann return -ENOENT; 199d8eca5bbSDaniel Borkmann 200d8eca5bbSDaniel Borkmann *off = imm - base; 201d8eca5bbSDaniel Borkmann return 0; 202d8eca5bbSDaniel Borkmann } 203d8eca5bbSDaniel Borkmann 20481ed18abSAlexei Starovoitov /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */ 2054a8f87e6SDaniel Borkmann static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) 20681ed18abSAlexei Starovoitov { 207b2157399SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 20881ed18abSAlexei Starovoitov struct bpf_insn *insn = insn_buf; 209d937bc34SAndrii Nakryiko u32 elem_size = array->elem_size; 21081ed18abSAlexei Starovoitov const int ret = BPF_REG_0; 21181ed18abSAlexei Starovoitov const int map_ptr = BPF_REG_1; 21281ed18abSAlexei Starovoitov const int index = BPF_REG_2; 21381ed18abSAlexei Starovoitov 2144a8f87e6SDaniel Borkmann if (map->map_flags & BPF_F_INNER_MAP) 2154a8f87e6SDaniel Borkmann return -EOPNOTSUPP; 2164a8f87e6SDaniel Borkmann 21781ed18abSAlexei Starovoitov *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 21881ed18abSAlexei Starovoitov *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 2192c78ee89SAlexei Starovoitov if (!map->bypass_spec_v1) { 220b2157399SAlexei Starovoitov *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4); 221b2157399SAlexei Starovoitov *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); 222b2157399SAlexei Starovoitov } else { 223fad73a1aSMartin KaFai Lau *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); 224b2157399SAlexei Starovoitov } 225fad73a1aSMartin KaFai Lau 226fad73a1aSMartin KaFai Lau if (is_power_of_2(elem_size)) { 22781ed18abSAlexei Starovoitov *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 22881ed18abSAlexei Starovoitov } else { 22981ed18abSAlexei Starovoitov *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size); 23081ed18abSAlexei Starovoitov } 23181ed18abSAlexei Starovoitov *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr); 23281ed18abSAlexei Starovoitov *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); 23381ed18abSAlexei Starovoitov *insn++ = BPF_MOV64_IMM(ret, 0); 23481ed18abSAlexei Starovoitov return insn - insn_buf; 23581ed18abSAlexei Starovoitov } 23681ed18abSAlexei Starovoitov 237a10423b8SAlexei Starovoitov /* Called from eBPF program */ 238a10423b8SAlexei Starovoitov static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key) 239a10423b8SAlexei Starovoitov { 240a10423b8SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 241a10423b8SAlexei Starovoitov u32 index = *(u32 *)key; 242a10423b8SAlexei Starovoitov 243a10423b8SAlexei Starovoitov if (unlikely(index >= array->map.max_entries)) 244a10423b8SAlexei Starovoitov return NULL; 245a10423b8SAlexei Starovoitov 246b2157399SAlexei Starovoitov return this_cpu_ptr(array->pptrs[index & array->index_mask]); 247a10423b8SAlexei Starovoitov } 248a10423b8SAlexei Starovoitov 24907343110SFeng Zhou static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) 25007343110SFeng Zhou { 25107343110SFeng Zhou struct bpf_array *array = container_of(map, struct bpf_array, map); 25207343110SFeng Zhou u32 index = *(u32 *)key; 25307343110SFeng Zhou 25407343110SFeng Zhou if (cpu >= nr_cpu_ids) 25507343110SFeng Zhou return NULL; 25607343110SFeng Zhou 25707343110SFeng Zhou if (unlikely(index >= array->map.max_entries)) 25807343110SFeng Zhou return NULL; 25907343110SFeng Zhou 26007343110SFeng Zhou return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu); 26107343110SFeng Zhou } 26207343110SFeng Zhou 26315a07b33SAlexei Starovoitov int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) 26415a07b33SAlexei Starovoitov { 26515a07b33SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 26615a07b33SAlexei Starovoitov u32 index = *(u32 *)key; 26715a07b33SAlexei Starovoitov void __percpu *pptr; 26815a07b33SAlexei Starovoitov int cpu, off = 0; 26915a07b33SAlexei Starovoitov u32 size; 27015a07b33SAlexei Starovoitov 27115a07b33SAlexei Starovoitov if (unlikely(index >= array->map.max_entries)) 27215a07b33SAlexei Starovoitov return -ENOENT; 27315a07b33SAlexei Starovoitov 27415a07b33SAlexei Starovoitov /* per_cpu areas are zero-filled and bpf programs can only 27515a07b33SAlexei Starovoitov * access 'value_size' of them, so copying rounded areas 27615a07b33SAlexei Starovoitov * will not leak any kernel data 27715a07b33SAlexei Starovoitov */ 278d937bc34SAndrii Nakryiko size = array->elem_size; 27915a07b33SAlexei Starovoitov rcu_read_lock(); 280b2157399SAlexei Starovoitov pptr = array->pptrs[index & array->index_mask]; 28115a07b33SAlexei Starovoitov for_each_possible_cpu(cpu) { 2826df4ea1fSKumar Kartikeya Dwivedi copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu)); 2836df4ea1fSKumar Kartikeya Dwivedi check_and_init_map_value(map, value + off); 28415a07b33SAlexei Starovoitov off += size; 28515a07b33SAlexei Starovoitov } 28615a07b33SAlexei Starovoitov rcu_read_unlock(); 28715a07b33SAlexei Starovoitov return 0; 28815a07b33SAlexei Starovoitov } 28915a07b33SAlexei Starovoitov 29028fbcfa0SAlexei Starovoitov /* Called from syscall */ 29128fbcfa0SAlexei Starovoitov static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) 29228fbcfa0SAlexei Starovoitov { 29328fbcfa0SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 2948fe45924STeng Qin u32 index = key ? *(u32 *)key : U32_MAX; 29528fbcfa0SAlexei Starovoitov u32 *next = (u32 *)next_key; 29628fbcfa0SAlexei Starovoitov 29728fbcfa0SAlexei Starovoitov if (index >= array->map.max_entries) { 29828fbcfa0SAlexei Starovoitov *next = 0; 29928fbcfa0SAlexei Starovoitov return 0; 30028fbcfa0SAlexei Starovoitov } 30128fbcfa0SAlexei Starovoitov 30228fbcfa0SAlexei Starovoitov if (index == array->map.max_entries - 1) 30328fbcfa0SAlexei Starovoitov return -ENOENT; 30428fbcfa0SAlexei Starovoitov 30528fbcfa0SAlexei Starovoitov *next = index + 1; 30628fbcfa0SAlexei Starovoitov return 0; 30728fbcfa0SAlexei Starovoitov } 30828fbcfa0SAlexei Starovoitov 30928fbcfa0SAlexei Starovoitov /* Called from syscall or from eBPF program */ 310d7ba4cc9SJP Kobryn static long array_map_update_elem(struct bpf_map *map, void *key, void *value, 31128fbcfa0SAlexei Starovoitov u64 map_flags) 31228fbcfa0SAlexei Starovoitov { 31328fbcfa0SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 31428fbcfa0SAlexei Starovoitov u32 index = *(u32 *)key; 31596049f3aSAlexei Starovoitov char *val; 31628fbcfa0SAlexei Starovoitov 31796049f3aSAlexei Starovoitov if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST)) 31828fbcfa0SAlexei Starovoitov /* unknown flags */ 31928fbcfa0SAlexei Starovoitov return -EINVAL; 32028fbcfa0SAlexei Starovoitov 321a10423b8SAlexei Starovoitov if (unlikely(index >= array->map.max_entries)) 32228fbcfa0SAlexei Starovoitov /* all elements were pre-allocated, cannot insert a new one */ 32328fbcfa0SAlexei Starovoitov return -E2BIG; 32428fbcfa0SAlexei Starovoitov 32596049f3aSAlexei Starovoitov if (unlikely(map_flags & BPF_NOEXIST)) 326daaf427cSAlexei Starovoitov /* all elements already exist */ 32728fbcfa0SAlexei Starovoitov return -EEXIST; 32828fbcfa0SAlexei Starovoitov 32996049f3aSAlexei Starovoitov if (unlikely((map_flags & BPF_F_LOCK) && 330db559117SKumar Kartikeya Dwivedi !btf_record_has_field(map->record, BPF_SPIN_LOCK))) 33196049f3aSAlexei Starovoitov return -EINVAL; 33296049f3aSAlexei Starovoitov 33396049f3aSAlexei Starovoitov if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 3346df4ea1fSKumar Kartikeya Dwivedi val = this_cpu_ptr(array->pptrs[index & array->index_mask]); 3356df4ea1fSKumar Kartikeya Dwivedi copy_map_value(map, val, value); 336db559117SKumar Kartikeya Dwivedi bpf_obj_free_fields(array->map.record, val); 33796049f3aSAlexei Starovoitov } else { 33896049f3aSAlexei Starovoitov val = array->value + 33987ac0d60SAndrii Nakryiko (u64)array->elem_size * (index & array->index_mask); 34096049f3aSAlexei Starovoitov if (map_flags & BPF_F_LOCK) 34196049f3aSAlexei Starovoitov copy_map_value_locked(map, val, value, false); 342a10423b8SAlexei Starovoitov else 34396049f3aSAlexei Starovoitov copy_map_value(map, val, value); 344db559117SKumar Kartikeya Dwivedi bpf_obj_free_fields(array->map.record, val); 34596049f3aSAlexei Starovoitov } 34628fbcfa0SAlexei Starovoitov return 0; 34728fbcfa0SAlexei Starovoitov } 34828fbcfa0SAlexei Starovoitov 34915a07b33SAlexei Starovoitov int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, 35015a07b33SAlexei Starovoitov u64 map_flags) 35115a07b33SAlexei Starovoitov { 35215a07b33SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 35315a07b33SAlexei Starovoitov u32 index = *(u32 *)key; 35415a07b33SAlexei Starovoitov void __percpu *pptr; 35515a07b33SAlexei Starovoitov int cpu, off = 0; 35615a07b33SAlexei Starovoitov u32 size; 35715a07b33SAlexei Starovoitov 35815a07b33SAlexei Starovoitov if (unlikely(map_flags > BPF_EXIST)) 35915a07b33SAlexei Starovoitov /* unknown flags */ 36015a07b33SAlexei Starovoitov return -EINVAL; 36115a07b33SAlexei Starovoitov 36215a07b33SAlexei Starovoitov if (unlikely(index >= array->map.max_entries)) 36315a07b33SAlexei Starovoitov /* all elements were pre-allocated, cannot insert a new one */ 36415a07b33SAlexei Starovoitov return -E2BIG; 36515a07b33SAlexei Starovoitov 36615a07b33SAlexei Starovoitov if (unlikely(map_flags == BPF_NOEXIST)) 36715a07b33SAlexei Starovoitov /* all elements already exist */ 36815a07b33SAlexei Starovoitov return -EEXIST; 36915a07b33SAlexei Starovoitov 37015a07b33SAlexei Starovoitov /* the user space will provide round_up(value_size, 8) bytes that 37115a07b33SAlexei Starovoitov * will be copied into per-cpu area. bpf programs can only access 37215a07b33SAlexei Starovoitov * value_size of it. During lookup the same extra bytes will be 37315a07b33SAlexei Starovoitov * returned or zeros which were zero-filled by percpu_alloc, 37415a07b33SAlexei Starovoitov * so no kernel data leaks possible 37515a07b33SAlexei Starovoitov */ 376d937bc34SAndrii Nakryiko size = array->elem_size; 37715a07b33SAlexei Starovoitov rcu_read_lock(); 378b2157399SAlexei Starovoitov pptr = array->pptrs[index & array->index_mask]; 37915a07b33SAlexei Starovoitov for_each_possible_cpu(cpu) { 3806df4ea1fSKumar Kartikeya Dwivedi copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off); 381db559117SKumar Kartikeya Dwivedi bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu)); 38215a07b33SAlexei Starovoitov off += size; 38315a07b33SAlexei Starovoitov } 38415a07b33SAlexei Starovoitov rcu_read_unlock(); 38515a07b33SAlexei Starovoitov return 0; 38615a07b33SAlexei Starovoitov } 38715a07b33SAlexei Starovoitov 38828fbcfa0SAlexei Starovoitov /* Called from syscall or from eBPF program */ 389d7ba4cc9SJP Kobryn static long array_map_delete_elem(struct bpf_map *map, void *key) 39028fbcfa0SAlexei Starovoitov { 39128fbcfa0SAlexei Starovoitov return -EINVAL; 39228fbcfa0SAlexei Starovoitov } 39328fbcfa0SAlexei Starovoitov 394fc970227SAndrii Nakryiko static void *array_map_vmalloc_addr(struct bpf_array *array) 395fc970227SAndrii Nakryiko { 396fc970227SAndrii Nakryiko return (void *)round_down((unsigned long)array, PAGE_SIZE); 397fc970227SAndrii Nakryiko } 398fc970227SAndrii Nakryiko 39968134668SAlexei Starovoitov static void array_map_free_timers(struct bpf_map *map) 40068134668SAlexei Starovoitov { 40168134668SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 40268134668SAlexei Starovoitov int i; 40368134668SAlexei Starovoitov 404aa3496acSKumar Kartikeya Dwivedi /* We don't reset or free fields other than timer on uref dropping to zero. */ 405db559117SKumar Kartikeya Dwivedi if (!btf_record_has_field(map->record, BPF_TIMER)) 40668134668SAlexei Starovoitov return; 40768134668SAlexei Starovoitov 40868134668SAlexei Starovoitov for (i = 0; i < array->map.max_entries; i++) 409db559117SKumar Kartikeya Dwivedi bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i)); 41068134668SAlexei Starovoitov } 41168134668SAlexei Starovoitov 41228fbcfa0SAlexei Starovoitov /* Called when map->refcnt goes to zero, either from workqueue or from syscall */ 41328fbcfa0SAlexei Starovoitov static void array_map_free(struct bpf_map *map) 41428fbcfa0SAlexei Starovoitov { 41528fbcfa0SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 41614a324f6SKumar Kartikeya Dwivedi int i; 41714a324f6SKumar Kartikeya Dwivedi 418aa3496acSKumar Kartikeya Dwivedi if (!IS_ERR_OR_NULL(map->record)) { 4196df4ea1fSKumar Kartikeya Dwivedi if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 4206df4ea1fSKumar Kartikeya Dwivedi for (i = 0; i < array->map.max_entries; i++) { 4216df4ea1fSKumar Kartikeya Dwivedi void __percpu *pptr = array->pptrs[i & array->index_mask]; 4226df4ea1fSKumar Kartikeya Dwivedi int cpu; 4236df4ea1fSKumar Kartikeya Dwivedi 4246df4ea1fSKumar Kartikeya Dwivedi for_each_possible_cpu(cpu) { 425aa3496acSKumar Kartikeya Dwivedi bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu)); 4266df4ea1fSKumar Kartikeya Dwivedi cond_resched(); 4276df4ea1fSKumar Kartikeya Dwivedi } 4286df4ea1fSKumar Kartikeya Dwivedi } 4296df4ea1fSKumar Kartikeya Dwivedi } else { 43014a324f6SKumar Kartikeya Dwivedi for (i = 0; i < array->map.max_entries; i++) 431aa3496acSKumar Kartikeya Dwivedi bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i)); 4326df4ea1fSKumar Kartikeya Dwivedi } 43314a324f6SKumar Kartikeya Dwivedi } 43428fbcfa0SAlexei Starovoitov 435a10423b8SAlexei Starovoitov if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) 436a10423b8SAlexei Starovoitov bpf_array_free_percpu(array); 437a10423b8SAlexei Starovoitov 438fc970227SAndrii Nakryiko if (array->map.map_flags & BPF_F_MMAPABLE) 439fc970227SAndrii Nakryiko bpf_map_area_free(array_map_vmalloc_addr(array)); 440fc970227SAndrii Nakryiko else 441d407bd25SDaniel Borkmann bpf_map_area_free(array); 44228fbcfa0SAlexei Starovoitov } 44328fbcfa0SAlexei Starovoitov 444a26ca7c9SMartin KaFai Lau static void array_map_seq_show_elem(struct bpf_map *map, void *key, 445a26ca7c9SMartin KaFai Lau struct seq_file *m) 446a26ca7c9SMartin KaFai Lau { 447a26ca7c9SMartin KaFai Lau void *value; 448a26ca7c9SMartin KaFai Lau 449a26ca7c9SMartin KaFai Lau rcu_read_lock(); 450a26ca7c9SMartin KaFai Lau 451a26ca7c9SMartin KaFai Lau value = array_map_lookup_elem(map, key); 452a26ca7c9SMartin KaFai Lau if (!value) { 453a26ca7c9SMartin KaFai Lau rcu_read_unlock(); 454a26ca7c9SMartin KaFai Lau return; 455a26ca7c9SMartin KaFai Lau } 456a26ca7c9SMartin KaFai Lau 4572824ecb7SDaniel Borkmann if (map->btf_key_type_id) 458a26ca7c9SMartin KaFai Lau seq_printf(m, "%u: ", *(u32 *)key); 4599b2cf328SMartin KaFai Lau btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); 460a26ca7c9SMartin KaFai Lau seq_puts(m, "\n"); 461a26ca7c9SMartin KaFai Lau 462a26ca7c9SMartin KaFai Lau rcu_read_unlock(); 463a26ca7c9SMartin KaFai Lau } 464a26ca7c9SMartin KaFai Lau 465c7b27c37SYonghong Song static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key, 466c7b27c37SYonghong Song struct seq_file *m) 467c7b27c37SYonghong Song { 468c7b27c37SYonghong Song struct bpf_array *array = container_of(map, struct bpf_array, map); 469c7b27c37SYonghong Song u32 index = *(u32 *)key; 470c7b27c37SYonghong Song void __percpu *pptr; 471c7b27c37SYonghong Song int cpu; 472c7b27c37SYonghong Song 473c7b27c37SYonghong Song rcu_read_lock(); 474c7b27c37SYonghong Song 475c7b27c37SYonghong Song seq_printf(m, "%u: {\n", *(u32 *)key); 476c7b27c37SYonghong Song pptr = array->pptrs[index & array->index_mask]; 477c7b27c37SYonghong Song for_each_possible_cpu(cpu) { 478c7b27c37SYonghong Song seq_printf(m, "\tcpu%d: ", cpu); 479c7b27c37SYonghong Song btf_type_seq_show(map->btf, map->btf_value_type_id, 480c7b27c37SYonghong Song per_cpu_ptr(pptr, cpu), m); 481c7b27c37SYonghong Song seq_puts(m, "\n"); 482c7b27c37SYonghong Song } 483c7b27c37SYonghong Song seq_puts(m, "}\n"); 484c7b27c37SYonghong Song 485c7b27c37SYonghong Song rcu_read_unlock(); 486c7b27c37SYonghong Song } 487c7b27c37SYonghong Song 488e8d2bec0SDaniel Borkmann static int array_map_check_btf(const struct bpf_map *map, 4891b2b234bSRoman Gushchin const struct btf *btf, 490e8d2bec0SDaniel Borkmann const struct btf_type *key_type, 491e8d2bec0SDaniel Borkmann const struct btf_type *value_type) 492a26ca7c9SMartin KaFai Lau { 493a26ca7c9SMartin KaFai Lau u32 int_data; 494a26ca7c9SMartin KaFai Lau 4952824ecb7SDaniel Borkmann /* One exception for keyless BTF: .bss/.data/.rodata map */ 4962824ecb7SDaniel Borkmann if (btf_type_is_void(key_type)) { 4972824ecb7SDaniel Borkmann if (map->map_type != BPF_MAP_TYPE_ARRAY || 4982824ecb7SDaniel Borkmann map->max_entries != 1) 4992824ecb7SDaniel Borkmann return -EINVAL; 5002824ecb7SDaniel Borkmann 5012824ecb7SDaniel Borkmann if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC) 5022824ecb7SDaniel Borkmann return -EINVAL; 5032824ecb7SDaniel Borkmann 5042824ecb7SDaniel Borkmann return 0; 5052824ecb7SDaniel Borkmann } 5062824ecb7SDaniel Borkmann 507e8d2bec0SDaniel Borkmann if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT) 508a26ca7c9SMartin KaFai Lau return -EINVAL; 509a26ca7c9SMartin KaFai Lau 510a26ca7c9SMartin KaFai Lau int_data = *(u32 *)(key_type + 1); 511e8d2bec0SDaniel Borkmann /* bpf array can only take a u32 key. This check makes sure 512e8d2bec0SDaniel Borkmann * that the btf matches the attr used during map_create. 513a26ca7c9SMartin KaFai Lau */ 514e8d2bec0SDaniel Borkmann if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data)) 515a26ca7c9SMartin KaFai Lau return -EINVAL; 516a26ca7c9SMartin KaFai Lau 517a26ca7c9SMartin KaFai Lau return 0; 518a26ca7c9SMartin KaFai Lau } 519a26ca7c9SMartin KaFai Lau 520b2e2f0e6SYueHaibing static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) 521fc970227SAndrii Nakryiko { 522fc970227SAndrii Nakryiko struct bpf_array *array = container_of(map, struct bpf_array, map); 523fc970227SAndrii Nakryiko pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT; 524fc970227SAndrii Nakryiko 525fc970227SAndrii Nakryiko if (!(map->map_flags & BPF_F_MMAPABLE)) 526fc970227SAndrii Nakryiko return -EINVAL; 527fc970227SAndrii Nakryiko 528333291ceSAndrii Nakryiko if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > 529333291ceSAndrii Nakryiko PAGE_ALIGN((u64)array->map.max_entries * array->elem_size)) 530333291ceSAndrii Nakryiko return -EINVAL; 531333291ceSAndrii Nakryiko 532333291ceSAndrii Nakryiko return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), 533333291ceSAndrii Nakryiko vma->vm_pgoff + pgoff); 534fc970227SAndrii Nakryiko } 535fc970227SAndrii Nakryiko 536134fede4SMartin KaFai Lau static bool array_map_meta_equal(const struct bpf_map *meta0, 537134fede4SMartin KaFai Lau const struct bpf_map *meta1) 538134fede4SMartin KaFai Lau { 5394a8f87e6SDaniel Borkmann if (!bpf_map_meta_equal(meta0, meta1)) 5404a8f87e6SDaniel Borkmann return false; 5414a8f87e6SDaniel Borkmann return meta0->map_flags & BPF_F_INNER_MAP ? true : 5424a8f87e6SDaniel Borkmann meta0->max_entries == meta1->max_entries; 543134fede4SMartin KaFai Lau } 544134fede4SMartin KaFai Lau 545d3cc2ab5SYonghong Song struct bpf_iter_seq_array_map_info { 546d3cc2ab5SYonghong Song struct bpf_map *map; 547d3cc2ab5SYonghong Song void *percpu_value_buf; 548d3cc2ab5SYonghong Song u32 index; 549d3cc2ab5SYonghong Song }; 550d3cc2ab5SYonghong Song 551d3cc2ab5SYonghong Song static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos) 552d3cc2ab5SYonghong Song { 553d3cc2ab5SYonghong Song struct bpf_iter_seq_array_map_info *info = seq->private; 554d3cc2ab5SYonghong Song struct bpf_map *map = info->map; 555d3cc2ab5SYonghong Song struct bpf_array *array; 556d3cc2ab5SYonghong Song u32 index; 557d3cc2ab5SYonghong Song 558d3cc2ab5SYonghong Song if (info->index >= map->max_entries) 559d3cc2ab5SYonghong Song return NULL; 560d3cc2ab5SYonghong Song 561d3cc2ab5SYonghong Song if (*pos == 0) 562d3cc2ab5SYonghong Song ++*pos; 563d3cc2ab5SYonghong Song array = container_of(map, struct bpf_array, map); 564d3cc2ab5SYonghong Song index = info->index & array->index_mask; 565d3cc2ab5SYonghong Song if (info->percpu_value_buf) 566d3cc2ab5SYonghong Song return array->pptrs[index]; 56787ac0d60SAndrii Nakryiko return array_map_elem_ptr(array, index); 568d3cc2ab5SYonghong Song } 569d3cc2ab5SYonghong Song 570d3cc2ab5SYonghong Song static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) 571d3cc2ab5SYonghong Song { 572d3cc2ab5SYonghong Song struct bpf_iter_seq_array_map_info *info = seq->private; 573d3cc2ab5SYonghong Song struct bpf_map *map = info->map; 574d3cc2ab5SYonghong Song struct bpf_array *array; 575d3cc2ab5SYonghong Song u32 index; 576d3cc2ab5SYonghong Song 577d3cc2ab5SYonghong Song ++*pos; 578d3cc2ab5SYonghong Song ++info->index; 579d3cc2ab5SYonghong Song if (info->index >= map->max_entries) 580d3cc2ab5SYonghong Song return NULL; 581d3cc2ab5SYonghong Song 582d3cc2ab5SYonghong Song array = container_of(map, struct bpf_array, map); 583d3cc2ab5SYonghong Song index = info->index & array->index_mask; 584d3cc2ab5SYonghong Song if (info->percpu_value_buf) 585d3cc2ab5SYonghong Song return array->pptrs[index]; 58687ac0d60SAndrii Nakryiko return array_map_elem_ptr(array, index); 587d3cc2ab5SYonghong Song } 588d3cc2ab5SYonghong Song 589d3cc2ab5SYonghong Song static int __bpf_array_map_seq_show(struct seq_file *seq, void *v) 590d3cc2ab5SYonghong Song { 591d3cc2ab5SYonghong Song struct bpf_iter_seq_array_map_info *info = seq->private; 592d3cc2ab5SYonghong Song struct bpf_iter__bpf_map_elem ctx = {}; 593d3cc2ab5SYonghong Song struct bpf_map *map = info->map; 594d937bc34SAndrii Nakryiko struct bpf_array *array = container_of(map, struct bpf_array, map); 595d3cc2ab5SYonghong Song struct bpf_iter_meta meta; 596d3cc2ab5SYonghong Song struct bpf_prog *prog; 597d3cc2ab5SYonghong Song int off = 0, cpu = 0; 598d3cc2ab5SYonghong Song void __percpu **pptr; 599d3cc2ab5SYonghong Song u32 size; 600d3cc2ab5SYonghong Song 601d3cc2ab5SYonghong Song meta.seq = seq; 602d3cc2ab5SYonghong Song prog = bpf_iter_get_info(&meta, v == NULL); 603d3cc2ab5SYonghong Song if (!prog) 604d3cc2ab5SYonghong Song return 0; 605d3cc2ab5SYonghong Song 606d3cc2ab5SYonghong Song ctx.meta = &meta; 607d3cc2ab5SYonghong Song ctx.map = info->map; 608d3cc2ab5SYonghong Song if (v) { 609d3cc2ab5SYonghong Song ctx.key = &info->index; 610d3cc2ab5SYonghong Song 611d3cc2ab5SYonghong Song if (!info->percpu_value_buf) { 612d3cc2ab5SYonghong Song ctx.value = v; 613d3cc2ab5SYonghong Song } else { 614d3cc2ab5SYonghong Song pptr = v; 615d937bc34SAndrii Nakryiko size = array->elem_size; 616d3cc2ab5SYonghong Song for_each_possible_cpu(cpu) { 6176df4ea1fSKumar Kartikeya Dwivedi copy_map_value_long(map, info->percpu_value_buf + off, 6186df4ea1fSKumar Kartikeya Dwivedi per_cpu_ptr(pptr, cpu)); 6196df4ea1fSKumar Kartikeya Dwivedi check_and_init_map_value(map, info->percpu_value_buf + off); 620d3cc2ab5SYonghong Song off += size; 621d3cc2ab5SYonghong Song } 622d3cc2ab5SYonghong Song ctx.value = info->percpu_value_buf; 623d3cc2ab5SYonghong Song } 624d3cc2ab5SYonghong Song } 625d3cc2ab5SYonghong Song 626d3cc2ab5SYonghong Song return bpf_iter_run_prog(prog, &ctx); 627d3cc2ab5SYonghong Song } 628d3cc2ab5SYonghong Song 629d3cc2ab5SYonghong Song static int bpf_array_map_seq_show(struct seq_file *seq, void *v) 630d3cc2ab5SYonghong Song { 631d3cc2ab5SYonghong Song return __bpf_array_map_seq_show(seq, v); 632d3cc2ab5SYonghong Song } 633d3cc2ab5SYonghong Song 634d3cc2ab5SYonghong Song static void bpf_array_map_seq_stop(struct seq_file *seq, void *v) 635d3cc2ab5SYonghong Song { 636d3cc2ab5SYonghong Song if (!v) 637d3cc2ab5SYonghong Song (void)__bpf_array_map_seq_show(seq, NULL); 638d3cc2ab5SYonghong Song } 639d3cc2ab5SYonghong Song 640d3cc2ab5SYonghong Song static int bpf_iter_init_array_map(void *priv_data, 641d3cc2ab5SYonghong Song struct bpf_iter_aux_info *aux) 642d3cc2ab5SYonghong Song { 643d3cc2ab5SYonghong Song struct bpf_iter_seq_array_map_info *seq_info = priv_data; 644d3cc2ab5SYonghong Song struct bpf_map *map = aux->map; 645d937bc34SAndrii Nakryiko struct bpf_array *array = container_of(map, struct bpf_array, map); 646d3cc2ab5SYonghong Song void *value_buf; 647d3cc2ab5SYonghong Song u32 buf_size; 648d3cc2ab5SYonghong Song 649d3cc2ab5SYonghong Song if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { 650d937bc34SAndrii Nakryiko buf_size = array->elem_size * num_possible_cpus(); 651d3cc2ab5SYonghong Song value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN); 652d3cc2ab5SYonghong Song if (!value_buf) 653d3cc2ab5SYonghong Song return -ENOMEM; 654d3cc2ab5SYonghong Song 655d3cc2ab5SYonghong Song seq_info->percpu_value_buf = value_buf; 656d3cc2ab5SYonghong Song } 657d3cc2ab5SYonghong Song 658f76fa6b3SHou Tao /* bpf_iter_attach_map() acquires a map uref, and the uref may be 659f76fa6b3SHou Tao * released before or in the middle of iterating map elements, so 660f76fa6b3SHou Tao * acquire an extra map uref for iterator. 661f76fa6b3SHou Tao */ 662f76fa6b3SHou Tao bpf_map_inc_with_uref(map); 663d3cc2ab5SYonghong Song seq_info->map = map; 664d3cc2ab5SYonghong Song return 0; 665d3cc2ab5SYonghong Song } 666d3cc2ab5SYonghong Song 667d3cc2ab5SYonghong Song static void bpf_iter_fini_array_map(void *priv_data) 668d3cc2ab5SYonghong Song { 669d3cc2ab5SYonghong Song struct bpf_iter_seq_array_map_info *seq_info = priv_data; 670d3cc2ab5SYonghong Song 671f76fa6b3SHou Tao bpf_map_put_with_uref(seq_info->map); 672d3cc2ab5SYonghong Song kfree(seq_info->percpu_value_buf); 673d3cc2ab5SYonghong Song } 674d3cc2ab5SYonghong Song 675d3cc2ab5SYonghong Song static const struct seq_operations bpf_array_map_seq_ops = { 676d3cc2ab5SYonghong Song .start = bpf_array_map_seq_start, 677d3cc2ab5SYonghong Song .next = bpf_array_map_seq_next, 678d3cc2ab5SYonghong Song .stop = bpf_array_map_seq_stop, 679d3cc2ab5SYonghong Song .show = bpf_array_map_seq_show, 680d3cc2ab5SYonghong Song }; 681d3cc2ab5SYonghong Song 682d3cc2ab5SYonghong Song static const struct bpf_iter_seq_info iter_seq_info = { 683d3cc2ab5SYonghong Song .seq_ops = &bpf_array_map_seq_ops, 684d3cc2ab5SYonghong Song .init_seq_private = bpf_iter_init_array_map, 685d3cc2ab5SYonghong Song .fini_seq_private = bpf_iter_fini_array_map, 686d3cc2ab5SYonghong Song .seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info), 687d3cc2ab5SYonghong Song }; 688d3cc2ab5SYonghong Song 689d7ba4cc9SJP Kobryn static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn, 69006dcdcd4SYonghong Song void *callback_ctx, u64 flags) 69106dcdcd4SYonghong Song { 69206dcdcd4SYonghong Song u32 i, key, num_elems = 0; 69306dcdcd4SYonghong Song struct bpf_array *array; 69406dcdcd4SYonghong Song bool is_percpu; 69506dcdcd4SYonghong Song u64 ret = 0; 69606dcdcd4SYonghong Song void *val; 69706dcdcd4SYonghong Song 69806dcdcd4SYonghong Song if (flags != 0) 69906dcdcd4SYonghong Song return -EINVAL; 70006dcdcd4SYonghong Song 70106dcdcd4SYonghong Song is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 70206dcdcd4SYonghong Song array = container_of(map, struct bpf_array, map); 70306dcdcd4SYonghong Song if (is_percpu) 70406dcdcd4SYonghong Song migrate_disable(); 70506dcdcd4SYonghong Song for (i = 0; i < map->max_entries; i++) { 70606dcdcd4SYonghong Song if (is_percpu) 70706dcdcd4SYonghong Song val = this_cpu_ptr(array->pptrs[i]); 70806dcdcd4SYonghong Song else 70987ac0d60SAndrii Nakryiko val = array_map_elem_ptr(array, i); 71006dcdcd4SYonghong Song num_elems++; 71106dcdcd4SYonghong Song key = i; 712102acbacSKees Cook ret = callback_fn((u64)(long)map, (u64)(long)&key, 713102acbacSKees Cook (u64)(long)val, (u64)(long)callback_ctx, 0); 71406dcdcd4SYonghong Song /* return value: 0 - continue, 1 - stop and return */ 71506dcdcd4SYonghong Song if (ret) 71606dcdcd4SYonghong Song break; 71706dcdcd4SYonghong Song } 71806dcdcd4SYonghong Song 71906dcdcd4SYonghong Song if (is_percpu) 72006dcdcd4SYonghong Song migrate_enable(); 72106dcdcd4SYonghong Song return num_elems; 72206dcdcd4SYonghong Song } 72306dcdcd4SYonghong Song 7241746d055SYafang Shao static u64 array_map_mem_usage(const struct bpf_map *map) 7251746d055SYafang Shao { 7261746d055SYafang Shao struct bpf_array *array = container_of(map, struct bpf_array, map); 7271746d055SYafang Shao bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; 7281746d055SYafang Shao u32 elem_size = array->elem_size; 7291746d055SYafang Shao u64 entries = map->max_entries; 7301746d055SYafang Shao u64 usage = sizeof(*array); 7311746d055SYafang Shao 7321746d055SYafang Shao if (percpu) { 7331746d055SYafang Shao usage += entries * sizeof(void *); 7341746d055SYafang Shao usage += entries * elem_size * num_possible_cpus(); 7351746d055SYafang Shao } else { 7361746d055SYafang Shao if (map->map_flags & BPF_F_MMAPABLE) { 7371746d055SYafang Shao usage = PAGE_ALIGN(usage); 7381746d055SYafang Shao usage += PAGE_ALIGN(entries * elem_size); 7391746d055SYafang Shao } else { 7401746d055SYafang Shao usage += entries * elem_size; 7411746d055SYafang Shao } 7421746d055SYafang Shao } 7431746d055SYafang Shao return usage; 7441746d055SYafang Shao } 7451746d055SYafang Shao 746c317ab71SMenglong Dong BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array) 74740077e0cSJohannes Berg const struct bpf_map_ops array_map_ops = { 748134fede4SMartin KaFai Lau .map_meta_equal = array_map_meta_equal, 749ad46061fSJakub Kicinski .map_alloc_check = array_map_alloc_check, 75028fbcfa0SAlexei Starovoitov .map_alloc = array_map_alloc, 75128fbcfa0SAlexei Starovoitov .map_free = array_map_free, 75228fbcfa0SAlexei Starovoitov .map_get_next_key = array_map_get_next_key, 75368134668SAlexei Starovoitov .map_release_uref = array_map_free_timers, 75428fbcfa0SAlexei Starovoitov .map_lookup_elem = array_map_lookup_elem, 75528fbcfa0SAlexei Starovoitov .map_update_elem = array_map_update_elem, 75628fbcfa0SAlexei Starovoitov .map_delete_elem = array_map_delete_elem, 75781ed18abSAlexei Starovoitov .map_gen_lookup = array_map_gen_lookup, 758d8eca5bbSDaniel Borkmann .map_direct_value_addr = array_map_direct_value_addr, 759d8eca5bbSDaniel Borkmann .map_direct_value_meta = array_map_direct_value_meta, 760fc970227SAndrii Nakryiko .map_mmap = array_map_mmap, 761a26ca7c9SMartin KaFai Lau .map_seq_show_elem = array_map_seq_show_elem, 762a26ca7c9SMartin KaFai Lau .map_check_btf = array_map_check_btf, 763c60f2d28SBrian Vazquez .map_lookup_batch = generic_map_lookup_batch, 764c60f2d28SBrian Vazquez .map_update_batch = generic_map_update_batch, 76506dcdcd4SYonghong Song .map_set_for_each_callback_args = map_set_for_each_callback_args, 76606dcdcd4SYonghong Song .map_for_each_callback = bpf_for_each_array_elem, 7671746d055SYafang Shao .map_mem_usage = array_map_mem_usage, 768c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0], 769d3cc2ab5SYonghong Song .iter_seq_info = &iter_seq_info, 77028fbcfa0SAlexei Starovoitov }; 77128fbcfa0SAlexei Starovoitov 77240077e0cSJohannes Berg const struct bpf_map_ops percpu_array_map_ops = { 773f4d05259SMartin KaFai Lau .map_meta_equal = bpf_map_meta_equal, 774ad46061fSJakub Kicinski .map_alloc_check = array_map_alloc_check, 775a10423b8SAlexei Starovoitov .map_alloc = array_map_alloc, 776a10423b8SAlexei Starovoitov .map_free = array_map_free, 777a10423b8SAlexei Starovoitov .map_get_next_key = array_map_get_next_key, 778a10423b8SAlexei Starovoitov .map_lookup_elem = percpu_array_map_lookup_elem, 779a10423b8SAlexei Starovoitov .map_update_elem = array_map_update_elem, 780a10423b8SAlexei Starovoitov .map_delete_elem = array_map_delete_elem, 78107343110SFeng Zhou .map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem, 782c7b27c37SYonghong Song .map_seq_show_elem = percpu_array_map_seq_show_elem, 783e8d2bec0SDaniel Borkmann .map_check_btf = array_map_check_btf, 784f008d732SPedro Tammela .map_lookup_batch = generic_map_lookup_batch, 785f008d732SPedro Tammela .map_update_batch = generic_map_update_batch, 78606dcdcd4SYonghong Song .map_set_for_each_callback_args = map_set_for_each_callback_args, 78706dcdcd4SYonghong Song .map_for_each_callback = bpf_for_each_array_elem, 7881746d055SYafang Shao .map_mem_usage = array_map_mem_usage, 789c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0], 790d3cc2ab5SYonghong Song .iter_seq_info = &iter_seq_info, 791a10423b8SAlexei Starovoitov }; 792a10423b8SAlexei Starovoitov 793ad46061fSJakub Kicinski static int fd_array_map_alloc_check(union bpf_attr *attr) 79404fd61abSAlexei Starovoitov { 7952a36f0b9SWang Nan /* only file descriptors can be stored in this type of map */ 79604fd61abSAlexei Starovoitov if (attr->value_size != sizeof(u32)) 797ad46061fSJakub Kicinski return -EINVAL; 798591fe988SDaniel Borkmann /* Program read-only/write-only not supported for special maps yet. */ 799591fe988SDaniel Borkmann if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) 800591fe988SDaniel Borkmann return -EINVAL; 801ad46061fSJakub Kicinski return array_map_alloc_check(attr); 80204fd61abSAlexei Starovoitov } 80304fd61abSAlexei Starovoitov 8042a36f0b9SWang Nan static void fd_array_map_free(struct bpf_map *map) 80504fd61abSAlexei Starovoitov { 80604fd61abSAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 80704fd61abSAlexei Starovoitov int i; 80804fd61abSAlexei Starovoitov 80904fd61abSAlexei Starovoitov /* make sure it's empty */ 81004fd61abSAlexei Starovoitov for (i = 0; i < array->map.max_entries; i++) 8112a36f0b9SWang Nan BUG_ON(array->ptrs[i] != NULL); 812d407bd25SDaniel Borkmann 813d407bd25SDaniel Borkmann bpf_map_area_free(array); 81404fd61abSAlexei Starovoitov } 81504fd61abSAlexei Starovoitov 8162a36f0b9SWang Nan static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) 81704fd61abSAlexei Starovoitov { 8183b4a63f6SPrashant Bhole return ERR_PTR(-EOPNOTSUPP); 81904fd61abSAlexei Starovoitov } 82004fd61abSAlexei Starovoitov 82104fd61abSAlexei Starovoitov /* only called from syscall */ 82214dc6f04SMartin KaFai Lau int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) 82314dc6f04SMartin KaFai Lau { 82414dc6f04SMartin KaFai Lau void **elem, *ptr; 82514dc6f04SMartin KaFai Lau int ret = 0; 82614dc6f04SMartin KaFai Lau 82714dc6f04SMartin KaFai Lau if (!map->ops->map_fd_sys_lookup_elem) 82814dc6f04SMartin KaFai Lau return -ENOTSUPP; 82914dc6f04SMartin KaFai Lau 83014dc6f04SMartin KaFai Lau rcu_read_lock(); 83114dc6f04SMartin KaFai Lau elem = array_map_lookup_elem(map, key); 83214dc6f04SMartin KaFai Lau if (elem && (ptr = READ_ONCE(*elem))) 83314dc6f04SMartin KaFai Lau *value = map->ops->map_fd_sys_lookup_elem(ptr); 83414dc6f04SMartin KaFai Lau else 83514dc6f04SMartin KaFai Lau ret = -ENOENT; 83614dc6f04SMartin KaFai Lau rcu_read_unlock(); 83714dc6f04SMartin KaFai Lau 83814dc6f04SMartin KaFai Lau return ret; 83914dc6f04SMartin KaFai Lau } 84014dc6f04SMartin KaFai Lau 84114dc6f04SMartin KaFai Lau /* only called from syscall */ 842d056a788SDaniel Borkmann int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, 843d056a788SDaniel Borkmann void *key, void *value, u64 map_flags) 84404fd61abSAlexei Starovoitov { 84504fd61abSAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 8462a36f0b9SWang Nan void *new_ptr, *old_ptr; 84704fd61abSAlexei Starovoitov u32 index = *(u32 *)key, ufd; 84804fd61abSAlexei Starovoitov 84904fd61abSAlexei Starovoitov if (map_flags != BPF_ANY) 85004fd61abSAlexei Starovoitov return -EINVAL; 85104fd61abSAlexei Starovoitov 85204fd61abSAlexei Starovoitov if (index >= array->map.max_entries) 85304fd61abSAlexei Starovoitov return -E2BIG; 85404fd61abSAlexei Starovoitov 85504fd61abSAlexei Starovoitov ufd = *(u32 *)value; 856d056a788SDaniel Borkmann new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); 8572a36f0b9SWang Nan if (IS_ERR(new_ptr)) 8582a36f0b9SWang Nan return PTR_ERR(new_ptr); 85904fd61abSAlexei Starovoitov 860da765a2fSDaniel Borkmann if (map->ops->map_poke_run) { 861da765a2fSDaniel Borkmann mutex_lock(&array->aux->poke_mutex); 8622a36f0b9SWang Nan old_ptr = xchg(array->ptrs + index, new_ptr); 863da765a2fSDaniel Borkmann map->ops->map_poke_run(map, index, old_ptr, new_ptr); 864da765a2fSDaniel Borkmann mutex_unlock(&array->aux->poke_mutex); 865da765a2fSDaniel Borkmann } else { 866da765a2fSDaniel Borkmann old_ptr = xchg(array->ptrs + index, new_ptr); 867da765a2fSDaniel Borkmann } 868da765a2fSDaniel Borkmann 8692a36f0b9SWang Nan if (old_ptr) 87020c20bd1SHou Tao map->ops->map_fd_put_ptr(map, old_ptr, true); 87104fd61abSAlexei Starovoitov return 0; 87204fd61abSAlexei Starovoitov } 87304fd61abSAlexei Starovoitov 87479d93b3cSHou Tao static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer) 87504fd61abSAlexei Starovoitov { 87604fd61abSAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 8772a36f0b9SWang Nan void *old_ptr; 87804fd61abSAlexei Starovoitov u32 index = *(u32 *)key; 87904fd61abSAlexei Starovoitov 88004fd61abSAlexei Starovoitov if (index >= array->map.max_entries) 88104fd61abSAlexei Starovoitov return -E2BIG; 88204fd61abSAlexei Starovoitov 883da765a2fSDaniel Borkmann if (map->ops->map_poke_run) { 884da765a2fSDaniel Borkmann mutex_lock(&array->aux->poke_mutex); 8852a36f0b9SWang Nan old_ptr = xchg(array->ptrs + index, NULL); 886da765a2fSDaniel Borkmann map->ops->map_poke_run(map, index, old_ptr, NULL); 887da765a2fSDaniel Borkmann mutex_unlock(&array->aux->poke_mutex); 888da765a2fSDaniel Borkmann } else { 889da765a2fSDaniel Borkmann old_ptr = xchg(array->ptrs + index, NULL); 890da765a2fSDaniel Borkmann } 891da765a2fSDaniel Borkmann 8922a36f0b9SWang Nan if (old_ptr) { 89379d93b3cSHou Tao map->ops->map_fd_put_ptr(map, old_ptr, need_defer); 89404fd61abSAlexei Starovoitov return 0; 89504fd61abSAlexei Starovoitov } else { 89604fd61abSAlexei Starovoitov return -ENOENT; 89704fd61abSAlexei Starovoitov } 89804fd61abSAlexei Starovoitov } 89904fd61abSAlexei Starovoitov 90079d93b3cSHou Tao static long fd_array_map_delete_elem(struct bpf_map *map, void *key) 90179d93b3cSHou Tao { 90279d93b3cSHou Tao return __fd_array_map_delete_elem(map, key, true); 90379d93b3cSHou Tao } 90479d93b3cSHou Tao 905d056a788SDaniel Borkmann static void *prog_fd_array_get_ptr(struct bpf_map *map, 906d056a788SDaniel Borkmann struct file *map_file, int fd) 9072a36f0b9SWang Nan { 9082a36f0b9SWang Nan struct bpf_prog *prog = bpf_prog_get(fd); 909d056a788SDaniel Borkmann 9102a36f0b9SWang Nan if (IS_ERR(prog)) 9112a36f0b9SWang Nan return prog; 9122a36f0b9SWang Nan 913f45d5b6cSToke Hoiland-Jorgensen if (!bpf_prog_map_compatible(map, prog)) { 9142a36f0b9SWang Nan bpf_prog_put(prog); 9152a36f0b9SWang Nan return ERR_PTR(-EINVAL); 9162a36f0b9SWang Nan } 917d056a788SDaniel Borkmann 9182a36f0b9SWang Nan return prog; 9192a36f0b9SWang Nan } 9202a36f0b9SWang Nan 92120c20bd1SHou Tao static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) 9222a36f0b9SWang Nan { 92320c20bd1SHou Tao /* bpf_prog is freed after one RCU or tasks trace grace period */ 9241aacde3dSDaniel Borkmann bpf_prog_put(ptr); 9252a36f0b9SWang Nan } 9262a36f0b9SWang Nan 92714dc6f04SMartin KaFai Lau static u32 prog_fd_array_sys_lookup_elem(void *ptr) 92814dc6f04SMartin KaFai Lau { 92914dc6f04SMartin KaFai Lau return ((struct bpf_prog *)ptr)->aux->id; 93014dc6f04SMartin KaFai Lau } 93114dc6f04SMartin KaFai Lau 93204fd61abSAlexei Starovoitov /* decrement refcnt of all bpf_progs that are stored in this map */ 93379d93b3cSHou Tao static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer) 93404fd61abSAlexei Starovoitov { 93504fd61abSAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 93604fd61abSAlexei Starovoitov int i; 93704fd61abSAlexei Starovoitov 93804fd61abSAlexei Starovoitov for (i = 0; i < array->map.max_entries; i++) 93979d93b3cSHou Tao __fd_array_map_delete_elem(map, &i, need_defer); 94004fd61abSAlexei Starovoitov } 94104fd61abSAlexei Starovoitov 942a7c19db3SYonghong Song static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key, 943a7c19db3SYonghong Song struct seq_file *m) 944a7c19db3SYonghong Song { 945a7c19db3SYonghong Song void **elem, *ptr; 946a7c19db3SYonghong Song u32 prog_id; 947a7c19db3SYonghong Song 948a7c19db3SYonghong Song rcu_read_lock(); 949a7c19db3SYonghong Song 950a7c19db3SYonghong Song elem = array_map_lookup_elem(map, key); 951a7c19db3SYonghong Song if (elem) { 952a7c19db3SYonghong Song ptr = READ_ONCE(*elem); 953a7c19db3SYonghong Song if (ptr) { 954a7c19db3SYonghong Song seq_printf(m, "%u: ", *(u32 *)key); 955a7c19db3SYonghong Song prog_id = prog_fd_array_sys_lookup_elem(ptr); 956a7c19db3SYonghong Song btf_type_seq_show(map->btf, map->btf_value_type_id, 957a7c19db3SYonghong Song &prog_id, m); 958a7c19db3SYonghong Song seq_puts(m, "\n"); 959a7c19db3SYonghong Song } 960a7c19db3SYonghong Song } 961a7c19db3SYonghong Song 962a7c19db3SYonghong Song rcu_read_unlock(); 963a7c19db3SYonghong Song } 964a7c19db3SYonghong Song 965da765a2fSDaniel Borkmann struct prog_poke_elem { 966da765a2fSDaniel Borkmann struct list_head list; 967da765a2fSDaniel Borkmann struct bpf_prog_aux *aux; 968da765a2fSDaniel Borkmann }; 969da765a2fSDaniel Borkmann 970da765a2fSDaniel Borkmann static int prog_array_map_poke_track(struct bpf_map *map, 971da765a2fSDaniel Borkmann struct bpf_prog_aux *prog_aux) 972da765a2fSDaniel Borkmann { 973da765a2fSDaniel Borkmann struct prog_poke_elem *elem; 974da765a2fSDaniel Borkmann struct bpf_array_aux *aux; 975da765a2fSDaniel Borkmann int ret = 0; 976da765a2fSDaniel Borkmann 977da765a2fSDaniel Borkmann aux = container_of(map, struct bpf_array, map)->aux; 978da765a2fSDaniel Borkmann mutex_lock(&aux->poke_mutex); 979da765a2fSDaniel Borkmann list_for_each_entry(elem, &aux->poke_progs, list) { 980da765a2fSDaniel Borkmann if (elem->aux == prog_aux) 981da765a2fSDaniel Borkmann goto out; 982da765a2fSDaniel Borkmann } 983da765a2fSDaniel Borkmann 984da765a2fSDaniel Borkmann elem = kmalloc(sizeof(*elem), GFP_KERNEL); 985da765a2fSDaniel Borkmann if (!elem) { 986da765a2fSDaniel Borkmann ret = -ENOMEM; 987da765a2fSDaniel Borkmann goto out; 988da765a2fSDaniel Borkmann } 989da765a2fSDaniel Borkmann 990da765a2fSDaniel Borkmann INIT_LIST_HEAD(&elem->list); 991da765a2fSDaniel Borkmann /* We must track the program's aux info at this point in time 992da765a2fSDaniel Borkmann * since the program pointer itself may not be stable yet, see 993da765a2fSDaniel Borkmann * also comment in prog_array_map_poke_run(). 994da765a2fSDaniel Borkmann */ 995da765a2fSDaniel Borkmann elem->aux = prog_aux; 996da765a2fSDaniel Borkmann 997da765a2fSDaniel Borkmann list_add_tail(&elem->list, &aux->poke_progs); 998da765a2fSDaniel Borkmann out: 999da765a2fSDaniel Borkmann mutex_unlock(&aux->poke_mutex); 1000da765a2fSDaniel Borkmann return ret; 1001da765a2fSDaniel Borkmann } 1002da765a2fSDaniel Borkmann 1003da765a2fSDaniel Borkmann static void prog_array_map_poke_untrack(struct bpf_map *map, 1004da765a2fSDaniel Borkmann struct bpf_prog_aux *prog_aux) 1005da765a2fSDaniel Borkmann { 1006da765a2fSDaniel Borkmann struct prog_poke_elem *elem, *tmp; 1007da765a2fSDaniel Borkmann struct bpf_array_aux *aux; 1008da765a2fSDaniel Borkmann 1009da765a2fSDaniel Borkmann aux = container_of(map, struct bpf_array, map)->aux; 1010da765a2fSDaniel Borkmann mutex_lock(&aux->poke_mutex); 1011da765a2fSDaniel Borkmann list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) { 1012da765a2fSDaniel Borkmann if (elem->aux == prog_aux) { 1013da765a2fSDaniel Borkmann list_del_init(&elem->list); 1014da765a2fSDaniel Borkmann kfree(elem); 1015da765a2fSDaniel Borkmann break; 1016da765a2fSDaniel Borkmann } 1017da765a2fSDaniel Borkmann } 1018da765a2fSDaniel Borkmann mutex_unlock(&aux->poke_mutex); 1019da765a2fSDaniel Borkmann } 1020da765a2fSDaniel Borkmann 1021da765a2fSDaniel Borkmann static void prog_array_map_poke_run(struct bpf_map *map, u32 key, 1022da765a2fSDaniel Borkmann struct bpf_prog *old, 1023da765a2fSDaniel Borkmann struct bpf_prog *new) 1024da765a2fSDaniel Borkmann { 1025ebf7d1f5SMaciej Fijalkowski u8 *old_addr, *new_addr, *old_bypass_addr; 1026da765a2fSDaniel Borkmann struct prog_poke_elem *elem; 1027da765a2fSDaniel Borkmann struct bpf_array_aux *aux; 1028da765a2fSDaniel Borkmann 1029da765a2fSDaniel Borkmann aux = container_of(map, struct bpf_array, map)->aux; 1030da765a2fSDaniel Borkmann WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex)); 1031da765a2fSDaniel Borkmann 1032da765a2fSDaniel Borkmann list_for_each_entry(elem, &aux->poke_progs, list) { 1033da765a2fSDaniel Borkmann struct bpf_jit_poke_descriptor *poke; 1034da765a2fSDaniel Borkmann int i, ret; 1035da765a2fSDaniel Borkmann 1036da765a2fSDaniel Borkmann for (i = 0; i < elem->aux->size_poke_tab; i++) { 1037da765a2fSDaniel Borkmann poke = &elem->aux->poke_tab[i]; 1038da765a2fSDaniel Borkmann 1039da765a2fSDaniel Borkmann /* Few things to be aware of: 1040da765a2fSDaniel Borkmann * 1041da765a2fSDaniel Borkmann * 1) We can only ever access aux in this context, but 1042da765a2fSDaniel Borkmann * not aux->prog since it might not be stable yet and 1043da765a2fSDaniel Borkmann * there could be danger of use after free otherwise. 1044da765a2fSDaniel Borkmann * 2) Initially when we start tracking aux, the program 1045da765a2fSDaniel Borkmann * is not JITed yet and also does not have a kallsyms 1046cf71b174SMaciej Fijalkowski * entry. We skip these as poke->tailcall_target_stable 1047cf71b174SMaciej Fijalkowski * is not active yet. The JIT will do the final fixup 1048cf71b174SMaciej Fijalkowski * before setting it stable. The various 1049cf71b174SMaciej Fijalkowski * poke->tailcall_target_stable are successively 1050cf71b174SMaciej Fijalkowski * activated, so tail call updates can arrive from here 1051cf71b174SMaciej Fijalkowski * while JIT is still finishing its final fixup for 1052cf71b174SMaciej Fijalkowski * non-activated poke entries. 1053da765a2fSDaniel Borkmann * 3) On program teardown, the program's kallsym entry gets 1054da765a2fSDaniel Borkmann * removed out of RCU callback, but we can only untrack 1055da765a2fSDaniel Borkmann * from sleepable context, therefore bpf_arch_text_poke() 1056da765a2fSDaniel Borkmann * might not see that this is in BPF text section and 1057da765a2fSDaniel Borkmann * bails out with -EINVAL. As these are unreachable since 1058da765a2fSDaniel Borkmann * RCU grace period already passed, we simply skip them. 1059da765a2fSDaniel Borkmann * 4) Also programs reaching refcount of zero while patching 1060da765a2fSDaniel Borkmann * is in progress is okay since we're protected under 1061da765a2fSDaniel Borkmann * poke_mutex and untrack the programs before the JIT 1062da765a2fSDaniel Borkmann * buffer is freed. When we're still in the middle of 1063da765a2fSDaniel Borkmann * patching and suddenly kallsyms entry of the program 1064da765a2fSDaniel Borkmann * gets evicted, we just skip the rest which is fine due 1065da765a2fSDaniel Borkmann * to point 3). 1066da765a2fSDaniel Borkmann * 5) Any other error happening below from bpf_arch_text_poke() 1067da765a2fSDaniel Borkmann * is a unexpected bug. 1068da765a2fSDaniel Borkmann */ 1069cf71b174SMaciej Fijalkowski if (!READ_ONCE(poke->tailcall_target_stable)) 1070da765a2fSDaniel Borkmann continue; 1071da765a2fSDaniel Borkmann if (poke->reason != BPF_POKE_REASON_TAIL_CALL) 1072da765a2fSDaniel Borkmann continue; 1073da765a2fSDaniel Borkmann if (poke->tail_call.map != map || 1074da765a2fSDaniel Borkmann poke->tail_call.key != key) 1075da765a2fSDaniel Borkmann continue; 1076da765a2fSDaniel Borkmann 1077ebf7d1f5SMaciej Fijalkowski old_bypass_addr = old ? NULL : poke->bypass_addr; 1078ebf7d1f5SMaciej Fijalkowski old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL; 1079ebf7d1f5SMaciej Fijalkowski new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL; 1080ebf7d1f5SMaciej Fijalkowski 1081ebf7d1f5SMaciej Fijalkowski if (new) { 1082ebf7d1f5SMaciej Fijalkowski ret = bpf_arch_text_poke(poke->tailcall_target, 1083ebf7d1f5SMaciej Fijalkowski BPF_MOD_JUMP, 1084ebf7d1f5SMaciej Fijalkowski old_addr, new_addr); 1085da765a2fSDaniel Borkmann BUG_ON(ret < 0 && ret != -EINVAL); 1086ebf7d1f5SMaciej Fijalkowski if (!old) { 1087ebf7d1f5SMaciej Fijalkowski ret = bpf_arch_text_poke(poke->tailcall_bypass, 1088ebf7d1f5SMaciej Fijalkowski BPF_MOD_JUMP, 1089ebf7d1f5SMaciej Fijalkowski poke->bypass_addr, 1090ebf7d1f5SMaciej Fijalkowski NULL); 1091ebf7d1f5SMaciej Fijalkowski BUG_ON(ret < 0 && ret != -EINVAL); 1092ebf7d1f5SMaciej Fijalkowski } 1093ebf7d1f5SMaciej Fijalkowski } else { 1094ebf7d1f5SMaciej Fijalkowski ret = bpf_arch_text_poke(poke->tailcall_bypass, 1095ebf7d1f5SMaciej Fijalkowski BPF_MOD_JUMP, 1096ebf7d1f5SMaciej Fijalkowski old_bypass_addr, 1097ebf7d1f5SMaciej Fijalkowski poke->bypass_addr); 1098ebf7d1f5SMaciej Fijalkowski BUG_ON(ret < 0 && ret != -EINVAL); 1099ebf7d1f5SMaciej Fijalkowski /* let other CPUs finish the execution of program 1100ebf7d1f5SMaciej Fijalkowski * so that it will not possible to expose them 1101ebf7d1f5SMaciej Fijalkowski * to invalid nop, stack unwind, nop state 1102ebf7d1f5SMaciej Fijalkowski */ 1103ebf7d1f5SMaciej Fijalkowski if (!ret) 1104ebf7d1f5SMaciej Fijalkowski synchronize_rcu(); 1105ebf7d1f5SMaciej Fijalkowski ret = bpf_arch_text_poke(poke->tailcall_target, 1106ebf7d1f5SMaciej Fijalkowski BPF_MOD_JUMP, 1107ebf7d1f5SMaciej Fijalkowski old_addr, NULL); 1108ebf7d1f5SMaciej Fijalkowski BUG_ON(ret < 0 && ret != -EINVAL); 1109ebf7d1f5SMaciej Fijalkowski } 1110da765a2fSDaniel Borkmann } 1111da765a2fSDaniel Borkmann } 1112da765a2fSDaniel Borkmann } 1113da765a2fSDaniel Borkmann 1114da765a2fSDaniel Borkmann static void prog_array_map_clear_deferred(struct work_struct *work) 1115da765a2fSDaniel Borkmann { 1116da765a2fSDaniel Borkmann struct bpf_map *map = container_of(work, struct bpf_array_aux, 1117da765a2fSDaniel Borkmann work)->map; 111879d93b3cSHou Tao bpf_fd_array_map_clear(map, true); 1119da765a2fSDaniel Borkmann bpf_map_put(map); 1120da765a2fSDaniel Borkmann } 1121da765a2fSDaniel Borkmann 1122da765a2fSDaniel Borkmann static void prog_array_map_clear(struct bpf_map *map) 1123da765a2fSDaniel Borkmann { 1124da765a2fSDaniel Borkmann struct bpf_array_aux *aux = container_of(map, struct bpf_array, 1125da765a2fSDaniel Borkmann map)->aux; 1126da765a2fSDaniel Borkmann bpf_map_inc(map); 1127da765a2fSDaniel Borkmann schedule_work(&aux->work); 1128da765a2fSDaniel Borkmann } 1129da765a2fSDaniel Borkmann 11302beee5f5SDaniel Borkmann static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr) 11312beee5f5SDaniel Borkmann { 11322beee5f5SDaniel Borkmann struct bpf_array_aux *aux; 11332beee5f5SDaniel Borkmann struct bpf_map *map; 11342beee5f5SDaniel Borkmann 11356d192c79SRoman Gushchin aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT); 11362beee5f5SDaniel Borkmann if (!aux) 11372beee5f5SDaniel Borkmann return ERR_PTR(-ENOMEM); 11382beee5f5SDaniel Borkmann 1139da765a2fSDaniel Borkmann INIT_WORK(&aux->work, prog_array_map_clear_deferred); 1140da765a2fSDaniel Borkmann INIT_LIST_HEAD(&aux->poke_progs); 1141da765a2fSDaniel Borkmann mutex_init(&aux->poke_mutex); 1142da765a2fSDaniel Borkmann 11432beee5f5SDaniel Borkmann map = array_map_alloc(attr); 11442beee5f5SDaniel Borkmann if (IS_ERR(map)) { 11452beee5f5SDaniel Borkmann kfree(aux); 11462beee5f5SDaniel Borkmann return map; 11472beee5f5SDaniel Borkmann } 11482beee5f5SDaniel Borkmann 11492beee5f5SDaniel Borkmann container_of(map, struct bpf_array, map)->aux = aux; 1150da765a2fSDaniel Borkmann aux->map = map; 1151da765a2fSDaniel Borkmann 11522beee5f5SDaniel Borkmann return map; 11532beee5f5SDaniel Borkmann } 11542beee5f5SDaniel Borkmann 11552beee5f5SDaniel Borkmann static void prog_array_map_free(struct bpf_map *map) 11562beee5f5SDaniel Borkmann { 1157da765a2fSDaniel Borkmann struct prog_poke_elem *elem, *tmp; 11582beee5f5SDaniel Borkmann struct bpf_array_aux *aux; 11592beee5f5SDaniel Borkmann 11602beee5f5SDaniel Borkmann aux = container_of(map, struct bpf_array, map)->aux; 1161da765a2fSDaniel Borkmann list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) { 1162da765a2fSDaniel Borkmann list_del_init(&elem->list); 1163da765a2fSDaniel Borkmann kfree(elem); 1164da765a2fSDaniel Borkmann } 11652beee5f5SDaniel Borkmann kfree(aux); 11662beee5f5SDaniel Borkmann fd_array_map_free(map); 11672beee5f5SDaniel Borkmann } 11682beee5f5SDaniel Borkmann 1169f4d05259SMartin KaFai Lau /* prog_array->aux->{type,jited} is a runtime binding. 1170f4d05259SMartin KaFai Lau * Doing static check alone in the verifier is not enough. 1171f4d05259SMartin KaFai Lau * Thus, prog_array_map cannot be used as an inner_map 1172f4d05259SMartin KaFai Lau * and map_meta_equal is not implemented. 1173f4d05259SMartin KaFai Lau */ 117440077e0cSJohannes Berg const struct bpf_map_ops prog_array_map_ops = { 1175ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check, 11762beee5f5SDaniel Borkmann .map_alloc = prog_array_map_alloc, 11772beee5f5SDaniel Borkmann .map_free = prog_array_map_free, 1178da765a2fSDaniel Borkmann .map_poke_track = prog_array_map_poke_track, 1179da765a2fSDaniel Borkmann .map_poke_untrack = prog_array_map_poke_untrack, 1180da765a2fSDaniel Borkmann .map_poke_run = prog_array_map_poke_run, 118104fd61abSAlexei Starovoitov .map_get_next_key = array_map_get_next_key, 11822a36f0b9SWang Nan .map_lookup_elem = fd_array_map_lookup_elem, 11832a36f0b9SWang Nan .map_delete_elem = fd_array_map_delete_elem, 11842a36f0b9SWang Nan .map_fd_get_ptr = prog_fd_array_get_ptr, 11852a36f0b9SWang Nan .map_fd_put_ptr = prog_fd_array_put_ptr, 118614dc6f04SMartin KaFai Lau .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, 1187da765a2fSDaniel Borkmann .map_release_uref = prog_array_map_clear, 1188a7c19db3SYonghong Song .map_seq_show_elem = prog_array_map_seq_show_elem, 11891746d055SYafang Shao .map_mem_usage = array_map_mem_usage, 1190c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0], 119104fd61abSAlexei Starovoitov }; 119204fd61abSAlexei Starovoitov 11933b1efb19SDaniel Borkmann static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, 11943b1efb19SDaniel Borkmann struct file *map_file) 1195ea317b26SKaixu Xia { 11963b1efb19SDaniel Borkmann struct bpf_event_entry *ee; 11973b1efb19SDaniel Borkmann 1198*dc685409SHou Tao ee = kzalloc(sizeof(*ee), GFP_KERNEL); 11993b1efb19SDaniel Borkmann if (ee) { 12003b1efb19SDaniel Borkmann ee->event = perf_file->private_data; 12013b1efb19SDaniel Borkmann ee->perf_file = perf_file; 12023b1efb19SDaniel Borkmann ee->map_file = map_file; 12033b1efb19SDaniel Borkmann } 12043b1efb19SDaniel Borkmann 12053b1efb19SDaniel Borkmann return ee; 12063b1efb19SDaniel Borkmann } 12073b1efb19SDaniel Borkmann 12083b1efb19SDaniel Borkmann static void __bpf_event_entry_free(struct rcu_head *rcu) 12093b1efb19SDaniel Borkmann { 12103b1efb19SDaniel Borkmann struct bpf_event_entry *ee; 12113b1efb19SDaniel Borkmann 12123b1efb19SDaniel Borkmann ee = container_of(rcu, struct bpf_event_entry, rcu); 12133b1efb19SDaniel Borkmann fput(ee->perf_file); 12143b1efb19SDaniel Borkmann kfree(ee); 12153b1efb19SDaniel Borkmann } 12163b1efb19SDaniel Borkmann 12173b1efb19SDaniel Borkmann static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee) 12183b1efb19SDaniel Borkmann { 12193b1efb19SDaniel Borkmann call_rcu(&ee->rcu, __bpf_event_entry_free); 1220ea317b26SKaixu Xia } 1221ea317b26SKaixu Xia 1222d056a788SDaniel Borkmann static void *perf_event_fd_array_get_ptr(struct bpf_map *map, 1223d056a788SDaniel Borkmann struct file *map_file, int fd) 1224ea317b26SKaixu Xia { 12253b1efb19SDaniel Borkmann struct bpf_event_entry *ee; 12263b1efb19SDaniel Borkmann struct perf_event *event; 12273b1efb19SDaniel Borkmann struct file *perf_file; 1228f91840a3SAlexei Starovoitov u64 value; 1229ea317b26SKaixu Xia 12303b1efb19SDaniel Borkmann perf_file = perf_event_get(fd); 12313b1efb19SDaniel Borkmann if (IS_ERR(perf_file)) 12323b1efb19SDaniel Borkmann return perf_file; 1233e03e7ee3SAlexei Starovoitov 1234f91840a3SAlexei Starovoitov ee = ERR_PTR(-EOPNOTSUPP); 12353b1efb19SDaniel Borkmann event = perf_file->private_data; 123697562633SYonghong Song if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP) 12373b1efb19SDaniel Borkmann goto err_out; 1238ea317b26SKaixu Xia 12393b1efb19SDaniel Borkmann ee = bpf_event_entry_gen(perf_file, map_file); 12403b1efb19SDaniel Borkmann if (ee) 12413b1efb19SDaniel Borkmann return ee; 12423b1efb19SDaniel Borkmann ee = ERR_PTR(-ENOMEM); 12433b1efb19SDaniel Borkmann err_out: 12443b1efb19SDaniel Borkmann fput(perf_file); 12453b1efb19SDaniel Borkmann return ee; 1246ea317b26SKaixu Xia } 1247ea317b26SKaixu Xia 124820c20bd1SHou Tao static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) 1249ea317b26SKaixu Xia { 125020c20bd1SHou Tao /* bpf_perf_event is freed after one RCU grace period */ 12513b1efb19SDaniel Borkmann bpf_event_entry_free_rcu(ptr); 12523b1efb19SDaniel Borkmann } 12533b1efb19SDaniel Borkmann 12543b1efb19SDaniel Borkmann static void perf_event_fd_array_release(struct bpf_map *map, 12553b1efb19SDaniel Borkmann struct file *map_file) 12563b1efb19SDaniel Borkmann { 12573b1efb19SDaniel Borkmann struct bpf_array *array = container_of(map, struct bpf_array, map); 12583b1efb19SDaniel Borkmann struct bpf_event_entry *ee; 12593b1efb19SDaniel Borkmann int i; 12603b1efb19SDaniel Borkmann 1261792cacccSSong Liu if (map->map_flags & BPF_F_PRESERVE_ELEMS) 1262792cacccSSong Liu return; 1263792cacccSSong Liu 12643b1efb19SDaniel Borkmann rcu_read_lock(); 12653b1efb19SDaniel Borkmann for (i = 0; i < array->map.max_entries; i++) { 12663b1efb19SDaniel Borkmann ee = READ_ONCE(array->ptrs[i]); 12673b1efb19SDaniel Borkmann if (ee && ee->map_file == map_file) 126879d93b3cSHou Tao __fd_array_map_delete_elem(map, &i, true); 12693b1efb19SDaniel Borkmann } 12703b1efb19SDaniel Borkmann rcu_read_unlock(); 1271ea317b26SKaixu Xia } 1272ea317b26SKaixu Xia 1273792cacccSSong Liu static void perf_event_fd_array_map_free(struct bpf_map *map) 1274792cacccSSong Liu { 1275792cacccSSong Liu if (map->map_flags & BPF_F_PRESERVE_ELEMS) 127679d93b3cSHou Tao bpf_fd_array_map_clear(map, false); 1277792cacccSSong Liu fd_array_map_free(map); 1278792cacccSSong Liu } 1279792cacccSSong Liu 128040077e0cSJohannes Berg const struct bpf_map_ops perf_event_array_map_ops = { 1281f4d05259SMartin KaFai Lau .map_meta_equal = bpf_map_meta_equal, 1282ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check, 1283ad46061fSJakub Kicinski .map_alloc = array_map_alloc, 1284792cacccSSong Liu .map_free = perf_event_fd_array_map_free, 1285ea317b26SKaixu Xia .map_get_next_key = array_map_get_next_key, 1286ea317b26SKaixu Xia .map_lookup_elem = fd_array_map_lookup_elem, 1287ea317b26SKaixu Xia .map_delete_elem = fd_array_map_delete_elem, 1288ea317b26SKaixu Xia .map_fd_get_ptr = perf_event_fd_array_get_ptr, 1289ea317b26SKaixu Xia .map_fd_put_ptr = perf_event_fd_array_put_ptr, 12903b1efb19SDaniel Borkmann .map_release = perf_event_fd_array_release, 1291e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf, 12921746d055SYafang Shao .map_mem_usage = array_map_mem_usage, 1293c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0], 1294ea317b26SKaixu Xia }; 1295ea317b26SKaixu Xia 129660d20f91SSargun Dhillon #ifdef CONFIG_CGROUPS 12974ed8ec52SMartin KaFai Lau static void *cgroup_fd_array_get_ptr(struct bpf_map *map, 12984ed8ec52SMartin KaFai Lau struct file *map_file /* not used */, 12994ed8ec52SMartin KaFai Lau int fd) 13004ed8ec52SMartin KaFai Lau { 13014ed8ec52SMartin KaFai Lau return cgroup_get_from_fd(fd); 13024ed8ec52SMartin KaFai Lau } 13034ed8ec52SMartin KaFai Lau 130420c20bd1SHou Tao static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) 13054ed8ec52SMartin KaFai Lau { 13064ed8ec52SMartin KaFai Lau /* cgroup_put free cgrp after a rcu grace period */ 13074ed8ec52SMartin KaFai Lau cgroup_put(ptr); 13084ed8ec52SMartin KaFai Lau } 13094ed8ec52SMartin KaFai Lau 13104ed8ec52SMartin KaFai Lau static void cgroup_fd_array_free(struct bpf_map *map) 13114ed8ec52SMartin KaFai Lau { 131279d93b3cSHou Tao bpf_fd_array_map_clear(map, false); 13134ed8ec52SMartin KaFai Lau fd_array_map_free(map); 13144ed8ec52SMartin KaFai Lau } 13154ed8ec52SMartin KaFai Lau 131640077e0cSJohannes Berg const struct bpf_map_ops cgroup_array_map_ops = { 1317f4d05259SMartin KaFai Lau .map_meta_equal = bpf_map_meta_equal, 1318ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check, 1319ad46061fSJakub Kicinski .map_alloc = array_map_alloc, 13204ed8ec52SMartin KaFai Lau .map_free = cgroup_fd_array_free, 13214ed8ec52SMartin KaFai Lau .map_get_next_key = array_map_get_next_key, 13224ed8ec52SMartin KaFai Lau .map_lookup_elem = fd_array_map_lookup_elem, 13234ed8ec52SMartin KaFai Lau .map_delete_elem = fd_array_map_delete_elem, 13244ed8ec52SMartin KaFai Lau .map_fd_get_ptr = cgroup_fd_array_get_ptr, 13254ed8ec52SMartin KaFai Lau .map_fd_put_ptr = cgroup_fd_array_put_ptr, 1326e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf, 13271746d055SYafang Shao .map_mem_usage = array_map_mem_usage, 1328c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0], 13294ed8ec52SMartin KaFai Lau }; 13304ed8ec52SMartin KaFai Lau #endif 133156f668dfSMartin KaFai Lau 133256f668dfSMartin KaFai Lau static struct bpf_map *array_of_map_alloc(union bpf_attr *attr) 133356f668dfSMartin KaFai Lau { 133456f668dfSMartin KaFai Lau struct bpf_map *map, *inner_map_meta; 133556f668dfSMartin KaFai Lau 133656f668dfSMartin KaFai Lau inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); 133756f668dfSMartin KaFai Lau if (IS_ERR(inner_map_meta)) 133856f668dfSMartin KaFai Lau return inner_map_meta; 133956f668dfSMartin KaFai Lau 1340ad46061fSJakub Kicinski map = array_map_alloc(attr); 134156f668dfSMartin KaFai Lau if (IS_ERR(map)) { 134256f668dfSMartin KaFai Lau bpf_map_meta_free(inner_map_meta); 134356f668dfSMartin KaFai Lau return map; 134456f668dfSMartin KaFai Lau } 134556f668dfSMartin KaFai Lau 134656f668dfSMartin KaFai Lau map->inner_map_meta = inner_map_meta; 134756f668dfSMartin KaFai Lau 134856f668dfSMartin KaFai Lau return map; 134956f668dfSMartin KaFai Lau } 135056f668dfSMartin KaFai Lau 135156f668dfSMartin KaFai Lau static void array_of_map_free(struct bpf_map *map) 135256f668dfSMartin KaFai Lau { 135356f668dfSMartin KaFai Lau /* map->inner_map_meta is only accessed by syscall which 135456f668dfSMartin KaFai Lau * is protected by fdget/fdput. 135556f668dfSMartin KaFai Lau */ 135656f668dfSMartin KaFai Lau bpf_map_meta_free(map->inner_map_meta); 135779d93b3cSHou Tao bpf_fd_array_map_clear(map, false); 135856f668dfSMartin KaFai Lau fd_array_map_free(map); 135956f668dfSMartin KaFai Lau } 136056f668dfSMartin KaFai Lau 136156f668dfSMartin KaFai Lau static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) 136256f668dfSMartin KaFai Lau { 136356f668dfSMartin KaFai Lau struct bpf_map **inner_map = array_map_lookup_elem(map, key); 136456f668dfSMartin KaFai Lau 136556f668dfSMartin KaFai Lau if (!inner_map) 136656f668dfSMartin KaFai Lau return NULL; 136756f668dfSMartin KaFai Lau 136856f668dfSMartin KaFai Lau return READ_ONCE(*inner_map); 136956f668dfSMartin KaFai Lau } 137056f668dfSMartin KaFai Lau 13714a8f87e6SDaniel Borkmann static int array_of_map_gen_lookup(struct bpf_map *map, 13727b0c2a05SDaniel Borkmann struct bpf_insn *insn_buf) 13737b0c2a05SDaniel Borkmann { 1374b2157399SAlexei Starovoitov struct bpf_array *array = container_of(map, struct bpf_array, map); 1375d937bc34SAndrii Nakryiko u32 elem_size = array->elem_size; 13767b0c2a05SDaniel Borkmann struct bpf_insn *insn = insn_buf; 13777b0c2a05SDaniel Borkmann const int ret = BPF_REG_0; 13787b0c2a05SDaniel Borkmann const int map_ptr = BPF_REG_1; 13797b0c2a05SDaniel Borkmann const int index = BPF_REG_2; 13807b0c2a05SDaniel Borkmann 13817b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); 13827b0c2a05SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); 13832c78ee89SAlexei Starovoitov if (!map->bypass_spec_v1) { 1384b2157399SAlexei Starovoitov *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6); 1385b2157399SAlexei Starovoitov *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); 1386b2157399SAlexei Starovoitov } else { 13877b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); 1388b2157399SAlexei Starovoitov } 13897b0c2a05SDaniel Borkmann if (is_power_of_2(elem_size)) 13907b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); 13917b0c2a05SDaniel Borkmann else 13927b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size); 13937b0c2a05SDaniel Borkmann *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr); 13947b0c2a05SDaniel Borkmann *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0); 13957b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); 13967b0c2a05SDaniel Borkmann *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); 13977b0c2a05SDaniel Borkmann *insn++ = BPF_MOV64_IMM(ret, 0); 13987b0c2a05SDaniel Borkmann 13997b0c2a05SDaniel Borkmann return insn - insn_buf; 14007b0c2a05SDaniel Borkmann } 14017b0c2a05SDaniel Borkmann 140240077e0cSJohannes Berg const struct bpf_map_ops array_of_maps_map_ops = { 1403ad46061fSJakub Kicinski .map_alloc_check = fd_array_map_alloc_check, 140456f668dfSMartin KaFai Lau .map_alloc = array_of_map_alloc, 140556f668dfSMartin KaFai Lau .map_free = array_of_map_free, 140656f668dfSMartin KaFai Lau .map_get_next_key = array_map_get_next_key, 140756f668dfSMartin KaFai Lau .map_lookup_elem = array_of_map_lookup_elem, 140856f668dfSMartin KaFai Lau .map_delete_elem = fd_array_map_delete_elem, 140956f668dfSMartin KaFai Lau .map_fd_get_ptr = bpf_map_fd_get_ptr, 141056f668dfSMartin KaFai Lau .map_fd_put_ptr = bpf_map_fd_put_ptr, 141114dc6f04SMartin KaFai Lau .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, 14127b0c2a05SDaniel Borkmann .map_gen_lookup = array_of_map_gen_lookup, 14139263dddcSTakshak Chahande .map_lookup_batch = generic_map_lookup_batch, 14149263dddcSTakshak Chahande .map_update_batch = generic_map_update_batch, 1415e8d2bec0SDaniel Borkmann .map_check_btf = map_check_no_btf, 14161746d055SYafang Shao .map_mem_usage = array_map_mem_usage, 1417c317ab71SMenglong Dong .map_btf_id = &array_map_btf_ids[0], 141856f668dfSMartin KaFai Lau }; 1419