// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/elf.h>
#include <linux/pagemap.h>
#include <linux/irq_work.h>
#include "percpu_freelist.h"

#define STACK_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\
	 BPF_F_STACK_BUILD_ID)

struct stack_map_bucket {
	struct pcpu_freelist_node fnode;
	u32 hash;
	u32 nr;
	u64 data[];
};

struct bpf_stack_map {
	struct bpf_map map;
	void *elems;
	struct pcpu_freelist freelist;
	u32 n_buckets;
	struct stack_map_bucket *buckets[];
};

/* irq_work to run up_read() for build_id lookup in nmi context */
struct stack_map_irq_work {
	struct irq_work irq_work;
	struct rw_semaphore *sem;
};

static void do_up_read(struct irq_work *entry)
{
	struct stack_map_irq_work *work;

	work = container_of(entry, struct stack_map_irq_work, irq_work);
	up_read_non_owner(work->sem);
	work->sem = NULL;
}

static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);

static inline bool stack_map_use_build_id(struct bpf_map *map)
{
	return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

static inline int stack_map_data_size(struct bpf_map *map)
{
	return stack_map_use_build_id(map) ?
		sizeof(struct bpf_stack_build_id) : sizeof(u64);
}
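/*
 * All buckets are preallocated up front and recycled through a per-cpu
 * freelist, so that bpf_get_stackid() can grab a bucket from NMI/tracing
 * context without ever calling into the allocator.
 */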
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
	int err;

	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
					 smap->map.numa_node);
	if (!smap->elems)
		return -ENOMEM;

	err = pcpu_freelist_init(&smap->freelist);
	if (err)
		goto free_elems;

	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
			       smap->map.max_entries);
	return 0;

free_elems:
	bpf_map_area_free(smap->elems);
	return err;
}
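/*
 * A map value is either an array of u64 instruction pointers or, with
 * BPF_F_STACK_BUILD_ID, an array of struct bpf_stack_build_id, so
 * value_size bounds the stack depth a bucket can hold:
 *
 *	max_depth = value_size / stack_map_data_size(map)
 *
 * which the sanity checks below cap at sysctl_perf_event_max_stack.
 */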
/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_stack_map *smap;
	struct bpf_map_memory mem;
	u64 cost, n_buckets;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    value_size < 8 || value_size % 8)
		return ERR_PTR(-EINVAL);

	BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
	if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
		if (value_size % sizeof(struct bpf_stack_build_id) ||
		    value_size / sizeof(struct bpf_stack_build_id)
		    > sysctl_perf_event_max_stack)
			return ERR_PTR(-EINVAL);
	} else if (value_size / 8 > sysctl_perf_event_max_stack)
		return ERR_PTR(-EINVAL);

	/* hash table size must be power of 2 */
	n_buckets = roundup_pow_of_two(attr->max_entries);

	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
	err = bpf_map_charge_init(&mem, cost);
	if (err)
		return ERR_PTR(err);

	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
	if (!smap) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&smap->map, attr);
	smap->map.value_size = value_size;
	smap->n_buckets = n_buckets;

	err = get_callchain_buffers(sysctl_perf_event_max_stack);
	if (err)
		goto free_charge;

	err = prealloc_elems_and_freelist(smap);
	if (err)
		goto put_buffers;

	bpf_map_charge_move(&smap->map.memory, &mem);

	return &smap->map;

put_buffers:
	put_callchain_buffers();
free_charge:
	bpf_map_charge_finish(&mem);
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}

#define BPF_BUILD_ID 3
/*
 * Parse build id from the note segment. This logic can be shared between
 * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
 * identical.
 */
static inline int stack_map_parse_build_id(void *page_addr,
					   unsigned char *build_id,
					   void *note_start,
					   Elf32_Word note_size)
{
	Elf32_Word note_offs = 0, new_offs;

	/* check for overflow */
	if (note_start < page_addr || note_start + note_size < note_start)
		return -EINVAL;

	/* only supports note that fits in the first page */
	if (note_start + note_size > page_addr + PAGE_SIZE)
		return -EINVAL;

	while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
		Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);

		if (nhdr->n_type == BPF_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU") &&
		    nhdr->n_descsz > 0 &&
		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
			memcpy(build_id,
			       note_start + note_offs +
			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
			       nhdr->n_descsz);
			memset(build_id + nhdr->n_descsz, 0,
			       BPF_BUILD_ID_SIZE - nhdr->n_descsz);
			return 0;
		}
		new_offs = note_offs + sizeof(Elf32_Nhdr) +
			ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
		if (new_offs <= note_offs)	/* overflow */
			break;
		note_offs = new_offs;
	}
	return -EINVAL;
}
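/*
 * The note matched above is the .note.gnu.build-id entry that toolchains
 * emit with --build-id (n_type 3 == NT_GNU_BUILD_ID). A typical SHA-1
 * build id is laid out as:
 *
 *	Elf32_Nhdr { .n_namesz = 4, .n_descsz = 20, .n_type = 3 }
 *	"GNU\0"			name, padded to a 4-byte boundary
 *	20-byte descriptor	the build id itself
 */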
/* Parse build ID from 32-bit ELF */
static int stack_map_get_build_id_32(void *page_addr,
				     unsigned char *build_id)
{
	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
	Elf32_Phdr *phdr;
	int i;

	/* only supports phdr that fits in one page */
	if (ehdr->e_phnum >
	    (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
		return -EINVAL;

	phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));

	for (i = 0; i < ehdr->e_phnum; ++i)
		if (phdr[i].p_type == PT_NOTE)
			return stack_map_parse_build_id(page_addr, build_id,
					page_addr + phdr[i].p_offset,
					phdr[i].p_filesz);
	return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int stack_map_get_build_id_64(void *page_addr,
				     unsigned char *build_id)
{
	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
	Elf64_Phdr *phdr;
	int i;

	/* only supports phdr that fits in one page */
	if (ehdr->e_phnum >
	    (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
		return -EINVAL;

	phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));

	for (i = 0; i < ehdr->e_phnum; ++i)
		if (phdr[i].p_type == PT_NOTE)
			return stack_map_parse_build_id(page_addr, build_id,
					page_addr + phdr[i].p_offset,
					phdr[i].p_filesz);
	return -EINVAL;
}
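/*
 * The helper below looks only at the first page of the mapped file:
 * find_get_page() will not fault anything in, since this can run from
 * contexts where taking a page fault is not allowed. The ELF header,
 * program headers and the note segment must therefore all sit in page 0,
 * which holds for typical binaries that place .note.gnu.build-id right
 * after the program headers.
 */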
/* Parse build ID of ELF file mapped to vma */
static int stack_map_get_build_id(struct vm_area_struct *vma,
				  unsigned char *build_id)
{
	Elf32_Ehdr *ehdr;
	struct page *page;
	void *page_addr;
	int ret;

	/* only works for page backed storage */
	if (!vma->vm_file)
		return -EINVAL;

	page = find_get_page(vma->vm_file->f_mapping, 0);
	if (!page)
		return -EFAULT;	/* page not mapped */

	ret = -EINVAL;
	page_addr = kmap_atomic(page);
	ehdr = (Elf32_Ehdr *)page_addr;

	/* compare magic "\x7fELF" */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* only support executable file and shared object file */
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
		goto out;

	if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
		ret = stack_map_get_build_id_32(page_addr, build_id);
	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
		ret = stack_map_get_build_id_64(page_addr, build_id);
out:
	kunmap_atomic(page_addr);
	put_page(page);
	return ret;
}
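/*
 * Resolve each ip in ips[] to a (build_id, file offset) pair. This needs
 * current->mm->mmap_sem; in NMI context the semaphore can be taken with
 * down_read_trylock() but not released, so the up_read() is deferred to
 * the per-cpu irq_work declared at the top of this file.
 */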
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
					  u64 *ips, u32 trace_nr, bool user)
{
	int i;
	struct vm_area_struct *vma;
	bool irq_work_busy = false;
	struct stack_map_irq_work *work = NULL;

	if (in_nmi()) {
		work = this_cpu_ptr(&up_read_work);
		if (work->irq_work.flags & IRQ_WORK_BUSY)
			/* cannot queue more up_read, fallback */
			irq_work_busy = true;
	}

	/*
	 * We cannot do up_read() in nmi context. To do build_id lookup
	 * in nmi context, we need to run up_read() in irq_work. We use
	 * a percpu variable to do the irq_work. If the irq_work is
	 * already used by another lookup, we fall back to report ips.
	 *
	 * Same fallback is used for kernel stack (!user) on a stackmap
	 * with build_id.
	 */
	if (!user || !current || !current->mm || irq_work_busy ||
	    down_read_trylock(&current->mm->mmap_sem) == 0) {
		/* cannot access current->mm, fall back to ips */
		for (i = 0; i < trace_nr; i++) {
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
		}
		return;
	}

	for (i = 0; i < trace_nr; i++) {
		vma = find_vma(current->mm, ips[i]);
		if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
			/* per entry fall back to ips */
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
			continue;
		}
		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
			- vma->vm_start;
		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
	}

	if (!work) {
		up_read(&current->mm->mmap_sem);
	} else {
		work->sem = &current->mm->mmap_sem;
		irq_work_queue(&work->irq_work);
		/*
		 * The irq_work will release the mmap_sem with
		 * up_read_non_owner(). The rwsem_release() is called
		 * here to release the lock from lockdep's perspective.
		 */
		rwsem_release(&current->mm->mmap_sem.dep_map, _RET_IP_);
	}
}
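/*
 * Illustrative caller on the BPF program side (a sketch, not part of this
 * file; the map name, section name and depth are assumptions of the
 * example):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_STACK_TRACE);
 *		__uint(max_entries, 1024);
 *		__uint(key_size, sizeof(u32));
 *		__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
 *	} stackmap SEC(".maps");
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		long id = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
 *
 *		(id is the bucket index on success, -EEXIST/-EFAULT/... on error)
 *		return 0;
 *	}
 */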
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct perf_callchain_entry *trace;
	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
	u32 max_depth = map->value_size / stack_map_data_size(map);
	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 hash, id, trace_nr, trace_len;
	bool user = flags & BPF_F_USER_STACK;
	bool kernel = !user;
	u64 *ips;
	bool hash_matches;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	trace = get_perf_callchain(regs, init_nr, kernel, user,
				   sysctl_perf_event_max_stack, false, false);

	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	/* get_perf_callchain() guarantees that trace->nr >= init_nr
	 * and trace->nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
	 */
	trace_nr = trace->nr - init_nr;

	if (trace_nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr -= skip;
	trace_len = trace_nr * sizeof(u64);
	ips = trace->ip + skip + init_nr;
	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
	id = hash & (smap->n_buckets - 1);
	bucket = READ_ONCE(smap->buckets[id]);

	hash_matches = bucket && bucket->hash == hash;
	/* fast cmp */
	if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
		return id;

	if (stack_map_use_build_id(map)) {
		/* for build_id+offset, pop a bucket before slow cmp */
		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		new_bucket->nr = trace_nr;
		stack_map_get_build_id_offset(
			(struct bpf_stack_build_id *)new_bucket->data,
			ips, trace_nr, user);
		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return id;
		}
		if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return -EEXIST;
		}
	} else {
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, ips, trace_len) == 0)
			return id;
		if (bucket && !(flags & BPF_F_REUSE_STACKID))
			return -EEXIST;

		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		memcpy(new_bucket->data, ips, trace_len);
	}

	new_bucket->hash = hash;
	new_bucket->nr = trace_nr;

	old_bucket = xchg(&smap->buckets[id], new_bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return id;
}

const struct bpf_func_proto bpf_get_stackid_proto = {
	.func		= bpf_get_stackid,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
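/*
 * Unlike bpf_get_stackid() above, bpf_get_stack() does not store the
 * trace in a map bucket; it copies the raw trace (or build_id+offset
 * records, with BPF_F_USER_BUILD_ID) straight into a caller-supplied
 * buffer and returns the number of bytes written.
 */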
BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
	   u64, flags)
{
	u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	bool user = flags & BPF_F_USER_STACK;
	struct perf_callchain_entry *trace;
	bool kernel = !user;
	int err = -EINVAL;
	u64 *ips;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_USER_BUILD_ID)))
		goto clear;
	if (kernel && user_build_id)
		goto clear;

	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
					    : sizeof(u64);
	if (unlikely(size % elem_size))
		goto clear;

	num_elem = size / elem_size;
	if (sysctl_perf_event_max_stack < num_elem)
		init_nr = 0;
	else
		init_nr = sysctl_perf_event_max_stack - num_elem;
	trace = get_perf_callchain(regs, init_nr, kernel, user,
				   sysctl_perf_event_max_stack, false, false);
	if (unlikely(!trace))
		goto err_fault;

	trace_nr = trace->nr - init_nr;
	if (trace_nr < skip)
		goto err_fault;

	trace_nr -= skip;
	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
	copy_len = trace_nr * elem_size;
	ips = trace->ip + skip + init_nr;
	if (user && user_build_id)
		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
	else
		memcpy(buf, ips, copy_len);

	if (size > copy_len)
		memset(buf + copy_len, 0, size - copy_len);
	return copy_len;

err_fault:
	err = -EFAULT;
clear:
	memset(buf, 0, size);
	return err;
}

const struct bpf_func_proto bpf_get_stack_proto = {
	.func		= bpf_get_stack,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
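/*
 * Direct lookup from a program is unsupported: reading a bucket while
 * bpf_get_stackid() may concurrently recycle it through the freelist
 * would be racy. The syscall path goes through bpf_stackmap_copy()
 * below, which briefly takes the bucket out of the table (xchg with
 * NULL) while copying it.
 */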
/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket, *old_bucket;
	u32 id = *(u32 *)key, trace_len;

	if (unlikely(id >= smap->n_buckets))
		return -ENOENT;

	bucket = xchg(&smap->buckets[id], NULL);
	if (!bucket)
		return -ENOENT;

	trace_len = bucket->nr * stack_map_data_size(map);
	memcpy(value, bucket->data, trace_len);
	memset(value + trace_len, 0, map->value_size - trace_len);

	old_bucket = xchg(&smap->buckets[id], bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return 0;
}

static int stack_map_get_next_key(struct bpf_map *map, void *key,
				  void *next_key)
{
	struct bpf_stack_map *smap = container_of(map,
						  struct bpf_stack_map, map);
	u32 id;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!key) {
		id = 0;
	} else {
		id = *(u32 *)key;
		if (id >= smap->n_buckets || !smap->buckets[id])
			id = 0;
		else
			id++;
	}

	while (id < smap->n_buckets && !smap->buckets[id])
		id++;

	if (id >= smap->n_buckets)
		return -ENOENT;

	*(u32 *)next_key = id;
	return 0;
}
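/*
 * User-space walk of the map, for reference (a sketch assuming libbpf;
 * consume() and MAX_DEPTH are placeholders of the example):
 *
 *	__u32 cur, next;
 *	__u64 frames[MAX_DEPTH];
 *	void *prev = NULL;
 *
 *	while (!bpf_map_get_next_key(fd, prev, &next)) {
 *		if (!bpf_map_lookup_elem(fd, &next, frames))
 *			consume(frames);
 *		cur = next;
 *		prev = &cur;
 *	}
 */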
static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *old_bucket;
	u32 id = *(u32 *)key;

	if (unlikely(id >= smap->n_buckets))
		return -E2BIG;

	old_bucket = xchg(&smap->buckets[id], NULL);
	if (old_bucket) {
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

	/* wait for bpf programs to complete before freeing stack map */
	synchronize_rcu();

	bpf_map_area_free(smap->elems);
	pcpu_freelist_destroy(&smap->freelist);
	bpf_map_area_free(smap);
	put_callchain_buffers();
}

const struct bpf_map_ops stack_trace_map_ops = {
	.map_alloc = stack_map_alloc,
	.map_free = stack_map_free,
	.map_get_next_key = stack_map_get_next_key,
	.map_lookup_elem = stack_map_lookup_elem,
	.map_update_elem = stack_map_update_elem,
	.map_delete_elem = stack_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static int __init stack_map_init(void)
{
	int cpu;
	struct stack_map_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&up_read_work, cpu);
		init_irq_work(&work->irq_work, do_up_read);
	}
	return 0;
}
subsys_initcall(stack_map_init);