// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/btf_ids.h>
#include <linux/buildid.h>
#include "percpu_freelist.h"
#include "mmap_unlock_work.h"

#define STACK_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\
	 BPF_F_STACK_BUILD_ID)

struct stack_map_bucket {
	struct pcpu_freelist_node fnode;
	u32 hash;
	u32 nr;
	u64 data[];
};

struct bpf_stack_map {
	struct bpf_map map;
	void *elems;
	struct pcpu_freelist freelist;
	u32 n_buckets;
	struct stack_map_bucket *buckets[] __counted_by(n_buckets);
};

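/* Helpers to distinguish plain-IP stack maps from BPF_F_STACK_BUILD_ID maps:
 * the latter store one struct bpf_stack_build_id per frame instead of a raw
 * u64 instruction pointer, so the per-entry size differs.
 */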
static inline bool stack_map_use_build_id(struct bpf_map *map)
{
	return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

static inline int stack_map_data_size(struct bpf_map *map)
{
	return stack_map_use_build_id(map) ?
		sizeof(struct bpf_stack_build_id) : sizeof(u64);
}

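/* Pre-allocate all buckets up front and thread them onto a per-CPU freelist,
 * so that __bpf_get_stackid() can pop a bucket from any context without
 * allocating memory.
 */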
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
	u64 elem_size = sizeof(struct stack_map_bucket) +
			(u64)smap->map.value_size;
	int err;

	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
					 smap->map.numa_node);
	if (!smap->elems)
		return -ENOMEM;

	err = pcpu_freelist_init(&smap->freelist);
	if (err)
		goto free_elems;

	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
			       smap->map.max_entries);
	return 0;

free_elems:
	bpf_map_area_free(smap->elems);
	return err;
}

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_stack_map *smap;
	u64 cost, n_buckets;
	int err;

	if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    value_size < 8 || value_size % 8)
		return ERR_PTR(-EINVAL);

	BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
	if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
		if (value_size % sizeof(struct bpf_stack_build_id) ||
		    value_size / sizeof(struct bpf_stack_build_id)
		    > sysctl_perf_event_max_stack)
			return ERR_PTR(-EINVAL);
	} else if (value_size / 8 > sysctl_perf_event_max_stack)
		return ERR_PTR(-EINVAL);

	/* hash table size must be power of 2; roundup_pow_of_two() can overflow
	 * into UB on 32-bit arches, so check that first
	 */
	if (attr->max_entries > 1UL << 31)
		return ERR_PTR(-E2BIG);

	n_buckets = roundup_pow_of_two(attr->max_entries);

	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
	if (!smap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&smap->map, attr);
	smap->n_buckets = n_buckets;

	err = get_callchain_buffers(sysctl_perf_event_max_stack);
	if (err)
		goto free_smap;

	err = prealloc_elems_and_freelist(smap);
	if (err)
		goto put_buffers;

	return &smap->map;

put_buffers:
	put_callchain_buffers();
free_smap:
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}

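/* Parse the build ID of the file backing @vma. In the sleepable (may_fault)
 * case the parser is allowed to take page faults; otherwise only pages that
 * are already present can be used.
 */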
static int fetch_build_id(struct vm_area_struct *vma, unsigned char *build_id, bool may_fault)
{
	return may_fault ? build_id_parse(vma, build_id, NULL)
			 : build_id_parse_nofault(vma, build_id, NULL);
}

/*
 * Expects all id_offs[i].ip values to be set to correct initial IPs.
 * They will be subsequently:
 * - either adjusted in place to a file offset, if build ID fetching
 *   succeeds; in this case id_offs[i].build_id is set to correct build ID,
 *   and id_offs[i].status is set to BPF_STACK_BUILD_ID_VALID;
 * - or IP will be kept intact, if build ID fetching failed; in this case
 *   id_offs[i].build_id is zeroed out and id_offs[i].status is set to
 *   BPF_STACK_BUILD_ID_IP.
 */
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
					  u32 trace_nr, bool user, bool may_fault)
{
	int i;
	struct mmap_unlock_irq_work *work = NULL;
	bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
	struct vm_area_struct *vma, *prev_vma = NULL;
	const char *prev_build_id;

	/* If the irq_work is in use, fall back to report ips. Same
	 * fallback is used for kernel stack (!user) on a stackmap with
	 * build_id.
	 */
	if (!user || !current || !current->mm || irq_work_busy ||
	    !mmap_read_trylock(current->mm)) {
		/* cannot access current->mm, fall back to ips */
		for (i = 0; i < trace_nr; i++) {
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
		}
		return;
	}

	for (i = 0; i < trace_nr; i++) {
		u64 ip = READ_ONCE(id_offs[i].ip);

		if (range_in_vma(prev_vma, ip, ip)) {
			vma = prev_vma;
			memcpy(id_offs[i].build_id, prev_build_id, BUILD_ID_SIZE_MAX);
			goto build_id_valid;
		}
		vma = find_vma(current->mm, ip);
		if (!vma || fetch_build_id(vma, id_offs[i].build_id, may_fault)) {
			/* per entry fall back to ips */
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
			continue;
		}
build_id_valid:
		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ip - vma->vm_start;
		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
		prev_vma = vma;
		prev_build_id = id_offs[i].build_id;
	}
	bpf_mmap_unlock_mm(work, current->mm);
}

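/* Capture a kernel stack trace for @task into a perf callchain entry.
 * Returns NULL if no callchain entry is available or CONFIG_STACKTRACE
 * is not enabled.
 */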
static struct perf_callchain_entry *
get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
{
#ifdef CONFIG_STACKTRACE
	struct perf_callchain_entry *entry;
	int rctx;

	entry = get_callchain_entry(&rctx);

	if (!entry)
		return NULL;

	entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip,
					 max_depth, 0);

	/* stack_trace_save_tsk() works on unsigned long array, while
	 * perf_callchain_entry uses u64 array. For 32-bit systems, it is
	 * necessary to fix this mismatch.
	 */
	if (__BITS_PER_LONG != 64) {
		unsigned long *from = (unsigned long *) entry->ip;
		u64 *to = entry->ip;
		int i;

		/* copy data from the end to avoid using extra buffer */
		for (i = entry->nr - 1; i >= 0; i--)
			to[i] = (u64)(from[i]);
	}

	put_callchain_entry(rctx);

	return entry;
#else /* CONFIG_STACKTRACE */
	return NULL;
#endif
}

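/* Hash the trace (after skipping @skip frames) into a bucket id. Depending
 * on flags, either reuse an existing matching bucket, refuse to overwrite a
 * colliding one (-EEXIST), or install a freshly popped bucket holding raw
 * IPs or build_id+offset records.
 */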
static long __bpf_get_stackid(struct bpf_map *map,
			      struct perf_callchain_entry *trace, u64 flags)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 hash, id, trace_nr, trace_len, i;
	bool user = flags & BPF_F_USER_STACK;
	u64 *ips;
	bool hash_matches;

	if (trace->nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr = trace->nr - skip;
	trace_len = trace_nr * sizeof(u64);
	ips = trace->ip + skip;
	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
	id = hash & (smap->n_buckets - 1);
	bucket = READ_ONCE(smap->buckets[id]);

	hash_matches = bucket && bucket->hash == hash;
	/* fast cmp */
	if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
		return id;

	if (stack_map_use_build_id(map)) {
		struct bpf_stack_build_id *id_offs;

		/* for build_id+offset, pop a bucket before slow cmp */
		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		new_bucket->nr = trace_nr;
		id_offs = (struct bpf_stack_build_id *)new_bucket->data;
		for (i = 0; i < trace_nr; i++)
			id_offs[i].ip = ips[i];
		stack_map_get_build_id_offset(id_offs, trace_nr, user, false /* !may_fault */);
		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return id;
		}
		if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return -EEXIST;
		}
	} else {
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, ips, trace_len) == 0)
			return id;
		if (bucket && !(flags & BPF_F_REUSE_STACKID))
			return -EEXIST;

		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		memcpy(new_bucket->data, ips, trace_len);
	}

	new_bucket->hash = hash;
	new_bucket->nr = trace_nr;

	old_bucket = xchg(&smap->buckets[id], new_bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return id;
}

BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags)
{
	u32 max_depth = map->value_size / stack_map_data_size(map);
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	bool user = flags & BPF_F_USER_STACK;
	struct perf_callchain_entry *trace;
	bool kernel = !user;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	max_depth += skip;
	if (max_depth > sysctl_perf_event_max_stack)
		max_depth = sysctl_perf_event_max_stack;

	trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
				   false, false);

	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	return __bpf_get_stackid(map, trace, flags);
}

const struct bpf_func_proto bpf_get_stackid_proto = {
	.func		= bpf_get_stackid,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

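/* Number of leading kernel frames in @trace, i.e. frames before the
 * PERF_CONTEXT_USER marker.
 */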
static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
{
	__u64 nr_kernel = 0;

	while (nr_kernel < trace->nr) {
		if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
			break;
		nr_kernel++;
	}
	return nr_kernel;
}

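/* Variant of bpf_get_stackid() for perf_event programs: reuse the callchain
 * already collected in the sample, trimming it to its kernel or user portion
 * based on BPF_F_USER_STACK.
 */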
BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_map *, map, u64, flags)
{
	struct perf_event *event = ctx->event;
	struct perf_callchain_entry *trace;
	bool kernel, user;
	__u64 nr_kernel;
	int ret;

	/* perf_sample_data doesn't have callchain, use bpf_get_stackid */
	if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
		return bpf_get_stackid((unsigned long)(ctx->regs),
				       (unsigned long) map, flags, 0, 0);

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	user = flags & BPF_F_USER_STACK;
	kernel = !user;

	trace = ctx->data->callchain;
	if (unlikely(!trace))
		return -EFAULT;

	nr_kernel = count_kernel_ip(trace);

	if (kernel) {
		__u64 nr = trace->nr;

		trace->nr = nr_kernel;
		ret = __bpf_get_stackid(map, trace, flags);

		/* restore nr */
		trace->nr = nr;
	} else { /* user */
		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

		skip += nr_kernel;
		if (skip > BPF_F_SKIP_FIELD_MASK)
			return -EFAULT;

		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
		ret = __bpf_get_stackid(map, trace, flags);
	}
	return ret;
}

const struct bpf_func_proto bpf_get_stackid_proto_pe = {
	.func		= bpf_get_stackid_pe,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

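/* Common implementation for the bpf_get_stack*() helpers: collect (or reuse)
 * a callchain, copy up to @size bytes of IPs or build_id+offset records into
 * @buf, zero the remainder, and return the number of bytes copied.
 */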
static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
			    struct perf_callchain_entry *trace_in,
			    void *buf, u32 size, u64 flags, bool may_fault)
{
	u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
	bool crosstask = task && task != current;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	bool user = flags & BPF_F_USER_STACK;
	struct perf_callchain_entry *trace;
	bool kernel = !user;
	int err = -EINVAL;
	u64 *ips;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_USER_BUILD_ID)))
		goto clear;
	if (kernel && user_build_id)
		goto clear;

	elem_size = user_build_id ? sizeof(struct bpf_stack_build_id) : sizeof(u64);
	if (unlikely(size % elem_size))
		goto clear;

	/* cannot get valid user stack for task without user_mode regs */
	if (task && user && !user_mode(regs))
		goto err_fault;

	/* get_perf_callchain does not support crosstask user stack walking
	 * but returns an empty stack instead of NULL.
	 */
	if (crosstask && user) {
		err = -EOPNOTSUPP;
		goto clear;
	}

	num_elem = size / elem_size;
	max_depth = num_elem + skip;
	if (sysctl_perf_event_max_stack < max_depth)
		max_depth = sysctl_perf_event_max_stack;

	if (may_fault)
		rcu_read_lock(); /* need RCU for perf's callchain below */

	if (trace_in)
		trace = trace_in;
	else if (kernel && task)
		trace = get_callchain_entry_for_task(task, max_depth);
	else
		trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
					   crosstask, false);

	if (unlikely(!trace) || trace->nr < skip) {
		if (may_fault)
			rcu_read_unlock();
		goto err_fault;
	}

	trace_nr = trace->nr - skip;
	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
	copy_len = trace_nr * elem_size;

	ips = trace->ip + skip;
	if (user_build_id) {
		struct bpf_stack_build_id *id_offs = buf;
		u32 i;

		for (i = 0; i < trace_nr; i++)
			id_offs[i].ip = ips[i];
	} else {
		memcpy(buf, ips, copy_len);
	}

	/* trace/ips should not be dereferenced after this point */
	if (may_fault)
		rcu_read_unlock();

	if (user_build_id)
		stack_map_get_build_id_offset(buf, trace_nr, user, may_fault);

	if (size > copy_len)
		memset(buf + copy_len, 0, size - copy_len);
	return copy_len;

err_fault:
	err = -EFAULT;
clear:
	memset(buf, 0, size);
	return err;
}

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
	   u64, flags)
{
	return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, false /* !may_fault */);
}

const struct bpf_func_proto bpf_get_stack_proto = {
	.func		= bpf_get_stack,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

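/* Sleepable variant: build ID parsing may fault in file pages, so the
 * callchain access inside __bpf_get_stack() is wrapped in an explicit
 * RCU read-side section instead.
 */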
BPF_CALL_4(bpf_get_stack_sleepable, struct pt_regs *, regs, void *, buf, u32, size,
	   u64, flags)
{
	return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, true /* may_fault */);
}

const struct bpf_func_proto bpf_get_stack_sleepable_proto = {
	.func		= bpf_get_stack_sleepable,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

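/* Fetch a stack trace for an arbitrary task: pin the task's stack, derive
 * pt_regs from it, and defer to __bpf_get_stack().
 */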
static long __bpf_get_task_stack(struct task_struct *task, void *buf, u32 size,
				 u64 flags, bool may_fault)
{
	struct pt_regs *regs;
	long res = -EINVAL;

	if (!try_get_task_stack(task))
		return -EFAULT;

	regs = task_pt_regs(task);
	if (regs)
		res = __bpf_get_stack(regs, task, NULL, buf, size, flags, may_fault);
	put_task_stack(task);

	return res;
}

BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
	   u32, size, u64, flags)
{
	return __bpf_get_task_stack(task, buf, size, flags, false /* !may_fault */);
}

const struct bpf_func_proto bpf_get_task_stack_proto = {
	.func		= bpf_get_task_stack,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_task_stack_sleepable, struct task_struct *, task, void *, buf,
	   u32, size, u64, flags)
{
	return __bpf_get_task_stack(task, buf, size, flags, true /* may_fault */);
}

const struct bpf_func_proto bpf_get_task_stack_sleepable_proto = {
	.func		= bpf_get_task_stack_sleepable,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

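/* bpf_get_stack() for perf_event programs: prefer the callchain already
 * captured in the sample, falling back to a fresh unwind when the event
 * does not sample callchains.
 */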
BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = (struct pt_regs *)(ctx->regs);
	struct perf_event *event = ctx->event;
	struct perf_callchain_entry *trace;
	bool kernel, user;
	int err = -EINVAL;
	__u64 nr_kernel;

	if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
		return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, false /* !may_fault */);

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_USER_BUILD_ID)))
		goto clear;

	user = flags & BPF_F_USER_STACK;
	kernel = !user;

	err = -EFAULT;
	trace = ctx->data->callchain;
	if (unlikely(!trace))
		goto clear;

	nr_kernel = count_kernel_ip(trace);

	if (kernel) {
		__u64 nr = trace->nr;

		trace->nr = nr_kernel;
		err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */);

		/* restore nr */
		trace->nr = nr;
	} else { /* user */
		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

		skip += nr_kernel;
		if (skip > BPF_F_SKIP_FIELD_MASK)
			goto clear;

		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
		err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */);
	}
	return err;

clear:
	memset(buf, 0, size);
	return err;
}

const struct bpf_func_proto bpf_get_stack_proto_pe = {
	.func		= bpf_get_stack_pe,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket, *old_bucket;
	u32 id = *(u32 *)key, trace_len;

	if (unlikely(id >= smap->n_buckets))
		return -ENOENT;

	bucket = xchg(&smap->buckets[id], NULL);
	if (!bucket)
		return -ENOENT;

	trace_len = bucket->nr * stack_map_data_size(map);
	memcpy(value, bucket->data, trace_len);
	memset(value + trace_len, 0, map->value_size - trace_len);

	old_bucket = xchg(&smap->buckets[id], bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return 0;
}

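/* Called from syscall; advance to the next populated bucket id, or return
 * -ENOENT once all buckets past @key are empty.
 */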
static int stack_map_get_next_key(struct bpf_map *map, void *key,
				  void *next_key)
{
	struct bpf_stack_map *smap = container_of(map,
						  struct bpf_stack_map, map);
	u32 id;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!key) {
		id = 0;
	} else {
		id = *(u32 *)key;
		if (id >= smap->n_buckets || !smap->buckets[id])
			id = 0;
		else
			id++;
	}

	while (id < smap->n_buckets && !smap->buckets[id])
		id++;

	if (id >= smap->n_buckets)
		return -ENOENT;

	*(u32 *)next_key = id;
	return 0;
}

static long stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				  u64 map_flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static long stack_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *old_bucket;
	u32 id = *(u32 *)key;

	if (unlikely(id >= smap->n_buckets))
		return -E2BIG;

	old_bucket = xchg(&smap->buckets[id], NULL);
	if (old_bucket) {
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

	bpf_map_area_free(smap->elems);
	pcpu_freelist_destroy(&smap->freelist);
	bpf_map_area_free(smap);
	put_callchain_buffers();
}

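/* Rough memory footprint: the map struct, the bucket pointer array, and the
 * preallocated elements (bucket header plus value) for every possible entry.
 */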
static u64 stack_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	u64 value_size = map->value_size;
	u64 n_buckets = smap->n_buckets;
	u64 entries = map->max_entries;
	u64 usage = sizeof(*smap);

	usage += n_buckets * sizeof(struct stack_map_bucket *);
	usage += entries * (sizeof(struct stack_map_bucket) + value_size);
	return usage;
}

BTF_ID_LIST_SINGLE(stack_trace_map_btf_ids, struct, bpf_stack_map)
const struct bpf_map_ops stack_trace_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = stack_map_alloc,
	.map_free = stack_map_free,
	.map_get_next_key = stack_map_get_next_key,
	.map_lookup_elem = stack_map_lookup_elem,
	.map_update_elem = stack_map_update_elem,
	.map_delete_elem = stack_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = stack_map_mem_usage,
	.map_btf_id = &stack_trace_map_btf_ids[0],
};