xref: /linux-6.15/kernel/bpf/stackmap.c (revision 3f2fb9a8)
/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/vmalloc.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>

struct stack_map_bucket {
	struct rcu_head rcu;
	u32 hash;
	u32 nr;
	u64 ip[];
};

struct bpf_stack_map {
	struct bpf_map map;
	u32 n_buckets;
	struct stack_map_bucket __rcu *buckets[];
};
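
/*
 * Layout sketch (illustration added here, not part of the original source):
 * a stack map is a power-of-two array of bucket pointers hanging off
 * struct bpf_stack_map.  Buckets are allocated on demand in
 * bpf_get_stackid() and hold up to value_size bytes of saved ips:
 *
 *	smap
 *	  .map, .n_buckets
 *	  .buckets[0] -> { rcu, hash, nr, ip[0..nr-1], zero padding }
 *	  .buckets[1] -> NULL	(no trace hashed here yet)
 *	  ...
 *	  .buckets[n_buckets - 1]
 *
 * Each populated bucket occupies sizeof(struct stack_map_bucket) +
 * value_size bytes, which is what the second cost calculation in
 * stack_map_alloc() charges per bucket.
 */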

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_stack_map *smap;
	u64 cost, n_buckets;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    value_size < 8 || value_size % 8 ||
	    value_size / 8 > PERF_MAX_STACK_DEPTH)
		return ERR_PTR(-EINVAL);

	/* hash table size must be power of 2 */
	n_buckets = roundup_pow_of_two(attr->max_entries);

	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	/* try kmalloc first, fall back to vmalloc for large bucket arrays */
	smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
	if (!smap) {
		smap = vzalloc(cost);
		if (!smap)
			return ERR_PTR(-ENOMEM);
	}

	err = -E2BIG;
	/* charge for the buckets allocated lazily in bpf_get_stackid() */
	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_smap;

	smap->map.map_type = attr->map_type;
	smap->map.key_size = attr->key_size;
	smap->map.value_size = value_size;
	smap->map.max_entries = attr->max_entries;
	smap->n_buckets = n_buckets;
	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	err = get_callchain_buffers();
	if (err)
		goto free_smap;

	return &smap->map;

free_smap:
	kvfree(smap);
	return ERR_PTR(err);
}
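
/*
 * Example (an illustrative user-space sketch, not part of this file): the
 * attribute checks above translate into a map created roughly like this,
 * with key_size fixed at 4 (the stack id) and value_size a multiple of 8
 * no larger than PERF_MAX_STACK_DEPTH * 8:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_STACK_TRACE,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = PERF_MAX_STACK_DEPTH * sizeof(__u64),
 *		.max_entries = 10000,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *
 * max_entries is rounded up to the next power of two for the bucket array,
 * so 10000 entries become 16384 buckets here.
 */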

static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
{
	struct pt_regs *regs = (struct pt_regs *) (long) r1;
	struct bpf_map *map = (struct bpf_map *) (long) r2;
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct perf_callchain_entry *trace;
	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
	u32 max_depth = map->value_size / 8;
	/* stack_map_alloc() checks that max_depth <= PERF_MAX_STACK_DEPTH */
	u32 init_nr = PERF_MAX_STACK_DEPTH - max_depth;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 hash, id, trace_nr, trace_len;
	bool user = flags & BPF_F_USER_STACK;
	bool kernel = !user;
	u64 *ips;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	trace = get_perf_callchain(regs, init_nr, kernel, user, false, false);

	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	/* get_perf_callchain() guarantees that trace->nr >= init_nr
	 * and trace->nr <= PERF_MAX_STACK_DEPTH, so trace_nr <= max_depth
	 */
	trace_nr = trace->nr - init_nr;

	if (trace_nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr -= skip;
	trace_len = trace_nr * sizeof(u64);
	ips = trace->ip + skip + init_nr;
	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
	id = hash & (smap->n_buckets - 1);
	bucket = rcu_dereference(smap->buckets[id]);

	if (bucket && bucket->hash == hash) {
		if (flags & BPF_F_FAST_STACK_CMP)
			return id;
		if (bucket->nr == trace_nr &&
		    memcmp(bucket->ip, ips, trace_len) == 0)
			return id;
	}

	/* this call stack is not in the map, try to add it */
	if (bucket && !(flags & BPF_F_REUSE_STACKID))
		return -EEXIST;

	new_bucket = kmalloc(sizeof(struct stack_map_bucket) + map->value_size,
			     GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!new_bucket))
		return -ENOMEM;

	memcpy(new_bucket->ip, ips, trace_len);
	/* zero the tail so lookups always see value_size bytes of valid data */
	memset(new_bucket->ip + trace_len / 8, 0, map->value_size - trace_len);
	new_bucket->hash = hash;
	new_bucket->nr = trace_nr;

	/* publish the new bucket; free the displaced one after a grace period */
	old_bucket = xchg(&smap->buckets[id], new_bucket);
	if (old_bucket)
		kfree_rcu(old_bucket, rcu);
	return id;
}
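
/*
 * Worked example of the index arithmetic above (illustration only, assuming
 * PERF_MAX_STACK_DEPTH == 127): with value_size == 800, max_depth is 100
 * and init_nr is 27, so get_perf_callchain() stores at most 100 entries
 * starting at trace->ip[27].  If the unwinder collects 40 frames,
 * trace->nr == 67 and trace_nr == 40; with skip == 3 the hash and the copy
 * into the bucket cover the 37 entries at trace->ip[30..66].
 */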

const struct bpf_func_proto bpf_get_stackid_proto = {
	.func		= bpf_get_stackid,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
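
/*
 * Example use from an eBPF program (a sketch in the samples/bpf style of the
 * time; the map name, section and attach point are illustrative).  Programs
 * normally store the returned stack id in another map so that user space can
 * look the trace up later:
 *
 *	struct bpf_map_def SEC("maps") stackmap = {
 *		.type = BPF_MAP_TYPE_STACK_TRACE,
 *		.key_size = sizeof(u32),
 *		.value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
 *		.max_entries = 10000,
 *	};
 *
 *	SEC("kprobe/try_to_wake_up")
 *	int waker(struct pt_regs *ctx)
 *	{
 *		int id = bpf_get_stackid(ctx, &stackmap, BPF_F_FAST_STACK_CMP);
 *
 *		return id < 0;
 *	}
 *
 * Passing BPF_F_USER_STACK captures the user stack instead of the kernel
 * stack, and the low byte of flags (BPF_F_SKIP_FIELD_MASK) skips that many
 * leading frames before hashing.
 */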

/* Called from syscall or from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket;
	u32 id = *(u32 *)key;

	if (unlikely(id >= smap->n_buckets))
		return NULL;
	bucket = rcu_dereference(smap->buckets[id]);
	return bucket ? bucket->ip : NULL;
}
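
/*
 * Example lookup from user space (illustrative sketch; read_stack() is a
 * hypothetical helper): the key is the id returned by bpf_get_stackid() and
 * the value is the zero-padded array of instruction pointers saved in the
 * bucket:
 *
 *	int read_stack(int map_fd, __u32 stack_id,
 *		       __u64 ips[PERF_MAX_STACK_DEPTH])
 *	{
 *		union bpf_attr attr = {
 *			.map_fd = map_fd,
 *			.key    = (__u64)(unsigned long)&stack_id,
 *			.value  = (__u64)(unsigned long)ips,
 *		};
 *
 *		return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *	}
 *
 * Since stack_map_get_next_key() below returns -EINVAL, user space cannot
 * enumerate the ids; it has to learn them from the eBPF program, e.g. via
 * another map.
 */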

static int stack_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -EINVAL;
}

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *old_bucket;
	u32 id = *(u32 *)key;

	if (unlikely(id >= smap->n_buckets))
		return -E2BIG;

	old_bucket = xchg(&smap->buckets[id], NULL);
	if (old_bucket) {
		kfree_rcu(old_bucket, rcu);
		return 0;
	} else {
		return -ENOENT;
	}
}
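
/*
 * User space can delete ids it has consumed so the hash slots become free
 * again; otherwise an occupied bucket makes bpf_get_stackid() return -EEXIST
 * for new traces hashing to the same slot unless BPF_F_REUSE_STACKID is set.
 * Illustrative sketch:
 *
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&stack_id,
 *	};
 *	syscall(__NR_bpf, BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
 */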

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	int i;

	/* wait for any running bpf programs to finish using the map */
	synchronize_rcu();

	for (i = 0; i < smap->n_buckets; i++)
		if (smap->buckets[i])
			kfree_rcu(smap->buckets[i], rcu);
	kvfree(smap);
	put_callchain_buffers();
}

static const struct bpf_map_ops stack_map_ops = {
	.map_alloc = stack_map_alloc,
	.map_free = stack_map_free,
	.map_get_next_key = stack_map_get_next_key,
	.map_lookup_elem = stack_map_lookup_elem,
	.map_update_elem = stack_map_update_elem,
	.map_delete_elem = stack_map_delete_elem,
};

static struct bpf_map_type_list stack_map_type __read_mostly = {
	.ops = &stack_map_ops,
	.type = BPF_MAP_TYPE_STACK_TRACE,
};

static int __init register_stack_map(void)
{
	bpf_register_map_type(&stack_map_type);
	return 0;
}
late_initcall(register_stack_map);