xref: /linux-6.15/kernel/bpf/queue_stack_maps.c (revision b936ca64)
// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)

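/*
 * A queue/stack map is a single contiguous ring buffer of fixed-size
 * elements. 'head' is the producer index (the next slot to be written)
 * and 'tail' is the consumer index (the oldest element). One slot is
 * intentionally left unused so that 'head == tail' unambiguously means
 * "empty"; that is why 'size' is max_entries + 1.
 *
 * For example, with max_entries == 3 (size == 4), pushing A, B, C gives
 *
 *	index:   0   1   2   3
 *	slot:   [A] [B] [C] [ ]
 *	         ^tail       ^head
 *
 * and a fourth push must either fail (-E2BIG) or, with BPF_EXIST,
 * advance 'tail' so the oldest element (A) is discarded to make room.
 */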
struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

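/*
 * Full means the producer index is one slot behind the consumer index,
 * modulo the ring size; the sacrificed slot keeps this distinct from
 * the empty case above.
 */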
static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

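/*
 * As a rough sketch (caller-side code, not part of this file), a
 * user-space program would create a queue map with attributes along
 * these lines; note that key_size must be 0 because queue/stack maps
 * are keyless:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_QUEUE,
 *		.key_size    = 0,
 *		.value_size  = 8,
 *		.max_entries = 64,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */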
/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

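/*
 * The backing store is sized for max_entries + 1 slots (see the struct
 * comment above). Worked example: max_entries == 1000 and
 * value_size == 8 give queue_size = sizeof(*qs) + 1001 * 8 bytes, and
 * the cost charged against the memlock limit is that figure rounded up
 * to whole pages. The arithmetic is done in u64, and anything at or
 * above U32_MAX - PAGE_SIZE is rejected as -E2BIG so the total
 * allocation stays within a sane 32-bit bound.
 */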
static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_map_memory mem = {0};
	struct bpf_queue_stack *qs;
	u64 size, queue_size, cost;

	size = (u64) attr->max_entries + 1;
	cost = queue_size = sizeof(*qs) + size * attr->value_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	bpf_map_charge_move(&qs->map.memory, &mem);
	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * were disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete.
	 */
	synchronize_rcu();

	bpf_map_area_free(qs);
}

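/*
 * Queue (FIFO) accessor: the element is read from 'tail', the oldest
 * entry, and a pop additionally advances 'tail' (with wraparound).
 * New elements are produced at 'head' by queue_stack_map_push_elem().
 */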
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

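/*
 * Stack (LIFO) accessor: the element is read from just below 'head',
 * the newest entry. 'index = head - 1' relies on unsigned wraparound:
 * when head == 0 the subtraction yields a huge value, the
 * 'index >= qs->size' test catches it, and index is folded back to
 * size - 1, i.e. the last slot of the ring.
 */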
static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		memset(value, 0, qs->map.value_size);
		err = -ENOENT;
		goto out;
	}

	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

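/*
 * Push is shared by queue and stack maps: both produce at 'head'.
 * With the ring from the struct comment full ([A][B][C][ ]), a plain
 * push fails with -E2BIG, while a push with BPF_EXIST first advances
 * 'tail', discarding the oldest element (A) to make room for the new
 * one.
 */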
/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

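/*
 * Queue and stack maps are keyless, so the generic lookup, update,
 * delete and get_next_key operations cannot work on them. The stubs
 * below exist only to populate bpf_map_ops; the syscall path maps the
 * push/pop/peek semantics onto update, lookup and lookup-and-delete
 * with a NULL key instead.
 */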
/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

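/*
 * The two map types share allocation, free and push; they differ only
 * in which end of the ring pop and peek consume from: the queue ops
 * read the oldest element (tail), the stack ops the newest (head - 1).
 */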
const struct bpf_map_ops queue_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
};

const struct bpf_map_ops stack_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
};
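
/*
 * A minimal usage sketch (assumed caller-side code, not part of this
 * file): a BPF program drives these maps through the
 * bpf_map_push_elem(), bpf_map_pop_elem() and bpf_map_peek_elem()
 * helpers, e.g.
 *
 *	u64 v = 42;
 *
 *	bpf_map_push_elem(&my_queue, &v, BPF_ANY);
 *	if (bpf_map_pop_elem(&my_queue, &v) == 0)
 *		... v now holds the oldest pushed value ...
 *
 * where 'my_queue' is a hypothetical BPF_MAP_TYPE_QUEUE map. The same
 * operations are reachable from user space via BPF_MAP_UPDATE_ELEM
 * (push), BPF_MAP_LOOKUP_ELEM (peek) and BPF_MAP_LOOKUP_AND_DELETE_ELEM
 * (pop), each with a NULL key.
 */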