xref: /linux-6.15/kernel/bpf/memalloc.c (revision baa8fdec)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
3 #include <linux/mm.h>
4 #include <linux/llist.h>
5 #include <linux/bpf.h>
6 #include <linux/irq_work.h>
7 #include <linux/bpf_mem_alloc.h>
8 #include <linux/memcontrol.h>
9 #include <asm/local.h>
10 
11 /* Any context (including NMI) BPF specific memory allocator.
12  *
13  * Tracing BPF programs can attach to kprobe and fentry. Hence they
14  * run in an unknown context where calling plain kmalloc() might not be safe.
15  *
16  * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
17  * Refill this cache asynchronously from irq_work.
18  *
19  * CPU_0 buckets
20  * 16 32 64 96 128 192 256 512 1024 2048 4096
21  * ...
22  * CPU_N buckets
23  * 16 32 64 96 128 192 256 512 1024 2048 4096
24  *
25  * The buckets are prefilled at the start.
26  * BPF programs always run with migration disabled.
27  * It's safe to allocate from the cache of the current cpu with irqs disabled.
28  * Freeing is always done into the bucket of the current cpu as well.
29  * irq_work trims extra free elements from buckets with kfree
30  * and refills them with kmalloc, so global kmalloc logic takes care
31  * of freeing objects allocated by one cpu and freed on another.
32  *
33  * Every allocated object is padded with 8 extra bytes that hold a
34  * struct llist_node.
35  */
36 #define LLIST_NODE_SZ sizeof(struct llist_node)
37 
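/* Layout of one cached object, as implied by the LLIST_NODE_SZ padding
 * (a sketch; the 8-byte header is prepended to the user-visible area):
 *
 *	+-------------------+----------------------------------+
 *	| struct llist_node | user data (unit_size - 8 bytes)  |
 *	+-------------------+----------------------------------+
 *	^                   ^
 *	obj                 pointer returned by bpf_mem_alloc()
 *
 * While an object sits on a freelist the header links it to the next free
 * element; while it is allocated the same 8 bytes hold a hint to the
 * bpf_mem_cache that handed it out (see unit_alloc()/unit_free() below).
 */
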
38 /* Similar to kmalloc's size_index, but the size == 8 bucket is gone. */
39 static u8 size_index[24] __ro_after_init = {
40 	3,	/* 8 */
41 	3,	/* 16 */
42 	4,	/* 24 */
43 	4,	/* 32 */
44 	5,	/* 40 */
45 	5,	/* 48 */
46 	5,	/* 56 */
47 	5,	/* 64 */
48 	1,	/* 72 */
49 	1,	/* 80 */
50 	1,	/* 88 */
51 	1,	/* 96 */
52 	6,	/* 104 */
53 	6,	/* 112 */
54 	6,	/* 120 */
55 	6,	/* 128 */
56 	2,	/* 136 */
57 	2,	/* 144 */
58 	2,	/* 152 */
59 	2,	/* 160 */
60 	2,	/* 168 */
61 	2,	/* 176 */
62 	2,	/* 184 */
63 	2	/* 192 */
64 };
65 
66 static int bpf_mem_cache_idx(size_t size)
67 {
68 	if (!size || size > 4096)
69 		return -1;
70 
71 	if (size <= 192)
72 		return size_index[(size - 1) / 8] - 1;
73 
74 	return fls(size - 1) - 2;
75 }
76 
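/* A few worked examples of the mapping above, assuming the default
 * (unadjusted) size_index table:
 *
 *	size =   16: size_index[(16 - 1) / 8] - 1  = 3 - 1 = 2   -> 16-byte bucket
 *	size =  100: size_index[(100 - 1) / 8] - 1 = 6 - 1 = 5   -> 128-byte bucket
 *	size =  192: size_index[(192 - 1) / 8] - 1 = 2 - 1 = 1   -> 192-byte bucket
 *	size =  300: fls(300 - 1) - 2 = 9 - 2 = 7                -> 512-byte bucket
 *	size = 4096: fls(4096 - 1) - 2 = 12 - 2 = 10             -> 4096-byte bucket
 *
 * The result indexes the sizes[] table in bpf_mem_alloc_init() below:
 * {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096}.
 */
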
77 #define NUM_CACHES 11
78 
79 struct bpf_mem_cache {
80 	/* per-cpu list of free objects of size 'unit_size'.
81 	 * All accesses are done with interrupts disabled, protected by the
82 	 * 'active' counter, using __llist_add() and __llist_del_first().
83 	 */
84 	struct llist_head free_llist;
85 	local_t active;
86 
87 	/* Operations on the free_llist from unit_alloc/unit_free/bpf_mem_refill
88 	 * are sequenced by the per-cpu 'active' counter. But unit_free() cannot
89 	 * fail. When 'active' is busy, unit_free() will add an object to
90 	 * free_llist_extra.
91 	 */
92 	struct llist_head free_llist_extra;
93 
94 	struct irq_work refill_work;
95 	struct obj_cgroup *objcg;
96 	int unit_size;
97 	/* count of objects in free_llist */
98 	int free_cnt;
99 	int low_watermark, high_watermark, batch;
100 	int percpu_size;
101 	bool draining;
102 	struct bpf_mem_cache *tgt;
103 
104 	/* list of objects to be freed after RCU GP */
105 	struct llist_head free_by_rcu;
106 	struct llist_node *free_by_rcu_tail;
107 	struct llist_head waiting_for_gp;
108 	struct llist_node *waiting_for_gp_tail;
109 	struct rcu_head rcu;
110 	atomic_t call_rcu_in_progress;
111 	struct llist_head free_llist_extra_rcu;
112 
113 	/* list of objects to be freed after RCU tasks trace GP */
114 	struct llist_head free_by_rcu_ttrace;
115 	struct llist_head waiting_for_gp_ttrace;
116 	struct rcu_head rcu_ttrace;
117 	atomic_t call_rcu_ttrace_in_progress;
118 };
119 
120 struct bpf_mem_caches {
121 	struct bpf_mem_cache cache[NUM_CACHES];
122 };
123 
124 static struct llist_node notrace *__llist_del_first(struct llist_head *head)
125 {
126 	struct llist_node *entry, *next;
127 
128 	entry = head->first;
129 	if (!entry)
130 		return NULL;
131 	next = entry->next;
132 	head->first = next;
133 	return entry;
134 }
135 
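/* Unlike the cmpxchg-based llist_del_first(), the open-coded pop above is
 * not atomic. It is only safe because it runs with IRQs disabled while the
 * per-cpu 'active' counter is held (see inc_active() below), so no other
 * context on this cpu can touch free_llist concurrently.
 */
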
136 static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
137 {
138 	if (c->percpu_size) {
139 		void **obj = kmalloc_node(c->percpu_size, flags, node);
140 		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
141 
142 		if (!obj || !pptr) {
143 			free_percpu(pptr);
144 			kfree(obj);
145 			return NULL;
146 		}
147 		obj[1] = pptr;
148 		return obj;
149 	}
150 
151 	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
152 }
153 
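/* For the per-cpu case the cached object built by __alloc() above is a
 * small two-pointer box rather than the data itself:
 *
 *	obj[0]: struct llist_node / bpf_mem_cache hint (the usual 8-byte header)
 *	obj[1]: pointer to the real per-cpu area of c->unit_size bytes
 *
 * free_one() tears it down in the same order: free_percpu(obj[1]), then
 * kfree(obj).
 */
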
154 static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
155 {
156 #ifdef CONFIG_MEMCG_KMEM
157 	if (c->objcg)
158 		return get_mem_cgroup_from_objcg(c->objcg);
159 #endif
160 
161 #ifdef CONFIG_MEMCG
162 	return root_mem_cgroup;
163 #else
164 	return NULL;
165 #endif
166 }
167 
168 static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
169 {
170 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
171 		/* On RT, irq_work runs in a per-cpu kthread, so disable
172 		 * interrupts to prevent preemption and reduce the chance
173 		 * of a bpf prog executing on this cpu while the active
174 		 * counter is busy.
175 		 */
176 		local_irq_save(*flags);
177 	/* alloc_bulk runs from irq_work which will not preempt a bpf
178 	 * program that does unit_alloc/unit_free since IRQs are
179 	 * disabled there. There is no race to increment 'active'
180 	 * counter. It protects free_llist from corruption in case NMI
181 	 * bpf prog preempted this loop.
182 	 */
183 	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
184 }
185 
186 static void dec_active(struct bpf_mem_cache *c, unsigned long *flags)
187 {
188 	local_dec(&c->active);
189 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
190 		local_irq_restore(*flags);
191 }
192 
193 static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
194 {
195 	unsigned long flags;
196 
197 	inc_active(c, &flags);
198 	__llist_add(obj, &c->free_llist);
199 	c->free_cnt++;
200 	dec_active(c, &flags);
201 }
202 
203 /* Mostly runs from irq_work, except during the __init phase. */
204 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
205 {
206 	struct mem_cgroup *memcg = NULL, *old_memcg;
207 	gfp_t gfp;
208 	void *obj;
209 	int i;
210 
211 	gfp = __GFP_NOWARN | __GFP_ACCOUNT;
212 	gfp |= atomic ? GFP_NOWAIT : GFP_KERNEL;
213 
214 	for (i = 0; i < cnt; i++) {
215 		/*
216 		 * For every 'c', llist_del_first(&c->free_by_rcu_ttrace) is
217 		 * done only by one CPU (the current CPU). Other CPUs might
218 		 * llist_add() and llist_del_all() in parallel.
219 		 */
220 		obj = llist_del_first(&c->free_by_rcu_ttrace);
221 		if (!obj)
222 			break;
223 		add_obj_to_free_list(c, obj);
224 	}
225 	if (i >= cnt)
226 		return;
227 
228 	for (; i < cnt; i++) {
229 		obj = llist_del_first(&c->waiting_for_gp_ttrace);
230 		if (!obj)
231 			break;
232 		add_obj_to_free_list(c, obj);
233 	}
234 	if (i >= cnt)
235 		return;
236 
237 	memcg = get_memcg(c);
238 	old_memcg = set_active_memcg(memcg);
239 	for (; i < cnt; i++) {
240 		/* Allocate without depleting the atomic reserves that a typical
241 		 * GFP_ATOMIC allocation would use. irq_work runs on this cpu and
242 		 * kmalloc will allocate from the current numa node which is
243 		 * what we want here.
244 		 */
245 		obj = __alloc(c, node, gfp);
246 		if (!obj)
247 			break;
248 		add_obj_to_free_list(c, obj);
249 	}
250 	set_active_memcg(old_memcg);
251 	mem_cgroup_put(memcg);
252 }
253 
254 static void free_one(void *obj, bool percpu)
255 {
256 	if (percpu) {
257 		free_percpu(((void **)obj)[1]);
258 		kfree(obj);
259 		return;
260 	}
261 
262 	kfree(obj);
263 }
264 
265 static int free_all(struct llist_node *llnode, bool percpu)
266 {
267 	struct llist_node *pos, *t;
268 	int cnt = 0;
269 
270 	llist_for_each_safe(pos, t, llnode) {
271 		free_one(pos, percpu);
272 		cnt++;
273 	}
274 	return cnt;
275 }
276 
277 static void __free_rcu(struct rcu_head *head)
278 {
279 	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);
280 
281 	free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
282 	atomic_set(&c->call_rcu_ttrace_in_progress, 0);
283 }
284 
285 static void __free_rcu_tasks_trace(struct rcu_head *head)
286 {
287 	/* If RCU Tasks Trace grace period implies RCU grace period,
288 	 * there is no need to invoke call_rcu().
289 	 */
290 	if (rcu_trace_implies_rcu_gp())
291 		__free_rcu(head);
292 	else
293 		call_rcu(head, __free_rcu);
294 }
295 
296 static void enque_to_free(struct bpf_mem_cache *c, void *obj)
297 {
298 	struct llist_node *llnode = obj;
299 
300 	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
301 	 * Nothing races to add to free_by_rcu_ttrace list.
302 	 */
303 	llist_add(llnode, &c->free_by_rcu_ttrace);
304 }
305 
306 static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
307 {
308 	struct llist_node *llnode, *t;
309 
310 	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
311 		if (unlikely(READ_ONCE(c->draining))) {
312 			llnode = llist_del_all(&c->free_by_rcu_ttrace);
313 			free_all(llnode, !!c->percpu_size);
314 		}
315 		return;
316 	}
317 
318 	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
319 	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
320 		llist_add(llnode, &c->waiting_for_gp_ttrace);
321 
322 	if (unlikely(READ_ONCE(c->draining))) {
323 		__free_rcu(&c->rcu_ttrace);
324 		return;
325 	}
326 
327 	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
328 	 * If RCU Tasks Trace grace period implies RCU grace period, free
329 	 * these elements directly, else use call_rcu() to wait for normal
330 	 * progs to finish and finally do free_one() on each element.
331 	 */
332 	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
333 }
334 
335 static void free_bulk(struct bpf_mem_cache *c)
336 {
337 	struct bpf_mem_cache *tgt = c->tgt;
338 	struct llist_node *llnode, *t;
339 	unsigned long flags;
340 	int cnt;
341 
342 	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
343 
344 	do {
345 		inc_active(c, &flags);
346 		llnode = __llist_del_first(&c->free_llist);
347 		if (llnode)
348 			cnt = --c->free_cnt;
349 		else
350 			cnt = 0;
351 		dec_active(c, &flags);
352 		if (llnode)
353 			enque_to_free(tgt, llnode);
354 	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
355 
356 	/* and drain free_llist_extra */
357 	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
358 		enque_to_free(tgt, llnode);
359 	do_call_rcu_ttrace(tgt);
360 }
361 
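/* End-to-end, the kmalloc-backed free path drained by free_bulk() above:
 *
 *	unit_free() -> free_llist (or free_llist_extra)
 *	  -> free_bulk(): move surplus to tgt->free_by_rcu_ttrace
 *	    -> do_call_rcu_ttrace(): splice onto waiting_for_gp_ttrace
 *	      -> RCU tasks trace GP (plus a regular RCU GP when not implied)
 *	        -> __free_rcu(): kfree()/free_percpu() each element
 */
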
362 static void __free_by_rcu(struct rcu_head *head)
363 {
364 	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
365 	struct bpf_mem_cache *tgt = c->tgt;
366 	struct llist_node *llnode;
367 
368 	llnode = llist_del_all(&c->waiting_for_gp);
369 	if (!llnode)
370 		goto out;
371 
372 	llist_add_batch(llnode, c->waiting_for_gp_tail, &tgt->free_by_rcu_ttrace);
373 
374 	/* Objects went through regular RCU GP. Send them to RCU tasks trace */
375 	do_call_rcu_ttrace(tgt);
376 out:
377 	atomic_set(&c->call_rcu_in_progress, 0);
378 }
379 
380 static void check_free_by_rcu(struct bpf_mem_cache *c)
381 {
382 	struct llist_node *llnode, *t;
383 	unsigned long flags;
384 
385 	/* drain free_llist_extra_rcu */
386 	if (unlikely(!llist_empty(&c->free_llist_extra_rcu))) {
387 		inc_active(c, &flags);
388 		llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu))
389 			if (__llist_add(llnode, &c->free_by_rcu))
390 				c->free_by_rcu_tail = llnode;
391 		dec_active(c, &flags);
392 	}
393 
394 	if (llist_empty(&c->free_by_rcu))
395 		return;
396 
397 	if (atomic_xchg(&c->call_rcu_in_progress, 1)) {
398 		/*
399 		 * Instead of kmalloc-ing a new rcu_head and triggering 10k
400 		 * call_rcu() to hit rcutree.qhimark and force RCU to notice
401 		 * the overload, just ask RCU to hurry up. There could be many
402 		 * objects in the free_by_rcu list.
403 		 * This hint reduces memory consumption for an artificial
404 		 * benchmark from 2 Gbyte to 150 Mbyte.
405 		 */
406 		rcu_request_urgent_qs_task(current);
407 		return;
408 	}
409 
410 	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
411 
412 	inc_active(c, &flags);
413 	WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu));
414 	c->waiting_for_gp_tail = c->free_by_rcu_tail;
415 	dec_active(c, &flags);
416 
417 	if (unlikely(READ_ONCE(c->draining))) {
418 		free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
419 		atomic_set(&c->call_rcu_in_progress, 0);
420 	} else {
421 		call_rcu_hurry(&c->rcu, __free_by_rcu);
422 	}
423 }
424 
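/* The bpf_mem_free_rcu() path managed above takes one extra grace period
 * before rejoining the tasks-trace machinery:
 *
 *	unit_free_rcu() -> free_by_rcu (or free_llist_extra_rcu)
 *	  -> check_free_by_rcu(): splice onto waiting_for_gp, call_rcu_hurry()
 *	    -> regular RCU GP -> __free_by_rcu(): move to tgt->free_by_rcu_ttrace
 *	      -> do_call_rcu_ttrace(), as in the kmalloc-backed path above
 */
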
425 static void bpf_mem_refill(struct irq_work *work)
426 {
427 	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
428 	int cnt;
429 
430 	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
431 	cnt = c->free_cnt;
432 	if (cnt < c->low_watermark)
433 		/* irq_work runs on this cpu and kmalloc will allocate
434 		 * from the current numa node which is what we want here.
435 		 */
436 		alloc_bulk(c, c->batch, NUMA_NO_NODE, true);
437 	else if (cnt > c->high_watermark)
438 		free_bulk(c);
439 
440 	check_free_by_rcu(c);
441 }
442 
443 static void notrace irq_work_raise(struct bpf_mem_cache *c)
444 {
445 	irq_work_queue(&c->refill_work);
446 }
447 
448 /* For the typical bpf map case that uses bpf_mem_cache_alloc and a single
449  * bucket, the freelist cache will be elem_size * 64 (or less) on each cpu.
450  *
451  * For bpf programs that don't have statically known allocation sizes,
452  * assuming (low_mark + high_mark) / 2 as the average number of elements per
453  * bucket and all buckets in use, the total amount of memory in freelists
454  * on each cpu will be:
455  * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
456  * == ~ 116 Kbyte using the heuristic below.
457  * An initialized but unused bpf allocator (not the bpf map specific one)
458  * will consume ~ 11 Kbyte per cpu.
459  * The typical case will be between 11K and 116K, closer to 11K.
460  * bpf progs can and should share a bpf_mem_cache when possible.
461  */
462 static void init_refill_work(struct bpf_mem_cache *c)
463 {
464 	init_irq_work(&c->refill_work, bpf_mem_refill);
465 	if (c->unit_size <= 256) {
466 		c->low_watermark = 32;
467 		c->high_watermark = 96;
468 	} else {
469 		/* When page_size == 4k, order-0 cache will have low_mark == 2
470 		 * and high_mark == 6 with batch alloc of 3 individual pages at
471 		 * a time.
472 		 * For 8k allocs and above: low == 1, high == 3, batch == 1.
473 		 */
474 		c->low_watermark = max(32 * 256 / c->unit_size, 1);
475 		c->high_watermark = max(96 * 256 / c->unit_size, 3);
476 	}
477 	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
478 }
479 
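/* Plugging concrete unit sizes into the heuristic above (page_size == 4k):
 *
 *	unit_size = 4096: low   = max(32 * 256 / 4096, 1) = 2
 *	                  high  = max(96 * 256 / 4096, 3) = 6
 *	                  batch = max((6 - 2) / 4 * 3, 1) = 3
 *	unit_size = 8192: low = 1, high = 3,
 *	                  batch = max((3 - 1) / 4 * 3, 1) = 1
 */
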
480 static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
481 {
482 	/* To avoid consuming memory, assume that the 1st run of a bpf
483 	 * prog won't be doing more than 4 map_update_elem calls from an
484 	 * irq-disabled region.
485 	 */
486 	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu), false);
487 }
488 
489 static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx)
490 {
491 	struct llist_node *first;
492 	unsigned int obj_size;
493 
494 	first = c->free_llist.first;
495 	if (!first)
496 		return 0;
497 
498 	if (c->percpu_size)
499 		obj_size = pcpu_alloc_size(((void **)first)[1]);
500 	else
501 		obj_size = ksize(first);
502 	if (obj_size != c->unit_size) {
503 		WARN_ONCE(1, "bpf_mem_cache[%u]: percpu %d, unexpected object size %u, expect %u\n",
504 			  idx, c->percpu_size, obj_size, c->unit_size);
505 		return -EINVAL;
506 	}
507 	return 0;
508 }
509 
510 /* When size != 0, create one bpf_mem_cache for each cpu.
511  * This is the typical bpf hash map use case when all elements have equal size.
512  *
513  * When size == 0, allocate 11 bpf_mem_cache-s for each cpu, then rely on
514  * kmalloc/kfree. Max allocation size is 4096 in this case.
515  * This is the bpf_dynptr and bpf_kptr use case.
516  */
517 int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
518 {
519 	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
520 	int cpu, i, err, unit_size, percpu_size = 0;
521 	struct bpf_mem_caches *cc, __percpu *pcc;
522 	struct bpf_mem_cache *c, __percpu *pc;
523 	struct obj_cgroup *objcg = NULL;
524 
525 	/* room for llist_node and per-cpu pointer */
526 	if (percpu)
527 		percpu_size = LLIST_NODE_SZ + sizeof(void *);
528 
529 	if (size) {
530 		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
531 		if (!pc)
532 			return -ENOMEM;
533 
534 		if (!percpu)
535 			size += LLIST_NODE_SZ; /* room for llist_node */
536 		unit_size = size;
537 
538 #ifdef CONFIG_MEMCG_KMEM
539 		if (memcg_bpf_enabled())
540 			objcg = get_obj_cgroup_from_current();
541 #endif
542 		for_each_possible_cpu(cpu) {
543 			c = per_cpu_ptr(pc, cpu);
544 			c->unit_size = unit_size;
545 			c->objcg = objcg;
546 			c->percpu_size = percpu_size;
547 			c->tgt = c;
548 			init_refill_work(c);
549 			prefill_mem_cache(c, cpu);
550 		}
551 		ma->cache = pc;
552 		return 0;
553 	}
554 
555 	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
556 	if (!pcc)
557 		return -ENOMEM;
558 	err = 0;
559 #ifdef CONFIG_MEMCG_KMEM
560 	objcg = get_obj_cgroup_from_current();
561 #endif
562 	for_each_possible_cpu(cpu) {
563 		cc = per_cpu_ptr(pcc, cpu);
564 		for (i = 0; i < NUM_CACHES; i++) {
565 			c = &cc->cache[i];
566 			c->unit_size = sizes[i];
567 			c->objcg = objcg;
568 			c->percpu_size = percpu_size;
569 			c->tgt = c;
570 
571 			init_refill_work(c);
572 			/* Another bpf_mem_cache will be used when allocating
573 			 * c->unit_size in bpf_mem_alloc(), so don't prefill
574 			 * this bpf_mem_cache because its free objects would
575 			 * never be used.
576 			 */
577 			if (i != bpf_mem_cache_idx(c->unit_size))
578 				continue;
579 			prefill_mem_cache(c, cpu);
580 			err = check_obj_size(c, i);
581 			if (err)
582 				goto out;
583 		}
584 	}
585 
586 out:
587 	ma->caches = pcc;
588 	/* refill_work is either zeroed or initialized, so it is safe to
589 	 * call irq_work_sync().
590 	 */
591 	if (err)
592 		bpf_mem_alloc_destroy(ma);
593 	return err;
594 }
595 
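/* A minimal usage sketch of the fixed-size mode, assuming a hypothetical
 * struct my_elem; this mirrors what bpf hash maps do with their element
 * allocator. Callers must run with migration disabled, as BPF programs
 * and sys_bpf do:
 *
 *	struct bpf_mem_alloc ma;
 *	struct my_elem *e;
 *	int err;
 *
 *	err = bpf_mem_alloc_init(&ma, sizeof(struct my_elem), false);
 *	if (err)
 *		return err;
 *
 *	e = bpf_mem_cache_alloc(&ma);		// any context, may return NULL
 *	if (e)
 *		bpf_mem_cache_free(&ma, e);	// goes to this cpu's free_llist
 *
 *	bpf_mem_alloc_destroy(&ma);	// defers to a worker if RCU callbacks are pending
 */
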
596 static void drain_mem_cache(struct bpf_mem_cache *c)
597 {
598 	bool percpu = !!c->percpu_size;
599 
600 	/* No progs are using this bpf_mem_cache, but htab_map_free() called
601 	 * bpf_mem_cache_free() for all remaining elements, and those can be in the
602 	 * free_by_rcu_ttrace or waiting_for_gp_ttrace lists, so drain those lists now.
603 	 *
604 	 * Except for the waiting_for_gp_ttrace list, there are no concurrent operations
605 	 * on these lists, so it is safe to use __llist_del_all().
606 	 */
607 	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
608 	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
609 	free_all(__llist_del_all(&c->free_llist), percpu);
610 	free_all(__llist_del_all(&c->free_llist_extra), percpu);
611 	free_all(__llist_del_all(&c->free_by_rcu), percpu);
612 	free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu);
613 	free_all(llist_del_all(&c->waiting_for_gp), percpu);
614 }
615 
616 static void check_mem_cache(struct bpf_mem_cache *c)
617 {
618 	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu_ttrace));
619 	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
620 	WARN_ON_ONCE(!llist_empty(&c->free_llist));
621 	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra));
622 	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu));
623 	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra_rcu));
624 	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
625 }
626 
627 static void check_leaked_objs(struct bpf_mem_alloc *ma)
628 {
629 	struct bpf_mem_caches *cc;
630 	struct bpf_mem_cache *c;
631 	int cpu, i;
632 
633 	if (ma->cache) {
634 		for_each_possible_cpu(cpu) {
635 			c = per_cpu_ptr(ma->cache, cpu);
636 			check_mem_cache(c);
637 		}
638 	}
639 	if (ma->caches) {
640 		for_each_possible_cpu(cpu) {
641 			cc = per_cpu_ptr(ma->caches, cpu);
642 			for (i = 0; i < NUM_CACHES; i++) {
643 				c = &cc->cache[i];
644 				check_mem_cache(c);
645 			}
646 		}
647 	}
648 }
649 
650 static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
651 {
652 	check_leaked_objs(ma);
653 	free_percpu(ma->cache);
654 	free_percpu(ma->caches);
655 	ma->cache = NULL;
656 	ma->caches = NULL;
657 }
658 
659 static void free_mem_alloc(struct bpf_mem_alloc *ma)
660 {
661 	/* waiting_for_gp[_ttrace] lists were drained, but RCU callbacks
662 	 * might still execute. Wait for them.
663 	 *
664 	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
665 	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
666 	 * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
667 	 * so if call_rcu(head, __free_rcu) is skipped due to
668 	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
669 	 * using rcu_trace_implies_rcu_gp() as well.
670 	 */
671 	rcu_barrier(); /* wait for __free_by_rcu */
672 	rcu_barrier_tasks_trace(); /* wait for __free_rcu */
673 	if (!rcu_trace_implies_rcu_gp())
674 		rcu_barrier();
675 	free_mem_alloc_no_barrier(ma);
676 }
677 
678 static void free_mem_alloc_deferred(struct work_struct *work)
679 {
680 	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);
681 
682 	free_mem_alloc(ma);
683 	kfree(ma);
684 }
685 
686 static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
687 {
688 	struct bpf_mem_alloc *copy;
689 
690 	if (!rcu_in_progress) {
691 		/* Fast path. No callbacks are pending, hence no need to do
692 		 * rcu_barrier-s.
693 		 */
694 		free_mem_alloc_no_barrier(ma);
695 		return;
696 	}
697 
698 	copy = kmemdup(ma, sizeof(*ma), GFP_KERNEL);
699 	if (!copy) {
700 		/* Slow path with inline barrier-s */
701 		free_mem_alloc(ma);
702 		return;
703 	}
704 
705 	/* Defer barriers into a worker to let the rest of the map memory be freed */
706 	memset(ma, 0, sizeof(*ma));
707 	INIT_WORK(&copy->work, free_mem_alloc_deferred);
708 	queue_work(system_unbound_wq, &copy->work);
709 }
710 
711 void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
712 {
713 	struct bpf_mem_caches *cc;
714 	struct bpf_mem_cache *c;
715 	int cpu, i, rcu_in_progress;
716 
717 	if (ma->cache) {
718 		rcu_in_progress = 0;
719 		for_each_possible_cpu(cpu) {
720 			c = per_cpu_ptr(ma->cache, cpu);
721 			WRITE_ONCE(c->draining, true);
722 			irq_work_sync(&c->refill_work);
723 			drain_mem_cache(c);
724 			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
725 			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
726 		}
727 		/* objcg is the same across cpus */
728 		if (c->objcg)
729 			obj_cgroup_put(c->objcg);
730 		destroy_mem_alloc(ma, rcu_in_progress);
731 	}
732 	if (ma->caches) {
733 		rcu_in_progress = 0;
734 		for_each_possible_cpu(cpu) {
735 			cc = per_cpu_ptr(ma->caches, cpu);
736 			for (i = 0; i < NUM_CACHES; i++) {
737 				c = &cc->cache[i];
738 				WRITE_ONCE(c->draining, true);
739 				irq_work_sync(&c->refill_work);
740 				drain_mem_cache(c);
741 				rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
742 				rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
743 			}
744 		}
745 		if (c->objcg)
746 			obj_cgroup_put(c->objcg);
747 		destroy_mem_alloc(ma, rcu_in_progress);
748 	}
749 }
750 
751 /* notrace is necessary here and in other functions to make sure
752  * bpf programs cannot attach to them and cause llist corruptions.
753  */
754 static void notrace *unit_alloc(struct bpf_mem_cache *c)
755 {
756 	struct llist_node *llnode = NULL;
757 	unsigned long flags;
758 	int cnt = 0;
759 
760 	/* Disable irqs to prevent the following race for the majority of prog types:
761 	 * prog_A
762 	 *   bpf_mem_alloc
763 	 *      preemption or irq -> prog_B
764 	 *        bpf_mem_alloc
765 	 *
766 	 * but prog_B could be a perf_event NMI prog.
767 	 * Use per-cpu 'active' counter to order free_list access between
768 	 * unit_alloc/unit_free/bpf_mem_refill.
769 	 */
770 	local_irq_save(flags);
771 	if (local_inc_return(&c->active) == 1) {
772 		llnode = __llist_del_first(&c->free_llist);
773 		if (llnode) {
774 			cnt = --c->free_cnt;
775 			*(struct bpf_mem_cache **)llnode = c;
776 		}
777 	}
778 	local_dec(&c->active);
779 
780 	WARN_ON(cnt < 0);
781 
782 	if (cnt < c->low_watermark)
783 		irq_work_raise(c);
784 	/* Enable IRQs after the enqueue of the irq work completes, so the irq
785 	 * work will run after IRQs are enabled and free_llist may be refilled
786 	 * by the irq work before another task preempts the current task.
787 	 */
788 	local_irq_restore(flags);
789 
790 	return llnode;
791 }
792 
793 /* Though 'ptr' object could have been allocated on a different cpu
794  * add it to the free_llist of the current cpu.
795  * Let kfree() logic deal with it when it's later called from irq_work.
796  */
797 static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
798 {
799 	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
800 	unsigned long flags;
801 	int cnt = 0;
802 
803 	BUILD_BUG_ON(LLIST_NODE_SZ > 8);
804 
805 	/*
806 	 * Remember the bpf_mem_cache that allocated this object.
807 	 * The hint is not accurate.
808 	 */
809 	c->tgt = *(struct bpf_mem_cache **)llnode;
810 
811 	local_irq_save(flags);
812 	if (local_inc_return(&c->active) == 1) {
813 		__llist_add(llnode, &c->free_llist);
814 		cnt = ++c->free_cnt;
815 	} else {
816 		/* unit_free() cannot fail. Therefore add the object to an atomic
817 		 * llist. free_bulk() will drain it. Though free_llist_extra is
818 		 * a per-cpu list, we have to use the atomic llist_add() here, since
819 		 * it can also be interrupted by a bpf NMI prog that does another
820 		 * unit_free() into the same free_llist_extra.
821 		 */
822 		llist_add(llnode, &c->free_llist_extra);
823 	}
824 	local_dec(&c->active);
825 
826 	if (cnt > c->high_watermark)
827 		/* free few objects from current cpu into global kmalloc pool */
828 		irq_work_raise(c);
829 	/* Enable IRQs after irq_work_raise() completes, otherwise when the current
830 	 * task is preempted by a task which does unit_alloc(), unit_alloc() may
831 	 * return NULL unexpectedly because the irq work is already pending but
832 	 * cannot be triggered, and free_llist cannot be refilled in time.
833 	 */
834 	local_irq_restore(flags);
835 }
836 
837 static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
838 {
839 	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
840 	unsigned long flags;
841 
842 	c->tgt = *(struct bpf_mem_cache **)llnode;
843 
844 	local_irq_save(flags);
845 	if (local_inc_return(&c->active) == 1) {
846 		if (__llist_add(llnode, &c->free_by_rcu))
847 			c->free_by_rcu_tail = llnode;
848 	} else {
849 		llist_add(llnode, &c->free_llist_extra_rcu);
850 	}
851 	local_dec(&c->active);
852 
853 	if (!atomic_read(&c->call_rcu_in_progress))
854 		irq_work_raise(c);
855 	local_irq_restore(flags);
856 }
857 
858 /* Called from BPF program or from sys_bpf syscall.
859  * In both cases migration is disabled.
860  */
861 void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
862 {
863 	int idx;
864 	void *ret;
865 
866 	if (!size)
867 		return ZERO_SIZE_PTR;
868 
869 	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
870 	if (idx < 0)
871 		return NULL;
872 
873 	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
874 	return !ret ? NULL : ret + LLIST_NODE_SZ;
875 }
876 
877 void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
878 {
879 	int idx;
880 
881 	if (!ptr)
882 		return;
883 
884 	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
885 	if (idx < 0)
886 		return;
887 
888 	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
889 }
890 
891 void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
892 {
893 	int idx;
894 
895 	if (!ptr)
896 		return;
897 
898 	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
899 	if (idx < 0)
900 		return;
901 
902 	unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
903 }
904 
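/* A rough sketch of the size == 0 (bucketed) mode that pairs with the
 * entry points above; error handling elided:
 *
 *	struct bpf_mem_alloc ma;
 *	void *p;
 *
 *	bpf_mem_alloc_init(&ma, 0, false);	// 11 buckets per cpu
 *	p = bpf_mem_alloc(&ma, 64);	// 64 + 8 byte header -> 96-byte bucket
 *	if (p)
 *		bpf_mem_free(&ma, p);	// bucket re-derived via ksize()
 *	bpf_mem_alloc_destroy(&ma);
 */
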
905 void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
906 {
907 	void *ret;
908 
909 	ret = unit_alloc(this_cpu_ptr(ma->cache));
910 	return !ret ? NULL : ret + LLIST_NODE_SZ;
911 }
912 
913 void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
914 {
915 	if (!ptr)
916 		return;
917 
918 	unit_free(this_cpu_ptr(ma->cache), ptr);
919 }
920 
921 void notrace bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
922 {
923 	if (!ptr)
924 		return;
925 
926 	unit_free_rcu(this_cpu_ptr(ma->cache), ptr);
927 }
928 
929 /* Directly does a kfree() without putting 'ptr' back to the free_llist
930  * for reuse and without waiting for a rcu_tasks_trace gp.
931  * The caller must first go through the rcu_tasks_trace gp for 'ptr'
932  * before calling bpf_mem_cache_raw_free().
933  * It could be used when the rcu_tasks_trace callback does not have
934  * a hold on the original bpf_mem_alloc object that allocated the
935  * 'ptr'. This should only be used in the uncommon code path.
936  * Otherwise, the bpf_mem_alloc's free_llist cannot be refilled
937  * and may affect performance.
938  */
939 void bpf_mem_cache_raw_free(void *ptr)
940 {
941 	if (!ptr)
942 		return;
943 
944 	kfree(ptr - LLIST_NODE_SZ);
945 }
946 
947 /* When flags == GFP_KERNEL, it signals that the caller will not cause
948  * deadlock when using kmalloc. bpf_mem_cache_alloc_flags() will use
949  * kmalloc if the free_llist is empty.
950  */
951 void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
952 {
953 	struct bpf_mem_cache *c;
954 	void *ret;
955 
956 	c = this_cpu_ptr(ma->cache);
957 
958 	ret = unit_alloc(c);
959 	if (!ret && flags == GFP_KERNEL) {
960 		struct mem_cgroup *memcg, *old_memcg;
961 
962 		memcg = get_memcg(c);
963 		old_memcg = set_active_memcg(memcg);
964 		ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
965 		set_active_memcg(old_memcg);
966 		mem_cgroup_put(memcg);
967 	}
968 
969 	return !ret ? NULL : ret + LLIST_NODE_SZ;
970 }
971 
972 /* The alignment of the dynamic per-cpu area is 8, so c->unit_size and the
973  * actual size of the dynamic per-cpu area will always match and there is
974  * no need to adjust size_index for per-cpu allocation. However, for the
975  * simplicity of the implementation, use a unified size_index for both
976  * kmalloc and per-cpu allocation.
977  */
978 static __init int bpf_mem_cache_adjust_size(void)
979 {
980 	unsigned int size;
981 
982 	/* Adjust the indexes in size_index[] according to the object_size
983 	 * of the underlying slab cache, so bpf_mem_alloc() will select a
984 	 * bpf_mem_cache with unit_size equal to the object_size of
985 	 * the underlying slab cache.
986 	 *
987 	 * The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
988 	 * 256 bytes, so only do the adjustment for [8 bytes, 192 bytes].
989 	 */
990 	for (size = 192; size >= 8; size -= 8) {
991 		unsigned int kmalloc_size, index;
992 
993 		kmalloc_size = kmalloc_size_roundup(size);
994 		if (kmalloc_size == size)
995 			continue;
996 
997 		if (kmalloc_size <= 192)
998 			index = size_index[(kmalloc_size - 1) / 8];
999 		else
1000 			index = fls(kmalloc_size - 1) - 1;
1001 		/* Only overwrite if necessary */
1002 		if (size_index[(size - 1) / 8] != index)
1003 			size_index[(size - 1) / 8] = index;
1004 	}
1005 
1006 	return 0;
1007 }
1008 subsys_initcall(bpf_mem_cache_adjust_size);
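
/* A worked example of the adjustment above, assuming an arch where
 * KMALLOC_MIN_SIZE == 64 so that kmalloc_size_roundup(8) == 64:
 *
 *	kmalloc_size = 64 -> index = size_index[(64 - 1) / 8] = 5
 *
 * size_index[(8 - 1) / 8] is rewritten from 3 to 5, so bpf_mem_cache_idx(8)
 * now returns 5 - 1 = 4, i.e. the 64-byte bucket, matching the object_size
 * of the underlying kmalloc-64 slab cache.
 */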
1009