// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/mm.h>
#include <linux/llist.h>
#include <linux/bpf.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <asm/local.h>

/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 * CPU_0 buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 * ...
 * CPU_N buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from the cache of the current cpu with irqs disabled.
 * Freeing is always done into the bucket of the current cpu as well.
 * irq_work trims extra free elements from buckets with kfree
 * and refills them with kmalloc, so global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with extra 8 bytes that contain
 * struct llist_node.
 */
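
/* Illustrative usage sketch (not part of the allocator; error handling and
 * the surrounding map code are omitted, local names are made up):
 *
 *	struct bpf_mem_alloc ma;
 *	void *obj;
 *
 *	// fixed-size mode, e.g. a hash map with 64-byte elements
 *	bpf_mem_alloc_init(&ma, 64, false);	// process context
 *	obj = bpf_mem_cache_alloc(&ma);		// any context, including NMI
 *	bpf_mem_cache_free(&ma, obj);
 *	bpf_mem_alloc_destroy(&ma);
 *
 *	// kmalloc-like mode: size == 0 creates all 11 buckets per cpu
 *	bpf_mem_alloc_init(&ma, 0, false);
 *	obj = bpf_mem_alloc(&ma, 200);		// served from the 256 bucket
 *	bpf_mem_free(&ma, obj);
 *	bpf_mem_alloc_destroy(&ma);
 */
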
#define LLIST_NODE_SZ sizeof(struct llist_node)

#define BPF_MEM_ALLOC_SIZE_MAX 4096

/* similar to kmalloc, but sizeof == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	3,	/* 16 */
	4,	/* 24 */
	4,	/* 32 */
	5,	/* 40 */
	5,	/* 48 */
	5,	/* 56 */
	5,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	6,	/* 104 */
	6,	/* 112 */
	6,	/* 120 */
	6,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static int bpf_mem_cache_idx(size_t size)
{
	if (!size || size > BPF_MEM_ALLOC_SIZE_MAX)
		return -1;

	if (size <= 192)
		return size_index[(size - 1) / 8] - 1;

	return fls(size - 1) - 2;
}
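
/* Worked examples of the lookup above (bucket sizes are listed in sizes[] below):
 *	size 8    -> size_index[0] - 1  = 2  -> 16 byte bucket (no 8 byte bucket)
 *	size 96   -> size_index[11] - 1 = 0  -> 96 byte bucket
 *	size 200  -> fls(199) - 2       = 6  -> 256 byte bucket
 *	size 4096 -> fls(4095) - 2      = 10 -> 4096 byte bucket
 */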

#define NUM_CACHES 11

struct bpf_mem_cache {
	/* per-cpu list of free objects of size 'unit_size'.
	 * All accesses are done with interrupts disabled and 'active' counter
	 * protection with __llist_add() and __llist_del_first().
	 */
	struct llist_head free_llist;
	local_t active;

	/* Operations on the free_llist from unit_alloc/unit_free/bpf_mem_refill
	 * are sequenced by per-cpu 'active' counter. But unit_free() cannot
	 * fail. When 'active' is busy the unit_free() will add an object to
	 * free_llist_extra.
	 */
	struct llist_head free_llist_extra;

	struct irq_work refill_work;
	struct obj_cgroup *objcg;
	int unit_size;
	/* count of objects in free_llist */
	int free_cnt;
	int low_watermark, high_watermark, batch;
	int percpu_size;
	bool draining;
	struct bpf_mem_cache *tgt;

	/* list of objects to be freed after RCU GP */
	struct llist_head free_by_rcu;
	struct llist_node *free_by_rcu_tail;
	struct llist_head waiting_for_gp;
	struct llist_node *waiting_for_gp_tail;
	struct rcu_head rcu;
	atomic_t call_rcu_in_progress;
	struct llist_head free_llist_extra_rcu;

	/* list of objects to be freed after RCU tasks trace GP */
	struct llist_head free_by_rcu_ttrace;
	struct llist_head waiting_for_gp_ttrace;
	struct rcu_head rcu_ttrace;
	atomic_t call_rcu_ttrace_in_progress;
};

struct bpf_mem_caches {
	struct bpf_mem_cache cache[NUM_CACHES];
};

static const u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};

static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *next;

	entry = head->first;
	if (!entry)
		return NULL;
	next = entry->next;
	head->first = next;
	return entry;
}

static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
{
	if (c->percpu_size) {
		void __percpu **obj = kmalloc_node(c->percpu_size, flags, node);
		void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

		if (!obj || !pptr) {
			free_percpu(pptr);
			kfree(obj);
			return NULL;
		}
		obj[1] = pptr;
		return obj;
	}

	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
}
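
/* Resulting object layout (derived from __alloc() and free_one()):
 *
 *	regular: [ struct llist_node | payload ]          one kmalloc of unit_size
 *	percpu:  [ struct llist_node | void __percpu * ]  kmalloc of percpu_size,
 *	         where the second slot points to a separate __alloc_percpu_gfp()
 *	         area of unit_size bytes
 *
 * Callers get a pointer just past the llist_node; unit_free() recovers the
 * llist_node with ptr - LLIST_NODE_SZ.
 */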

static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG
	if (c->objcg)
		return get_mem_cgroup_from_objcg(c->objcg);
	return root_mem_cgroup;
#else
	return NULL;
#endif
}

static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		/* In RT irq_work runs in a per-cpu kthread, so disable
		 * interrupts to avoid being preempted or interrupted and
		 * reduce the chance of a bpf prog executing on this cpu
		 * while the active counter is busy.
		 */
		local_irq_save(*flags);
	/* alloc_bulk runs from irq_work which will not preempt a bpf
	 * program that does unit_alloc/unit_free since IRQs are
	 * disabled there. There is no race to increment 'active'
	 * counter. It protects free_llist from corruption in case NMI
	 * bpf prog preempted this loop.
	 */
	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
}

static void dec_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	local_dec(&c->active);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(*flags);
}

static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
{
	unsigned long flags;

	inc_active(c, &flags);
	__llist_add(obj, &c->free_llist);
	c->free_cnt++;
	dec_active(c, &flags);
}

/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	gfp_t gfp;
	void *obj;
	int i;

	gfp = __GFP_NOWARN | __GFP_ACCOUNT;
	gfp |= atomic ? GFP_NOWAIT : GFP_KERNEL;

	for (i = 0; i < cnt; i++) {
		/*
		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
		 * done only by one CPU == current CPU. Other CPUs might
		 * llist_add() and llist_del_all() in parallel.
		 */
		obj = llist_del_first(&c->free_by_rcu_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	for (; i < cnt; i++) {
		obj = llist_del_first(&c->waiting_for_gp_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (; i < cnt; i++) {
		/* Allocate, but don't deplete atomic reserves that typical
		 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
		 * will allocate from the current numa node which is what we
		 * want here.
		 */
		obj = __alloc(c, node, gfp);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}

static void free_one(void *obj, bool percpu)
{
	if (percpu)
		free_percpu(((void __percpu **)obj)[1]);

	kfree(obj);
}

static int free_all(struct llist_node *llnode, bool percpu)
{
	struct llist_node *pos, *t;
	int cnt = 0;

	llist_for_each_safe(pos, t, llnode) {
		free_one(pos, percpu);
		cnt++;
	}
	return cnt;
}

static void __free_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);

	free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
	atomic_set(&c->call_rcu_ttrace_in_progress, 0);
}

static void __free_rcu_tasks_trace(struct rcu_head *head)
{
	/* If RCU Tasks Trace grace period implies RCU grace period,
	 * there is no need to invoke call_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		__free_rcu(head);
	else
		call_rcu(head, __free_rcu);
}

static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
	struct llist_node *llnode = obj;

	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
	 * Nothing races to add to free_by_rcu_ttrace list.
	 */
	llist_add(llnode, &c->free_by_rcu_ttrace);
}

static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
		if (unlikely(READ_ONCE(c->draining))) {
			llnode = llist_del_all(&c->free_by_rcu_ttrace);
			free_all(llnode, !!c->percpu_size);
		}
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
		llist_add(llnode, &c->waiting_for_gp_ttrace);

	if (unlikely(READ_ONCE(c->draining))) {
		__free_rcu(&c->rcu_ttrace);
		return;
	}

	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * If RCU Tasks Trace grace period implies RCU grace period, free
	 * these elements directly, else use call_rcu() to wait for normal
	 * progs to finish and finally do free_one() on each element.
	 */
	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
}

static void free_bulk(struct bpf_mem_cache *c)
{
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode, *t;
	unsigned long flags;
	int cnt;

	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
	WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);

	do {
		inc_active(c, &flags);
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
		else
			cnt = 0;
		dec_active(c, &flags);
		if (llnode)
			enque_to_free(tgt, llnode);
	} while (cnt > (c->high_watermark + c->low_watermark) / 2);

	/* and drain free_llist_extra */
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		enque_to_free(tgt, llnode);
	do_call_rcu_ttrace(tgt);
}

static void __free_by_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode;

	WARN_ON_ONCE(tgt->unit_size != c->unit_size);
	WARN_ON_ONCE(tgt->percpu_size != c->percpu_size);

	llnode = llist_del_all(&c->waiting_for_gp);
	if (!llnode)
		goto out;

	llist_add_batch(llnode, c->waiting_for_gp_tail, &tgt->free_by_rcu_ttrace);

	/* Objects went through regular RCU GP. Send them to RCU tasks trace */
	do_call_rcu_ttrace(tgt);
out:
	atomic_set(&c->call_rcu_in_progress, 0);
}

static void check_free_by_rcu(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;
	unsigned long flags;

	/* drain free_llist_extra_rcu */
	if (unlikely(!llist_empty(&c->free_llist_extra_rcu))) {
		inc_active(c, &flags);
		llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu))
			if (__llist_add(llnode, &c->free_by_rcu))
				c->free_by_rcu_tail = llnode;
		dec_active(c, &flags);
	}

	if (llist_empty(&c->free_by_rcu))
		return;

	if (atomic_xchg(&c->call_rcu_in_progress, 1)) {
		/*
		 * Instead of kmalloc-ing new rcu_head and triggering 10k
		 * call_rcu() to hit rcutree.qhimark and force RCU to notice
		 * the overload, just ask RCU to hurry up. There could be many
		 * objects in free_by_rcu list.
		 * This hint reduces memory consumption for an artificial
		 * benchmark from 2 Gbyte to 150 Mbyte.
		 */
		rcu_request_urgent_qs_task(current);
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));

	inc_active(c, &flags);
	WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu));
	c->waiting_for_gp_tail = c->free_by_rcu_tail;
	dec_active(c, &flags);

	if (unlikely(READ_ONCE(c->draining))) {
		free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
		atomic_set(&c->call_rcu_in_progress, 0);
	} else {
		call_rcu_hurry(&c->rcu, __free_by_rcu);
	}
}
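
/* Summary of the deferred-free path above (illustrative):
 *
 *	unit_free_rcu()      ->  free_by_rcu (or free_llist_extra_rcu)
 *	check_free_by_rcu()  ->  waiting_for_gp + call_rcu_hurry()
 *	__free_by_rcu()      ->  tgt->free_by_rcu_ttrace + do_call_rcu_ttrace()
 *	__free_rcu()         ->  kfree() after the RCU tasks trace GP
 *
 * So an object freed with bpf_mem_free_rcu() may be reused by alloc_bulk()
 * only after a regular RCU GP and is returned to kmalloc only after an
 * additional RCU tasks trace GP.
 */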

static void bpf_mem_refill(struct irq_work *work)
{
	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
	int cnt;

	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
	cnt = c->free_cnt;
	if (cnt < c->low_watermark)
		/* irq_work runs on this cpu and kmalloc will allocate
		 * from the current numa node which is what we want here.
		 */
		alloc_bulk(c, c->batch, NUMA_NO_NODE, true);
	else if (cnt > c->high_watermark)
		free_bulk(c);

	check_free_by_rcu(c);
}

static void notrace irq_work_raise(struct bpf_mem_cache *c)
{
	irq_work_queue(&c->refill_work);
}

/* For the typical bpf map case that uses bpf_mem_cache_alloc and a single bucket
 * the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes and
 * assuming (low_mark + high_mark) / 2 as an average number of elements per
 * bucket and all buckets are used the total amount of memory in freelists
 * on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~ 116 Kbyte using below heuristic.
 * Initialized, but unused bpf allocator (not bpf map specific one) will
 * consume ~ 11 Kbyte per cpu.
 * Typical case will be between 11K and 116K, closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 *
 * Percpu allocation is typically rare. To avoid potentially unnecessary large
 * memory consumption, set low_mark = 1 and high_mark = 3, resulting in c->batch = 1.
 */
static void init_refill_work(struct bpf_mem_cache *c)
{
	init_irq_work(&c->refill_work, bpf_mem_refill);
	if (c->percpu_size) {
		c->low_watermark = 1;
		c->high_watermark = 3;
	} else if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	} else {
		/* When page_size == 4k, order-0 cache will have low_mark == 2
		 * and high_mark == 6 with batch alloc of 3 individual pages at
		 * a time.
		 * 8k allocs and above low == 1, high == 3, batch == 1.
		 */
		c->low_watermark = max(32 * 256 / c->unit_size, 1);
		c->high_watermark = max(96 * 256 / c->unit_size, 3);
	}
	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
}
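
/* Worked examples of the watermark heuristic above:
 *	unit_size <= 256 (non-percpu) -> low 32, high 96, batch 48
 *	unit_size 512                 -> low 16, high 48, batch 24
 *	unit_size 4096                -> low 2,  high 6,  batch 3
 *	percpu caches                 -> low 1,  high 3,  batch 1
 */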

static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
	int cnt = 1;

	/* To avoid consuming memory, for non-percpu allocation, assume that
	 * 1st run of bpf prog won't be doing more than 4 map_update_elem from
	 * irq disabled region if unit size is less than or equal to 256.
	 * For all other cases, let us just do one allocation.
	 */
	if (!c->percpu_size && c->unit_size <= 256)
		cnt = 4;
	alloc_bulk(c, cnt, cpu_to_node(cpu), false);
}

/* When size != 0 allocate one bpf_mem_cache for each cpu.
 * This is the typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is the bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
	struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
	struct bpf_mem_cache *c; struct bpf_mem_cache __percpu *pc;
	struct obj_cgroup *objcg = NULL;
	int cpu, i, unit_size, percpu_size = 0;

	if (percpu && size == 0)
		return -EINVAL;

	/* room for llist_node and per-cpu pointer */
	if (percpu)
		percpu_size = LLIST_NODE_SZ + sizeof(void *);
	ma->percpu = percpu;

	if (size) {
		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
		if (!pc)
			return -ENOMEM;

		if (!percpu)
			size += LLIST_NODE_SZ; /* room for llist_node */
		unit_size = size;

#ifdef CONFIG_MEMCG
		if (memcg_bpf_enabled())
			objcg = get_obj_cgroup_from_current();
#endif
		ma->objcg = objcg;

		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(pc, cpu);
			c->unit_size = unit_size;
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			c->tgt = c;
			init_refill_work(c);
			prefill_mem_cache(c, cpu);
		}
		ma->cache = pc;
		return 0;
	}

	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;
#ifdef CONFIG_MEMCG
	objcg = get_obj_cgroup_from_current();
#endif
	ma->objcg = objcg;
	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			c = &cc->cache[i];
			c->unit_size = sizes[i];
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			c->tgt = c;

			init_refill_work(c);
			prefill_mem_cache(c, cpu);
		}
	}

	ma->caches = pcc;
	return 0;
}

int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg)
{
	struct bpf_mem_caches __percpu *pcc;

	pcc = __alloc_percpu_gfp(sizeof(struct bpf_mem_caches), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;

	ma->caches = pcc;
	ma->objcg = objcg;
	ma->percpu = true;
	return 0;
}

int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size)
{
	struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
	int cpu, i, unit_size, percpu_size;
	struct obj_cgroup *objcg;
	struct bpf_mem_cache *c;

	i = bpf_mem_cache_idx(size);
	if (i < 0)
		return -EINVAL;

	/* room for llist_node and per-cpu pointer */
	percpu_size = LLIST_NODE_SZ + sizeof(void *);

	unit_size = sizes[i];
	objcg = ma->objcg;
	pcc = ma->caches;

	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		c = &cc->cache[i];
		if (c->unit_size)
			break;

		c->unit_size = unit_size;
		c->objcg = objcg;
		c->percpu_size = percpu_size;
		c->tgt = c;

		init_refill_work(c);
		prefill_mem_cache(c, cpu);
	}

	return 0;
}

static void drain_mem_cache(struct bpf_mem_cache *c)
{
	bool percpu = !!c->percpu_size;

	/* No progs are using this bpf_mem_cache, but htab_map_free() called
	 * bpf_mem_cache_free() for all remaining elements and they can be in
	 * free_by_rcu_ttrace or in waiting_for_gp_ttrace lists, so drain those lists now.
	 *
	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
	 * on these lists, so it is safe to use __llist_del_all().
	 */
	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
	free_all(__llist_del_all(&c->free_llist), percpu);
	free_all(__llist_del_all(&c->free_llist_extra), percpu);
	free_all(__llist_del_all(&c->free_by_rcu), percpu);
	free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu);
	free_all(llist_del_all(&c->waiting_for_gp), percpu);
}

static void check_mem_cache(struct bpf_mem_cache *c)
{
	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu_ttrace));
	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	WARN_ON_ONCE(!llist_empty(&c->free_llist));
	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra));
	WARN_ON_ONCE(!llist_empty(&c->free_by_rcu));
	WARN_ON_ONCE(!llist_empty(&c->free_llist_extra_rcu));
	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
}

static void check_leaked_objs(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i;

	if (ma->cache) {
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			check_mem_cache(c);
		}
	}
	if (ma->caches) {
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				check_mem_cache(c);
			}
		}
	}
}

static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
	check_leaked_objs(ma);
	free_percpu(ma->cache);
	free_percpu(ma->caches);
	ma->cache = NULL;
	ma->caches = NULL;
}

static void free_mem_alloc(struct bpf_mem_alloc *ma)
{
	/* waiting_for_gp[_ttrace] lists were drained, but RCU callbacks
	 * might still execute. Wait for them.
	 *
	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
	 * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
	 * so if call_rcu(head, __free_rcu) is skipped due to
	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
	 * using rcu_trace_implies_rcu_gp() as well.
	 */
	rcu_barrier(); /* wait for __free_by_rcu */
	rcu_barrier_tasks_trace(); /* wait for __free_rcu */
	if (!rcu_trace_implies_rcu_gp())
		rcu_barrier();
	free_mem_alloc_no_barrier(ma);
}

static void free_mem_alloc_deferred(struct work_struct *work)
{
	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);

	free_mem_alloc(ma);
	kfree(ma);
}

static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
{
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path. No callbacks are pending, hence no need to do
		 * rcu_barrier-s.
		 */
		free_mem_alloc_no_barrier(ma);
		return;
	}

	copy = kmemdup(ma, sizeof(*ma), GFP_KERNEL);
	if (!copy) {
		/* Slow path with inline barrier-s */
		free_mem_alloc(ma);
		return;
	}

	/* Defer barriers into worker to let the rest of map memory to be freed */
	memset(ma, 0, sizeof(*ma));
	INIT_WORK(&copy->work, free_mem_alloc_deferred);
	queue_work(system_unbound_wq, &copy->work);
}

void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i, rcu_in_progress;

	if (ma->cache) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			WRITE_ONCE(c->draining, true);
			irq_work_sync(&c->refill_work);
			drain_mem_cache(c);
			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
		}
		obj_cgroup_put(ma->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
	if (ma->caches) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				WRITE_ONCE(c->draining, true);
				irq_work_sync(&c->refill_work);
				drain_mem_cache(c);
				rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
				rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
			}
		}
		obj_cgroup_put(ma->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
}

/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
{
	struct llist_node *llnode = NULL;
	unsigned long flags;
	int cnt = 0;

	/* Disable irqs to prevent the following race for majority of prog types:
	 * prog_A
	 *   bpf_mem_alloc
	 *      preemption or irq -> prog_B
	 *        bpf_mem_alloc
	 *
	 * but prog_B could be a perf_event NMI prog.
	 * Use per-cpu 'active' counter to order free_llist access between
	 * unit_alloc/unit_free/bpf_mem_refill.
	 */
	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		llnode = __llist_del_first(&c->free_llist);
		if (llnode) {
			cnt = --c->free_cnt;
			*(struct bpf_mem_cache **)llnode = c;
		}
	}
	local_dec(&c->active);

	WARN_ON(cnt < 0);

	if (cnt < c->low_watermark)
		irq_work_raise(c);
	/* Enable IRQ after the enqueue of irq work completes, so irq work
	 * will run after IRQ is enabled and free_llist may be refilled by
	 * irq work before other task preempts current task.
	 */
	local_irq_restore(flags);

	return llnode;
}

/* Though the 'ptr' object could have been allocated on a different cpu,
 * add it to the free_llist of the current cpu.
 * Let kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;
	int cnt = 0;

	BUILD_BUG_ON(LLIST_NODE_SZ > 8);

	/*
	 * Remember bpf_mem_cache that allocated this object.
	 * The hint is not accurate.
	 */
	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		__llist_add(llnode, &c->free_llist);
		cnt = ++c->free_cnt;
	} else {
		/* unit_free() cannot fail. Therefore add an object to atomic
		 * llist. free_bulk() will drain it. Though free_llist_extra is
		 * a per-cpu list we have to use atomic llist_add here, since
		 * it also can be interrupted by bpf nmi prog that does another
		 * unit_free() into the same free_llist_extra.
		 */
		llist_add(llnode, &c->free_llist_extra);
	}
	local_dec(&c->active);

	if (cnt > c->high_watermark)
		/* free few objects from current cpu into global kmalloc pool */
		irq_work_raise(c);
	/* Enable IRQ after irq_work_raise() completes, otherwise when current
	 * task is preempted by a task which does unit_alloc(), unit_alloc() may
	 * return NULL unexpectedly because irq work is already pending but
	 * cannot be triggered and free_llist cannot be refilled in time.
	 */
	local_irq_restore(flags);
}

static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;

	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		if (__llist_add(llnode, &c->free_by_rcu))
			c->free_by_rcu_tail = llnode;
	} else {
		llist_add(llnode, &c->free_llist_extra_rcu);
	}
	local_dec(&c->active);

	if (!atomic_read(&c->call_rcu_in_progress))
		irq_work_raise(c);
	local_irq_restore(flags);
}

/* Called from BPF program or from sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
{
	int idx;
	void *ret;

	if (!size)
		return NULL;

	if (!ma->percpu)
		size += LLIST_NODE_SZ;
	idx = bpf_mem_cache_idx(size);
	if (idx < 0)
		return NULL;

	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
	struct bpf_mem_cache *c;
	int idx;

	if (!ptr)
		return;

	c = *(void **)(ptr - LLIST_NODE_SZ);
	idx = bpf_mem_cache_idx(c->unit_size);
	if (WARN_ON_ONCE(idx < 0))
		return;

	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
	struct bpf_mem_cache *c;
	int idx;

	if (!ptr)
		return;

	c = *(void **)(ptr - LLIST_NODE_SZ);
	idx = bpf_mem_cache_idx(c->unit_size);
	if (WARN_ON_ONCE(idx < 0))
		return;

	unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
{
	void *ret;

	ret = unit_alloc(this_cpu_ptr(ma->cache));
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free(this_cpu_ptr(ma->cache), ptr);
}

void notrace bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free_rcu(this_cpu_ptr(ma->cache), ptr);
}

/* Directly does a kfree() without putting 'ptr' back to the free_llist
 * for reuse and without waiting for a rcu_tasks_trace gp.
 * The caller must first go through the rcu_tasks_trace gp for 'ptr'
 * before calling bpf_mem_cache_raw_free().
 * It could be used when the rcu_tasks_trace callback does not have
 * a hold on the original bpf_mem_alloc object that allocated the
 * 'ptr'. This should only be used in the uncommon code path.
 * Otherwise, the bpf_mem_alloc's free_llist cannot be refilled
 * and may affect performance.
 */
void bpf_mem_cache_raw_free(void *ptr)
{
	if (!ptr)
		return;

	kfree(ptr - LLIST_NODE_SZ);
}

/* When flags == GFP_KERNEL, it signals that the caller will not cause
 * deadlock when using kmalloc. bpf_mem_cache_alloc_flags() will use
 * kmalloc if the free_llist is empty.
 */
void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
{
	struct bpf_mem_cache *c;
	void *ret;

	c = this_cpu_ptr(ma->cache);

	ret = unit_alloc(c);
	if (!ret && flags == GFP_KERNEL) {
		struct mem_cgroup *memcg, *old_memcg;

		memcg = get_memcg(c);
		old_memcg = set_active_memcg(memcg);
		ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
		if (ret)
			*(struct bpf_mem_cache **)ret = c;
		set_active_memcg(old_memcg);
		mem_cgroup_put(memcg);
	}

	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

int bpf_mem_alloc_check_size(bool percpu, size_t size)
{
	/* The size of percpu allocation doesn't have LLIST_NODE_SZ overhead */
	if ((percpu && size > BPF_MEM_ALLOC_SIZE_MAX) ||
	    (!percpu && size > BPF_MEM_ALLOC_SIZE_MAX - LLIST_NODE_SZ))
		return -E2BIG;

	return 0;
}
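
/* With BPF_MEM_ALLOC_SIZE_MAX == 4096 this caps percpu allocations at 4096
 * bytes and regular allocations at 4096 - LLIST_NODE_SZ bytes (4088 with
 * 8-byte pointers), matching the largest bucket in sizes[].
 */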