// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <linux/interval_tree_generic.h>
#include <linux/slab.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/bpf.h>
#include "range_tree.h"

/*
 * struct range_tree is a data structure used to allocate contiguous memory
 * ranges in bpf arena. Conceptually it is a large bitmap; a contiguous
 * sequence of set bits is represented by struct range_node, or 'rn' for
 * short.
 * rn->rn_rbnode links it into an interval tree while
 * rn->rb_range_size links it into a second rbtree sorted by size of the range.
 * __find_range() performs a binary search over the size-sorted tree to find
 * the best fit: the smallest range whose size is greater than or equal to
 * the requested size.
 * range_tree_clear/set() clears or sets a range of bits in this bitmap;
 * adjacent ranges are merged or split at the same time.
 *
 * The split/merge logic is based/borrowed from XFS's xbitmap32 added
 * in commit 6772fcc8890a ("xfs: convert xbitmap to interval tree").
 *
 * The implementation relies on an external lock to protect the rbtrees.
 * range_node-s are allocated and freed via bpf_mem_alloc.
 *
 * bpf arena uses range_tree to represent unallocated slots.
 * At init time:
 *	range_tree_set(rt, 0, max);
 * Then:
 *	start = range_tree_find(rt, len);
 *	if (start >= 0)
 *		range_tree_clear(rt, start, len);
 * to find a free range and mark the slots as allocated, and later:
 *	range_tree_set(rt, start, len);
 * to mark them as unallocated after use.
 */
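
/*
 * Usage sketch (illustrative only, not part of the kernel build): a caller
 * that holds the external lock and manages a made-up arena of 1024 slots
 * might drive the API as follows (error handling of range_tree_set() elided):
 *
 *	struct range_tree rt;
 *	s64 start;
 *
 *	range_tree_init(&rt);
 *	range_tree_set(&rt, 0, 1024);		// all slots start out free
 *	start = range_tree_find(&rt, 16);	// best-fit 16 contiguous slots
 *	if (start >= 0)
 *		range_tree_clear(&rt, start, 16);	// mark them allocated
 *	...
 *	range_tree_set(&rt, start, 16);		// free them after use
 *	range_tree_destroy(&rt);
 */
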
struct range_node {
	struct rb_node rn_rbnode;
	struct rb_node rb_range_size;
	u32 rn_start;
	u32 rn_last; /* inclusive */
	u32 __rn_subtree_last; /* interval tree bookkeeping */
};

static struct range_node *rb_to_range_node(struct rb_node *rb)
{
	return rb_entry(rb, struct range_node, rb_range_size);
}

static u32 rn_size(struct range_node *rn)
{
	return rn->rn_last - rn->rn_start + 1;
}

/* Find the range that best fits the requested size */
static inline struct range_node *__find_range(struct range_tree *rt, u32 len)
{
	struct rb_node *rb = rt->range_size_root.rb_root.rb_node;
	struct range_node *best = NULL;

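	/*
	 * The size tree is sorted in descending order (see
	 * __range_size_insert()), so when a node fits we remember it and
	 * descend right, toward smaller nodes that may still fit: best fit.
	 */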
	while (rb) {
		struct range_node *rn = rb_to_range_node(rb);

		if (len <= rn_size(rn)) {
			best = rn;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return best;
}

s64 range_tree_find(struct range_tree *rt, u32 len)
{
	struct range_node *rn;

	rn = __find_range(rt, len);
	if (!rn)
		return -ENOENT;
	return rn->rn_start;
}

/* Insert the range into the rbtree sorted by range size */
static inline void __range_size_insert(struct range_node *rn,
				       struct rb_root_cached *root)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 size = rn_size(rn);
	bool leftmost = true;

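	/*
	 * Keep the tree sorted by size in descending order: bigger ranges go
	 * left, so the cached leftmost node is always the largest range.
	 */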
	while (*link) {
		rb = *link;
		if (size > rn_size(rb_to_range_node(rb))) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&rn->rb_range_size, rb, link);
	rb_insert_color_cached(&rn->rb_range_size, root, leftmost);
}

#define START(node) ((node)->rn_start)
#define LAST(node) ((node)->rn_last)

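/*
 * INTERVAL_TREE_DEFINE() generates the __range_it_insert(),
 * __range_it_remove(), __range_it_iter_first() and __range_it_iter_next()
 * helpers that operate on the inclusive [rn_start, rn_last] intervals
 * keyed by rn_rbnode.
 */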
INTERVAL_TREE_DEFINE(struct range_node, rn_rbnode, u32,
		     __rn_subtree_last, START, LAST,
		     static inline __maybe_unused,
		     __range_it)

static inline __maybe_unused void
range_it_insert(struct range_node *rn, struct range_tree *rt)
{
	__range_size_insert(rn, &rt->range_size_root);
	__range_it_insert(rn, &rt->it_root);
}

static inline __maybe_unused void
range_it_remove(struct range_node *rn, struct range_tree *rt)
{
	rb_erase_cached(&rn->rb_range_size, &rt->range_size_root);
	RB_CLEAR_NODE(&rn->rb_range_size);
	__range_it_remove(rn, &rt->it_root);
}

static inline __maybe_unused struct range_node *
range_it_iter_first(struct range_tree *rt, u32 start, u32 last)
{
	return __range_it_iter_first(&rt->it_root, start, last);
}

/* Clear the range in this range tree */
int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
{
	u32 last = start + len - 1;
	struct range_node *new_rn;
	struct range_node *rn;

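	/*
	 * Each existing range can overlap [start, last] in one of four ways:
	 * it covers the whole clearing range (split it in two), it overhangs
	 * only the left or only the right edge (trim it), or it lies entirely
	 * inside (remove and free it).
	 */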
	while ((rn = range_it_iter_first(rt, start, last))) {
		if (rn->rn_start < start && rn->rn_last > last) {
			u32 old_last = rn->rn_last;

			/* Covers the entire clearing range: split in two */
			range_it_remove(rn, rt);
			rn->rn_last = start - 1;
			range_it_insert(rn, rt);

			/* Add a range for the right-hand remainder */
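			/* bpf_mem_alloc() draws from per-CPU caches; keep migration disabled */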
			migrate_disable();
			new_rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
			migrate_enable();
			if (!new_rn)
				return -ENOMEM;
			new_rn->rn_start = last + 1;
			new_rn->rn_last = old_last;
			range_it_insert(new_rn, rt);
		} else if (rn->rn_start < start) {
			/* Overhangs the left edge of the clearing range: trim */
			range_it_remove(rn, rt);
			rn->rn_last = start - 1;
			range_it_insert(rn, rt);
		} else if (rn->rn_last > last) {
			/* Overhangs the right edge: trim; no range further right can overlap */
			range_it_remove(rn, rt);
			rn->rn_start = last + 1;
			range_it_insert(rn, rt);
			break;
		} else {
			/* Entirely inside the clearing range: remove and free */
			range_it_remove(rn, rt);
			migrate_disable();
			bpf_mem_free(&bpf_global_ma, rn);
			migrate_enable();
		}
	}
	return 0;
}
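
/*
 * Example (illustrative): if a single range [0, 10] is set, then
 * range_tree_clear(rt, 5, 2) trims that node to [0, 4] and inserts a new
 * node [7, 10], leaving bits 5-6 clear.
 */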

/* Is the whole range set ? */
int is_range_tree_set(struct range_tree *rt, u32 start, u32 len)
{
	u32 last = start + len - 1;
	struct range_node *left;

	/*
	 * Set ranges are kept disjoint and coalesced, so a fully set
	 * [start, last] must lie within a single node.
	 */
	left = range_it_iter_first(rt, start, last);
	if (left && left->rn_start <= start && left->rn_last >= last)
		return 0;
	return -ESRCH;
}

/* Set the range in this range tree */
int range_tree_set(struct range_tree *rt, u32 start, u32 len)
{
	u32 last = start + len - 1;
	struct range_node *right;
	struct range_node *left;
	int err;

	/* Is this whole range already set ? */
	left = range_it_iter_first(rt, start, last);
	if (left && left->rn_start <= start && left->rn_last >= last)
		return 0;

	/* Clear out everything in the range we want to set. */
	err = range_tree_clear(rt, start, len);
	if (err)
		return err;

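	/*
	 * After the clear above, a node containing start - 1 must end exactly
	 * at start - 1, and a node containing last + 1 must begin exactly at
	 * last + 1; anything else indicates a corrupted tree, hence the
	 * -EFAULT checks below.
	 */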
	/* Do we have a left-adjacent range ? */
	left = range_it_iter_first(rt, start - 1, start - 1);
	if (left && left->rn_last + 1 != start)
		return -EFAULT;

	/* Do we have a right-adjacent range ? */
	right = range_it_iter_first(rt, last + 1, last + 1);
	if (right && right->rn_start != last + 1)
		return -EFAULT;

	if (left && right) {
		/* Combine left and right adjacent ranges */
		range_it_remove(left, rt);
		range_it_remove(right, rt);
		left->rn_last = right->rn_last;
		range_it_insert(left, rt);
		migrate_disable();
		bpf_mem_free(&bpf_global_ma, right);
		migrate_enable();
	} else if (left) {
		/* Combine with the left range */
		range_it_remove(left, rt);
		left->rn_last = last;
		range_it_insert(left, rt);
	} else if (right) {
		/* Combine with the right range */
		range_it_remove(right, rt);
		right->rn_start = start;
		range_it_insert(right, rt);
	} else {
		/* No adjacent ranges: insert a new node */
		migrate_disable();
		left = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
		migrate_enable();
		if (!left)
			return -ENOMEM;
		left->rn_start = start;
		left->rn_last = last;
		range_it_insert(left, rt);
	}
	return 0;
}
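
/*
 * Example (illustrative): with [0, 4] and [7, 10] set, range_tree_set(rt, 5, 2)
 * sees both as adjacent neighbours and merges everything into one [0, 10] node.
 */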

void range_tree_destroy(struct range_tree *rt)
{
	struct range_node *rn;

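	/* [0, -1U] spans the entire u32 key space, so this visits every node */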
	while ((rn = range_it_iter_first(rt, 0, -1U))) {
		range_it_remove(rn, rt);
		bpf_mem_free(&bpf_global_ma, rn);
	}
}

void range_tree_init(struct range_tree *rt)
{
	rt->it_root = RB_ROOT_CACHED;
	rt->range_size_root = RB_ROOT_CACHED;
}