/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

/*
 * An indirect pointer (root->rnode pointing to a radix_tree_node, rather
 * than a data item) is signalled by the low bit being set in the
 * root->rnode pointer.
 *
 * In this case root->height is > 0, but the indirect pointer tests are
 * needed for RCU lookups (because root->height is unreliable). The only
 * time callers need to worry about this is when doing a lookup_slot under
 * RCU.
 *
 * The indirect pointer is in fact also used to tag the last pointer of a
 * node when it is shrunk, before we RCU-free the node. See the shrink
 * code for details.
 */
#define RADIX_TREE_INDIRECT_PTR		1
/*
 * A common use of the radix tree is to store pointers to struct pages;
 * but shmem/tmpfs also needs to store swap entries in the same tree:
 * those are marked as exceptional entries to distinguish them.
 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
 */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2

static inline int radix_tree_is_indirect_ptr(void *ptr)
{
	return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
}

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAX_TAGS 3

/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
struct radix_tree_root {
	unsigned int		height;
	gfp_t			gfp_mask;
	struct radix_tree_node	__rcu *rnode;
};

#define RADIX_TREE_INIT(mask)	{					\
	.height = 0,							\
	.gfp_mask = (mask),						\
	.rnode = NULL,							\
}

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(mask)

#define INIT_RADIX_TREE(root, mask)					\
do {									\
	(root)->height = 0;						\
	(root)->gfp_mask = (mask);					\
	(root)->rnode = NULL;						\
} while (0)
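
/*
 * Example (an illustrative sketch, not part of the original comments):
 * a tree may be defined statically with RADIX_TREE() or initialised at
 * run time with INIT_RADIX_TREE().  The names "my_static_tree" and
 * "my_dynamic_tree" are hypothetical.
 *
 *	RADIX_TREE(my_static_tree, GFP_KERNEL);
 *
 *	struct radix_tree_root my_dynamic_tree;
 *
 *	INIT_RADIX_TREE(&my_dynamic_tree, GFP_ATOMIC);
 */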

/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronisation (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes, must be handled entirely by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_slot
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 7 functions can be called locklessly, using RCU. The caller must
 * ensure calls to these functions are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the radix tree *and* a synchronize_rcu() grace
 * period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged can be called without locking or RCU.
 */
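
/*
 * Illustrative sketch of the rules above (added example, not from the
 * original documentation; "my_tree", "my_lock", "my_item", "item" and
 * "index" are hypothetical, and error handling is omitted).
 * Modifications take a lock to exclude one another; a lock-free lookup
 * relies on RCU and on items being freed only after a grace period:
 *
 *	spin_lock(&my_lock);
 *	radix_tree_insert(&my_tree, index, my_item);
 *	spin_unlock(&my_lock);
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	rcu_read_unlock();
 */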

/**
 * radix_tree_deref_slot	- dereference a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference. Not required if write lock is
 * held (i.e. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 */
static inline void *radix_tree_deref_slot(void **pslot)
{
	return rcu_dereference(*pslot);
}

/**
 * radix_tree_deref_slot_protected	- dereference a slot without RCU lock but with tree lock held
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * Similar to radix_tree_deref_slot but only used during migration when a
 * page's mapping is being moved. The caller does not hold the RCU read lock
 * but it must hold the tree lock to prevent parallel updates.
 */
static inline void *radix_tree_deref_slot_protected(void **pslot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry	- check radix_tree_deref_slot
 * @arg:	pointer returned by radix_tree_deref_slot
 * Returns:	0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
}
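
/*
 * Illustrative sketch of the lookup/retry pattern under RCU (added
 * example; "my_tree" and "index" are hypothetical):
 *
 *	void **slot;
 *	void *item;
 *
 *	rcu_read_lock();
 * repeat:
 *	item = NULL;
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot) {
 *		item = radix_tree_deref_slot(slot);
 *		// raced with a tree shrink/grow: retry the lookup
 *		if (radix_tree_deref_retry(item))
 *			goto repeat;
 *	}
 *	rcu_read_unlock();
 */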

/**
 * radix_tree_exceptional_entry	- radix_tree_deref_slot gave exceptional entry?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if exceptional entry.
 */
static inline int radix_tree_exceptional_entry(void *arg)
{
	/* Not unlikely because radix_tree_exception often tested first */
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

/**
 * radix_tree_exception	- radix_tree_deref_slot returned either exception?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg &
		(RADIX_TREE_INDIRECT_PTR | RADIX_TREE_EXCEPTIONAL_ENTRY));
}
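
/*
 * Illustrative sketch (added example): telling an exceptional entry,
 * e.g. a shmem swap entry, apart from an ordinary pointer.  "item" is a
 * value obtained from radix_tree_deref_slot(); "value" is hypothetical:
 *
 *	if (radix_tree_exception(item)) {
 *		if (radix_tree_deref_retry(item))
 *			goto repeat;
 *		// an exceptional entry: recover the stored value
 *		value = (unsigned long)item >> RADIX_TREE_EXCEPTIONAL_SHIFT;
 *	}
 */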

/**
 * radix_tree_replace_slot	- replace item in a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
static inline void radix_tree_replace_slot(void **pslot, void *item)
{
	BUG_ON(radix_tree_is_indirect_ptr(item));
	rcu_assign_pointer(*pslot, item);
}
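
/*
 * Illustrative sketch (added example; "my_tree", "my_lock", "index" and
 * "new_item" are hypothetical).  The tree is held write locked across
 * the slot lookup and the replacement, as required above:
 *
 *	spin_lock(&my_lock);
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot)
 *		radix_tree_replace_slot(slot, new_item);
 *	spin_unlock(&my_lock);
 */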

int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items);
unsigned long radix_tree_next_hole(struct radix_tree_root *root,
				unsigned long index, unsigned long max_scan);
unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
				unsigned long index, unsigned long max_scan);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
		unsigned long *first_indexp, unsigned long last_index,
		unsigned long nr_to_tag,
		unsigned int fromtag, unsigned int totag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
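
/*
 * Illustrative sketch of tag usage (added example; "MY_TAG_DIRTY",
 * "my_tree", "my_lock" and "index" are hypothetical, tag indices must
 * be below RADIX_TREE_MAX_TAGS, and the usual write-side exclusion
 * applies while tags are modified):
 *
 *	#define MY_TAG_DIRTY	0
 *
 *	spin_lock(&my_lock);
 *	radix_tree_tag_set(&my_tree, index, MY_TAG_DIRTY);
 *	spin_unlock(&my_lock);
 *
 *	if (radix_tree_tagged(&my_tree, MY_TAG_DIRTY)) {
 *		void *results[16];
 *		unsigned int n;
 *
 *		n = radix_tree_gang_lookup_tag(&my_tree, results, 0, 16,
 *					       MY_TAG_DIRTY);
 *		// up to 16 tagged items, starting at index 0
 *	}
 */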

static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}
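
/*
 * Illustrative sketch of the preload pattern (added example; "my_tree",
 * "my_lock", "index" and "my_item" are hypothetical).  On success
 * radix_tree_preload() returns with preemption disabled, so the insert
 * under the spinlock can draw on the preallocated nodes; the pairing
 * radix_tree_preload_end() re-enables preemption:
 *
 *	err = radix_tree_preload(GFP_KERNEL);
 *	if (err)
 *		return err;
 *	spin_lock(&my_lock);
 *	err = radix_tree_insert(&my_tree, index, my_item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */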

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration, radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
};

#define RADIX_TREE_ITER_TAG_MASK	0x00FF	/* tag index in lower byte */
#define RADIX_TREE_ITER_TAGGED		0x0100	/* lookup tagged slots */
#define RADIX_TREE_ITER_CONTIG		0x0200	/* stop at first hole */

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
	 * in the case of a successful tagged chunk lookup.  If the lookup was
	 * unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to the chunk's first slot, or NULL if there are none left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * It also fills @iter with data about the chunk: its position in the tree
 * (index), its end (next_index), and a bit mask for tagged iterating (tags).
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline unsigned
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return iter->next_index - iter->index;
}

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to the next slot, or NULL if there are none left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For a tagged lookup it also consumes bits from @iter->tags.
 */
static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (likely(iter->tags & 1ul)) {
			iter->index++;
			return slot + 1;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG) && likely(iter->tags)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset;
			iter->index += offset + 1;
			return slot + offset + 1;
		}
	} else {
		unsigned size = radix_tree_chunk_size(iter) - 1;

		while (size--) {
			slot++;
			iter->index++;
			if (likely(*slot))
				return slot;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;
}

/**
 * radix_tree_for_each_chunk - iterate over chunks
 *
 * @slot:	the void** variable for pointer to chunk first slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @flags:	RADIX_TREE_ITER_* and tag index
 *
 * Locks can be released and reacquired between iterations.
 */
#define radix_tree_for_each_chunk(slot, root, iter, start, flags)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	      (slot = radix_tree_next_chunk(root, iter, flags)) ;)

/**
 * radix_tree_for_each_chunk_slot - iterate over slots in one chunk
 *
 * @slot:	the void** variable, at the beginning points to chunk first slot
 * @iter:	the struct radix_tree_iter pointer
 * @flags:	RADIX_TREE_ITER_*, should be constant
 *
 * This macro is designed to be nested inside radix_tree_for_each_chunk().
 * @slot points to the radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_chunk_slot(slot, iter, flags)		\
	for (; slot ; slot = radix_tree_next_slot(slot, iter, flags))
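
/*
 * Illustrative sketch of nested chunk iteration (added example;
 * "my_tree" and "visit" are hypothetical, and the deref_retry handling
 * a lock-free user would need is omitted for brevity):
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	radix_tree_for_each_chunk(slot, &my_tree, &iter, 0, 0) {
 *		// a lock may be dropped and retaken between chunks
 *		radix_tree_for_each_chunk_slot(slot, &iter, 0) {
 *			void *item = radix_tree_deref_slot(slot);
 *
 *			if (item)
 *				visit(iter.index, item);
 *		}
 *	}
 */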

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))
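
/*
 * Illustrative sketch (added example; "my_tree" is hypothetical): count
 * every item stored in a tree, under the read-side exclusion or RCU
 * rules described earlier in this file:
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *	unsigned long count = 0;
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
 *		count++;
 */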

/**
 * radix_tree_for_each_contig - iterate over contiguous slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_contig(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_CONTIG)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_CONTIG))

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED))
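
/*
 * Illustrative sketch (added example; "MY_TAG_DIRTY", "my_tree" and
 * "handle_dirty_item" are hypothetical): walk every slot carrying a
 * given tag, under the same exclusion rules as any other read:
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	radix_tree_for_each_tagged(slot, &my_tree, &iter, 0, MY_TAG_DIRTY) {
 *		void *item = radix_tree_deref_slot(slot);
 *
 *		if (item)
 *			handle_dirty_item(iter.index, item);
 *	}
 */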

#endif /* _LINUX_RADIX_TREE_H */