xref: /linux-6.15/include/linux/radix-tree.h (revision d69dece5)
/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 01 - internal entry
 * 10 - exceptional entry
 * 11 - this bit combination is currently unused/reserved
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that you cannot store NULL in the tree as a value for the index.
 */
#define RADIX_TREE_ENTRY_MASK		3UL
#define RADIX_TREE_INTERNAL_NODE	1UL

/*
 * Most users of the radix tree store pointers but shmem/tmpfs stores swap
 * entries in the same tree.  They are marked as exceptional entries to
 * distinguish them from pointers to struct page.
 * EXCEPTIONAL_ENTRY tests the bit, EXCEPTIONAL_SHIFT shifts content past it.
 */
#define RADIX_TREE_EXCEPTIONAL_ENTRY	2
#define RADIX_TREE_EXCEPTIONAL_SHIFT	2
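
/*
 * Illustrative sketch (not part of the original header): packing a small
 * value into an exceptional entry and unpacking it again, in the style of
 * the shmem swap-entry encoding.  make_exceptional()/exceptional_value()
 * are hypothetical helpers, not kernel API.
 *
 *	static inline void *make_exceptional(unsigned long value)
 *	{
 *		return (void *)((value << RADIX_TREE_EXCEPTIONAL_SHIFT) |
 *				RADIX_TREE_EXCEPTIONAL_ENTRY);
 *	}
 *
 *	static inline unsigned long exceptional_value(void *entry)
 *	{
 *		return (unsigned long)entry >> RADIX_TREE_EXCEPTIONAL_SHIFT;
 *	}
 */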

static inline bool radix_tree_is_internal_node(void *ptr)
{
	return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) ==
				RADIX_TREE_INTERNAL_NODE;
}

/*** radix-tree API starts here ***/

#define RADIX_TREE_MAX_TAGS 3

#ifndef RADIX_TREE_MAP_SHIFT
#define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
#endif

#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
#define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)

#define RADIX_TREE_TAG_LONGS	\
	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
#define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
					  RADIX_TREE_MAP_SHIFT))

/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is an exceptional entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @exceptional is the count of every element in ->slots which is
 * either radix_tree_exceptional_entry() or is a sibling entry for an
 * exceptional entry.
 */
struct radix_tree_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	exceptional;	/* Exceptional entry count */
	struct radix_tree_node *parent;		/* Used when ascending tree */
	void *private_data;			/* For tree user */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
};

/* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */
struct radix_tree_root {
	gfp_t			gfp_mask;
	struct radix_tree_node	__rcu *rnode;
};

#define RADIX_TREE_INIT(mask)	{					\
	.gfp_mask = (mask),						\
	.rnode = NULL,							\
}

#define RADIX_TREE(name, mask) \
	struct radix_tree_root name = RADIX_TREE_INIT(mask)

#define INIT_RADIX_TREE(root, mask)					\
do {									\
	(root)->gfp_mask = (mask);					\
	(root)->rnode = NULL;						\
} while (0)

static inline bool radix_tree_empty(struct radix_tree_root *root)
{
	return root->rnode == NULL;
}
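
/*
 * Illustrative sketch (not part of the original header): the two usual ways
 * to set up a tree.  my_static_tree, my_object and my_object_init() are
 * hypothetical names.
 *
 *	compile-time initialization:
 *
 *		static RADIX_TREE(my_static_tree, GFP_KERNEL);
 *
 *	run-time initialization of an embedded root:
 *
 *		struct my_object {
 *			struct radix_tree_root	tree;
 *		};
 *
 *		static void my_object_init(struct my_object *obj)
 *		{
 *			INIT_RADIX_TREE(&obj->tree, GFP_ATOMIC);
 *		}
 */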

/**
 * struct radix_tree_iter - radix tree iterator state
 *
 * @index:	index of current slot
 * @next_index:	one beyond the last index for this chunk
 * @tags:	bit-mask for tag-iterating
 * @node:	node that contains current slot
 * @shift:	shift for the node that holds our slots
 *
 * This radix tree iterator works in terms of "chunks" of slots.  A chunk is a
 * subinterval of slots contained within one radix tree leaf node.  It is
 * described by a pointer to its first slot and a struct radix_tree_iter
 * which holds the chunk's position in the tree and its size.  For tagged
 * iteration radix_tree_iter also holds the slots' bit-mask for one chosen
 * radix tree tag.
 */
struct radix_tree_iter {
	unsigned long	index;
	unsigned long	next_index;
	unsigned long	tags;
	struct radix_tree_node *node;
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	unsigned int	shift;
#endif
};

static inline unsigned int iter_shift(const struct radix_tree_iter *iter)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	return iter->shift;
#else
	return 0;
#endif
}

/**
 * Radix-tree synchronization
 *
 * The radix-tree API requires that users provide all synchronisation (with
 * specific exceptions, noted below).
 *
 * Synchronization of access to the data items being stored in the tree, and
 * management of their lifetimes must be completely managed by API users.
 *
 * For API usage, in general,
 * - any function _modifying_ the tree or tags (inserting or deleting
 *   items, setting or clearing tags) must exclude other modifications, and
 *   exclude any functions reading the tree.
 * - any function _reading_ the tree or tags (looking up items or tags,
 *   gang lookups) must exclude modifications to the tree, but may occur
 *   concurrently with other readers.
 *
 * The notable exceptions to this rule are the following functions:
 * __radix_tree_lookup
 * radix_tree_lookup
 * radix_tree_lookup_slot
 * radix_tree_tag_get
 * radix_tree_gang_lookup
 * radix_tree_gang_lookup_slot
 * radix_tree_gang_lookup_tag
 * radix_tree_gang_lookup_tag_slot
 * radix_tree_tagged
 *
 * The first 8 functions are able to be called locklessly, using RCU. The
 * caller must ensure calls to these functions are made within rcu_read_lock()
 * regions. Other readers (lock-free or otherwise) and modifications may be
 * running concurrently.
 *
 * It is still required that the caller manage the synchronization and lifetimes
 * of the items. So if RCU lock-free lookups are used, typically this would mean
 * that the items have their own locks, or are amenable to lock-free access; and
 * that the items are freed by RCU (or only freed after having been deleted from
 * the radix tree *and* a synchronize_rcu() grace period).
 *
 * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
 * access to data items when inserting into or looking up from the radix tree)
 *
 * Note that the value returned by radix_tree_tag_get() may not be relied upon
 * if only the RCU read lock is held.  Functions to set/clear tags and to
 * delete nodes running concurrently with it may affect its result such that
 * two consecutive reads in the same locked section may return different
 * values.  If reliability is required, modification functions must also be
 * excluded from concurrency.
 *
 * radix_tree_tagged is able to be called without locking or RCU.
 */
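
/*
 * Illustrative sketch (not part of the original header) of the rules above:
 * updates are serialized by a caller-provided lock while lookups may run
 * under rcu_read_lock() alone, provided the items themselves are RCU-freed
 * or reference counted.  my_tree, my_lock, index and item are hypothetical.
 *
 *	void *item;
 *	int err;
 *
 *	spin_lock(&my_lock);
 *	err = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	rcu_read_unlock();
 */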

/**
 * radix_tree_deref_slot	- dereference a slot
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
 * locked across slot lookup and dereference. Not required if write lock is
 * held (ie. items cannot be concurrently inserted).
 *
 * radix_tree_deref_retry must be used to confirm validity of the pointer if
 * only the read lock is held.
 */
static inline void *radix_tree_deref_slot(void **pslot)
{
	return rcu_dereference(*pslot);
}

/**
 * radix_tree_deref_slot_protected	- dereference a slot without RCU lock but with tree lock held
 * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
 * Returns:	item that was stored in that slot with any direct pointer flag
 *		removed.
 *
 * Similar to radix_tree_deref_slot but only used during migration when a page's
 * mapping is being moved. The caller does not hold the RCU read lock but it
 * must hold the tree lock to prevent parallel updates.
 */
static inline void *radix_tree_deref_slot_protected(void **pslot,
							spinlock_t *treelock)
{
	return rcu_dereference_protected(*pslot, lockdep_is_held(treelock));
}

/**
 * radix_tree_deref_retry	- check radix_tree_deref_slot
 * @arg:	pointer returned by radix_tree_deref_slot
 * Returns:	0 if retry is not required, otherwise retry is required
 *
 * radix_tree_deref_retry must be used with radix_tree_deref_slot.
 */
static inline int radix_tree_deref_retry(void *arg)
{
	return unlikely(radix_tree_is_internal_node(arg));
}
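
/*
 * Illustrative sketch (not part of the original header): dereferencing a
 * slot under RCU and restarting if the entry has been moved.  my_tree and
 * index are hypothetical.
 *
 *	void **slot;
 *	void *item;
 *
 *	rcu_read_lock();
 * repeat:
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	item = slot ? radix_tree_deref_slot(slot) : NULL;
 *	if (radix_tree_deref_retry(item))
 *		goto repeat;
 *	rcu_read_unlock();
 */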

/**
 * radix_tree_exceptional_entry	- radix_tree_deref_slot gave exceptional entry?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if exceptional entry.
 */
static inline int radix_tree_exceptional_entry(void *arg)
{
	/* Not unlikely because radix_tree_exception often tested first */
	return (unsigned long)arg & RADIX_TREE_EXCEPTIONAL_ENTRY;
}

/**
 * radix_tree_exception	- radix_tree_deref_slot returned either exception?
 * @arg:	value returned by radix_tree_deref_slot
 * Returns:	0 if well-aligned pointer, non-0 if either kind of exception.
 */
static inline int radix_tree_exception(void *arg)
{
	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
}

int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp);
int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
			unsigned order, void *);
static inline int radix_tree_insert(struct radix_tree_root *root,
			unsigned long index, void *entry)
{
	return __radix_tree_insert(root, index, 0, entry);
}
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp);
void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
typedef void (*radix_tree_update_node_t)(struct radix_tree_node *, void *);
void __radix_tree_replace(struct radix_tree_root *root,
			  struct radix_tree_node *node,
			  void **slot, void *item,
			  radix_tree_update_node_t update_node, void *private);
void radix_tree_iter_replace(struct radix_tree_root *,
		const struct radix_tree_iter *, void **slot, void *item);
void radix_tree_replace_slot(struct radix_tree_root *root,
			     void **slot, void *item);
void __radix_tree_delete_node(struct radix_tree_root *root,
			      struct radix_tree_node *node,
			      radix_tree_update_node_t update_node,
			      void *private);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
void radix_tree_clear_tags(struct radix_tree_root *root,
			   struct radix_tree_node *node,
			   void **slot);
unsigned int radix_tree_gang_lookup(struct radix_tree_root *root,
			void **results, unsigned long first_index,
			unsigned int max_items);
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items);
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
void radix_tree_init(void);
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag);
void radix_tree_iter_tag_set(struct radix_tree_root *root,
		const struct radix_tree_iter *iter, unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);

static inline void radix_tree_preload_end(void)
{
	preempt_enable();
}
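
/*
 * Illustrative sketch (not part of the original header): the usual preload
 * pattern for inserting under a spinlock.  Preloading with a sleeping
 * allocation first lets the insertion itself run in atomic context;
 * radix_tree_preload_end() just re-enables preemption.  my_tree, my_lock,
 * index and item are hypothetical.
 *
 *	int error;
 *
 *	error = radix_tree_preload(GFP_KERNEL);
 *	if (error)
 *		return error;
 *
 *	spin_lock(&my_lock);
 *	error = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *
 *	radix_tree_preload_end();
 */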

int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
int radix_tree_split(struct radix_tree_root *, unsigned long index,
			unsigned new_order);
int radix_tree_join(struct radix_tree_root *, unsigned long index,
			unsigned new_order, void *);

#define RADIX_TREE_ITER_TAG_MASK	0x00FF	/* tag index in lower byte */
#define RADIX_TREE_ITER_TAGGED		0x0100	/* lookup tagged slots */
#define RADIX_TREE_ITER_CONTIG		0x0200	/* stop at first hole */

/**
 * radix_tree_iter_init - initialize radix tree iterator
 *
 * @iter:	pointer to iterator state
 * @start:	iteration starting index
 * Returns:	NULL
 */
static __always_inline void **
radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
{
	/*
	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
	 * in the case of a successful tagged chunk lookup.  If the lookup was
	 * unsuccessful or non-tagged then nobody cares about ->tags.
	 *
	 * Set index to zero to bypass next_index overflow protection.
	 * See the comment in radix_tree_next_chunk() for details.
	 */
	iter->index = 0;
	iter->next_index = start;
	return NULL;
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if there are no more left
 *
 * This function looks up the next chunk in the radix tree starting from
 * @iter->next_index.  It returns a pointer to the chunk's first slot.
 * Also it fills @iter with data about chunk: position in the tree (index),
 * its end (next_index), and constructs a bit mask for tagged iterating (tags).
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags);

/**
 * radix_tree_iter_retry - retry this chunk of the iteration
 * @iter:	iterator state
 *
 * If we iterate over a tree protected only by the RCU lock, a race
 * against deletion or creation may result in seeing a slot for which
 * radix_tree_deref_retry() returns true.  If so, call this function
 * and continue the iteration.
 */
static inline __must_check
void **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}
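
/*
 * Illustrative sketch (not part of the original header): handling a moved
 * entry during an RCU-only walk with radix_tree_for_each_slot() (defined
 * below).  my_tree is hypothetical.
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		void *item = radix_tree_deref_slot(slot);
 *
 *		if (radix_tree_deref_retry(item)) {
 *			slot = radix_tree_iter_retry(&iter);
 *			continue;
 *		}
 *		pr_debug("entry at index %lu\n", iter.index);
 *	}
 *	rcu_read_unlock();
 */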

static inline unsigned long
__radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots)
{
	return iter->index + (slots << iter_shift(iter));
}

/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot: pointer to current slot
 * @iter: iterator state
 * Returns: New slot pointer
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
 */
void **__must_check radix_tree_iter_resume(void **slot,
					struct radix_tree_iter *iter);
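
/*
 * Illustrative sketch (not part of the original header): dropping the tree
 * lock in the middle of a locked walk.  my_tree and my_lock are
 * hypothetical.
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	spin_lock(&my_lock);
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		pr_debug("slot %p at index %lu\n", *slot, iter.index);
 *		if (need_resched()) {
 *			slot = radix_tree_iter_resume(slot, &iter);
 *			spin_unlock(&my_lock);
 *			cond_resched();
 *			spin_lock(&my_lock);
 *		}
 *	}
 *	spin_unlock(&my_lock);
 */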

/**
 * radix_tree_chunk_size - get current chunk size
 *
 * @iter:	pointer to radix tree iterator
 * Returns:	current chunk size
 */
static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
	return (iter->next_index - iter->index) >> iter_shift(iter);
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
				unsigned flags);
#else
/* Can't happen without sibling entries, but the compiler can't tell that */
static inline void ** __radix_tree_next_slot(void **slot,
				struct radix_tree_iter *iter, unsigned flags)
{
	return slot;
}
#endif

/**
 * radix_tree_next_slot - find next slot in chunk
 *
 * @slot:	pointer to current slot
 * @iter:	pointer to iterator state
 * @flags:	RADIX_TREE_ITER_*, should be constant
 * Returns:	pointer to next slot, or NULL if there are no more left
 *
 * This function updates @iter->index in the case of a successful lookup.
 * For tagged lookup it also eats @iter->tags.
 *
 * There are several cases where 'slot' can be passed in as NULL to this
 * function.  These cases result from the use of radix_tree_iter_resume() or
 * radix_tree_iter_retry().  In these cases we don't end up dereferencing
 * 'slot' because either:
 * a) we are doing tagged iteration and iter->tags has been set to 0, or
 * b) we are doing non-tagged iteration, and iter->index and iter->next_index
 *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
 */
static __always_inline void **
radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
{
	if (flags & RADIX_TREE_ITER_TAGGED) {
		iter->tags >>= 1;
		if (unlikely(!iter->tags))
			return NULL;
		if (likely(iter->tags & 1ul)) {
			iter->index = __radix_tree_iter_add(iter, 1);
			slot++;
			goto found;
		}
		if (!(flags & RADIX_TREE_ITER_CONTIG)) {
			unsigned offset = __ffs(iter->tags);

			iter->tags >>= offset++;
			iter->index = __radix_tree_iter_add(iter, offset);
			slot += offset;
			goto found;
		}
	} else {
		long count = radix_tree_chunk_size(iter);

		while (--count > 0) {
			slot++;
			iter->index = __radix_tree_iter_add(iter, 1);

			if (likely(*slot))
				goto found;
			if (flags & RADIX_TREE_ITER_CONTIG) {
				/* forbid switching to the next chunk */
				iter->next_index = 0;
				break;
			}
		}
	}
	return NULL;

 found:
	if (unlikely(radix_tree_is_internal_node(*slot)))
		return __radix_tree_next_slot(slot, iter, flags);
	return slot;
}

/**
 * radix_tree_for_each_slot - iterate over non-empty slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_slot(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter, 0)) ;	\
	     slot = radix_tree_next_slot(slot, iter, 0))

/**
 * radix_tree_for_each_contig - iterate over contiguous slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_contig(slot, root, iter, start)		\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
				RADIX_TREE_ITER_CONTIG)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_CONTIG))

/**
 * radix_tree_for_each_tagged - iterate over tagged slots
 *
 * @slot:	the void** variable for pointer to slot
 * @root:	the struct radix_tree_root pointer
 * @iter:	the struct radix_tree_iter pointer
 * @start:	iteration starting index
 * @tag:	tag index
 *
 * @slot points to radix tree slot, @iter->index contains its index.
 */
#define radix_tree_for_each_tagged(slot, root, iter, start, tag)	\
	for (slot = radix_tree_iter_init(iter, start) ;			\
	     slot || (slot = radix_tree_next_chunk(root, iter,		\
			      RADIX_TREE_ITER_TAGGED | tag)) ;		\
	     slot = radix_tree_next_slot(slot, iter,			\
				RADIX_TREE_ITER_TAGGED | tag))
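
/*
 * Illustrative sketch (not part of the original header): marking entries
 * with a tag and walking only the tagged ones.  MY_DIRTY_TAG (a tag index
 * below RADIX_TREE_MAX_TAGS), my_tree, my_lock and index are hypothetical.
 *
 *	struct radix_tree_iter iter;
 *	void **slot;
 *
 *	spin_lock(&my_lock);
 *	radix_tree_tag_set(&my_tree, index, MY_DIRTY_TAG);
 *	spin_unlock(&my_lock);
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_tagged(slot, &my_tree, &iter, 0, MY_DIRTY_TAG) {
 *		pr_debug("tagged entry at index %lu\n", iter.index);
 *	}
 *	rcu_read_unlock();
 */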

#endif /* _LINUX_RADIX_TREE_H */