1 /* SPDX-License-Identifier: GPL-2.0+ */
2 #ifndef _LINUX_XARRAY_H
3 #define _LINUX_XARRAY_H
4 /*
5  * eXtensible Arrays
6  * Copyright (c) 2017 Microsoft Corporation
7  * Author: Matthew Wilcox <[email protected]>
8  *
9  * See Documentation/core-api/xarray.rst for how to use the XArray.
10  */
11 
12 #include <linux/bug.h>
13 #include <linux/compiler.h>
14 #include <linux/gfp.h>
15 #include <linux/kconfig.h>
16 #include <linux/kernel.h>
17 #include <linux/rcupdate.h>
18 #include <linux/spinlock.h>
19 #include <linux/types.h>
20 
21 /*
22  * The bottom two bits of the entry determine how the XArray interprets
23  * the contents:
24  *
25  * 00: Pointer entry
26  * 10: Internal entry
27  * x1: Value entry or tagged pointer
28  *
29  * Attempting to store internal entries in the XArray is a bug.
30  *
31  * Most internal entries are pointers to the next node in the tree.
32  * The following internal entries have a special meaning:
33  *
34  * 0-62: Sibling entries
35  * 256: Retry entry
36  * 257: Zero entry
37  *
38  * Errors are also represented as internal entries, but use the negative
39  * space (-4094 to -2).  They're never stored in the slots array; only
40  * returned by the normal API.
41  */
42 
43 #define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)
44 
45 /**
46  * xa_mk_value() - Create an XArray entry from an integer.
47  * @v: Value to store in XArray.
48  *
49  * Context: Any context.
50  * Return: An entry suitable for storing in the XArray.
51  */
52 static inline void *xa_mk_value(unsigned long v)
53 {
54 	WARN_ON((long)v < 0);
55 	return (void *)((v << 1) | 1);
56 }
57 
58 /**
59  * xa_to_value() - Get value stored in an XArray entry.
60  * @entry: XArray entry.
61  *
62  * Context: Any context.
63  * Return: The value stored in the XArray entry.
64  */
65 static inline unsigned long xa_to_value(const void *entry)
66 {
67 	return (unsigned long)entry >> 1;
68 }
69 
70 /**
71  * xa_is_value() - Determine if an entry is a value.
72  * @entry: XArray entry.
73  *
74  * Context: Any context.
75  * Return: True if the entry is a value, false if it is a pointer.
76  */
77 static inline bool xa_is_value(const void *entry)
78 {
79 	return (unsigned long)entry & 1;
80 }
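
/*
 * Usage sketch (illustrative, not part of the original header): encoding an
 * integer as a value entry and reading it back.  The constant is arbitrary.
 *
 *	void *entry = xa_mk_value(42);
 *
 *	if (xa_is_value(entry))
 *		pr_debug("stored value %lu\n", xa_to_value(entry));
 */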
81 
82 /**
83  * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
84  * @p: Plain pointer.
85  * @tag: Tag value (0, 1 or 3).
86  *
87  * If the user of the XArray prefers, they can tag their pointers instead
88  * of storing value entries.  Three tags are available (0, 1 and 3).
89  * These are distinct from the xa_mark_t as they are not replicated up
90  * through the array and cannot be searched for.
91  *
92  * Context: Any context.
93  * Return: An XArray entry.
94  */
95 static inline void *xa_tag_pointer(void *p, unsigned long tag)
96 {
97 	return (void *)((unsigned long)p | tag);
98 }
99 
100 /**
101  * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
102  * @entry: XArray entry.
103  *
104  * If you have stored a tagged pointer in the XArray, call this function
105  * to get the untagged version of the pointer.
106  *
107  * Context: Any context.
108  * Return: A pointer.
109  */
110 static inline void *xa_untag_pointer(void *entry)
111 {
112 	return (void *)((unsigned long)entry & ~3UL);
113 }
114 
115 /**
116  * xa_pointer_tag() - Get the tag stored in an XArray entry.
117  * @entry: XArray entry.
118  *
119  * If you have stored a tagged pointer in the XArray, call this function
120  * to get the tag of that pointer.
121  *
122  * Context: Any context.
123  * Return: A tag.
124  */
125 static inline unsigned int xa_pointer_tag(void *entry)
126 {
127 	return (unsigned long)entry & 3UL;
128 }
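
/*
 * Usage sketch (illustrative, not part of the original header): tagging a
 * pointer before storing it and recovering both pieces afterwards.  'p' is
 * a hypothetical pointer owned by the caller.
 *
 *	void *entry = xa_tag_pointer(p, 1);
 *
 *	if (xa_pointer_tag(entry) == 1)
 *		p = xa_untag_pointer(entry);
 */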
129 
130 /*
131  * xa_mk_internal() - Create an internal entry.
132  * @v: Value to turn into an internal entry.
133  *
134  * Internal entries are used for a number of purposes.  Entries 0-255 are
135  * used for sibling entries (only 0-62 are used by the current code).  256
136  * is used for the retry entry.  257 is used for the reserved / zero entry.
137  * Negative internal entries are used to represent errnos.  Node pointers
138  * are also tagged as internal entries in some situations.
139  *
140  * Context: Any context.
141  * Return: An XArray internal entry corresponding to this value.
142  */
143 static inline void *xa_mk_internal(unsigned long v)
144 {
145 	return (void *)((v << 2) | 2);
146 }
147 
148 /*
149  * xa_to_internal() - Extract the value from an internal entry.
150  * @entry: XArray entry.
151  *
152  * Context: Any context.
153  * Return: The value which was stored in the internal entry.
154  */
155 static inline unsigned long xa_to_internal(const void *entry)
156 {
157 	return (unsigned long)entry >> 2;
158 }
159 
160 /*
161  * xa_is_internal() - Is the entry an internal entry?
162  * @entry: XArray entry.
163  *
164  * Context: Any context.
165  * Return: %true if the entry is an internal entry.
166  */
167 static inline bool xa_is_internal(const void *entry)
168 {
169 	return ((unsigned long)entry & 3) == 2;
170 }
171 
172 #define XA_ZERO_ENTRY		xa_mk_internal(257)
173 
174 /**
175  * xa_is_zero() - Is the entry a zero entry?
176  * @entry: Entry retrieved from the XArray
177  *
178  * The normal API will return NULL as the contents of a slot containing
179  * a zero entry.  You can only see zero entries by using the advanced API.
180  *
181  * Return: %true if the entry is a zero entry.
182  */
183 static inline bool xa_is_zero(const void *entry)
184 {
185 	return unlikely(entry == XA_ZERO_ENTRY);
186 }
187 
188 /**
189  * xa_is_err() - Report whether an XArray operation returned an error
190  * @entry: Result from calling an XArray function
191  *
192  * If an XArray operation cannot complete an operation, it will return
193  * a special value indicating an error.  This function tells you
194  * whether an error occurred; xa_err() tells you which error occurred.
195  *
196  * Context: Any context.
197  * Return: %true if the entry indicates an error.
198  */
199 static inline bool xa_is_err(const void *entry)
200 {
201 	return unlikely(xa_is_internal(entry) &&
202 			entry >= xa_mk_internal(-MAX_ERRNO));
203 }
204 
205 /**
206  * xa_err() - Turn an XArray result into an errno.
207  * @entry: Result from calling an XArray function.
208  *
209  * If an XArray operation cannot complete an operation, it will return
210  * a special pointer value which encodes an errno.  This function extracts
211  * the errno from the pointer value, or returns 0 if the pointer does not
212  * represent an errno.
213  *
214  * Context: Any context.
215  * Return: A negative errno or 0.
216  */
217 static inline int xa_err(void *entry)
218 {
219 	/* xa_to_internal() would not do sign extension. */
220 	if (xa_is_err(entry))
221 		return (long)entry >> 2;
222 	return 0;
223 }
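
/*
 * Usage sketch (illustrative, not part of the original header): checking the
 * result of a store.  'foo_array', 'index' and 'p' are hypothetical.
 *
 *	void *old = xa_store(&foo_array, index, p, GFP_KERNEL);
 *
 *	if (xa_is_err(old))
 *		return xa_err(old);
 */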
224 
225 /**
226  * struct xa_limit - Represents a range of IDs.
227  * @min: The lowest ID to allocate (inclusive).
228  * @max: The maximum ID to allocate (inclusive).
229  *
230  * This structure is used either directly or via the XA_LIMIT() macro
231  * to communicate the range of IDs that are valid for allocation.
232  * Two common ranges are predefined for you:
233  * * xa_limit_32b	- [0 - UINT_MAX]
234  * * xa_limit_31b	- [0 - INT_MAX]
235  */
236 struct xa_limit {
237 	u32 max;
238 	u32 min;
239 };
240 
241 #define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max }
242 
243 #define xa_limit_32b	XA_LIMIT(0, UINT_MAX)
244 #define xa_limit_31b	XA_LIMIT(0, INT_MAX)
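
/*
 * Usage sketch (illustrative, not part of the original header): a caller-
 * defined range for xa_alloc(), restricting IDs to 1-63.  'foo_limit' is a
 * hypothetical name.
 *
 *	static const struct xa_limit foo_limit = XA_LIMIT(1, 63);
 */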
245 
246 typedef unsigned __bitwise xa_mark_t;
247 #define XA_MARK_0		((__force xa_mark_t)0U)
248 #define XA_MARK_1		((__force xa_mark_t)1U)
249 #define XA_MARK_2		((__force xa_mark_t)2U)
250 #define XA_PRESENT		((__force xa_mark_t)8U)
251 #define XA_MARK_MAX		XA_MARK_2
252 #define XA_FREE_MARK		XA_MARK_0
253 
254 enum xa_lock_type {
255 	XA_LOCK_IRQ = 1,
256 	XA_LOCK_BH = 2,
257 };
258 
259 /*
260  * Values for xa_flags.  The radix tree stores its GFP flags in the xa_flags,
261  * and we remain compatible with that.
262  */
263 #define XA_FLAGS_LOCK_IRQ	((__force gfp_t)XA_LOCK_IRQ)
264 #define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
265 #define XA_FLAGS_TRACK_FREE	((__force gfp_t)4U)
266 #define XA_FLAGS_ZERO_BUSY	((__force gfp_t)8U)
267 #define XA_FLAGS_ALLOC_WRAPPED	((__force gfp_t)16U)
268 #define XA_FLAGS_ACCOUNT	((__force gfp_t)32U)
269 #define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
270 						(__force unsigned)(mark)))
271 
272 /* ALLOC is for a normal 0-based alloc.  ALLOC1 is for a 1-based alloc */
273 #define XA_FLAGS_ALLOC	(XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
274 #define XA_FLAGS_ALLOC1	(XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY)
275 
276 /**
277  * struct xarray - The anchor of the XArray.
278  * @xa_lock: Lock that protects the contents of the XArray.
279  *
280  * To use the xarray, define it statically or embed it in your data structure.
281  * It is a very small data structure, so it does not usually make sense to
282  * allocate it separately and keep a pointer to it in your data structure.
283  *
284  * You may use the xa_lock to protect your own data structures as well.
285  */
286 /*
287  * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
288  * If the only non-NULL entry in the array is at index 0, @xa_head is that
289  * entry.  If any other entry in the array is non-NULL, @xa_head points
290  * to an @xa_node.
291  */
292 struct xarray {
293 	spinlock_t	xa_lock;
294 /* private: The rest of the data structure is not to be used directly. */
295 	gfp_t		xa_flags;
296 	void __rcu *	xa_head;
297 };
298 
299 #define XARRAY_INIT(name, flags) {				\
300 	.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),		\
301 	.xa_flags = flags,					\
302 	.xa_head = NULL,					\
303 }
304 
305 /**
306  * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
307  * @name: A string that names your XArray.
308  * @flags: XA_FLAG values.
309  *
310  * This is intended for file scope definitions of XArrays.  It declares
311  * and initialises an empty XArray with the chosen name and flags.  It is
312  * equivalent to calling xa_init_flags() on the array, but it does the
313  * initialisation at compiletime instead of runtime.
314  */
315 #define DEFINE_XARRAY_FLAGS(name, flags)				\
316 	struct xarray name = XARRAY_INIT(name, flags)
317 
318 /**
319  * DEFINE_XARRAY() - Define an XArray.
320  * @name: A string that names your XArray.
321  *
322  * This is intended for file scope definitions of XArrays.  It declares
323  * and initialises an empty XArray with the chosen name.  It is equivalent
324  * to calling xa_init() on the array, but it does the initialisation at
325  * compiletime instead of runtime.
326  */
327 #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
328 
329 /**
330  * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
331  * @name: A string that names your XArray.
332  *
333  * This is intended for file scope definitions of allocating XArrays.
334  * See also DEFINE_XARRAY().
335  */
336 #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)
337 
338 /**
339  * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
340  * @name: A string that names your XArray.
341  *
342  * This is intended for file scope definitions of allocating XArrays.
343  * See also DEFINE_XARRAY().
344  */
345 #define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1)
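
/*
 * Usage sketch (illustrative, not part of the original header): file-scope
 * definitions.  All names are hypothetical.
 *
 *	static DEFINE_XARRAY(foo_pages);
 *	static DEFINE_XARRAY_FLAGS(foo_irq_array, XA_FLAGS_LOCK_IRQ);
 *	static DEFINE_XARRAY_ALLOC(foo_ids);
 */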
346 
347 void *xa_load(struct xarray *, unsigned long index);
348 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
349 void *xa_erase(struct xarray *, unsigned long index);
350 void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
351 			void *entry, gfp_t);
352 bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
353 void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
354 void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
355 void *xa_find(struct xarray *xa, unsigned long *index,
356 		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
357 void *xa_find_after(struct xarray *xa, unsigned long *index,
358 		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
359 unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
360 		unsigned long max, unsigned int n, xa_mark_t);
361 void xa_destroy(struct xarray *);
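
/*
 * Usage sketch (illustrative, not part of the original header): the normal
 * API with a mark.  'foo_array', 'index' and 'p' are hypothetical and error
 * handling is omitted.
 *
 *	xa_store(&foo_array, index, p, GFP_KERNEL);
 *	xa_set_mark(&foo_array, index, XA_MARK_0);
 *
 *	if (xa_get_mark(&foo_array, index, XA_MARK_0))
 *		p = xa_load(&foo_array, index);
 *	xa_erase(&foo_array, index);
 */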
362 
363 /**
364  * xa_init_flags() - Initialise an empty XArray with flags.
365  * @xa: XArray.
366  * @flags: XA_FLAG values.
367  *
368  * If you need to initialise an XArray with special flags (eg you need
369  * to take the lock from interrupt context), use this function instead
370  * of xa_init().
371  *
372  * Context: Any context.
373  */
374 static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
375 {
376 	spin_lock_init(&xa->xa_lock);
377 	xa->xa_flags = flags;
378 	xa->xa_head = NULL;
379 }
380 
381 /**
382  * xa_init() - Initialise an empty XArray.
383  * @xa: XArray.
384  *
385  * An empty XArray is full of NULL entries.
386  *
387  * Context: Any context.
388  */
389 static inline void xa_init(struct xarray *xa)
390 {
391 	xa_init_flags(xa, 0);
392 }
393 
394 /**
395  * xa_empty() - Determine if an array has any present entries.
396  * @xa: XArray.
397  *
398  * Context: Any context.
399  * Return: %true if the array contains only NULL pointers.
400  */
401 static inline bool xa_empty(const struct xarray *xa)
402 {
403 	return xa->xa_head == NULL;
404 }
405 
406 /**
407  * xa_marked() - Inquire whether any entry in this array has a mark set
408  * @xa: Array
409  * @mark: Mark value
410  *
411  * Context: Any context.
412  * Return: %true if any entry has this mark set.
413  */
414 static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
415 {
416 	return xa->xa_flags & XA_FLAGS_MARK(mark);
417 }
418 
419 /**
420  * xa_for_each_start() - Iterate over a portion of an XArray.
421  * @xa: XArray.
422  * @index: Index of @entry.
423  * @entry: Entry retrieved from array.
424  * @start: First index to retrieve from array.
425  *
426  * During the iteration, @entry will have the value of the entry stored
427  * in @xa at @index.  You may modify @index during the iteration if you
428  * want to skip or reprocess indices.  It is safe to modify the array
429  * during the iteration.  At the end of the iteration, @entry will be set
430  * to NULL and @index will have a value less than or equal to max.
431  *
432  * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n).  You have
433  * to handle your own locking with xas_for_each(), and if you have to unlock
434  * after each iteration, it will also end up being O(n.log(n)).
435  * xa_for_each_start() will spin if it hits a retry entry; if you intend to
436  * see retry entries, you should use the xas_for_each() iterator instead.
437  * The xas_for_each() iterator will expand into more inline code than
438  * xa_for_each_start().
439  *
440  * Context: Any context.  Takes and releases the RCU lock.
441  */
442 #define xa_for_each_start(xa, index, entry, start)			\
443 	for (index = start,						\
444 	     entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);	\
445 	     entry;							\
446 	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
447 
448 /**
449  * xa_for_each() - Iterate over present entries in an XArray.
450  * @xa: XArray.
451  * @index: Index of @entry.
452  * @entry: Entry retrieved from array.
453  *
454  * During the iteration, @entry will have the value of the entry stored
455  * in @xa at @index.  You may modify @index during the iteration if you want
456  * to skip or reprocess indices.  It is safe to modify the array during the
457  * iteration.  At the end of the iteration, @entry will be set to NULL and
458  * @index will have a value less than or equal to max.
459  *
460  * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n).  You have
461  * to handle your own locking with xas_for_each(), and if you have to unlock
462  * after each iteration, it will also end up being O(n.log(n)).  xa_for_each()
463  * will spin if it hits a retry entry; if you intend to see retry entries,
464  * you should use the xas_for_each() iterator instead.  The xas_for_each()
465  * iterator will expand into more inline code than xa_for_each().
466  *
467  * Context: Any context.  Takes and releases the RCU lock.
468  */
469 #define xa_for_each(xa, index, entry) \
470 	xa_for_each_start(xa, index, entry, 0)
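
/*
 * Usage sketch (illustrative, not part of the original header): visiting
 * every present entry.  'foo_array' holds struct foo pointers and foo_put()
 * is a hypothetical helper.
 *
 *	unsigned long index;
 *	struct foo *p;
 *
 *	xa_for_each(&foo_array, index, p)
 *		foo_put(p);
 */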
471 
472 /**
473  * xa_for_each_marked() - Iterate over marked entries in an XArray.
474  * @xa: XArray.
475  * @index: Index of @entry.
476  * @entry: Entry retrieved from array.
477  * @filter: Selection criterion.
478  *
479  * During the iteration, @entry will have the value of the entry stored
480  * in @xa at @index.  The iteration will skip all entries in the array
481  * which do not match @filter.  You may modify @index during the iteration
482  * if you want to skip or reprocess indices.  It is safe to modify the array
483  * during the iteration.  At the end of the iteration, @entry will be set to
484  * NULL and @index will have a value less than or equal to max.
485  *
486  * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
487  * You have to handle your own locking with xas_for_each(), and if you have
488  * to unlock after each iteration, it will also end up being O(n.log(n)).
489  * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
490  * see retry entries, you should use the xas_for_each_marked() iterator
491  * instead.  The xas_for_each_marked() iterator will expand into more inline
492  * code than xa_for_each_marked().
493  *
494  * Context: Any context.  Takes and releases the RCU lock.
495  */
496 #define xa_for_each_marked(xa, index, entry, filter) \
497 	for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
498 	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))
499 
500 #define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
501 #define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
502 #define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
503 #define xa_lock_bh(xa)		spin_lock_bh(&(xa)->xa_lock)
504 #define xa_unlock_bh(xa)	spin_unlock_bh(&(xa)->xa_lock)
505 #define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
506 #define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)
507 #define xa_lock_irqsave(xa, flags) \
508 				spin_lock_irqsave(&(xa)->xa_lock, flags)
509 #define xa_unlock_irqrestore(xa, flags) \
510 				spin_unlock_irqrestore(&(xa)->xa_lock, flags)
511 #define xa_lock_nested(xa, subclass) \
512 				spin_lock_nested(&(xa)->xa_lock, subclass)
513 #define xa_lock_bh_nested(xa, subclass) \
514 				spin_lock_bh_nested(&(xa)->xa_lock, subclass)
515 #define xa_lock_irq_nested(xa, subclass) \
516 				spin_lock_irq_nested(&(xa)->xa_lock, subclass)
517 #define xa_lock_irqsave_nested(xa, flags, subclass) \
518 		spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
519 
520 /*
521  * Versions of the normal API which require the caller to hold the
522  * xa_lock.  If the GFP flags allow it, they will drop the lock to
523  * allocate memory, then reacquire it afterwards.  These functions
524  * may also re-enable interrupts if the XArray flags indicate the
525  * locking should be interrupt safe.
526  */
527 void *__xa_erase(struct xarray *, unsigned long index);
528 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
529 void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
530 		void *entry, gfp_t);
531 int __must_check __xa_insert(struct xarray *, unsigned long index,
532 		void *entry, gfp_t);
533 int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
534 		struct xa_limit, gfp_t);
535 int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
536 		struct xa_limit, u32 *next, gfp_t);
537 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
538 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
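
/*
 * Usage sketch (illustrative, not part of the original header): replacing an
 * entry and updating caller state under one hold of the xa_lock.  GFP_NOWAIT
 * keeps __xa_store() from dropping the lock; 'foo_array', 'index',
 * 'new_entry' and 'foo_count' are hypothetical.
 *
 *	void *old;
 *
 *	xa_lock(&foo_array);
 *	old = __xa_store(&foo_array, index, new_entry, GFP_NOWAIT);
 *	if (!xa_is_err(old))
 *		foo_count++;
 *	xa_unlock(&foo_array);
 */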
539 
540 /**
541  * xa_store_bh() - Store this entry in the XArray.
542  * @xa: XArray.
543  * @index: Index into array.
544  * @entry: New entry.
545  * @gfp: Memory allocation flags.
546  *
547  * This function is like calling xa_store() except it disables softirqs
548  * while holding the array lock.
549  *
550  * Context: Any context.  Takes and releases the xa_lock while
551  * disabling softirqs.
552  * Return: The entry which used to be at this index.
553  */
554 static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
555 		void *entry, gfp_t gfp)
556 {
557 	void *curr;
558 
559 	xa_lock_bh(xa);
560 	curr = __xa_store(xa, index, entry, gfp);
561 	xa_unlock_bh(xa);
562 
563 	return curr;
564 }
565 
566 /**
567  * xa_store_irq() - Store this entry in the XArray.
568  * @xa: XArray.
569  * @index: Index into array.
570  * @entry: New entry.
571  * @gfp: Memory allocation flags.
572  *
573  * This function is like calling xa_store() except it disables interrupts
574  * while holding the array lock.
575  *
576  * Context: Process context.  Takes and releases the xa_lock while
577  * disabling interrupts.
578  * Return: The entry which used to be at this index.
579  */
580 static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
581 		void *entry, gfp_t gfp)
582 {
583 	void *curr;
584 
585 	xa_lock_irq(xa);
586 	curr = __xa_store(xa, index, entry, gfp);
587 	xa_unlock_irq(xa);
588 
589 	return curr;
590 }
591 
592 /**
593  * xa_erase_bh() - Erase this entry from the XArray.
594  * @xa: XArray.
595  * @index: Index of entry.
596  *
597  * After this function returns, loading from @index will return %NULL.
598  * If the index is part of a multi-index entry, all indices will be erased
599  * and none of the entries will be part of a multi-index entry.
600  *
601  * Context: Any context.  Takes and releases the xa_lock while
602  * disabling softirqs.
603  * Return: The entry which used to be at this index.
604  */
605 static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
606 {
607 	void *entry;
608 
609 	xa_lock_bh(xa);
610 	entry = __xa_erase(xa, index);
611 	xa_unlock_bh(xa);
612 
613 	return entry;
614 }
615 
616 /**
617  * xa_erase_irq() - Erase this entry from the XArray.
618  * @xa: XArray.
619  * @index: Index of entry.
620  *
621  * After this function returns, loading from @index will return %NULL.
622  * If the index is part of a multi-index entry, all indices will be erased
623  * and none of the entries will be part of a multi-index entry.
624  *
625  * Context: Process context.  Takes and releases the xa_lock while
626  * disabling interrupts.
627  * Return: The entry which used to be at this index.
628  */
629 static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
630 {
631 	void *entry;
632 
633 	xa_lock_irq(xa);
634 	entry = __xa_erase(xa, index);
635 	xa_unlock_irq(xa);
636 
637 	return entry;
638 }
639 
640 /**
641  * xa_cmpxchg() - Conditionally replace an entry in the XArray.
642  * @xa: XArray.
643  * @index: Index into array.
644  * @old: Old value to test against.
645  * @entry: New value to place in array.
646  * @gfp: Memory allocation flags.
647  *
648  * If the entry at @index is the same as @old, replace it with @entry.
649  * If the return value is equal to @old, then the exchange was successful.
650  *
651  * Context: Any context.  Takes and releases the xa_lock.  May sleep
652  * if the @gfp flags permit.
653  * Return: The old value at this index or xa_err() if an error happened.
654  */
655 static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
656 			void *old, void *entry, gfp_t gfp)
657 {
658 	void *curr;
659 
660 	xa_lock(xa);
661 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
662 	xa_unlock(xa);
663 
664 	return curr;
665 }
666 
667 /**
668  * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
669  * @xa: XArray.
670  * @index: Index into array.
671  * @old: Old value to test against.
672  * @entry: New value to place in array.
673  * @gfp: Memory allocation flags.
674  *
675  * This function is like calling xa_cmpxchg() except it disables softirqs
676  * while holding the array lock.
677  *
678  * Context: Any context.  Takes and releases the xa_lock while
679  * disabling softirqs.  May sleep if the @gfp flags permit.
680  * Return: The old value at this index or xa_err() if an error happened.
681  */
682 static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
683 			void *old, void *entry, gfp_t gfp)
684 {
685 	void *curr;
686 
687 	xa_lock_bh(xa);
688 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
689 	xa_unlock_bh(xa);
690 
691 	return curr;
692 }
693 
694 /**
695  * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
696  * @xa: XArray.
697  * @index: Index into array.
698  * @old: Old value to test against.
699  * @entry: New value to place in array.
700  * @gfp: Memory allocation flags.
701  *
702  * This function is like calling xa_cmpxchg() except it disables interrupts
703  * while holding the array lock.
704  *
705  * Context: Process context.  Takes and releases the xa_lock while
706  * disabling interrupts.  May sleep if the @gfp flags permit.
707  * Return: The old value at this index or xa_err() if an error happened.
708  */
709 static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
710 			void *old, void *entry, gfp_t gfp)
711 {
712 	void *curr;
713 
714 	xa_lock_irq(xa);
715 	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
716 	xa_unlock_irq(xa);
717 
718 	return curr;
719 }
720 
721 /**
722  * xa_insert() - Store this entry in the XArray unless another entry is
723  *			already present.
724  * @xa: XArray.
725  * @index: Index into array.
726  * @entry: New entry.
727  * @gfp: Memory allocation flags.
728  *
729  * Inserting a NULL entry will store a reserved entry (like xa_reserve())
730  * if no entry is present.  Inserting will fail if a reserved entry is
731  * present, even though loading from this index will return NULL.
732  *
733  * Context: Any context.  Takes and releases the xa_lock.  May sleep if
734  * the @gfp flags permit.
735  * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
736  * -ENOMEM if memory could not be allocated.
737  */
738 static inline int __must_check xa_insert(struct xarray *xa,
739 		unsigned long index, void *entry, gfp_t gfp)
740 {
741 	int err;
742 
743 	xa_lock(xa);
744 	err = __xa_insert(xa, index, entry, gfp);
745 	xa_unlock(xa);
746 
747 	return err;
748 }
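
/*
 * Usage sketch (illustrative, not part of the original header): claiming an
 * index only if it is vacant.  'foo_array', 'index' and 'p' are hypothetical;
 * -EBUSY from xa_insert() means somebody else got there first.
 *
 *	int err = xa_insert(&foo_array, index, p, GFP_KERNEL);
 *
 *	if (err)
 *		return err;
 */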
749 
750 /**
751  * xa_insert_bh() - Store this entry in the XArray unless another entry is
752  *			already present.
753  * @xa: XArray.
754  * @index: Index into array.
755  * @entry: New entry.
756  * @gfp: Memory allocation flags.
757  *
758  * Inserting a NULL entry will store a reserved entry (like xa_reserve())
759  * if no entry is present.  Inserting will fail if a reserved entry is
760  * present, even though loading from this index will return NULL.
761  *
762  * Context: Any context.  Takes and releases the xa_lock while
763  * disabling softirqs.  May sleep if the @gfp flags permit.
764  * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
765  * -ENOMEM if memory could not be allocated.
766  */
767 static inline int __must_check xa_insert_bh(struct xarray *xa,
768 		unsigned long index, void *entry, gfp_t gfp)
769 {
770 	int err;
771 
772 	xa_lock_bh(xa);
773 	err = __xa_insert(xa, index, entry, gfp);
774 	xa_unlock_bh(xa);
775 
776 	return err;
777 }
778 
779 /**
780  * xa_insert_irq() - Store this entry in the XArray unless another entry is
781  *			already present.
782  * @xa: XArray.
783  * @index: Index into array.
784  * @entry: New entry.
785  * @gfp: Memory allocation flags.
786  *
787  * Inserting a NULL entry will store a reserved entry (like xa_reserve())
788  * if no entry is present.  Inserting will fail if a reserved entry is
789  * present, even though loading from this index will return NULL.
790  *
791  * Context: Process context.  Takes and releases the xa_lock while
792  * disabling interrupts.  May sleep if the @gfp flags permit.
793  * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
794  * -ENOMEM if memory could not be allocated.
795  */
796 static inline int __must_check xa_insert_irq(struct xarray *xa,
797 		unsigned long index, void *entry, gfp_t gfp)
798 {
799 	int err;
800 
801 	xa_lock_irq(xa);
802 	err = __xa_insert(xa, index, entry, gfp);
803 	xa_unlock_irq(xa);
804 
805 	return err;
806 }
807 
808 /**
809  * xa_alloc() - Find somewhere to store this entry in the XArray.
810  * @xa: XArray.
811  * @id: Pointer to ID.
812  * @entry: New entry.
813  * @limit: Range of ID to allocate.
814  * @gfp: Memory allocation flags.
815  *
816  * Finds an empty entry in @xa between @limit.min and @limit.max,
817  * stores the index into the @id pointer, then stores the entry at
818  * that index.  A concurrent lookup will not see an uninitialised @id.
819  *
820  * Context: Any context.  Takes and releases the xa_lock.  May sleep if
821  * the @gfp flags permit.
822  * Return: 0 on success, -ENOMEM if memory could not be allocated or
823  * -EBUSY if there are no free entries in @limit.
824  */
825 static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
826 		void *entry, struct xa_limit limit, gfp_t gfp)
827 {
828 	int err;
829 
830 	xa_lock(xa);
831 	err = __xa_alloc(xa, id, entry, limit, gfp);
832 	xa_unlock(xa);
833 
834 	return err;
835 }
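
/*
 * Usage sketch (illustrative, not part of the original header): allocating an
 * ID for a new object in an array defined with DEFINE_XARRAY_ALLOC().
 * 'foo_ids' and 'p' are hypothetical; 'p->id' is a u32 field.
 *
 *	u32 id;
 *	int err = xa_alloc(&foo_ids, &id, p, xa_limit_31b, GFP_KERNEL);
 *
 *	if (err)
 *		return err;
 *	p->id = id;
 */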
836 
837 /**
838  * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
839  * @xa: XArray.
840  * @id: Pointer to ID.
841  * @entry: New entry.
842  * @limit: Range of ID to allocate.
843  * @gfp: Memory allocation flags.
844  *
845  * Finds an empty entry in @xa between @limit.min and @limit.max,
846  * stores the index into the @id pointer, then stores the entry at
847  * that index.  A concurrent lookup will not see an uninitialised @id.
848  *
849  * Context: Any context.  Takes and releases the xa_lock while
850  * disabling softirqs.  May sleep if the @gfp flags permit.
851  * Return: 0 on success, -ENOMEM if memory could not be allocated or
852  * -EBUSY if there are no free entries in @limit.
853  */
854 static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
855 		void *entry, struct xa_limit limit, gfp_t gfp)
856 {
857 	int err;
858 
859 	xa_lock_bh(xa);
860 	err = __xa_alloc(xa, id, entry, limit, gfp);
861 	xa_unlock_bh(xa);
862 
863 	return err;
864 }
865 
866 /**
867  * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
868  * @xa: XArray.
869  * @id: Pointer to ID.
870  * @entry: New entry.
871  * @limit: Range of ID to allocate.
872  * @gfp: Memory allocation flags.
873  *
874  * Finds an empty entry in @xa between @limit.min and @limit.max,
875  * stores the index into the @id pointer, then stores the entry at
876  * that index.  A concurrent lookup will not see an uninitialised @id.
877  *
878  * Context: Process context.  Takes and releases the xa_lock while
879  * disabling interrupts.  May sleep if the @gfp flags permit.
880  * Return: 0 on success, -ENOMEM if memory could not be allocated or
881  * -EBUSY if there are no free entries in @limit.
882  */
883 static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
884 		void *entry, struct xa_limit limit, gfp_t gfp)
885 {
886 	int err;
887 
888 	xa_lock_irq(xa);
889 	err = __xa_alloc(xa, id, entry, limit, gfp);
890 	xa_unlock_irq(xa);
891 
892 	return err;
893 }
894 
895 /**
896  * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
897  * @xa: XArray.
898  * @id: Pointer to ID.
899  * @entry: New entry.
900  * @limit: Range of allocated ID.
901  * @next: Pointer to next ID to allocate.
902  * @gfp: Memory allocation flags.
903  *
904  * Finds an empty entry in @xa between @limit.min and @limit.max,
905  * stores the index into the @id pointer, then stores the entry at
906  * that index.  A concurrent lookup will not see an uninitialised @id.
907  * The search for an empty entry will start at @next and will wrap
908  * around if necessary.
909  *
910  * Context: Any context.  Takes and releases the xa_lock.  May sleep if
911  * the @gfp flags permit.
912  * Return: 0 if the allocation succeeded without wrapping.  1 if the
913  * allocation succeeded after wrapping, -ENOMEM if memory could not be
914  * allocated or -EBUSY if there are no free entries in @limit.
915  */
916 static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
917 		struct xa_limit limit, u32 *next, gfp_t gfp)
918 {
919 	int err;
920 
921 	xa_lock(xa);
922 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
923 	xa_unlock(xa);
924 
925 	return err;
926 }
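
/*
 * Usage sketch (illustrative, not part of the original header): cyclic
 * allocation so recently released IDs are not reused immediately.  'foo_ids'
 * and 'p' are hypothetical; 'foo_next' must persist between calls.
 *
 *	static u32 foo_next;
 *	int err;
 *
 *	err = xa_alloc_cyclic(&foo_ids, &p->id, p, xa_limit_32b, &foo_next,
 *			      GFP_KERNEL);
 *	if (err < 0)
 *		return err;
 */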
927 
928 /**
929  * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
930  * @xa: XArray.
931  * @id: Pointer to ID.
932  * @entry: New entry.
933  * @limit: Range of allocated ID.
934  * @next: Pointer to next ID to allocate.
935  * @gfp: Memory allocation flags.
936  *
937  * Finds an empty entry in @xa between @limit.min and @limit.max,
938  * stores the index into the @id pointer, then stores the entry at
939  * that index.  A concurrent lookup will not see an uninitialised @id.
940  * The search for an empty entry will start at @next and will wrap
941  * around if necessary.
942  *
943  * Context: Any context.  Takes and releases the xa_lock while
944  * disabling softirqs.  May sleep if the @gfp flags permit.
945  * Return: 0 if the allocation succeeded without wrapping.  1 if the
946  * allocation succeeded after wrapping, -ENOMEM if memory could not be
947  * allocated or -EBUSY if there are no free entries in @limit.
948  */
949 static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
950 		struct xa_limit limit, u32 *next, gfp_t gfp)
951 {
952 	int err;
953 
954 	xa_lock_bh(xa);
955 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
956 	xa_unlock_bh(xa);
957 
958 	return err;
959 }
960 
961 /**
962  * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
963  * @xa: XArray.
964  * @id: Pointer to ID.
965  * @entry: New entry.
966  * @limit: Range of allocated ID.
967  * @next: Pointer to next ID to allocate.
968  * @gfp: Memory allocation flags.
969  *
970  * Finds an empty entry in @xa between @limit.min and @limit.max,
971  * stores the index into the @id pointer, then stores the entry at
972  * that index.  A concurrent lookup will not see an uninitialised @id.
973  * The search for an empty entry will start at @next and will wrap
974  * around if necessary.
975  *
976  * Context: Process context.  Takes and releases the xa_lock while
977  * disabling interrupts.  May sleep if the @gfp flags permit.
978  * Return: 0 if the allocation succeeded without wrapping.  1 if the
979  * allocation succeeded after wrapping, -ENOMEM if memory could not be
980  * allocated or -EBUSY if there are no free entries in @limit.
981  */
982 static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
983 		struct xa_limit limit, u32 *next, gfp_t gfp)
984 {
985 	int err;
986 
987 	xa_lock_irq(xa);
988 	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
989 	xa_unlock_irq(xa);
990 
991 	return err;
992 }
993 
994 /**
995  * xa_reserve() - Reserve this index in the XArray.
996  * @xa: XArray.
997  * @index: Index into array.
998  * @gfp: Memory allocation flags.
999  *
1000  * Ensures there is somewhere to store an entry at @index in the array.
1001  * If there is already something stored at @index, this function does
1002  * nothing.  If there was nothing there, the entry is marked as reserved.
1003  * Loading from a reserved entry returns a %NULL pointer.
1004  *
1005  * If you do not use the entry that you have reserved, call xa_release()
1006  * or xa_erase() to free any unnecessary memory.
1007  *
1008  * Context: Any context.  Takes and releases the xa_lock.
1009  * May sleep if the @gfp flags permit.
1010  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1011  */
1012 static inline __must_check
1013 int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
1014 {
1015 	return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp));
1016 }
1017 
1018 /**
1019  * xa_reserve_bh() - Reserve this index in the XArray.
1020  * @xa: XArray.
1021  * @index: Index into array.
1022  * @gfp: Memory allocation flags.
1023  *
1024  * A softirq-disabling version of xa_reserve().
1025  *
1026  * Context: Any context.  Takes and releases the xa_lock while
1027  * disabling softirqs.
1028  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1029  */
1030 static inline __must_check
1031 int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
1032 {
1033 	return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp));
1034 }
1035 
1036 /**
1037  * xa_reserve_irq() - Reserve this index in the XArray.
1038  * @xa: XArray.
1039  * @index: Index into array.
1040  * @gfp: Memory allocation flags.
1041  *
1042  * An interrupt-disabling version of xa_reserve().
1043  *
1044  * Context: Process context.  Takes and releases the xa_lock while
1045  * disabling interrupts.
1046  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1047  */
1048 static inline __must_check
1049 int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
1050 {
1051 	return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp));
1052 }
1053 
1054 /**
1055  * xa_release() - Release a reserved entry.
1056  * @xa: XArray.
1057  * @index: Index of entry.
1058  *
1059  * After calling xa_reserve(), you can call this function to release the
1060  * reservation.  If the entry at @index has been stored to, this function
1061  * will do nothing.
1062  */
1063 static inline void xa_release(struct xarray *xa, unsigned long index)
1064 {
1065 	xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0);
1066 }
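
/*
 * Usage sketch (illustrative, not part of the original header): reserving an
 * index up front and releasing it on failure.  'foo_array', 'index', 'p' and
 * foo_setup() are hypothetical.
 *
 *	int err = xa_reserve(&foo_array, index, GFP_KERNEL);
 *
 *	if (err)
 *		return err;
 *	err = foo_setup(p);
 *	if (err) {
 *		xa_release(&foo_array, index);
 *		return err;
 *	}
 *	xa_store(&foo_array, index, p, GFP_KERNEL);
 */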
1067 
1068 /* Everything below here is the Advanced API.  Proceed with caution. */
1069 
1070 /*
1071  * The xarray is constructed out of a set of 'chunks' of pointers.  Choosing
1072  * the best chunk size requires some tradeoffs.  A power of two recommends
1073  * itself so that we can walk the tree based purely on shifts and masks.
1074  * Generally, the larger the better; as the number of slots per level of the
1075  * tree increases, the less tall the tree needs to be.  But that needs to be
1076  * balanced against the memory consumption of each node.  On a 64-bit system,
1077  * xa_node is currently 576 bytes, and we get 7 of them per 4kB page.  If we
1078  * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
1079  */
1080 #ifndef XA_CHUNK_SHIFT
1081 #define XA_CHUNK_SHIFT		(CONFIG_BASE_SMALL ? 4 : 6)
1082 #endif
1083 #define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
1084 #define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
1085 #define XA_MAX_MARKS		3
1086 #define XA_MARK_LONGS		DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)
1087 
1088 /*
1089  * @count is the count of every non-NULL element in the ->slots array
1090  * whether that is a value entry, a retry entry, a user pointer,
1091  * a sibling entry or a pointer to the next level of the tree.
1092  * @nr_values is the count of every element in ->slots which is
1093  * either a value entry or a sibling of a value entry.
1094  */
1095 struct xa_node {
1096 	unsigned char	shift;		/* Bits remaining in each slot */
1097 	unsigned char	offset;		/* Slot offset in parent */
1098 	unsigned char	count;		/* Total entry count */
1099 	unsigned char	nr_values;	/* Value entry count */
1100 	struct xa_node __rcu *parent;	/* NULL at top of tree */
1101 	struct xarray	*array;		/* The array we belong to */
1102 	union {
1103 		struct list_head private_list;	/* For tree user */
1104 		struct rcu_head	rcu_head;	/* Used when freeing node */
1105 	};
1106 	void __rcu	*slots[XA_CHUNK_SIZE];
1107 	union {
1108 		unsigned long	tags[XA_MAX_MARKS][XA_MARK_LONGS];
1109 		unsigned long	marks[XA_MAX_MARKS][XA_MARK_LONGS];
1110 	};
1111 };
1112 
1113 void xa_dump(const struct xarray *);
1114 void xa_dump_node(const struct xa_node *);
1115 
1116 #ifdef XA_DEBUG
1117 #define XA_BUG_ON(xa, x) do {					\
1118 		if (x) {					\
1119 			xa_dump(xa);				\
1120 			BUG();					\
1121 		}						\
1122 	} while (0)
1123 #define XA_NODE_BUG_ON(node, x) do {				\
1124 		if (x) {					\
1125 			if (node) xa_dump_node(node);		\
1126 			BUG();					\
1127 		}						\
1128 	} while (0)
1129 #else
1130 #define XA_BUG_ON(xa, x)	do { } while (0)
1131 #define XA_NODE_BUG_ON(node, x)	do { } while (0)
1132 #endif
1133 
1134 /* Private */
1135 static inline void *xa_head(const struct xarray *xa)
1136 {
1137 	return rcu_dereference_check(xa->xa_head,
1138 						lockdep_is_held(&xa->xa_lock));
1139 }
1140 
1141 /* Private */
1142 static inline void *xa_head_locked(const struct xarray *xa)
1143 {
1144 	return rcu_dereference_protected(xa->xa_head,
1145 						lockdep_is_held(&xa->xa_lock));
1146 }
1147 
1148 /* Private */
1149 static inline void *xa_entry(const struct xarray *xa,
1150 				const struct xa_node *node, unsigned int offset)
1151 {
1152 	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
1153 	return rcu_dereference_check(node->slots[offset],
1154 						lockdep_is_held(&xa->xa_lock));
1155 }
1156 
1157 /* Private */
1158 static inline void *xa_entry_locked(const struct xarray *xa,
1159 				const struct xa_node *node, unsigned int offset)
1160 {
1161 	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
1162 	return rcu_dereference_protected(node->slots[offset],
1163 						lockdep_is_held(&xa->xa_lock));
1164 }
1165 
1166 /* Private */
1167 static inline struct xa_node *xa_parent(const struct xarray *xa,
1168 					const struct xa_node *node)
1169 {
1170 	return rcu_dereference_check(node->parent,
1171 						lockdep_is_held(&xa->xa_lock));
1172 }
1173 
1174 /* Private */
1175 static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
1176 					const struct xa_node *node)
1177 {
1178 	return rcu_dereference_protected(node->parent,
1179 						lockdep_is_held(&xa->xa_lock));
1180 }
1181 
1182 /* Private */
1183 static inline void *xa_mk_node(const struct xa_node *node)
1184 {
1185 	return (void *)((unsigned long)node | 2);
1186 }
1187 
1188 /* Private */
1189 static inline struct xa_node *xa_to_node(const void *entry)
1190 {
1191 	return (struct xa_node *)((unsigned long)entry - 2);
1192 }
1193 
1194 /* Private */
1195 static inline bool xa_is_node(const void *entry)
1196 {
1197 	return xa_is_internal(entry) && (unsigned long)entry > 4096;
1198 }
1199 
1200 /* Private */
1201 static inline void *xa_mk_sibling(unsigned int offset)
1202 {
1203 	return xa_mk_internal(offset);
1204 }
1205 
1206 /* Private */
1207 static inline unsigned long xa_to_sibling(const void *entry)
1208 {
1209 	return xa_to_internal(entry);
1210 }
1211 
1212 /**
1213  * xa_is_sibling() - Is the entry a sibling entry?
1214  * @entry: Entry retrieved from the XArray
1215  *
1216  * Return: %true if the entry is a sibling entry.
1217  */
1218 static inline bool xa_is_sibling(const void *entry)
1219 {
1220 	return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
1221 		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
1222 }
1223 
1224 #define XA_RETRY_ENTRY		xa_mk_internal(256)
1225 
1226 /**
1227  * xa_is_retry() - Is the entry a retry entry?
1228  * @entry: Entry retrieved from the XArray
1229  *
1230  * Return: %true if the entry is a retry entry.
1231  */
1232 static inline bool xa_is_retry(const void *entry)
1233 {
1234 	return unlikely(entry == XA_RETRY_ENTRY);
1235 }
1236 
1237 /**
1238  * xa_is_advanced() - Is the entry only permitted for the advanced API?
1239  * @entry: Entry to be stored in the XArray.
1240  *
1241  * Return: %true if the entry cannot be stored by the normal API.
1242  */
1243 static inline bool xa_is_advanced(const void *entry)
1244 {
1245 	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
1246 }
1247 
1248 /**
1249  * typedef xa_update_node_t - A callback function from the XArray.
1250  * @node: The node which is being processed
1251  *
1252  * This function is called every time the XArray updates the count of
1253  * present and value entries in a node.  It allows advanced users to
1254  * maintain the private_list in the node.
1255  *
1256  * Context: The xa_lock is held and interrupts may be disabled.
1257  *	    Implementations should not drop the xa_lock, nor re-enable
1258  *	    interrupts.
1259  */
1260 typedef void (*xa_update_node_t)(struct xa_node *node);
1261 
1262 /*
1263  * The xa_state is opaque to its users.  It contains various different pieces
1264  * of state involved in the current operation on the XArray.  It should be
1265  * declared on the stack and passed between the various internal routines.
1266  * The various elements in it should not be accessed directly, but only
1267  * through the provided accessor functions.  The below documentation is for
1268  * the benefit of those working on the code, not for users of the XArray.
1269  *
1270  * @xa_node usually points to the xa_node containing the slot we're operating
1271  * on (and @xa_offset is the offset in the slots array).  If there is a
1272  * single entry in the array at index 0, there are no allocated xa_nodes to
1273  * point to, and so we store %NULL in @xa_node.  @xa_node is set to
1274  * the value %XAS_RESTART if the xa_state is not walked to the correct
1275  * position in the tree of nodes for this operation.  If an error occurs
1276  * during an operation, it is set to an %XAS_ERROR value.  If we run off the
1277  * end of the allocated nodes, it is set to %XAS_BOUNDS.
1278  */
1279 struct xa_state {
1280 	struct xarray *xa;
1281 	unsigned long xa_index;
1282 	unsigned char xa_shift;
1283 	unsigned char xa_sibs;
1284 	unsigned char xa_offset;
1285 	unsigned char xa_pad;		/* Helps gcc generate better code */
1286 	struct xa_node *xa_node;
1287 	struct xa_node *xa_alloc;
1288 	xa_update_node_t xa_update;
1289 };
1290 
1291 /*
1292  * We encode errnos in the xas->xa_node.  If an error has happened, we need to
1293  * drop the lock to fix it, and once we've done so the xa_state is invalid.
1294  */
1295 #define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
1296 #define XAS_BOUNDS	((struct xa_node *)1UL)
1297 #define XAS_RESTART	((struct xa_node *)3UL)
1298 
1299 #define __XA_STATE(array, index, shift, sibs)  {	\
1300 	.xa = array,					\
1301 	.xa_index = index,				\
1302 	.xa_shift = shift,				\
1303 	.xa_sibs = sibs,				\
1304 	.xa_offset = 0,					\
1305 	.xa_pad = 0,					\
1306 	.xa_node = XAS_RESTART,				\
1307 	.xa_alloc = NULL,				\
1308 	.xa_update = NULL				\
1309 }
1310 
1311 /**
1312  * XA_STATE() - Declare an XArray operation state.
1313  * @name: Name of this operation state (usually xas).
1314  * @array: Array to operate on.
1315  * @index: Initial index of interest.
1316  *
1317  * Declare and initialise an xa_state on the stack.
1318  */
1319 #define XA_STATE(name, array, index)				\
1320 	struct xa_state name = __XA_STATE(array, index, 0, 0)
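
/*
 * Usage sketch (illustrative, not part of the original header): a single
 * advanced-API lookup under the RCU read lock, restarting if a retry entry
 * is seen.  'foo_array' and 'index' are hypothetical.
 *
 *	XA_STATE(xas, &foo_array, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	do {
 *		entry = xas_load(&xas);
 *	} while (xas_retry(&xas, entry));
 *	rcu_read_unlock();
 */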
1321 
1322 /**
1323  * XA_STATE_ORDER() - Declare an XArray operation state.
1324  * @name: Name of this operation state (usually xas).
1325  * @array: Array to operate on.
1326  * @index: Initial index of interest.
1327  * @order: Order of entry.
1328  *
1329  * Declare and initialise an xa_state on the stack.  This variant of
1330  * XA_STATE() allows you to specify the 'order' of the element you
1331  * want to operate on.
1332  */
1333 #define XA_STATE_ORDER(name, array, index, order)		\
1334 	struct xa_state name = __XA_STATE(array,		\
1335 			(index >> order) << order,		\
1336 			order - (order % XA_CHUNK_SHIFT),	\
1337 			(1U << (order % XA_CHUNK_SHIFT)) - 1)
1338 
1339 #define xas_marked(xas, mark)	xa_marked((xas)->xa, (mark))
1340 #define xas_trylock(xas)	xa_trylock((xas)->xa)
1341 #define xas_lock(xas)		xa_lock((xas)->xa)
1342 #define xas_unlock(xas)		xa_unlock((xas)->xa)
1343 #define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
1344 #define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
1345 #define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
1346 #define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
1347 #define xas_lock_irqsave(xas, flags) \
1348 				xa_lock_irqsave((xas)->xa, flags)
1349 #define xas_unlock_irqrestore(xas, flags) \
1350 				xa_unlock_irqrestore((xas)->xa, flags)
1351 
1352 /**
1353  * xas_error() - Return an errno stored in the xa_state.
1354  * @xas: XArray operation state.
1355  *
1356  * Return: 0 if no error has been noted.  A negative errno if one has.
1357  */
1358 static inline int xas_error(const struct xa_state *xas)
1359 {
1360 	return xa_err(xas->xa_node);
1361 }
1362 
1363 /**
1364  * xas_set_err() - Note an error in the xa_state.
1365  * @xas: XArray operation state.
1366  * @err: Negative error number.
1367  *
1368  * Only call this function with a negative @err; zero or positive errors
1369  * will probably not behave the way you think they should.  If you want
1370  * to clear the error from an xa_state, use xas_reset().
1371  */
1372 static inline void xas_set_err(struct xa_state *xas, long err)
1373 {
1374 	xas->xa_node = XA_ERROR(err);
1375 }
1376 
1377 /**
1378  * xas_invalid() - Is the xas in a retry or error state?
1379  * @xas: XArray operation state.
1380  *
1381  * Return: %true if the xas cannot be used for operations.
1382  */
1383 static inline bool xas_invalid(const struct xa_state *xas)
1384 {
1385 	return (unsigned long)xas->xa_node & 3;
1386 }
1387 
1388 /**
1389  * xas_valid() - Is the xas a valid cursor into the array?
1390  * @xas: XArray operation state.
1391  *
1392  * Return: %true if the xas can be used for operations.
1393  */
1394 static inline bool xas_valid(const struct xa_state *xas)
1395 {
1396 	return !xas_invalid(xas);
1397 }
1398 
1399 /**
1400  * xas_is_node() - Does the xas point to a node?
1401  * @xas: XArray operation state.
1402  *
1403  * Return: %true if the xas currently references a node.
1404  */
1405 static inline bool xas_is_node(const struct xa_state *xas)
1406 {
1407 	return xas_valid(xas) && xas->xa_node;
1408 }
1409 
1410 /* True if the pointer is something other than a node */
1411 static inline bool xas_not_node(struct xa_node *node)
1412 {
1413 	return ((unsigned long)node & 3) || !node;
1414 }
1415 
1416 /* True if the node represents RESTART or an error */
1417 static inline bool xas_frozen(struct xa_node *node)
1418 {
1419 	return (unsigned long)node & 2;
1420 }
1421 
1422 /* True if the node represents head-of-tree, RESTART or BOUNDS */
1423 static inline bool xas_top(struct xa_node *node)
1424 {
1425 	return node <= XAS_RESTART;
1426 }
1427 
1428 /**
1429  * xas_reset() - Reset an XArray operation state.
1430  * @xas: XArray operation state.
1431  *
1432  * Resets the error or walk state of the @xas so future walks of the
1433  * array will start from the root.  Use this if you have dropped the
1434  * xarray lock and want to reuse the xa_state.
1435  *
1436  * Context: Any context.
1437  */
1438 static inline void xas_reset(struct xa_state *xas)
1439 {
1440 	xas->xa_node = XAS_RESTART;
1441 }
1442 
1443 /**
1444  * xas_retry() - Retry the operation if appropriate.
1445  * @xas: XArray operation state.
1446  * @entry: Entry from xarray.
1447  *
1448  * The advanced functions may sometimes return an internal entry, such as
1449  * a retry entry or a zero entry.  This function sets up the @xas to restart
1450  * the walk from the head of the array if needed.
1451  *
1452  * Context: Any context.
1453  * Return: true if the operation needs to be retried.
1454  */
1455 static inline bool xas_retry(struct xa_state *xas, const void *entry)
1456 {
1457 	if (xa_is_zero(entry))
1458 		return true;
1459 	if (!xa_is_retry(entry))
1460 		return false;
1461 	xas_reset(xas);
1462 	return true;
1463 }
1464 
1465 void *xas_load(struct xa_state *);
1466 void *xas_store(struct xa_state *, void *entry);
1467 void *xas_find(struct xa_state *, unsigned long max);
1468 void *xas_find_conflict(struct xa_state *);
1469 
1470 bool xas_get_mark(const struct xa_state *, xa_mark_t);
1471 void xas_set_mark(const struct xa_state *, xa_mark_t);
1472 void xas_clear_mark(const struct xa_state *, xa_mark_t);
1473 void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
1474 void xas_init_marks(const struct xa_state *);
1475 
1476 bool xas_nomem(struct xa_state *, gfp_t);
1477 void xas_pause(struct xa_state *);
1478 
1479 void xas_create_range(struct xa_state *);
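
/*
 * Usage sketch (illustrative, not part of the original header): a store loop
 * in the style of Documentation/core-api/xarray.rst, retrying after dropping
 * the lock to allocate memory.  'foo_array', 'index' and 'p' are
 * hypothetical.
 *
 *	XA_STATE(xas, &foo_array, index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, p);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 *
 *	return xas_error(&xas);
 */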
1480 
1481 /**
1482  * xas_reload() - Refetch an entry from the xarray.
1483  * @xas: XArray operation state.
1484  *
1485  * Use this function to check that a previously loaded entry still has
1486  * the same value.  This is useful for the lockless pagecache lookup where
1487  * we walk the array with only the RCU lock to protect us, lock the page,
1488  * then check that the page hasn't moved since we looked it up.
1489  *
1490  * The caller guarantees that @xas is still valid.  If it may be in an
1491  * error or restart state, call xas_load() instead.
1492  *
1493  * Return: The entry at this location in the xarray.
1494  */
1495 static inline void *xas_reload(struct xa_state *xas)
1496 {
1497 	struct xa_node *node = xas->xa_node;
1498 
1499 	if (node)
1500 		return xa_entry(xas->xa, node, xas->xa_offset);
1501 	return xa_head(xas->xa);
1502 }
1503 
1504 /**
1505  * xas_set() - Set up XArray operation state for a different index.
1506  * @xas: XArray operation state.
1507  * @index: New index into the XArray.
1508  *
1509  * Move the operation state to refer to a different index.  This will
1510  * have the effect of starting a walk from the top; see xas_next()
1511  * to move to an adjacent index.
1512  */
1513 static inline void xas_set(struct xa_state *xas, unsigned long index)
1514 {
1515 	xas->xa_index = index;
1516 	xas->xa_node = XAS_RESTART;
1517 }
1518 
1519 /**
1520  * xas_set_order() - Set up XArray operation state for a multislot entry.
1521  * @xas: XArray operation state.
1522  * @index: Target of the operation.
1523  * @order: Entry occupies 2^@order indices.
1524  */
1525 static inline void xas_set_order(struct xa_state *xas, unsigned long index,
1526 					unsigned int order)
1527 {
1528 #ifdef CONFIG_XARRAY_MULTI
1529 	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
1530 	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
1531 	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
1532 	xas->xa_node = XAS_RESTART;
1533 #else
1534 	BUG_ON(order > 0);
1535 	xas_set(xas, index);
1536 #endif
1537 }
1538 
1539 /**
1540  * xas_set_update() - Set up XArray operation state for a callback.
1541  * @xas: XArray operation state.
1542  * @update: Function to call when updating a node.
1543  *
1544  * The XArray can notify a caller after it has updated an xa_node.
1545  * This is advanced functionality and is only needed by the page cache.
1546  */
1547 static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
1548 {
1549 	xas->xa_update = update;
1550 }
1551 
1552 /**
1553  * xas_next_entry() - Advance iterator to next present entry.
1554  * @xas: XArray operation state.
1555  * @max: Highest index to return.
1556  *
1557  * xas_next_entry() is an inline function to optimise xarray traversal for
1558  * speed.  It is equivalent to calling xas_find(), and will call xas_find()
1559  * for all the hard cases.
1560  *
1561  * Return: The next present entry after the one currently referred to by @xas.
1562  */
1563 static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
1564 {
1565 	struct xa_node *node = xas->xa_node;
1566 	void *entry;
1567 
1568 	if (unlikely(xas_not_node(node) || node->shift ||
1569 			xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
1570 		return xas_find(xas, max);
1571 
1572 	do {
1573 		if (unlikely(xas->xa_index >= max))
1574 			return xas_find(xas, max);
1575 		if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
1576 			return xas_find(xas, max);
1577 		entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
1578 		if (unlikely(xa_is_internal(entry)))
1579 			return xas_find(xas, max);
1580 		xas->xa_offset++;
1581 		xas->xa_index++;
1582 	} while (!entry);
1583 
1584 	return entry;
1585 }
1586 
1587 /* Private */
1588 static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
1589 		xa_mark_t mark)
1590 {
1591 	unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
1592 	unsigned int offset = xas->xa_offset;
1593 
1594 	if (advance)
1595 		offset++;
1596 	if (XA_CHUNK_SIZE == BITS_PER_LONG) {
1597 		if (offset < XA_CHUNK_SIZE) {
1598 			unsigned long data = *addr & (~0UL << offset);
1599 			if (data)
1600 				return __ffs(data);
1601 		}
1602 		return XA_CHUNK_SIZE;
1603 	}
1604 
1605 	return find_next_bit(addr, XA_CHUNK_SIZE, offset);
1606 }
1607 
1608 /**
1609  * xas_next_marked() - Advance iterator to next marked entry.
1610  * @xas: XArray operation state.
1611  * @max: Highest index to return.
1612  * @mark: Mark to search for.
1613  *
1614  * xas_next_marked() is an inline function to optimise xarray traversal for
1615  * speed.  It is equivalent to calling xas_find_marked(), and will call
1616  * xas_find_marked() for all the hard cases.
1617  *
1618  * Return: The next marked entry after the one currently referred to by @xas.
1619  */
1620 static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
1621 								xa_mark_t mark)
1622 {
1623 	struct xa_node *node = xas->xa_node;
1624 	unsigned int offset;
1625 
1626 	if (unlikely(xas_not_node(node) || node->shift))
1627 		return xas_find_marked(xas, max, mark);
1628 	offset = xas_find_chunk(xas, true, mark);
1629 	xas->xa_offset = offset;
1630 	xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
1631 	if (xas->xa_index > max)
1632 		return NULL;
1633 	if (offset == XA_CHUNK_SIZE)
1634 		return xas_find_marked(xas, max, mark);
1635 	return xa_entry(xas->xa, node, offset);
1636 }
1637 
1638 /*
1639  * If iterating while holding a lock, drop the lock and reschedule
1640  * every %XA_CHECK_SCHED loops.
1641  */
1642 enum {
1643 	XA_CHECK_SCHED = 4096,
1644 };
1645 
1646 /**
1647  * xas_for_each() - Iterate over a range of an XArray.
1648  * @xas: XArray operation state.
1649  * @entry: Entry retrieved from the array.
1650  * @max: Maximum index to retrieve from array.
1651  *
1652  * The loop body will be executed for each entry present in the xarray
1653  * between the current xas position and @max.  @entry will be set to
1654  * the entry retrieved from the xarray.  It is safe to delete entries
1655  * from the array in the loop body.  You should hold either the RCU lock
1656  * or the xa_lock while iterating.  If you need to drop the lock, call
1657  * xas_pause() first.
1658  */
1659 #define xas_for_each(xas, entry, max) \
1660 	for (entry = xas_find(xas, max); entry; \
1661 	     entry = xas_next_entry(xas, max))
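
/*
 * Usage sketch (illustrative, not part of the original header): iterating
 * with the advanced API under the RCU read lock and skipping retry entries.
 * 'foo_array' and foo_put() are hypothetical.
 *
 *	XA_STATE(xas, &foo_array, 0);
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, p, ULONG_MAX) {
 *		if (xas_retry(&xas, p))
 *			continue;
 *		foo_put(p);
 *	}
 *	rcu_read_unlock();
 */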
1662 
1663 /**
1664  * xas_for_each_marked() - Iterate over a range of an XArray.
1665  * @xas: XArray operation state.
1666  * @entry: Entry retrieved from the array.
1667  * @max: Maximum index to retrieve from array.
1668  * @mark: Mark to search for.
1669  *
1670  * The loop body will be executed for each marked entry in the xarray
1671  * between the current xas position and @max.  @entry will be set to
1672  * the entry retrieved from the xarray.  It is safe to delete entries
1673  * from the array in the loop body.  You should hold either the RCU lock
1674  * or the xa_lock while iterating.  If you need to drop the lock, call
1675  * xas_pause() first.
1676  */
1677 #define xas_for_each_marked(xas, entry, max, mark) \
1678 	for (entry = xas_find_marked(xas, max, mark); entry; \
1679 	     entry = xas_next_marked(xas, max, mark))
1680 
1681 /**
1682  * xas_for_each_conflict() - Iterate over a range of an XArray.
1683  * @xas: XArray operation state.
1684  * @entry: Entry retrieved from the array.
1685  *
1686  * The loop body will be executed for each entry in the XArray that lies
1687  * within the range specified by @xas.  If the loop completes successfully,
1688  * any entries that lie in this range will be replaced by @entry.  The caller
1689  * may break out of the loop; if they do so, the contents of the XArray will
1690  * be unchanged.  The operation may fail due to an out of memory condition.
1691  * The caller may also call xas_set_err() to exit the loop while setting an
1692  * error to record the reason.
1693  */
1694 #define xas_for_each_conflict(xas, entry) \
1695 	while ((entry = xas_find_conflict(xas)))
1696 
1697 void *__xas_next(struct xa_state *);
1698 void *__xas_prev(struct xa_state *);
1699 
1700 /**
1701  * xas_prev() - Move iterator to previous index.
1702  * @xas: XArray operation state.
1703  *
1704  * If the @xas was in an error state, it will remain in an error state
1705  * and this function will return %NULL.  If the @xas has never been walked,
1706  * it will have the effect of calling xas_load().  Otherwise one will be
1707  * subtracted from the index and the state will be walked to the correct
1708  * location in the array for the next operation.
1709  *
1710  * If the iterator was referencing index 0, this function wraps
1711  * around to %ULONG_MAX.
1712  *
1713  * Return: The entry at the new index.  This may be %NULL or an internal
1714  * entry.
1715  */
1716 static inline void *xas_prev(struct xa_state *xas)
1717 {
1718 	struct xa_node *node = xas->xa_node;
1719 
1720 	if (unlikely(xas_not_node(node) || node->shift ||
1721 				xas->xa_offset == 0))
1722 		return __xas_prev(xas);
1723 
1724 	xas->xa_index--;
1725 	xas->xa_offset--;
1726 	return xa_entry(xas->xa, node, xas->xa_offset);
1727 }
1728 
1729 /**
1730  * xas_next() - Move state to next index.
1731  * @xas: XArray operation state.
1732  *
1733  * If the @xas was in an error state, it will remain in an error state
1734  * and this function will return %NULL.  If the @xas has never been walked,
1735  * it will have the effect of calling xas_load().  Otherwise one will be
1736  * added to the index and the state will be walked to the correct
1737  * location in the array for the next operation.
1738  *
1739  * If the iterator was referencing index %ULONG_MAX, this function wraps
1740  * around to 0.
1741  *
1742  * Return: The entry at the new index.  This may be %NULL or an internal
1743  * entry.
1744  */
1745 static inline void *xas_next(struct xa_state *xas)
1746 {
1747 	struct xa_node *node = xas->xa_node;
1748 
1749 	if (unlikely(xas_not_node(node) || node->shift ||
1750 				xas->xa_offset == XA_CHUNK_MASK))
1751 		return __xas_next(xas);
1752 
1753 	xas->xa_index++;
1754 	xas->xa_offset++;
1755 	return xa_entry(xas->xa, node, xas->xa_offset);
1756 }
1757 
1758 #endif /* _LINUX_XARRAY_H */
1759