xref: /linux-6.15/lib/xarray.c (revision 2fa044e5)
1f8d5d0ccSMatthew Wilcox // SPDX-License-Identifier: GPL-2.0+
2f8d5d0ccSMatthew Wilcox /*
3f8d5d0ccSMatthew Wilcox  * XArray implementation
4f8d5d0ccSMatthew Wilcox  * Copyright (c) 2017 Microsoft Corporation
5f8d5d0ccSMatthew Wilcox  * Author: Matthew Wilcox <[email protected]>
6f8d5d0ccSMatthew Wilcox  */
7f8d5d0ccSMatthew Wilcox 
89b89a035SMatthew Wilcox #include <linux/bitmap.h>
9f8d5d0ccSMatthew Wilcox #include <linux/export.h>
1058d6ea30SMatthew Wilcox #include <linux/list.h>
1158d6ea30SMatthew Wilcox #include <linux/slab.h>
12f8d5d0ccSMatthew Wilcox #include <linux/xarray.h>
13f8d5d0ccSMatthew Wilcox 
14f8d5d0ccSMatthew Wilcox /*
15f8d5d0ccSMatthew Wilcox  * Coding conventions in this file:
16f8d5d0ccSMatthew Wilcox  *
17f8d5d0ccSMatthew Wilcox  * @xa is used to refer to the entire xarray.
18f8d5d0ccSMatthew Wilcox  * @xas is the 'xarray operation state'.  It may be either a pointer to
19f8d5d0ccSMatthew Wilcox  * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
20f8d5d0ccSMatthew Wilcox  * ambiguity.
21f8d5d0ccSMatthew Wilcox  * @index is the index of the entry being operated on
22f8d5d0ccSMatthew Wilcox  * @mark is an xa_mark_t; a small number indicating one of the mark bits.
23f8d5d0ccSMatthew Wilcox  * @node refers to an xa_node; usually the primary one being operated on by
24f8d5d0ccSMatthew Wilcox  * this function.
25f8d5d0ccSMatthew Wilcox  * @offset is the index into the slots array inside an xa_node.
26f8d5d0ccSMatthew Wilcox  * @parent refers to the @xa_node closer to the head than @node.
27f8d5d0ccSMatthew Wilcox  * @entry refers to something stored in a slot in the xarray
28f8d5d0ccSMatthew Wilcox  */
29f8d5d0ccSMatthew Wilcox 
3058d6ea30SMatthew Wilcox static inline unsigned int xa_lock_type(const struct xarray *xa)
3158d6ea30SMatthew Wilcox {
3258d6ea30SMatthew Wilcox 	return (__force unsigned int)xa->xa_flags & 3;
3358d6ea30SMatthew Wilcox }
3458d6ea30SMatthew Wilcox 
3558d6ea30SMatthew Wilcox static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
3658d6ea30SMatthew Wilcox {
3758d6ea30SMatthew Wilcox 	if (lock_type == XA_LOCK_IRQ)
3858d6ea30SMatthew Wilcox 		xas_lock_irq(xas);
3958d6ea30SMatthew Wilcox 	else if (lock_type == XA_LOCK_BH)
4058d6ea30SMatthew Wilcox 		xas_lock_bh(xas);
4158d6ea30SMatthew Wilcox 	else
4258d6ea30SMatthew Wilcox 		xas_lock(xas);
4358d6ea30SMatthew Wilcox }
4458d6ea30SMatthew Wilcox 
4558d6ea30SMatthew Wilcox static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
4658d6ea30SMatthew Wilcox {
4758d6ea30SMatthew Wilcox 	if (lock_type == XA_LOCK_IRQ)
4858d6ea30SMatthew Wilcox 		xas_unlock_irq(xas);
4958d6ea30SMatthew Wilcox 	else if (lock_type == XA_LOCK_BH)
5058d6ea30SMatthew Wilcox 		xas_unlock_bh(xas);
5158d6ea30SMatthew Wilcox 	else
5258d6ea30SMatthew Wilcox 		xas_unlock(xas);
5358d6ea30SMatthew Wilcox }
5458d6ea30SMatthew Wilcox 
55371c752dSMatthew Wilcox static inline bool xa_track_free(const struct xarray *xa)
56371c752dSMatthew Wilcox {
57371c752dSMatthew Wilcox 	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
58371c752dSMatthew Wilcox }
59371c752dSMatthew Wilcox 
603ccaf57aSMatthew Wilcox static inline bool xa_zero_busy(const struct xarray *xa)
613ccaf57aSMatthew Wilcox {
623ccaf57aSMatthew Wilcox 	return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
633ccaf57aSMatthew Wilcox }
643ccaf57aSMatthew Wilcox 
659b89a035SMatthew Wilcox static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
669b89a035SMatthew Wilcox {
679b89a035SMatthew Wilcox 	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
689b89a035SMatthew Wilcox 		xa->xa_flags |= XA_FLAGS_MARK(mark);
699b89a035SMatthew Wilcox }
709b89a035SMatthew Wilcox 
719b89a035SMatthew Wilcox static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
729b89a035SMatthew Wilcox {
739b89a035SMatthew Wilcox 	if (xa->xa_flags & XA_FLAGS_MARK(mark))
749b89a035SMatthew Wilcox 		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
759b89a035SMatthew Wilcox }
769b89a035SMatthew Wilcox 
779b89a035SMatthew Wilcox static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
789b89a035SMatthew Wilcox {
799b89a035SMatthew Wilcox 	return node->marks[(__force unsigned)mark];
809b89a035SMatthew Wilcox }
819b89a035SMatthew Wilcox 
829b89a035SMatthew Wilcox static inline bool node_get_mark(struct xa_node *node,
839b89a035SMatthew Wilcox 		unsigned int offset, xa_mark_t mark)
849b89a035SMatthew Wilcox {
859b89a035SMatthew Wilcox 	return test_bit(offset, node_marks(node, mark));
869b89a035SMatthew Wilcox }
879b89a035SMatthew Wilcox 
889b89a035SMatthew Wilcox /* returns true if the bit was set */
899b89a035SMatthew Wilcox static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
909b89a035SMatthew Wilcox 				xa_mark_t mark)
919b89a035SMatthew Wilcox {
929b89a035SMatthew Wilcox 	return __test_and_set_bit(offset, node_marks(node, mark));
939b89a035SMatthew Wilcox }
949b89a035SMatthew Wilcox 
959b89a035SMatthew Wilcox /* returns true if the bit was set */
969b89a035SMatthew Wilcox static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
979b89a035SMatthew Wilcox 				xa_mark_t mark)
989b89a035SMatthew Wilcox {
999b89a035SMatthew Wilcox 	return __test_and_clear_bit(offset, node_marks(node, mark));
1009b89a035SMatthew Wilcox }
1019b89a035SMatthew Wilcox 
1029b89a035SMatthew Wilcox static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
1039b89a035SMatthew Wilcox {
1049b89a035SMatthew Wilcox 	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
1059b89a035SMatthew Wilcox }
1069b89a035SMatthew Wilcox 
107371c752dSMatthew Wilcox static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
108371c752dSMatthew Wilcox {
109371c752dSMatthew Wilcox 	bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
110371c752dSMatthew Wilcox }
111371c752dSMatthew Wilcox 
/*
 * Advance @mark to the next mark number.  xa_mark_t is a sparse bitwise
 * type, so ordinary arithmetic needs the __force casts.
 */
#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)
11558d6ea30SMatthew Wilcox 
/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set.  Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	/* Nothing to squash unless this is a multi-slot (sibling) entry. */
	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		/* Skip this mark if no sibling slot has it set. */
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		/* Aggregate onto the head slot, then wipe the siblings. */
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}
13958d6ea30SMatthew Wilcox 
140ad3d6c72SMatthew Wilcox /* extracts the offset within this node from the index */
141ad3d6c72SMatthew Wilcox static unsigned int get_offset(unsigned long index, struct xa_node *node)
142ad3d6c72SMatthew Wilcox {
143ad3d6c72SMatthew Wilcox 	return (index >> node->shift) & XA_CHUNK_MASK;
144ad3d6c72SMatthew Wilcox }
145ad3d6c72SMatthew Wilcox 
146b803b428SMatthew Wilcox static void xas_set_offset(struct xa_state *xas)
147b803b428SMatthew Wilcox {
148b803b428SMatthew Wilcox 	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
149b803b428SMatthew Wilcox }
150b803b428SMatthew Wilcox 
151ad3d6c72SMatthew Wilcox /* move the index either forwards (find) or backwards (sibling slot) */
152ad3d6c72SMatthew Wilcox static void xas_move_index(struct xa_state *xas, unsigned long offset)
153ad3d6c72SMatthew Wilcox {
154ad3d6c72SMatthew Wilcox 	unsigned int shift = xas->xa_node->shift;
155ad3d6c72SMatthew Wilcox 	xas->xa_index &= ~XA_CHUNK_MASK << shift;
156ad3d6c72SMatthew Wilcox 	xas->xa_index += offset << shift;
157ad3d6c72SMatthew Wilcox }
158ad3d6c72SMatthew Wilcox 
159b803b428SMatthew Wilcox static void xas_advance(struct xa_state *xas)
160b803b428SMatthew Wilcox {
161b803b428SMatthew Wilcox 	xas->xa_offset++;
162b803b428SMatthew Wilcox 	xas_move_index(xas, xas->xa_offset);
163b803b428SMatthew Wilcox }
164b803b428SMatthew Wilcox 
/* Mark the walk as having hit the bounds of the array; there is no entry. */
static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}
170ad3d6c72SMatthew Wilcox 
/*
 * Starts a walk.  If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to.  If we're in an
 * error state, return NULL.  If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		/* A non-node head can only hold index 0. */
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		/* The index must fit under the height of the existing tree. */
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}
199ad3d6c72SMatthew Wilcox 
/*
 * Step one level down from @node towards @xas->xa_index, updating
 * @xas->xa_node and @xas->xa_offset to the slot we land in.  A sibling
 * entry is resolved to the canonical (head) slot of its multi-index entry.
 */
static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	if (xa_is_sibling(entry)) {
		/* Follow the sibling pointer to the head of the entry. */
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
	}

	xas->xa_offset = offset;
	return entry;
}
214ad3d6c72SMatthew Wilcox 
/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index.  However, it will do nothing and return %NULL if
 * @xas is in an error state.  xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		/* Stop above the bottom for multi-order (shifted) lookups. */
		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
		/* shift == 0 means we just descended out of a leaf node. */
		if (node->shift == 0)
			break;
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
246ad3d6c72SMatthew Wilcox 
24758d6ea30SMatthew Wilcox /* Move the radix tree node cache here */
24858d6ea30SMatthew Wilcox extern struct kmem_cache *radix_tree_node_cachep;
24958d6ea30SMatthew Wilcox extern void radix_tree_node_rcu_free(struct rcu_head *head);
25058d6ea30SMatthew Wilcox 
25158d6ea30SMatthew Wilcox #define XA_RCU_FREE	((struct xarray *)1)
25258d6ea30SMatthew Wilcox 
/*
 * Defer freeing @node until after an RCU grace period so that concurrent
 * readers still walking through it stay safe.  The array pointer is
 * poisoned with XA_RCU_FREE so such readers can tell the node is dead.
 */
static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
25958d6ea30SMatthew Wilcox 
/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * This function is now internal-only.
 *
 * Releases the node preallocated by xas_nomem(), if any.  Safe to call
 * when nothing was preallocated.
 */
static void xas_destroy(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_alloc;

	if (!node)
		return;
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	/* The node was never inserted into the tree, so free it directly. */
	kmem_cache_free(radix_tree_node_cachep, node);
	xas->xa_alloc = NULL;
}
27658d6ea30SMatthew Wilcox 
/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue.  The caller
 * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc().  More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	/* Only act when the previous operation failed for lack of memory. */
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	/* Clear the error so the caller's retry restarts the walk. */
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
30958d6ea30SMatthew Wilcox 
/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().  Unlike xas_nomem(), it is called with
 * the xa_lock held and will drop and reacquire it (in whichever variant
 * the array uses) around a blocking allocation.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (gfpflags_allow_blocking(gfp)) {
		/* May sleep: drop the lock for the duration of the allocation. */
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	/* Clear the error so the caller's retry restarts the walk. */
	xas->xa_node = XAS_RESTART;
	return true;
}
34158d6ea30SMatthew Wilcox 
34258d6ea30SMatthew Wilcox static void xas_update(struct xa_state *xas, struct xa_node *node)
34358d6ea30SMatthew Wilcox {
34458d6ea30SMatthew Wilcox 	if (xas->xa_update)
34558d6ea30SMatthew Wilcox 		xas->xa_update(node);
34658d6ea30SMatthew Wilcox 	else
34758d6ea30SMatthew Wilcox 		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
34858d6ea30SMatthew Wilcox }
34958d6ea30SMatthew Wilcox 
/*
 * Allocate and initialise an xa_node for use at @shift, consuming the
 * node preallocated by xas_nomem() if one is available, otherwise trying
 * a non-blocking slab allocation.  On failure the xa_state is set to
 * -ENOMEM and NULL is returned.  If @xas is on an existing node, the new
 * child is accounted in that parent.
 */
static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		/* Use (and consume) the preallocated node. */
		xas->xa_alloc = NULL;
	} else {
		/* Called under the lock, so we must not sleep. */
		node = kmem_cache_alloc(radix_tree_node_cachep,
					GFP_NOWAIT | __GFP_NOWARN);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}
38558d6ea30SMatthew Wilcox 
#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
	unsigned long nr_slots = xas->xa_sibs + 1UL;

	return nr_slots << xas->xa_shift;
}
#endif
3930e9446c3SMatthew Wilcox 
/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
 * multiple-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask = xas_size(xas) - 1;
		/* Round up to cover the whole multi-index range. */
		max |= mask;
		/* A range starting at 0 needs one extra index (see above). */
		if (mask == max)
			max++;
	}
#endif

	return max;
}
41558d6ea30SMatthew Wilcox 
41658d6ea30SMatthew Wilcox /* The maximum index that can be contained in the array without expanding it */
41758d6ea30SMatthew Wilcox static unsigned long max_index(void *entry)
41858d6ea30SMatthew Wilcox {
41958d6ea30SMatthew Wilcox 	if (!xa_is_node(entry))
42058d6ea30SMatthew Wilcox 		return 0;
42158d6ea30SMatthew Wilcox 	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
42258d6ea30SMatthew Wilcox }
42358d6ea30SMatthew Wilcox 
/*
 * Reduce the height of the tree: while the root node holds only a single
 * entry at slot 0, that entry can become the new head.  Slot 0 of each
 * freed node is replaced with XA_RETRY_ENTRY so that concurrent RCU
 * walkers know to restart their walk.
 */
static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		/* A non-node entry may only move to the head from height 1. */
		if (!xa_is_node(entry) && node->shift)
			break;
		/* A reserved slot becomes an empty array once shrunk. */
		if (xa_is_zero(entry) && xa_zero_busy(xa))
			entry = NULL;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}
46058d6ea30SMatthew Wilcox 
/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if xa->node has
 * a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	/* Walk up the tree, removing each node that became empty. */
	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			/* We just deleted the root; the array is now empty. */
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	/* If we stopped at the root, the tree may be shrinkable. */
	if (!node->parent)
		xas_shrink(xas);
}
50058d6ea30SMatthew Wilcox 
/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	/* Iterative depth-first teardown; no recursion on kernel stack. */
	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (node->shift && xa_is_node(entry)) {
			/* Descend into a child before freeing this node. */
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		/* Finished this node's slots: free it and pop to the parent. */
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			/* Read the parent before the node is RCU-freed. */
			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}
54158d6ea30SMatthew Wilcox 
/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 *
 * Returns the shift of the topmost level needed, or a negative errno.
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		/* Empty array: index 0 needs no node at all. */
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	/* Add one level at a time until @max fits under the new root. */
	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				/* Only slot 0 of the new root is occupied. */
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}
61258d6ea30SMatthew Wilcox 
/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 * @allow_root: %true if we can store the entry in the root directly
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL.  If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas, bool allow_root)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	/* Despite the name, this is a shift count (xa_shift), not an order */
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		/* First walk from the top: make the tree tall enough */
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		/*
		 * NOTE(review): an empty ZERO_BUSY array is treated as
		 * holding a zero entry at the head — confirm against
		 * xa_zero_busy() users.
		 */
		if (!entry && xa_zero_busy(xa))
			entry = XA_ZERO_ENTRY;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		/* Force a node allocation so the entry isn't stored at root */
		if (!shift && !allow_root)
			shift = XA_CHUNK_SHIFT;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		/* Resume from a node we have already walked to */
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		/* No node: operate on the head slot itself */
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	/* Descend towards the target level, allocating missing nodes */
	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			/* Hit a non-node entry above the target level */
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}
68158d6ea30SMatthew Wilcox 
6822264f513SMatthew Wilcox /**
6832264f513SMatthew Wilcox  * xas_create_range() - Ensure that stores to this range will succeed
6842264f513SMatthew Wilcox  * @xas: XArray operation state.
6852264f513SMatthew Wilcox  *
6862264f513SMatthew Wilcox  * Creates all of the slots in the range covered by @xas.  Sets @xas to
6872264f513SMatthew Wilcox  * create single-index entries and positions it at the beginning of the
6882264f513SMatthew Wilcox  * range.  This is for the benefit of users which have not yet been
6892264f513SMatthew Wilcox  * converted to use multi-index entries.
6902264f513SMatthew Wilcox  */
6912264f513SMatthew Wilcox void xas_create_range(struct xa_state *xas)
6922264f513SMatthew Wilcox {
6932264f513SMatthew Wilcox 	unsigned long index = xas->xa_index;
6942264f513SMatthew Wilcox 	unsigned char shift = xas->xa_shift;
6952264f513SMatthew Wilcox 	unsigned char sibs = xas->xa_sibs;
6962264f513SMatthew Wilcox 
6972264f513SMatthew Wilcox 	xas->xa_index |= ((sibs + 1) << shift) - 1;
6982264f513SMatthew Wilcox 	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
6992264f513SMatthew Wilcox 		xas->xa_offset |= sibs;
7002264f513SMatthew Wilcox 	xas->xa_shift = 0;
7012264f513SMatthew Wilcox 	xas->xa_sibs = 0;
7022264f513SMatthew Wilcox 
7032264f513SMatthew Wilcox 	for (;;) {
70476b4e529SMatthew Wilcox 		xas_create(xas, true);
7052264f513SMatthew Wilcox 		if (xas_error(xas))
7062264f513SMatthew Wilcox 			goto restore;
7072264f513SMatthew Wilcox 		if (xas->xa_index <= (index | XA_CHUNK_MASK))
7082264f513SMatthew Wilcox 			goto success;
7092264f513SMatthew Wilcox 		xas->xa_index -= XA_CHUNK_SIZE;
7102264f513SMatthew Wilcox 
7112264f513SMatthew Wilcox 		for (;;) {
7122264f513SMatthew Wilcox 			struct xa_node *node = xas->xa_node;
7132264f513SMatthew Wilcox 			xas->xa_node = xa_parent_locked(xas->xa, node);
7142264f513SMatthew Wilcox 			xas->xa_offset = node->offset - 1;
7152264f513SMatthew Wilcox 			if (node->offset != 0)
7162264f513SMatthew Wilcox 				break;
7172264f513SMatthew Wilcox 		}
7182264f513SMatthew Wilcox 	}
7192264f513SMatthew Wilcox 
7202264f513SMatthew Wilcox restore:
7212264f513SMatthew Wilcox 	xas->xa_shift = shift;
7222264f513SMatthew Wilcox 	xas->xa_sibs = sibs;
7232264f513SMatthew Wilcox 	xas->xa_index = index;
7242264f513SMatthew Wilcox 	return;
7252264f513SMatthew Wilcox success:
7262264f513SMatthew Wilcox 	xas->xa_index = index;
7272264f513SMatthew Wilcox 	if (xas->xa_node)
7282264f513SMatthew Wilcox 		xas_set_offset(xas);
7292264f513SMatthew Wilcox }
7302264f513SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_create_range);
7312264f513SMatthew Wilcox 
/*
 * update_node() - Apply slot/value accounting deltas to a node.
 * @xas: XArray operation state.
 * @node: Node to update (may be %NULL, in which case nothing is done).
 * @count: Change in the number of occupied slots.
 * @values: Change in the number of value entries.
 *
 * Notifies the update callback and, if slots were removed, gives
 * xas_delete_node() a chance to free a now-empty node.
 */
static void update_node(struct xa_state *xas, struct xa_node *node,
		int count, int values)
{
	if (!node || (!count && !values))
		return;

	node->count += count;
	node->nr_values += values;
	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
	xas_update(xas, node);
	/* Removing slots may have emptied the node; try to delete it */
	if (count < 0)
		xas_delete_node(xas);
}
74658d6ea30SMatthew Wilcox 
/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range).  This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;		/* net change in occupied slots */
	int values = 0;		/* net change in value entries */
	void *first, *next;
	bool value = xa_is_value(entry);

	/* Storing a node entry directly in the root slot is not allowed */
	if (entry)
		first = xas_create(xas, !xa_is_node(entry));
	else
		first = xas_load(xas);

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	/* Storing the same entry with no siblings to update: done */
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	/* max is the last slot this (possibly multi-index) store covers */
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early.  rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		/* !x is 1 for NULL: +1 if slot filled, -1 if emptied */
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			/* Remaining covered slots become sibling entries */
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			/* When erasing, stop at the first non-sibling past max */
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);
83158d6ea30SMatthew Wilcox 
832f8d5d0ccSMatthew Wilcox /**
8339b89a035SMatthew Wilcox  * xas_get_mark() - Returns the state of this mark.
8349b89a035SMatthew Wilcox  * @xas: XArray operation state.
8359b89a035SMatthew Wilcox  * @mark: Mark number.
8369b89a035SMatthew Wilcox  *
8379b89a035SMatthew Wilcox  * Return: true if the mark is set, false if the mark is clear or @xas
8389b89a035SMatthew Wilcox  * is in an error state.
8399b89a035SMatthew Wilcox  */
8409b89a035SMatthew Wilcox bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
8419b89a035SMatthew Wilcox {
8429b89a035SMatthew Wilcox 	if (xas_invalid(xas))
8439b89a035SMatthew Wilcox 		return false;
8449b89a035SMatthew Wilcox 	if (!xas->xa_node)
8459b89a035SMatthew Wilcox 		return xa_marked(xas->xa, mark);
8469b89a035SMatthew Wilcox 	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
8479b89a035SMatthew Wilcox }
8489b89a035SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_get_mark);
8499b89a035SMatthew Wilcox 
8509b89a035SMatthew Wilcox /**
8519b89a035SMatthew Wilcox  * xas_set_mark() - Sets the mark on this entry and its parents.
8529b89a035SMatthew Wilcox  * @xas: XArray operation state.
8539b89a035SMatthew Wilcox  * @mark: Mark number.
8549b89a035SMatthew Wilcox  *
8559b89a035SMatthew Wilcox  * Sets the specified mark on this entry, and walks up the tree setting it
8569b89a035SMatthew Wilcox  * on all the ancestor entries.  Does nothing if @xas has not been walked to
8579b89a035SMatthew Wilcox  * an entry, or is in an error state.
8589b89a035SMatthew Wilcox  */
8599b89a035SMatthew Wilcox void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
8609b89a035SMatthew Wilcox {
8619b89a035SMatthew Wilcox 	struct xa_node *node = xas->xa_node;
8629b89a035SMatthew Wilcox 	unsigned int offset = xas->xa_offset;
8639b89a035SMatthew Wilcox 
8649b89a035SMatthew Wilcox 	if (xas_invalid(xas))
8659b89a035SMatthew Wilcox 		return;
8669b89a035SMatthew Wilcox 
8679b89a035SMatthew Wilcox 	while (node) {
8689b89a035SMatthew Wilcox 		if (node_set_mark(node, offset, mark))
8699b89a035SMatthew Wilcox 			return;
8709b89a035SMatthew Wilcox 		offset = node->offset;
8719b89a035SMatthew Wilcox 		node = xa_parent_locked(xas->xa, node);
8729b89a035SMatthew Wilcox 	}
8739b89a035SMatthew Wilcox 
8749b89a035SMatthew Wilcox 	if (!xa_marked(xas->xa, mark))
8759b89a035SMatthew Wilcox 		xa_mark_set(xas->xa, mark);
8769b89a035SMatthew Wilcox }
8779b89a035SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_set_mark);
8789b89a035SMatthew Wilcox 
8799b89a035SMatthew Wilcox /**
8809b89a035SMatthew Wilcox  * xas_clear_mark() - Clears the mark on this entry and its parents.
8819b89a035SMatthew Wilcox  * @xas: XArray operation state.
8829b89a035SMatthew Wilcox  * @mark: Mark number.
8839b89a035SMatthew Wilcox  *
8849b89a035SMatthew Wilcox  * Clears the specified mark on this entry, and walks back to the head
8859b89a035SMatthew Wilcox  * attempting to clear it on all the ancestor entries.  Does nothing if
8869b89a035SMatthew Wilcox  * @xas has not been walked to an entry, or is in an error state.
8879b89a035SMatthew Wilcox  */
8889b89a035SMatthew Wilcox void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
8899b89a035SMatthew Wilcox {
8909b89a035SMatthew Wilcox 	struct xa_node *node = xas->xa_node;
8919b89a035SMatthew Wilcox 	unsigned int offset = xas->xa_offset;
8929b89a035SMatthew Wilcox 
8939b89a035SMatthew Wilcox 	if (xas_invalid(xas))
8949b89a035SMatthew Wilcox 		return;
8959b89a035SMatthew Wilcox 
8969b89a035SMatthew Wilcox 	while (node) {
8979b89a035SMatthew Wilcox 		if (!node_clear_mark(node, offset, mark))
8989b89a035SMatthew Wilcox 			return;
8999b89a035SMatthew Wilcox 		if (node_any_mark(node, mark))
9009b89a035SMatthew Wilcox 			return;
9019b89a035SMatthew Wilcox 
9029b89a035SMatthew Wilcox 		offset = node->offset;
9039b89a035SMatthew Wilcox 		node = xa_parent_locked(xas->xa, node);
9049b89a035SMatthew Wilcox 	}
9059b89a035SMatthew Wilcox 
9069b89a035SMatthew Wilcox 	if (xa_marked(xas->xa, mark))
9079b89a035SMatthew Wilcox 		xa_mark_clear(xas->xa, mark);
9089b89a035SMatthew Wilcox }
9099b89a035SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_clear_mark);
9109b89a035SMatthew Wilcox 
9119b89a035SMatthew Wilcox /**
91258d6ea30SMatthew Wilcox  * xas_init_marks() - Initialise all marks for the entry
91358d6ea30SMatthew Wilcox  * @xas: Array operations state.
91458d6ea30SMatthew Wilcox  *
91558d6ea30SMatthew Wilcox  * Initialise all marks for the entry specified by @xas.  If we're tracking
91658d6ea30SMatthew Wilcox  * free entries with a mark, we need to set it on all entries.  All other
91758d6ea30SMatthew Wilcox  * marks are cleared.
91858d6ea30SMatthew Wilcox  *
91958d6ea30SMatthew Wilcox  * This implementation is not as efficient as it could be; we may walk
92058d6ea30SMatthew Wilcox  * up the tree multiple times.
92158d6ea30SMatthew Wilcox  */
92258d6ea30SMatthew Wilcox void xas_init_marks(const struct xa_state *xas)
92358d6ea30SMatthew Wilcox {
92458d6ea30SMatthew Wilcox 	xa_mark_t mark = 0;
92558d6ea30SMatthew Wilcox 
92658d6ea30SMatthew Wilcox 	for (;;) {
927371c752dSMatthew Wilcox 		if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
928371c752dSMatthew Wilcox 			xas_set_mark(xas, mark);
929371c752dSMatthew Wilcox 		else
93058d6ea30SMatthew Wilcox 			xas_clear_mark(xas, mark);
93158d6ea30SMatthew Wilcox 		if (mark == XA_MARK_MAX)
93258d6ea30SMatthew Wilcox 			break;
93358d6ea30SMatthew Wilcox 		mark_inc(mark);
93458d6ea30SMatthew Wilcox 	}
93558d6ea30SMatthew Wilcox }
93658d6ea30SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_init_marks);
93758d6ea30SMatthew Wilcox 
93858d6ea30SMatthew Wilcox /**
939b803b428SMatthew Wilcox  * xas_pause() - Pause a walk to drop a lock.
940b803b428SMatthew Wilcox  * @xas: XArray operation state.
941b803b428SMatthew Wilcox  *
942b803b428SMatthew Wilcox  * Some users need to pause a walk and drop the lock they're holding in
943b803b428SMatthew Wilcox  * order to yield to a higher priority thread or carry out an operation
944b803b428SMatthew Wilcox  * on an entry.  Those users should call this function before they drop
945b803b428SMatthew Wilcox  * the lock.  It resets the @xas to be suitable for the next iteration
946b803b428SMatthew Wilcox  * of the loop after the user has reacquired the lock.  If most entries
947b803b428SMatthew Wilcox  * found during a walk require you to call xas_pause(), the xa_for_each()
948b803b428SMatthew Wilcox  * iterator may be more appropriate.
949b803b428SMatthew Wilcox  *
950b803b428SMatthew Wilcox  * Note that xas_pause() only works for forward iteration.  If a user needs
951b803b428SMatthew Wilcox  * to pause a reverse iteration, we will need a xas_pause_rev().
952b803b428SMatthew Wilcox  */
953b803b428SMatthew Wilcox void xas_pause(struct xa_state *xas)
954b803b428SMatthew Wilcox {
955b803b428SMatthew Wilcox 	struct xa_node *node = xas->xa_node;
956b803b428SMatthew Wilcox 
957b803b428SMatthew Wilcox 	if (xas_invalid(xas))
958b803b428SMatthew Wilcox 		return;
959b803b428SMatthew Wilcox 
960b803b428SMatthew Wilcox 	if (node) {
961b803b428SMatthew Wilcox 		unsigned int offset = xas->xa_offset;
962b803b428SMatthew Wilcox 		while (++offset < XA_CHUNK_SIZE) {
963b803b428SMatthew Wilcox 			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
964b803b428SMatthew Wilcox 				break;
965b803b428SMatthew Wilcox 		}
966b803b428SMatthew Wilcox 		xas->xa_index += (offset - xas->xa_offset) << node->shift;
967b803b428SMatthew Wilcox 	} else {
968b803b428SMatthew Wilcox 		xas->xa_index++;
969b803b428SMatthew Wilcox 	}
970b803b428SMatthew Wilcox 	xas->xa_node = XAS_RESTART;
971b803b428SMatthew Wilcox }
972b803b428SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_pause);
973b803b428SMatthew Wilcox 
/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
	void *entry;

	/* Frozen states (see xas_frozen()) don't move the index */
	if (!xas_frozen(xas->xa_node))
		xas->xa_index--;
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset--;

	/*
	 * Decrementing offset 0 wraps to 255 (xa_offset is an 8-bit
	 * field in struct xa_state): we walked off the front of this
	 * node, so ascend until we can step left.
	 */
	while (xas->xa_offset == 255) {
		xas->xa_offset = xas->xa_node->offset - 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		/* Descend to the slot covering xa_index in this subtree */
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_prev);
101064d3e9a9SMatthew Wilcox 
/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
	void *entry;

	/* Frozen states (see xas_frozen()) don't move the index */
	if (!xas_frozen(xas->xa_node))
		xas->xa_index++;
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset++;

	/* Walked off the end of this node; ascend until we can step right */
	while (xas->xa_offset == XA_CHUNK_SIZE) {
		xas->xa_offset = xas->xa_node->offset + 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		/* Descend to the slot covering xa_index in this subtree */
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_next);
104764d3e9a9SMatthew Wilcox 
/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array.  This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		/*
		 * A NULL node means the walk ended at a root-level entry
		 * (index 0); nothing can follow it, so report bounds at 1.
		 */
		xas->xa_index = 1;
		return set_bounds(xas);
	} else if (xas_top(xas->xa_node)) {
		/* Not yet walked: do the initial descent */
		entry = xas_load(xas);
		if (entry || xas_not_node(xas->xa_node))
			return entry;
	} else if (!xas->xa_node->shift &&
		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
		/* Leaf node: re-sync the offset with a caller-moved index */
		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
	}

	xas_advance(xas);

	while (xas->xa_node && (xas->xa_index <= max)) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			/* Ran off the end of this node; ascend */
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (xa_is_node(entry)) {
			/* Descend into the subtree from its first slot */
			xas->xa_node = xa_to_node(entry);
			xas->xa_offset = 0;
			continue;
		}
		/* Skip sibling slots; they belong to a multi-index entry */
		if (entry && !xa_is_sibling(entry))
			return entry;

		xas_advance(xas);
	}

	if (!xas->xa_node)
		xas->xa_node = XAS_BOUNDS;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
1109b803b428SMatthew Wilcox 
/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array.  This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
	bool advance = true;	/* has the slot at xa_offset been processed? */
	unsigned int offset;
	void *entry;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node) {
		/* Root-level entry (index 0) already processed; see xas_find() */
		xas->xa_index = 1;
		goto out;
	} else if (xas_top(xas->xa_node)) {
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
		if (xas->xa_index > max_index(entry))
			goto out;
		if (!xa_is_node(entry)) {
			/* Single root entry: its marks live on the xarray */
			if (xa_marked(xas->xa, mark))
				return entry;
			xas->xa_index = 1;
			goto out;
		}
		xas->xa_node = xa_to_node(entry);
		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
	}

	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			/* Ran off the end of this node; ascend */
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			advance = false;
			continue;
		}

		if (!advance) {
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
				/* Move back to the head slot of the entry */
				xas->xa_offset = xa_to_sibling(entry);
				xas_move_index(xas, xas->xa_offset);
			}
		}

		/* Search this node's mark bitmap for the next set bit */
		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
			advance = false;
			xas_move_index(xas, offset);
			/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
			xas->xa_offset = offset;
			/* No marked slot in this node; ascend next time round */
			if (offset == XA_CHUNK_SIZE)
				continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;
		/* Marked slot holds a subtree; descend into it */
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}

out:
	if (xas->xa_index > max)
		goto max;
	return set_bounds(xas);
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);
1205b803b428SMatthew Wilcox 
/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context.  Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
	void *curr;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node)
		return NULL;

	if (xas_top(xas->xa_node)) {
		/* First call: walk from the head down towards the target slot. */
		curr = xas_start(xas);
		if (!curr)
			return NULL;
		while (xa_is_node(curr)) {
			struct xa_node *node = xa_to_node(curr);
			curr = xas_descend(xas, node);
		}
		/* A present entry anywhere on the way down is a conflict. */
		if (curr)
			return curr;
	}

	/*
	 * The descent stopped above the level being stored at, so the
	 * whole range is covered by a single empty slot - no conflict.
	 */
	if (xas->xa_node->shift > xas->xa_shift)
		return NULL;

	for (;;) {
		if (xas->xa_node->shift == xas->xa_shift) {
			/* At the target level: stop after the last sibling slot. */
			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
				break;
		} else if (xas->xa_offset == XA_CHUNK_MASK) {
			/* End of a lower-level chunk: pop back up to the parent. */
			xas->xa_offset = xas->xa_node->offset;
			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			continue;
		}
		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
		if (xa_is_sibling(curr))
			continue;
		/* Descend through interior nodes looking for a real entry. */
		while (xa_is_node(curr)) {
			xas->xa_node = xa_to_node(curr);
			xas->xa_offset = 0;
			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
		}
		if (curr)
			return curr;
	}
	/* Rewind to the first sibling slot so a subsequent store lands right. */
	xas->xa_offset -= xas->xa_sibs;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);
12664e99d4e9SMatthew Wilcox 
12674e99d4e9SMatthew Wilcox /**
1268ad3d6c72SMatthew Wilcox  * xa_load() - Load an entry from an XArray.
1269ad3d6c72SMatthew Wilcox  * @xa: XArray.
1270ad3d6c72SMatthew Wilcox  * @index: index into array.
1271ad3d6c72SMatthew Wilcox  *
1272ad3d6c72SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
1273ad3d6c72SMatthew Wilcox  * Return: The entry at @index in @xa.
1274ad3d6c72SMatthew Wilcox  */
1275ad3d6c72SMatthew Wilcox void *xa_load(struct xarray *xa, unsigned long index)
1276ad3d6c72SMatthew Wilcox {
1277ad3d6c72SMatthew Wilcox 	XA_STATE(xas, xa, index);
1278ad3d6c72SMatthew Wilcox 	void *entry;
1279ad3d6c72SMatthew Wilcox 
1280ad3d6c72SMatthew Wilcox 	rcu_read_lock();
1281ad3d6c72SMatthew Wilcox 	do {
1282ad3d6c72SMatthew Wilcox 		entry = xas_load(&xas);
12839f14d4f1SMatthew Wilcox 		if (xa_is_zero(entry))
12849f14d4f1SMatthew Wilcox 			entry = NULL;
1285ad3d6c72SMatthew Wilcox 	} while (xas_retry(&xas, entry));
1286ad3d6c72SMatthew Wilcox 	rcu_read_unlock();
1287ad3d6c72SMatthew Wilcox 
1288ad3d6c72SMatthew Wilcox 	return entry;
1289ad3d6c72SMatthew Wilcox }
1290ad3d6c72SMatthew Wilcox EXPORT_SYMBOL(xa_load);
1291ad3d6c72SMatthew Wilcox 
129258d6ea30SMatthew Wilcox static void *xas_result(struct xa_state *xas, void *curr)
129358d6ea30SMatthew Wilcox {
12949f14d4f1SMatthew Wilcox 	if (xa_is_zero(curr))
12959f14d4f1SMatthew Wilcox 		return NULL;
129658d6ea30SMatthew Wilcox 	if (xas_error(xas))
129758d6ea30SMatthew Wilcox 		curr = xas->xa_node;
129858d6ea30SMatthew Wilcox 	return curr;
129958d6ea30SMatthew Wilcox }
130058d6ea30SMatthew Wilcox 
130158d6ea30SMatthew Wilcox /**
130258d6ea30SMatthew Wilcox  * __xa_erase() - Erase this entry from the XArray while locked.
130358d6ea30SMatthew Wilcox  * @xa: XArray.
130458d6ea30SMatthew Wilcox  * @index: Index into array.
130558d6ea30SMatthew Wilcox  *
1306809ab937SMatthew Wilcox  * After this function returns, loading from @index will return %NULL.
1307809ab937SMatthew Wilcox  * If the index is part of a multi-index entry, all indices will be erased
1308809ab937SMatthew Wilcox  * and none of the entries will be part of a multi-index entry.
130958d6ea30SMatthew Wilcox  *
1310809ab937SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.
1311809ab937SMatthew Wilcox  * Return: The entry which used to be at this index.
131258d6ea30SMatthew Wilcox  */
131358d6ea30SMatthew Wilcox void *__xa_erase(struct xarray *xa, unsigned long index)
131458d6ea30SMatthew Wilcox {
131558d6ea30SMatthew Wilcox 	XA_STATE(xas, xa, index);
131658d6ea30SMatthew Wilcox 	return xas_result(&xas, xas_store(&xas, NULL));
131758d6ea30SMatthew Wilcox }
13189ee5a3b7SMatthew Wilcox EXPORT_SYMBOL(__xa_erase);
131958d6ea30SMatthew Wilcox 
132058d6ea30SMatthew Wilcox /**
13219c16bb88SMatthew Wilcox  * xa_erase() - Erase this entry from the XArray.
13229c16bb88SMatthew Wilcox  * @xa: XArray.
13239c16bb88SMatthew Wilcox  * @index: Index of entry.
13249c16bb88SMatthew Wilcox  *
1325809ab937SMatthew Wilcox  * After this function returns, loading from @index will return %NULL.
1326809ab937SMatthew Wilcox  * If the index is part of a multi-index entry, all indices will be erased
1327809ab937SMatthew Wilcox  * and none of the entries will be part of a multi-index entry.
13289c16bb88SMatthew Wilcox  *
13299c16bb88SMatthew Wilcox  * Context: Any context.  Takes and releases the xa_lock.
13309c16bb88SMatthew Wilcox  * Return: The entry which used to be at this index.
13319c16bb88SMatthew Wilcox  */
13329c16bb88SMatthew Wilcox void *xa_erase(struct xarray *xa, unsigned long index)
13339c16bb88SMatthew Wilcox {
13349c16bb88SMatthew Wilcox 	void *entry;
13359c16bb88SMatthew Wilcox 
13369c16bb88SMatthew Wilcox 	xa_lock(xa);
13379c16bb88SMatthew Wilcox 	entry = __xa_erase(xa, index);
13389c16bb88SMatthew Wilcox 	xa_unlock(xa);
13399c16bb88SMatthew Wilcox 
13409c16bb88SMatthew Wilcox 	return entry;
13419c16bb88SMatthew Wilcox }
13429c16bb88SMatthew Wilcox EXPORT_SYMBOL(xa_erase);
13439c16bb88SMatthew Wilcox 
13449c16bb88SMatthew Wilcox /**
134558d6ea30SMatthew Wilcox  * __xa_store() - Store this entry in the XArray.
134658d6ea30SMatthew Wilcox  * @xa: XArray.
134758d6ea30SMatthew Wilcox  * @index: Index into array.
134858d6ea30SMatthew Wilcox  * @entry: New entry.
134958d6ea30SMatthew Wilcox  * @gfp: Memory allocation flags.
135058d6ea30SMatthew Wilcox  *
135158d6ea30SMatthew Wilcox  * You must already be holding the xa_lock when calling this function.
135258d6ea30SMatthew Wilcox  * It will drop the lock if needed to allocate memory, and then reacquire
135358d6ea30SMatthew Wilcox  * it afterwards.
135458d6ea30SMatthew Wilcox  *
135558d6ea30SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.  May
135658d6ea30SMatthew Wilcox  * release and reacquire xa_lock if @gfp flags permit.
135758d6ea30SMatthew Wilcox  * Return: The old entry at this index or xa_err() if an error happened.
135858d6ea30SMatthew Wilcox  */
135958d6ea30SMatthew Wilcox void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
136058d6ea30SMatthew Wilcox {
136158d6ea30SMatthew Wilcox 	XA_STATE(xas, xa, index);
136258d6ea30SMatthew Wilcox 	void *curr;
136358d6ea30SMatthew Wilcox 
136476b4e529SMatthew Wilcox 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
136558d6ea30SMatthew Wilcox 		return XA_ERROR(-EINVAL);
1366d9c48043SMatthew Wilcox 	if (xa_track_free(xa) && !entry)
1367d9c48043SMatthew Wilcox 		entry = XA_ZERO_ENTRY;
136858d6ea30SMatthew Wilcox 
136958d6ea30SMatthew Wilcox 	do {
137058d6ea30SMatthew Wilcox 		curr = xas_store(&xas, entry);
1371d9c48043SMatthew Wilcox 		if (xa_track_free(xa))
1372371c752dSMatthew Wilcox 			xas_clear_mark(&xas, XA_FREE_MARK);
137358d6ea30SMatthew Wilcox 	} while (__xas_nomem(&xas, gfp));
137458d6ea30SMatthew Wilcox 
137558d6ea30SMatthew Wilcox 	return xas_result(&xas, curr);
137658d6ea30SMatthew Wilcox }
137758d6ea30SMatthew Wilcox EXPORT_SYMBOL(__xa_store);
137858d6ea30SMatthew Wilcox 
13799b89a035SMatthew Wilcox /**
1380611f3186SMatthew Wilcox  * xa_store() - Store this entry in the XArray.
1381611f3186SMatthew Wilcox  * @xa: XArray.
1382611f3186SMatthew Wilcox  * @index: Index into array.
1383611f3186SMatthew Wilcox  * @entry: New entry.
1384611f3186SMatthew Wilcox  * @gfp: Memory allocation flags.
1385611f3186SMatthew Wilcox  *
1386611f3186SMatthew Wilcox  * After this function returns, loads from this index will return @entry.
1387611f3186SMatthew Wilcox  * Storing into an existing multislot entry updates the entry of every index.
1388611f3186SMatthew Wilcox  * The marks associated with @index are unaffected unless @entry is %NULL.
1389611f3186SMatthew Wilcox  *
1390611f3186SMatthew Wilcox  * Context: Any context.  Takes and releases the xa_lock.
1391611f3186SMatthew Wilcox  * May sleep if the @gfp flags permit.
1392611f3186SMatthew Wilcox  * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
1393611f3186SMatthew Wilcox  * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
1394611f3186SMatthew Wilcox  * failed.
1395611f3186SMatthew Wilcox  */
1396611f3186SMatthew Wilcox void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1397611f3186SMatthew Wilcox {
1398611f3186SMatthew Wilcox 	void *curr;
1399611f3186SMatthew Wilcox 
1400611f3186SMatthew Wilcox 	xa_lock(xa);
1401611f3186SMatthew Wilcox 	curr = __xa_store(xa, index, entry, gfp);
1402611f3186SMatthew Wilcox 	xa_unlock(xa);
1403611f3186SMatthew Wilcox 
1404611f3186SMatthew Wilcox 	return curr;
1405611f3186SMatthew Wilcox }
1406611f3186SMatthew Wilcox EXPORT_SYMBOL(xa_store);
1407611f3186SMatthew Wilcox 
/**
 * __xa_cmpxchg() - Conditionally store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Stores @entry at @index only if the current entry there is @old.
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	/* Internal entries (other than the zero entry) may not be stored. */
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);
	/* In an allocating array, NULL is stored as a reserved entry. */
	if (xa_track_free(xa) && !entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_load(&xas);
		/* A reserved slot compares (and reads back) as NULL. */
		if (curr == XA_ZERO_ENTRY)
			curr = NULL;
		if (curr == old) {
			xas_store(&xas, entry);
			if (xa_track_free(xa))
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));	/* may drop & retake the lock */

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);
144941aec91fSMatthew Wilcox 
/**
 * __xa_insert() - Store this entry in the XArray if no entry is present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	/* Internal entries (other than the zero entry) may not be stored. */
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;
	/* Inserting NULL reserves the index instead. */
	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_load(&xas);
		if (!curr) {
			xas_store(&xas, entry);
			if (xa_track_free(xa))
				xas_clear_mark(&xas, XA_FREE_MARK);
		} else {
			/*
			 * Any existing entry - including a reserved one,
			 * which was deliberately not normalised to NULL
			 * above - blocks the insert.
			 */
			xas_set_err(&xas, -EBUSY);
		}
	} while (__xas_nomem(&xas, gfp));	/* may drop & retake the lock */

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_insert);
1490b0606fedSMatthew Wilcox 
/**
 * __xa_reserve() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * Ensures there is somewhere to store an entry at @index in the array.
 * If there is already something stored at @index, this function does
 * nothing.  If there was nothing there, the entry is marked as reserved.
 * Loading from a reserved entry returns a %NULL pointer.
 *
 * If you do not use the entry that you have reserved, call xa_release()
 * or xa_erase() to free any unnecessary memory.
 *
 * Context: Any context.  Expects the xa_lock to be held on entry.  May
 * release the lock, sleep and reacquire the lock if the @gfp flags permit.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	do {
		curr = xas_load(&xas);
		if (!curr) {
			/* Reserve with the zero entry, which loads as NULL. */
			xas_store(&xas, XA_ZERO_ENTRY);
			/* A reserved slot is no longer free for allocation. */
			if (xa_track_free(xa))
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));	/* may drop & retake the lock */

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_reserve);
15269f14d4f1SMatthew Wilcox 
15270e9446c3SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI
/*
 * Configure @xas to cover part of the range @first..@last with a single
 * store: sets xa_index, xa_shift and xa_sibs to describe the largest
 * naturally-aligned span starting at @first that does not extend past
 * @last.  Callers loop, advancing @first by the size covered each time.
 */
static void xas_set_range(struct xa_state *xas, unsigned long first,
		unsigned long last)
{
	unsigned int shift = 0;
	unsigned long sibs = last - first;
	unsigned int offset = XA_CHUNK_MASK;

	xas_set(xas, first);

	/*
	 * Ascend a level at a time while @first is aligned to a whole
	 * chunk and the remaining span is at least a chunk wide.
	 */
	while ((first & XA_CHUNK_MASK) == 0) {
		if (sibs < XA_CHUNK_MASK)
			break;
		if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
			break;
		shift += XA_CHUNK_SHIFT;
		if (offset == XA_CHUNK_MASK)
			offset = sibs & XA_CHUNK_MASK;
		sibs >>= XA_CHUNK_SHIFT;
		first >>= XA_CHUNK_SHIFT;
	}

	/* Clamp the sibling count to what fits in this chunk ... */
	offset = first & XA_CHUNK_MASK;
	if (offset + sibs > XA_CHUNK_MASK)
		sibs = XA_CHUNK_MASK - offset;
	/* ... and trim it if the span would overshoot @last. */
	if ((((first + sibs + 1) << shift) - 1) > last)
		sibs -= 1;

	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
}
15580e9446c3SMatthew Wilcox 
/**
 * xa_store_range() - Store this entry at a range of indices in the XArray.
 * @xa: XArray.
 * @first: First index to affect.
 * @last: Last index to affect.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from any index between @first and @last,
 * inclusive will return @entry.
 * Storing into an existing multislot entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Process context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
 * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
 */
void *xa_store_range(struct xarray *xa, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);
	if (last < first)
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		if (entry) {
			/*
			 * The order of the largest aligned entry that can
			 * end at @last: the number of trailing 1 bits in
			 * @last (all bits, if @last + 1 wraps to zero).
			 */
			unsigned int order = BITS_PER_LONG;
			if (last + 1)
				order = __ffs(last + 1);
			/* Create the deepest slot up front so it can't fail mid-loop. */
			xas_set_order(&xas, last, order);
			xas_create(&xas, true);
			if (xas_error(&xas))
				goto unlock;
		}
		do {
			/* Store one aligned span at a time until @last is covered. */
			xas_set_range(&xas, first, last);
			xas_store(&xas, entry);
			if (xas_error(&xas))
				goto unlock;
			first += xas_size(&xas);
		} while (first <= last);
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
16120e9446c3SMatthew Wilcox #endif /* CONFIG_XARRAY_MULTI */
16130e9446c3SMatthew Wilcox 
/**
 * __xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @limit: Range for allocated ID.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);

	/* Internal entries (other than the zero entry) may not be stored. */
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;
	/* Allocation only works on arrays created with XA_FLAGS_ALLOC. */
	if (WARN_ON_ONCE(!xa_track_free(xa)))
		return -EINVAL;

	/* Allocating NULL stores a reserved entry, keeping the ID in use. */
	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		xas.xa_index = limit.min;
		/* Look for a slot still carrying the free mark. */
		xas_find_marked(&xas, limit.max, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);
		else
			/* Publish @id before the store, as documented above. */
			*id = xas.xa_index;
		xas_store(&xas, entry);
		xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));	/* may drop & retake the lock */

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_alloc);
1658371c752dSMatthew Wilcox 
1659371c752dSMatthew Wilcox /**
1660*2fa044e5SMatthew Wilcox  * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
1661*2fa044e5SMatthew Wilcox  * @xa: XArray.
1662*2fa044e5SMatthew Wilcox  * @id: Pointer to ID.
1663*2fa044e5SMatthew Wilcox  * @entry: New entry.
1664*2fa044e5SMatthew Wilcox  * @limit: Range of allocated ID.
1665*2fa044e5SMatthew Wilcox  * @next: Pointer to next ID to allocate.
1666*2fa044e5SMatthew Wilcox  * @gfp: Memory allocation flags.
1667*2fa044e5SMatthew Wilcox  *
1668*2fa044e5SMatthew Wilcox  * Finds an empty entry in @xa between @limit.min and @limit.max,
1669*2fa044e5SMatthew Wilcox  * stores the index into the @id pointer, then stores the entry at
1670*2fa044e5SMatthew Wilcox  * that index.  A concurrent lookup will not see an uninitialised @id.
1671*2fa044e5SMatthew Wilcox  * The search for an empty entry will start at @next and will wrap
1672*2fa044e5SMatthew Wilcox  * around if necessary.
1673*2fa044e5SMatthew Wilcox  *
1674*2fa044e5SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.  May
1675*2fa044e5SMatthew Wilcox  * release and reacquire xa_lock if @gfp flags permit.
1676*2fa044e5SMatthew Wilcox  * Return: 0 if the allocation succeeded without wrapping.  1 if the
1677*2fa044e5SMatthew Wilcox  * allocation succeeded after wrapping, -ENOMEM if memory could not be
1678*2fa044e5SMatthew Wilcox  * allocated or -EBUSY if there are no free entries in @limit.
1679*2fa044e5SMatthew Wilcox  */
1680*2fa044e5SMatthew Wilcox int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
1681*2fa044e5SMatthew Wilcox 		struct xa_limit limit, u32 *next, gfp_t gfp)
1682*2fa044e5SMatthew Wilcox {
1683*2fa044e5SMatthew Wilcox 	u32 min = limit.min;
1684*2fa044e5SMatthew Wilcox 	int ret;
1685*2fa044e5SMatthew Wilcox 
1686*2fa044e5SMatthew Wilcox 	limit.min = max(min, *next);
1687*2fa044e5SMatthew Wilcox 	ret = __xa_alloc(xa, id, entry, limit, gfp);
1688*2fa044e5SMatthew Wilcox 	if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
1689*2fa044e5SMatthew Wilcox 		xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
1690*2fa044e5SMatthew Wilcox 		ret = 1;
1691*2fa044e5SMatthew Wilcox 	}
1692*2fa044e5SMatthew Wilcox 
1693*2fa044e5SMatthew Wilcox 	if (ret < 0 && limit.min > min) {
1694*2fa044e5SMatthew Wilcox 		limit.min = min;
1695*2fa044e5SMatthew Wilcox 		ret = __xa_alloc(xa, id, entry, limit, gfp);
1696*2fa044e5SMatthew Wilcox 		if (ret == 0)
1697*2fa044e5SMatthew Wilcox 			ret = 1;
1698*2fa044e5SMatthew Wilcox 	}
1699*2fa044e5SMatthew Wilcox 
1700*2fa044e5SMatthew Wilcox 	if (ret >= 0) {
1701*2fa044e5SMatthew Wilcox 		*next = *id + 1;
1702*2fa044e5SMatthew Wilcox 		if (*next == 0)
1703*2fa044e5SMatthew Wilcox 			xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
1704*2fa044e5SMatthew Wilcox 	}
1705*2fa044e5SMatthew Wilcox 	return ret;
1706*2fa044e5SMatthew Wilcox }
1707*2fa044e5SMatthew Wilcox EXPORT_SYMBOL(__xa_alloc_cyclic);
1708*2fa044e5SMatthew Wilcox 
1709*2fa044e5SMatthew Wilcox /**
17109b89a035SMatthew Wilcox  * __xa_set_mark() - Set this mark on this entry while locked.
17119b89a035SMatthew Wilcox  * @xa: XArray.
17129b89a035SMatthew Wilcox  * @index: Index of entry.
17139b89a035SMatthew Wilcox  * @mark: Mark number.
17149b89a035SMatthew Wilcox  *
1715804dfaf0SMatthew Wilcox  * Attempting to set a mark on a %NULL entry does not succeed.
17169b89a035SMatthew Wilcox  *
17179b89a035SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.
17189b89a035SMatthew Wilcox  */
17199b89a035SMatthew Wilcox void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
17209b89a035SMatthew Wilcox {
17219b89a035SMatthew Wilcox 	XA_STATE(xas, xa, index);
17229b89a035SMatthew Wilcox 	void *entry = xas_load(&xas);
17239b89a035SMatthew Wilcox 
17249b89a035SMatthew Wilcox 	if (entry)
17259b89a035SMatthew Wilcox 		xas_set_mark(&xas, mark);
17269b89a035SMatthew Wilcox }
17279ee5a3b7SMatthew Wilcox EXPORT_SYMBOL(__xa_set_mark);
17289b89a035SMatthew Wilcox 
17299b89a035SMatthew Wilcox /**
17309b89a035SMatthew Wilcox  * __xa_clear_mark() - Clear this mark on this entry while locked.
17319b89a035SMatthew Wilcox  * @xa: XArray.
17329b89a035SMatthew Wilcox  * @index: Index of entry.
17339b89a035SMatthew Wilcox  * @mark: Mark number.
17349b89a035SMatthew Wilcox  *
17359b89a035SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.
17369b89a035SMatthew Wilcox  */
17379b89a035SMatthew Wilcox void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
17389b89a035SMatthew Wilcox {
17399b89a035SMatthew Wilcox 	XA_STATE(xas, xa, index);
17409b89a035SMatthew Wilcox 	void *entry = xas_load(&xas);
17419b89a035SMatthew Wilcox 
17429b89a035SMatthew Wilcox 	if (entry)
17439b89a035SMatthew Wilcox 		xas_clear_mark(&xas, mark);
17449b89a035SMatthew Wilcox }
17459ee5a3b7SMatthew Wilcox EXPORT_SYMBOL(__xa_clear_mark);
17469b89a035SMatthew Wilcox 
17479b89a035SMatthew Wilcox /**
17489b89a035SMatthew Wilcox  * xa_get_mark() - Inquire whether this mark is set on this entry.
17499b89a035SMatthew Wilcox  * @xa: XArray.
17509b89a035SMatthew Wilcox  * @index: Index of entry.
17519b89a035SMatthew Wilcox  * @mark: Mark number.
17529b89a035SMatthew Wilcox  *
17539b89a035SMatthew Wilcox  * This function uses the RCU read lock, so the result may be out of date
17549b89a035SMatthew Wilcox  * by the time it returns.  If you need the result to be stable, use a lock.
17559b89a035SMatthew Wilcox  *
17569b89a035SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
17579b89a035SMatthew Wilcox  * Return: True if the entry at @index has this mark set, false if it doesn't.
17589b89a035SMatthew Wilcox  */
17599b89a035SMatthew Wilcox bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
17609b89a035SMatthew Wilcox {
17619b89a035SMatthew Wilcox 	XA_STATE(xas, xa, index);
17629b89a035SMatthew Wilcox 	void *entry;
17639b89a035SMatthew Wilcox 
17649b89a035SMatthew Wilcox 	rcu_read_lock();
17659b89a035SMatthew Wilcox 	entry = xas_start(&xas);
17669b89a035SMatthew Wilcox 	while (xas_get_mark(&xas, mark)) {
17679b89a035SMatthew Wilcox 		if (!xa_is_node(entry))
17689b89a035SMatthew Wilcox 			goto found;
17699b89a035SMatthew Wilcox 		entry = xas_descend(&xas, xa_to_node(entry));
17709b89a035SMatthew Wilcox 	}
17719b89a035SMatthew Wilcox 	rcu_read_unlock();
17729b89a035SMatthew Wilcox 	return false;
17739b89a035SMatthew Wilcox  found:
17749b89a035SMatthew Wilcox 	rcu_read_unlock();
17759b89a035SMatthew Wilcox 	return true;
17769b89a035SMatthew Wilcox }
17779b89a035SMatthew Wilcox EXPORT_SYMBOL(xa_get_mark);
17789b89a035SMatthew Wilcox 
17799b89a035SMatthew Wilcox /**
17809b89a035SMatthew Wilcox  * xa_set_mark() - Set this mark on this entry.
17819b89a035SMatthew Wilcox  * @xa: XArray.
17829b89a035SMatthew Wilcox  * @index: Index of entry.
17839b89a035SMatthew Wilcox  * @mark: Mark number.
17849b89a035SMatthew Wilcox  *
1785804dfaf0SMatthew Wilcox  * Attempting to set a mark on a %NULL entry does not succeed.
17869b89a035SMatthew Wilcox  *
17879b89a035SMatthew Wilcox  * Context: Process context.  Takes and releases the xa_lock.
17889b89a035SMatthew Wilcox  */
17899b89a035SMatthew Wilcox void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
17909b89a035SMatthew Wilcox {
17919b89a035SMatthew Wilcox 	xa_lock(xa);
17929b89a035SMatthew Wilcox 	__xa_set_mark(xa, index, mark);
17939b89a035SMatthew Wilcox 	xa_unlock(xa);
17949b89a035SMatthew Wilcox }
17959b89a035SMatthew Wilcox EXPORT_SYMBOL(xa_set_mark);
17969b89a035SMatthew Wilcox 
17979b89a035SMatthew Wilcox /**
17989b89a035SMatthew Wilcox  * xa_clear_mark() - Clear this mark on this entry.
17999b89a035SMatthew Wilcox  * @xa: XArray.
18009b89a035SMatthew Wilcox  * @index: Index of entry.
18019b89a035SMatthew Wilcox  * @mark: Mark number.
18029b89a035SMatthew Wilcox  *
18039b89a035SMatthew Wilcox  * Clearing a mark always succeeds.
18049b89a035SMatthew Wilcox  *
18059b89a035SMatthew Wilcox  * Context: Process context.  Takes and releases the xa_lock.
18069b89a035SMatthew Wilcox  */
18079b89a035SMatthew Wilcox void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
18089b89a035SMatthew Wilcox {
18099b89a035SMatthew Wilcox 	xa_lock(xa);
18109b89a035SMatthew Wilcox 	__xa_clear_mark(xa, index, mark);
18119b89a035SMatthew Wilcox 	xa_unlock(xa);
18129b89a035SMatthew Wilcox }
18139b89a035SMatthew Wilcox EXPORT_SYMBOL(xa_clear_mark);
18149b89a035SMatthew Wilcox 
1815b803b428SMatthew Wilcox /**
1816b803b428SMatthew Wilcox  * xa_find() - Search the XArray for an entry.
1817b803b428SMatthew Wilcox  * @xa: XArray.
1818b803b428SMatthew Wilcox  * @indexp: Pointer to an index.
1819b803b428SMatthew Wilcox  * @max: Maximum index to search to.
1820b803b428SMatthew Wilcox  * @filter: Selection criterion.
1821b803b428SMatthew Wilcox  *
1822b803b428SMatthew Wilcox  * Finds the entry in @xa which matches the @filter, and has the lowest
1823b803b428SMatthew Wilcox  * index that is at least @indexp and no more than @max.
1824b803b428SMatthew Wilcox  * If an entry is found, @indexp is updated to be the index of the entry.
1825b803b428SMatthew Wilcox  * This function is protected by the RCU read lock, so it may not find
1826b803b428SMatthew Wilcox  * entries which are being simultaneously added.  It will not return an
1827b803b428SMatthew Wilcox  * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
1828b803b428SMatthew Wilcox  *
1829b803b428SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
1830b803b428SMatthew Wilcox  * Return: The entry, if found, otherwise %NULL.
1831b803b428SMatthew Wilcox  */
1832b803b428SMatthew Wilcox void *xa_find(struct xarray *xa, unsigned long *indexp,
1833b803b428SMatthew Wilcox 			unsigned long max, xa_mark_t filter)
1834b803b428SMatthew Wilcox {
1835b803b428SMatthew Wilcox 	XA_STATE(xas, xa, *indexp);
1836b803b428SMatthew Wilcox 	void *entry;
1837b803b428SMatthew Wilcox 
1838b803b428SMatthew Wilcox 	rcu_read_lock();
1839b803b428SMatthew Wilcox 	do {
1840b803b428SMatthew Wilcox 		if ((__force unsigned int)filter < XA_MAX_MARKS)
1841b803b428SMatthew Wilcox 			entry = xas_find_marked(&xas, max, filter);
1842b803b428SMatthew Wilcox 		else
1843b803b428SMatthew Wilcox 			entry = xas_find(&xas, max);
1844b803b428SMatthew Wilcox 	} while (xas_retry(&xas, entry));
1845b803b428SMatthew Wilcox 	rcu_read_unlock();
1846b803b428SMatthew Wilcox 
1847b803b428SMatthew Wilcox 	if (entry)
1848b803b428SMatthew Wilcox 		*indexp = xas.xa_index;
1849b803b428SMatthew Wilcox 	return entry;
1850b803b428SMatthew Wilcox }
1851b803b428SMatthew Wilcox EXPORT_SYMBOL(xa_find);
1852b803b428SMatthew Wilcox 
1853b803b428SMatthew Wilcox /**
1854b803b428SMatthew Wilcox  * xa_find_after() - Search the XArray for a present entry.
1855b803b428SMatthew Wilcox  * @xa: XArray.
1856b803b428SMatthew Wilcox  * @indexp: Pointer to an index.
1857b803b428SMatthew Wilcox  * @max: Maximum index to search to.
1858b803b428SMatthew Wilcox  * @filter: Selection criterion.
1859b803b428SMatthew Wilcox  *
1860b803b428SMatthew Wilcox  * Finds the entry in @xa which matches the @filter and has the lowest
1861b803b428SMatthew Wilcox  * index that is above @indexp and no more than @max.
1862b803b428SMatthew Wilcox  * If an entry is found, @indexp is updated to be the index of the entry.
1863b803b428SMatthew Wilcox  * This function is protected by the RCU read lock, so it may miss entries
1864b803b428SMatthew Wilcox  * which are being simultaneously added.  It will not return an
1865b803b428SMatthew Wilcox  * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
1866b803b428SMatthew Wilcox  *
1867b803b428SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
1868b803b428SMatthew Wilcox  * Return: The pointer, if found, otherwise %NULL.
1869b803b428SMatthew Wilcox  */
1870b803b428SMatthew Wilcox void *xa_find_after(struct xarray *xa, unsigned long *indexp,
1871b803b428SMatthew Wilcox 			unsigned long max, xa_mark_t filter)
1872b803b428SMatthew Wilcox {
1873b803b428SMatthew Wilcox 	XA_STATE(xas, xa, *indexp + 1);
1874b803b428SMatthew Wilcox 	void *entry;
1875b803b428SMatthew Wilcox 
1876b803b428SMatthew Wilcox 	rcu_read_lock();
1877b803b428SMatthew Wilcox 	for (;;) {
1878b803b428SMatthew Wilcox 		if ((__force unsigned int)filter < XA_MAX_MARKS)
1879b803b428SMatthew Wilcox 			entry = xas_find_marked(&xas, max, filter);
1880b803b428SMatthew Wilcox 		else
1881b803b428SMatthew Wilcox 			entry = xas_find(&xas, max);
18828229706eSMatthew Wilcox 		if (xas.xa_node == XAS_BOUNDS)
18838229706eSMatthew Wilcox 			break;
1884b803b428SMatthew Wilcox 		if (xas.xa_shift) {
1885b803b428SMatthew Wilcox 			if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
1886b803b428SMatthew Wilcox 				continue;
1887b803b428SMatthew Wilcox 		} else {
1888b803b428SMatthew Wilcox 			if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
1889b803b428SMatthew Wilcox 				continue;
1890b803b428SMatthew Wilcox 		}
1891b803b428SMatthew Wilcox 		if (!xas_retry(&xas, entry))
1892b803b428SMatthew Wilcox 			break;
1893b803b428SMatthew Wilcox 	}
1894b803b428SMatthew Wilcox 	rcu_read_unlock();
1895b803b428SMatthew Wilcox 
1896b803b428SMatthew Wilcox 	if (entry)
1897b803b428SMatthew Wilcox 		*indexp = xas.xa_index;
1898b803b428SMatthew Wilcox 	return entry;
1899b803b428SMatthew Wilcox }
1900b803b428SMatthew Wilcox EXPORT_SYMBOL(xa_find_after);
1901b803b428SMatthew Wilcox 
190280a0a1a9SMatthew Wilcox static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
190380a0a1a9SMatthew Wilcox 			unsigned long max, unsigned int n)
190480a0a1a9SMatthew Wilcox {
190580a0a1a9SMatthew Wilcox 	void *entry;
190680a0a1a9SMatthew Wilcox 	unsigned int i = 0;
190780a0a1a9SMatthew Wilcox 
190880a0a1a9SMatthew Wilcox 	rcu_read_lock();
190980a0a1a9SMatthew Wilcox 	xas_for_each(xas, entry, max) {
191080a0a1a9SMatthew Wilcox 		if (xas_retry(xas, entry))
191180a0a1a9SMatthew Wilcox 			continue;
191280a0a1a9SMatthew Wilcox 		dst[i++] = entry;
191380a0a1a9SMatthew Wilcox 		if (i == n)
191480a0a1a9SMatthew Wilcox 			break;
191580a0a1a9SMatthew Wilcox 	}
191680a0a1a9SMatthew Wilcox 	rcu_read_unlock();
191780a0a1a9SMatthew Wilcox 
191880a0a1a9SMatthew Wilcox 	return i;
191980a0a1a9SMatthew Wilcox }
192080a0a1a9SMatthew Wilcox 
192180a0a1a9SMatthew Wilcox static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
192280a0a1a9SMatthew Wilcox 			unsigned long max, unsigned int n, xa_mark_t mark)
192380a0a1a9SMatthew Wilcox {
192480a0a1a9SMatthew Wilcox 	void *entry;
192580a0a1a9SMatthew Wilcox 	unsigned int i = 0;
192680a0a1a9SMatthew Wilcox 
192780a0a1a9SMatthew Wilcox 	rcu_read_lock();
192880a0a1a9SMatthew Wilcox 	xas_for_each_marked(xas, entry, max, mark) {
192980a0a1a9SMatthew Wilcox 		if (xas_retry(xas, entry))
193080a0a1a9SMatthew Wilcox 			continue;
193180a0a1a9SMatthew Wilcox 		dst[i++] = entry;
193280a0a1a9SMatthew Wilcox 		if (i == n)
193380a0a1a9SMatthew Wilcox 			break;
193480a0a1a9SMatthew Wilcox 	}
193580a0a1a9SMatthew Wilcox 	rcu_read_unlock();
193680a0a1a9SMatthew Wilcox 
193780a0a1a9SMatthew Wilcox 	return i;
193880a0a1a9SMatthew Wilcox }
193980a0a1a9SMatthew Wilcox 
194080a0a1a9SMatthew Wilcox /**
194180a0a1a9SMatthew Wilcox  * xa_extract() - Copy selected entries from the XArray into a normal array.
194280a0a1a9SMatthew Wilcox  * @xa: The source XArray to copy from.
194380a0a1a9SMatthew Wilcox  * @dst: The buffer to copy entries into.
194480a0a1a9SMatthew Wilcox  * @start: The first index in the XArray eligible to be selected.
194580a0a1a9SMatthew Wilcox  * @max: The last index in the XArray eligible to be selected.
194680a0a1a9SMatthew Wilcox  * @n: The maximum number of entries to copy.
194780a0a1a9SMatthew Wilcox  * @filter: Selection criterion.
194880a0a1a9SMatthew Wilcox  *
194980a0a1a9SMatthew Wilcox  * Copies up to @n entries that match @filter from the XArray.  The
195080a0a1a9SMatthew Wilcox  * copied entries will have indices between @start and @max, inclusive.
195180a0a1a9SMatthew Wilcox  *
195280a0a1a9SMatthew Wilcox  * The @filter may be an XArray mark value, in which case entries which are
195380a0a1a9SMatthew Wilcox  * marked with that mark will be copied.  It may also be %XA_PRESENT, in
1954804dfaf0SMatthew Wilcox  * which case all entries which are not %NULL will be copied.
195580a0a1a9SMatthew Wilcox  *
195680a0a1a9SMatthew Wilcox  * The entries returned may not represent a snapshot of the XArray at a
195780a0a1a9SMatthew Wilcox  * moment in time.  For example, if another thread stores to index 5, then
195880a0a1a9SMatthew Wilcox  * index 10, calling xa_extract() may return the old contents of index 5
195980a0a1a9SMatthew Wilcox  * and the new contents of index 10.  Indices not modified while this
196080a0a1a9SMatthew Wilcox  * function is running will not be skipped.
196180a0a1a9SMatthew Wilcox  *
196280a0a1a9SMatthew Wilcox  * If you need stronger guarantees, holding the xa_lock across calls to this
196380a0a1a9SMatthew Wilcox  * function will prevent concurrent modification.
196480a0a1a9SMatthew Wilcox  *
196580a0a1a9SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
196680a0a1a9SMatthew Wilcox  * Return: The number of entries copied.
196780a0a1a9SMatthew Wilcox  */
196880a0a1a9SMatthew Wilcox unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
196980a0a1a9SMatthew Wilcox 			unsigned long max, unsigned int n, xa_mark_t filter)
197080a0a1a9SMatthew Wilcox {
197180a0a1a9SMatthew Wilcox 	XA_STATE(xas, xa, start);
197280a0a1a9SMatthew Wilcox 
197380a0a1a9SMatthew Wilcox 	if (!n)
197480a0a1a9SMatthew Wilcox 		return 0;
197580a0a1a9SMatthew Wilcox 
197680a0a1a9SMatthew Wilcox 	if ((__force unsigned int)filter < XA_MAX_MARKS)
197780a0a1a9SMatthew Wilcox 		return xas_extract_marked(&xas, dst, max, n, filter);
197880a0a1a9SMatthew Wilcox 	return xas_extract_present(&xas, dst, max, n);
197980a0a1a9SMatthew Wilcox }
198080a0a1a9SMatthew Wilcox EXPORT_SYMBOL(xa_extract);
198180a0a1a9SMatthew Wilcox 
/**
 * xa_destroy() - Free all internal data structures.
 * @xa: XArray.
 *
 * After calling this function, the XArray is empty and has freed all memory
 * allocated for its internal data structures.  You are responsible for
 * freeing the objects referenced by the XArray.
 *
 * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
 */
void xa_destroy(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long flags;
	void *entry;

	/* No walk in progress; clear the node so xas refers to the array only */
	xas.xa_node = NULL;
	xas_lock_irqsave(&xas, flags);
	entry = xa_head_locked(xa);
	/* Detach the whole tree before freeing its nodes */
	RCU_INIT_POINTER(xa->xa_head, NULL);
	xas_init_marks(&xas);
	if (xa_zero_busy(xa))
		xa_mark_clear(xa, XA_FREE_MARK);
	/* lockdep checks we're still holding the lock in xas_free_nodes() */
	if (xa_is_node(entry))
		xas_free_nodes(&xas, xa_to_node(entry));
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(xa_destroy);
2011687149fcSMatthew Wilcox 
2012ad3d6c72SMatthew Wilcox #ifdef XA_DEBUG
/* Dump a single xa_node: a header line followed by its mark bitmaps. */
void xa_dump_node(const struct xa_node *node)
{
	unsigned i, j;

	if (!node)
		return;
	/* Low two bits set: a tagged pointer, not a real node — print raw */
	if ((unsigned long)node & 3) {
		pr_cont("node %px\n", node);
		return;
	}

	pr_cont("node %px %s %d parent %px shift %d count %d values %d "
		"array %px list %px %px marks",
		node, node->parent ? "offset" : "max", node->offset,
		node->parent, node->shift, node->count, node->nr_values,
		node->array, node->private_list.prev, node->private_list.next);
	/* One bitmap of XA_MARK_LONGS words per mark */
	for (i = 0; i < XA_MAX_MARKS; i++)
		for (j = 0; j < XA_MARK_LONGS; j++)
			pr_cont(" %lx", node->marks[i][j]);
	pr_cont("\n");
}
2034ad3d6c72SMatthew Wilcox 
2035ad3d6c72SMatthew Wilcox void xa_dump_index(unsigned long index, unsigned int shift)
2036ad3d6c72SMatthew Wilcox {
2037ad3d6c72SMatthew Wilcox 	if (!shift)
2038ad3d6c72SMatthew Wilcox 		pr_info("%lu: ", index);
2039ad3d6c72SMatthew Wilcox 	else if (shift >= BITS_PER_LONG)
2040ad3d6c72SMatthew Wilcox 		pr_info("0-%lu: ", ~0UL);
2041ad3d6c72SMatthew Wilcox 	else
2042ad3d6c72SMatthew Wilcox 		pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
2043ad3d6c72SMatthew Wilcox }
2044ad3d6c72SMatthew Wilcox 
/*
 * Recursively dump @entry, which is rooted at @index and covers
 * 1 << @shift indices; interior nodes dump all of their slots.
 */
void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
{
	if (!entry)
		return;

	xa_dump_index(index, shift);

	if (xa_is_node(entry)) {
		if (shift == 0) {
			/* NOTE(review): a node entry at shift 0 looks anomalous;
			 * just print the raw pointer rather than descending. */
			pr_cont("%px\n", entry);
		} else {
			unsigned long i;
			struct xa_node *node = xa_to_node(entry);
			xa_dump_node(node);
			/* Each slot covers 1 << node->shift indices */
			for (i = 0; i < XA_CHUNK_SIZE; i++)
				xa_dump_entry(node->slots[i],
				      index + (i << node->shift), node->shift);
		}
	} else if (xa_is_value(entry))
		pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
						xa_to_value(entry), entry);
	else if (!xa_is_internal(entry))
		pr_cont("%px\n", entry);
	else if (xa_is_retry(entry))
		pr_cont("retry (%ld)\n", xa_to_internal(entry));
	else if (xa_is_sibling(entry))
		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
	else if (xa_is_zero(entry))
		pr_cont("zero (%ld)\n", xa_to_internal(entry));
	else
		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}
2077ad3d6c72SMatthew Wilcox 
2078ad3d6c72SMatthew Wilcox void xa_dump(const struct xarray *xa)
2079ad3d6c72SMatthew Wilcox {
2080ad3d6c72SMatthew Wilcox 	void *entry = xa->xa_head;
2081ad3d6c72SMatthew Wilcox 	unsigned int shift = 0;
2082ad3d6c72SMatthew Wilcox 
2083ad3d6c72SMatthew Wilcox 	pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
20849b89a035SMatthew Wilcox 			xa->xa_flags, xa_marked(xa, XA_MARK_0),
20859b89a035SMatthew Wilcox 			xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
2086ad3d6c72SMatthew Wilcox 	if (xa_is_node(entry))
2087ad3d6c72SMatthew Wilcox 		shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
2088ad3d6c72SMatthew Wilcox 	xa_dump_entry(entry, 0, shift);
2089ad3d6c72SMatthew Wilcox }
2090ad3d6c72SMatthew Wilcox #endif
2091