xref: /linux-6.15/lib/xarray.c (revision 4e99d4e9)
1f8d5d0ccSMatthew Wilcox // SPDX-License-Identifier: GPL-2.0+
2f8d5d0ccSMatthew Wilcox /*
3f8d5d0ccSMatthew Wilcox  * XArray implementation
4f8d5d0ccSMatthew Wilcox  * Copyright (c) 2017 Microsoft Corporation
5f8d5d0ccSMatthew Wilcox  * Author: Matthew Wilcox <[email protected]>
6f8d5d0ccSMatthew Wilcox  */
7f8d5d0ccSMatthew Wilcox 
89b89a035SMatthew Wilcox #include <linux/bitmap.h>
9f8d5d0ccSMatthew Wilcox #include <linux/export.h>
1058d6ea30SMatthew Wilcox #include <linux/list.h>
1158d6ea30SMatthew Wilcox #include <linux/slab.h>
12f8d5d0ccSMatthew Wilcox #include <linux/xarray.h>
13f8d5d0ccSMatthew Wilcox 
14f8d5d0ccSMatthew Wilcox /*
15f8d5d0ccSMatthew Wilcox  * Coding conventions in this file:
16f8d5d0ccSMatthew Wilcox  *
17f8d5d0ccSMatthew Wilcox  * @xa is used to refer to the entire xarray.
18f8d5d0ccSMatthew Wilcox  * @xas is the 'xarray operation state'.  It may be either a pointer to
19f8d5d0ccSMatthew Wilcox  * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
20f8d5d0ccSMatthew Wilcox  * ambiguity.
21f8d5d0ccSMatthew Wilcox  * @index is the index of the entry being operated on
22f8d5d0ccSMatthew Wilcox  * @mark is an xa_mark_t; a small number indicating one of the mark bits.
23f8d5d0ccSMatthew Wilcox  * @node refers to an xa_node; usually the primary one being operated on by
24f8d5d0ccSMatthew Wilcox  * this function.
25f8d5d0ccSMatthew Wilcox  * @offset is the index into the slots array inside an xa_node.
26f8d5d0ccSMatthew Wilcox  * @parent refers to the @xa_node closer to the head than @node.
27f8d5d0ccSMatthew Wilcox  * @entry refers to something stored in a slot in the xarray
28f8d5d0ccSMatthew Wilcox  */
29f8d5d0ccSMatthew Wilcox 
3058d6ea30SMatthew Wilcox static inline unsigned int xa_lock_type(const struct xarray *xa)
3158d6ea30SMatthew Wilcox {
3258d6ea30SMatthew Wilcox 	return (__force unsigned int)xa->xa_flags & 3;
3358d6ea30SMatthew Wilcox }
3458d6ea30SMatthew Wilcox 
3558d6ea30SMatthew Wilcox static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
3658d6ea30SMatthew Wilcox {
3758d6ea30SMatthew Wilcox 	if (lock_type == XA_LOCK_IRQ)
3858d6ea30SMatthew Wilcox 		xas_lock_irq(xas);
3958d6ea30SMatthew Wilcox 	else if (lock_type == XA_LOCK_BH)
4058d6ea30SMatthew Wilcox 		xas_lock_bh(xas);
4158d6ea30SMatthew Wilcox 	else
4258d6ea30SMatthew Wilcox 		xas_lock(xas);
4358d6ea30SMatthew Wilcox }
4458d6ea30SMatthew Wilcox 
4558d6ea30SMatthew Wilcox static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
4658d6ea30SMatthew Wilcox {
4758d6ea30SMatthew Wilcox 	if (lock_type == XA_LOCK_IRQ)
4858d6ea30SMatthew Wilcox 		xas_unlock_irq(xas);
4958d6ea30SMatthew Wilcox 	else if (lock_type == XA_LOCK_BH)
5058d6ea30SMatthew Wilcox 		xas_unlock_bh(xas);
5158d6ea30SMatthew Wilcox 	else
5258d6ea30SMatthew Wilcox 		xas_unlock(xas);
5358d6ea30SMatthew Wilcox }
5458d6ea30SMatthew Wilcox 
/*
 * Record in xa_flags that at least one entry carries @mark.  Test before
 * setting so we only dirty the flags word when the bit actually changes.
 */
static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}
609b89a035SMatthew Wilcox 
/*
 * Record in xa_flags that no entry carries @mark any more.  Test before
 * clearing so we only write the flags word when the bit actually changes.
 */
static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}
669b89a035SMatthew Wilcox 
/* Return the per-slot mark bitmap for @mark within @node. */
static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}
719b89a035SMatthew Wilcox 
/* Is @mark set on the slot at @offset in @node? */
static inline bool node_get_mark(struct xa_node *node,
		unsigned int offset, xa_mark_t mark)
{
	return test_bit(offset, node_marks(node, mark));
}
779b89a035SMatthew Wilcox 
/*
 * Set @mark on the slot at @offset in @node.  Non-atomic; caller holds
 * the xa_lock.  Returns true if the bit was already set.
 */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}
849b89a035SMatthew Wilcox 
/*
 * Clear @mark on the slot at @offset in @node.  Non-atomic; caller holds
 * the xa_lock.  Returns true if the bit was previously set.
 */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}
919b89a035SMatthew Wilcox 
/* Does any slot in @node have @mark set? */
static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}
969b89a035SMatthew Wilcox 
/*
 * Advance @mark to the next mark.  xa_mark_t is a __bitwise type, so the
 * increment needs __force casts through unsigned; wrap it in do/while(0)
 * so it behaves as a single statement.
 */
#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)
10058d6ea30SMatthew Wilcox 
/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set.  Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	/* One past the last sibling slot covered by this multi-slot entry */
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	/* No siblings means there is nothing to merge */
	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		/* Skip this mark if no sibling slot has it set */
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		/* Propagate to the head slot, then wipe the siblings */
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}
12458d6ea30SMatthew Wilcox 
125ad3d6c72SMatthew Wilcox /* extracts the offset within this node from the index */
126ad3d6c72SMatthew Wilcox static unsigned int get_offset(unsigned long index, struct xa_node *node)
127ad3d6c72SMatthew Wilcox {
128ad3d6c72SMatthew Wilcox 	return (index >> node->shift) & XA_CHUNK_MASK;
129ad3d6c72SMatthew Wilcox }
130ad3d6c72SMatthew Wilcox 
/* Recompute xa_offset from xa_index for the current xa_node. */
static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}
135b803b428SMatthew Wilcox 
/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	/*
	 * Zero the bits of the index covered by this node's level and
	 * below, then place @offset at this level.  Note '~' binds before
	 * '<<', so this clears everything from this level downwards.
	 */
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}
143ad3d6c72SMatthew Wilcox 
/* Step to the next slot in this node, keeping xa_index in sync. */
static void xas_advance(struct xa_state *xas)
{
	xas->xa_offset++;
	xas_move_index(xas, xas->xa_offset);
}
149b803b428SMatthew Wilcox 
/* Mark this walk as having gone outside the bounds of the array. */
static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}
155ad3d6c72SMatthew Wilcox 
/*
 * Starts a walk.  If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to.  If we're in an
 * error state, return NULL.  If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		/* A single entry can only live at index 0 */
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		/* Index must fit within the height of the existing tree */
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}
184ad3d6c72SMatthew Wilcox 
/*
 * Step one level down from @node, updating xa_node and xa_offset to
 * match xa_index, and return the entry found in that slot.  A sibling
 * entry redirects us to the head slot of its multi-slot entry.
 */
static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	if (xa_is_sibling(entry)) {
		/* Follow the sibling pointer back to the canonical slot */
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
	}

	xas->xa_offset = offset;
	return entry;
}
199ad3d6c72SMatthew Wilcox 
/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index.  However, it will do nothing and return %NULL if
 * @xas is in an error state.  xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		/* Stop descending once nodes are below the order we operate on */
		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
229ad3d6c72SMatthew Wilcox 
23058d6ea30SMatthew Wilcox /* Move the radix tree node cache here */
23158d6ea30SMatthew Wilcox extern struct kmem_cache *radix_tree_node_cachep;
23258d6ea30SMatthew Wilcox extern void radix_tree_node_rcu_free(struct rcu_head *head);
23358d6ea30SMatthew Wilcox 
23458d6ea30SMatthew Wilcox #define XA_RCU_FREE	((struct xarray *)1)
23558d6ea30SMatthew Wilcox 
/*
 * Free @node after the current RCU grace period.  The array pointer is
 * poisoned with XA_RCU_FREE first so concurrent RCU walkers can tell
 * the node is being torn down.
 */
static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
24258d6ea30SMatthew Wilcox 
/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * This function is now internal-only.  Frees the node preallocated by
 * xas_nomem()/__xas_nomem() if the operation finished without using it.
 */
static void xas_destroy(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_alloc;

	if (!node)
		return;
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	/* Never linked into the tree, so it can go straight back to the slab */
	kmem_cache_free(radix_tree_node_cachep, node);
	xas->xa_alloc = NULL;
}
25958d6ea30SMatthew Wilcox 
/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue.  The caller
 * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc().  More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	/* Only an ENOMEM error means a retry can help */
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	/* Clear the error so the caller's retry starts a fresh walk */
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
29258d6ea30SMatthew Wilcox 
/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().  Unlike xas_nomem(), this is called
 * with the xa_lock held; if @gfp allows blocking, the lock is dropped
 * around the allocation and re-taken in the same mode afterwards.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	/* Only an ENOMEM error means a retry can help */
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (gfpflags_allow_blocking(gfp)) {
		/* Can't sleep under the lock; drop it for the allocation */
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	/* Clear the error so the caller's retry starts a fresh walk */
	xas->xa_node = XAS_RESTART;
	return true;
}
32458d6ea30SMatthew Wilcox 
/*
 * Notify the user-supplied update callback (if any) that @node changed.
 * Without a callback, assert that the node is not on any private list.
 */
static void xas_update(struct xa_state *xas, struct xa_node *node)
{
	if (xas->xa_update)
		xas->xa_update(node);
	else
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}
33258d6ea30SMatthew Wilcox 
/*
 * Allocate and initialise a new node with the given @shift, to be placed
 * under xas->xa_node at xas->xa_offset.  Uses the node preallocated by
 * xas_nomem() if present, otherwise tries a GFP_NOWAIT allocation.
 * On failure, sets the xa_state to -ENOMEM and returns NULL.
 */
static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		/* Consume the preallocated node */
		xas->xa_alloc = NULL;
	} else {
		node = kmem_cache_alloc(radix_tree_node_cachep,
					GFP_NOWAIT | __GFP_NOWARN);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		/* Link accounting: the parent gains a child at xa_offset */
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}
36858d6ea30SMatthew Wilcox 
/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
 * multiple-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask;
		/* Round up to the end of the range this entry occupies */
		mask = (((xas->xa_sibs + 1UL) << xas->xa_shift) - 1);
		max |= mask;
		/* An entry covering index 0 must still force one extra level */
		if (mask == max)
			max++;
	}
#endif

	return max;
}
39158d6ea30SMatthew Wilcox 
/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
	/* A non-node head can only hold index 0 */
	if (!xa_is_node(entry))
		return 0;
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}
39958d6ea30SMatthew Wilcox 
/*
 * Shrink the tree from the top: while the head node contains only a
 * single entry in slot 0, replace the head with that entry and free the
 * node.  Slot 0 is overwritten with XA_RETRY_ENTRY when the value moves
 * to the head so that concurrent RCU walkers restart their lookup.
 */
static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		/* A value in a non-bottom node cannot move to the head */
		if (!xa_is_node(entry) && node->shift)
			break;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);

		node->count = 0;
		node->nr_values = 0;
		/* Redirect RCU walkers still inside this node */
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		/* The child is the new head; detach it and try to shrink again */
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}
43258d6ea30SMatthew Wilcox 
/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if xa->node has
 * a non-zero reference count.  Walks up the tree deleting each node that
 * becomes empty, then tries to shrink the tree from the top.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		/* Node still has entries; nothing to delete */
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			/* We just freed the head node; the array is now empty */
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		/* Unlink from the parent and continue upwards if it emptied */
		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	if (!node->parent)
		xas_shrink(xas);
}
47258d6ea30SMatthew Wilcox 
/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 *
 * Performs an iterative depth-first traversal: descend into child nodes
 * as they are found, and free each node after its last slot is visited.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (xa_is_node(entry)) {
			/* Descend and restart at the child's first slot */
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		/* Finished this node (and possibly its ancestors): free and ascend */
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}
51358d6ea30SMatthew Wilcox 
/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index.
 *
 * Returns the shift for the level below the new head (or 0 for an empty
 * array), or -ENOMEM if a node could not be allocated.  @head is the
 * current head entry of the array.
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		/* Empty array: no nodes to add; just report the shift needed */
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_marked(xa, mark))
				node_set_mark(node, 0, mark);
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}
57758d6ea30SMatthew Wilcox 
/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Expands the tree if necessary, then walks (allocating intermediate
 * nodes as needed) down to the level indicated by xas->xa_shift.
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns NULL.  If it failed to create the
 * slot, returns NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		/* Fresh walk: grow the tree high enough to hold xa_index */
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		/* Resume from where a previous walk left off */
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			/* Missing interior node; allocate and link it in */
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			/* Hit a value entry above our level; stop here */
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}
63958d6ea30SMatthew Wilcox 
64058d6ea30SMatthew Wilcox static void update_node(struct xa_state *xas, struct xa_node *node,
64158d6ea30SMatthew Wilcox 		int count, int values)
64258d6ea30SMatthew Wilcox {
64358d6ea30SMatthew Wilcox 	if (!node || (!count && !values))
64458d6ea30SMatthew Wilcox 		return;
64558d6ea30SMatthew Wilcox 
64658d6ea30SMatthew Wilcox 	node->count += count;
64758d6ea30SMatthew Wilcox 	node->nr_values += values;
64858d6ea30SMatthew Wilcox 	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
64958d6ea30SMatthew Wilcox 	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
65058d6ea30SMatthew Wilcox 	xas_update(xas, node);
65158d6ea30SMatthew Wilcox 	if (count < 0)
65258d6ea30SMatthew Wilcox 		xas_delete_node(xas);
65358d6ea30SMatthew Wilcox }
65458d6ea30SMatthew Wilcox 
65558d6ea30SMatthew Wilcox /**
65658d6ea30SMatthew Wilcox  * xas_store() - Store this entry in the XArray.
65758d6ea30SMatthew Wilcox  * @xas: XArray operation state.
65858d6ea30SMatthew Wilcox  * @entry: New entry.
65958d6ea30SMatthew Wilcox  *
66058d6ea30SMatthew Wilcox  * If @xas is operating on a multi-index entry, the entry returned by this
66158d6ea30SMatthew Wilcox  * function is essentially meaningless (it may be an internal entry or it
66258d6ea30SMatthew Wilcox  * may be %NULL, even if there are non-NULL entries at some of the indices
66358d6ea30SMatthew Wilcox  * covered by the range).  This is not a problem for any current users,
66458d6ea30SMatthew Wilcox  * and can be changed if needed.
66558d6ea30SMatthew Wilcox  *
66658d6ea30SMatthew Wilcox  * Return: The old entry at this index.
66758d6ea30SMatthew Wilcox  */
66858d6ea30SMatthew Wilcox void *xas_store(struct xa_state *xas, void *entry)
66958d6ea30SMatthew Wilcox {
67058d6ea30SMatthew Wilcox 	struct xa_node *node;
67158d6ea30SMatthew Wilcox 	void __rcu **slot = &xas->xa->xa_head;
67258d6ea30SMatthew Wilcox 	unsigned int offset, max;
67358d6ea30SMatthew Wilcox 	int count = 0;
67458d6ea30SMatthew Wilcox 	int values = 0;
67558d6ea30SMatthew Wilcox 	void *first, *next;
67658d6ea30SMatthew Wilcox 	bool value = xa_is_value(entry);
67758d6ea30SMatthew Wilcox 
67858d6ea30SMatthew Wilcox 	if (entry)
67958d6ea30SMatthew Wilcox 		first = xas_create(xas);
68058d6ea30SMatthew Wilcox 	else
68158d6ea30SMatthew Wilcox 		first = xas_load(xas);
68258d6ea30SMatthew Wilcox 
68358d6ea30SMatthew Wilcox 	if (xas_invalid(xas))
68458d6ea30SMatthew Wilcox 		return first;
68558d6ea30SMatthew Wilcox 	node = xas->xa_node;
68658d6ea30SMatthew Wilcox 	if (node && (xas->xa_shift < node->shift))
68758d6ea30SMatthew Wilcox 		xas->xa_sibs = 0;
68858d6ea30SMatthew Wilcox 	if ((first == entry) && !xas->xa_sibs)
68958d6ea30SMatthew Wilcox 		return first;
69058d6ea30SMatthew Wilcox 
69158d6ea30SMatthew Wilcox 	next = first;
69258d6ea30SMatthew Wilcox 	offset = xas->xa_offset;
69358d6ea30SMatthew Wilcox 	max = xas->xa_offset + xas->xa_sibs;
69458d6ea30SMatthew Wilcox 	if (node) {
69558d6ea30SMatthew Wilcox 		slot = &node->slots[offset];
69658d6ea30SMatthew Wilcox 		if (xas->xa_sibs)
69758d6ea30SMatthew Wilcox 			xas_squash_marks(xas);
69858d6ea30SMatthew Wilcox 	}
69958d6ea30SMatthew Wilcox 	if (!entry)
70058d6ea30SMatthew Wilcox 		xas_init_marks(xas);
70158d6ea30SMatthew Wilcox 
70258d6ea30SMatthew Wilcox 	for (;;) {
70358d6ea30SMatthew Wilcox 		/*
70458d6ea30SMatthew Wilcox 		 * Must clear the marks before setting the entry to NULL,
70558d6ea30SMatthew Wilcox 		 * otherwise xas_for_each_marked may find a NULL entry and
70658d6ea30SMatthew Wilcox 		 * stop early.  rcu_assign_pointer contains a release barrier
70758d6ea30SMatthew Wilcox 		 * so the mark clearing will appear to happen before the
70858d6ea30SMatthew Wilcox 		 * entry is set to NULL.
70958d6ea30SMatthew Wilcox 		 */
71058d6ea30SMatthew Wilcox 		rcu_assign_pointer(*slot, entry);
71158d6ea30SMatthew Wilcox 		if (xa_is_node(next))
71258d6ea30SMatthew Wilcox 			xas_free_nodes(xas, xa_to_node(next));
71358d6ea30SMatthew Wilcox 		if (!node)
71458d6ea30SMatthew Wilcox 			break;
71558d6ea30SMatthew Wilcox 		count += !next - !entry;
71658d6ea30SMatthew Wilcox 		values += !xa_is_value(first) - !value;
71758d6ea30SMatthew Wilcox 		if (entry) {
71858d6ea30SMatthew Wilcox 			if (offset == max)
71958d6ea30SMatthew Wilcox 				break;
72058d6ea30SMatthew Wilcox 			if (!xa_is_sibling(entry))
72158d6ea30SMatthew Wilcox 				entry = xa_mk_sibling(xas->xa_offset);
72258d6ea30SMatthew Wilcox 		} else {
72358d6ea30SMatthew Wilcox 			if (offset == XA_CHUNK_MASK)
72458d6ea30SMatthew Wilcox 				break;
72558d6ea30SMatthew Wilcox 		}
72658d6ea30SMatthew Wilcox 		next = xa_entry_locked(xas->xa, node, ++offset);
72758d6ea30SMatthew Wilcox 		if (!xa_is_sibling(next)) {
72858d6ea30SMatthew Wilcox 			if (!entry && (offset > max))
72958d6ea30SMatthew Wilcox 				break;
73058d6ea30SMatthew Wilcox 			first = next;
73158d6ea30SMatthew Wilcox 		}
73258d6ea30SMatthew Wilcox 		slot++;
73358d6ea30SMatthew Wilcox 	}
73458d6ea30SMatthew Wilcox 
73558d6ea30SMatthew Wilcox 	update_node(xas, node, count, values);
73658d6ea30SMatthew Wilcox 	return first;
73758d6ea30SMatthew Wilcox }
73858d6ea30SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_store);
73958d6ea30SMatthew Wilcox 
740f8d5d0ccSMatthew Wilcox /**
7419b89a035SMatthew Wilcox  * xas_get_mark() - Returns the state of this mark.
7429b89a035SMatthew Wilcox  * @xas: XArray operation state.
7439b89a035SMatthew Wilcox  * @mark: Mark number.
7449b89a035SMatthew Wilcox  *
7459b89a035SMatthew Wilcox  * Return: true if the mark is set, false if the mark is clear or @xas
7469b89a035SMatthew Wilcox  * is in an error state.
7479b89a035SMatthew Wilcox  */
7489b89a035SMatthew Wilcox bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
7499b89a035SMatthew Wilcox {
7509b89a035SMatthew Wilcox 	if (xas_invalid(xas))
7519b89a035SMatthew Wilcox 		return false;
7529b89a035SMatthew Wilcox 	if (!xas->xa_node)
7539b89a035SMatthew Wilcox 		return xa_marked(xas->xa, mark);
7549b89a035SMatthew Wilcox 	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
7559b89a035SMatthew Wilcox }
7569b89a035SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_get_mark);
7579b89a035SMatthew Wilcox 
7589b89a035SMatthew Wilcox /**
7599b89a035SMatthew Wilcox  * xas_set_mark() - Sets the mark on this entry and its parents.
7609b89a035SMatthew Wilcox  * @xas: XArray operation state.
7619b89a035SMatthew Wilcox  * @mark: Mark number.
7629b89a035SMatthew Wilcox  *
7639b89a035SMatthew Wilcox  * Sets the specified mark on this entry, and walks up the tree setting it
7649b89a035SMatthew Wilcox  * on all the ancestor entries.  Does nothing if @xas has not been walked to
7659b89a035SMatthew Wilcox  * an entry, or is in an error state.
7669b89a035SMatthew Wilcox  */
7679b89a035SMatthew Wilcox void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
7689b89a035SMatthew Wilcox {
7699b89a035SMatthew Wilcox 	struct xa_node *node = xas->xa_node;
7709b89a035SMatthew Wilcox 	unsigned int offset = xas->xa_offset;
7719b89a035SMatthew Wilcox 
7729b89a035SMatthew Wilcox 	if (xas_invalid(xas))
7739b89a035SMatthew Wilcox 		return;
7749b89a035SMatthew Wilcox 
7759b89a035SMatthew Wilcox 	while (node) {
7769b89a035SMatthew Wilcox 		if (node_set_mark(node, offset, mark))
7779b89a035SMatthew Wilcox 			return;
7789b89a035SMatthew Wilcox 		offset = node->offset;
7799b89a035SMatthew Wilcox 		node = xa_parent_locked(xas->xa, node);
7809b89a035SMatthew Wilcox 	}
7819b89a035SMatthew Wilcox 
7829b89a035SMatthew Wilcox 	if (!xa_marked(xas->xa, mark))
7839b89a035SMatthew Wilcox 		xa_mark_set(xas->xa, mark);
7849b89a035SMatthew Wilcox }
7859b89a035SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_set_mark);
7869b89a035SMatthew Wilcox 
7879b89a035SMatthew Wilcox /**
7889b89a035SMatthew Wilcox  * xas_clear_mark() - Clears the mark on this entry and its parents.
7899b89a035SMatthew Wilcox  * @xas: XArray operation state.
7909b89a035SMatthew Wilcox  * @mark: Mark number.
7919b89a035SMatthew Wilcox  *
7929b89a035SMatthew Wilcox  * Clears the specified mark on this entry, and walks back to the head
7939b89a035SMatthew Wilcox  * attempting to clear it on all the ancestor entries.  Does nothing if
7949b89a035SMatthew Wilcox  * @xas has not been walked to an entry, or is in an error state.
7959b89a035SMatthew Wilcox  */
7969b89a035SMatthew Wilcox void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
7979b89a035SMatthew Wilcox {
7989b89a035SMatthew Wilcox 	struct xa_node *node = xas->xa_node;
7999b89a035SMatthew Wilcox 	unsigned int offset = xas->xa_offset;
8009b89a035SMatthew Wilcox 
8019b89a035SMatthew Wilcox 	if (xas_invalid(xas))
8029b89a035SMatthew Wilcox 		return;
8039b89a035SMatthew Wilcox 
8049b89a035SMatthew Wilcox 	while (node) {
8059b89a035SMatthew Wilcox 		if (!node_clear_mark(node, offset, mark))
8069b89a035SMatthew Wilcox 			return;
8079b89a035SMatthew Wilcox 		if (node_any_mark(node, mark))
8089b89a035SMatthew Wilcox 			return;
8099b89a035SMatthew Wilcox 
8109b89a035SMatthew Wilcox 		offset = node->offset;
8119b89a035SMatthew Wilcox 		node = xa_parent_locked(xas->xa, node);
8129b89a035SMatthew Wilcox 	}
8139b89a035SMatthew Wilcox 
8149b89a035SMatthew Wilcox 	if (xa_marked(xas->xa, mark))
8159b89a035SMatthew Wilcox 		xa_mark_clear(xas->xa, mark);
8169b89a035SMatthew Wilcox }
8179b89a035SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_clear_mark);
8189b89a035SMatthew Wilcox 
8199b89a035SMatthew Wilcox /**
82058d6ea30SMatthew Wilcox  * xas_init_marks() - Initialise all marks for the entry
82158d6ea30SMatthew Wilcox  * @xas: Array operations state.
82258d6ea30SMatthew Wilcox  *
82358d6ea30SMatthew Wilcox  * Initialise all marks for the entry specified by @xas.  If we're tracking
82458d6ea30SMatthew Wilcox  * free entries with a mark, we need to set it on all entries.  All other
82558d6ea30SMatthew Wilcox  * marks are cleared.
82658d6ea30SMatthew Wilcox  *
82758d6ea30SMatthew Wilcox  * This implementation is not as efficient as it could be; we may walk
82858d6ea30SMatthew Wilcox  * up the tree multiple times.
82958d6ea30SMatthew Wilcox  */
83058d6ea30SMatthew Wilcox void xas_init_marks(const struct xa_state *xas)
83158d6ea30SMatthew Wilcox {
83258d6ea30SMatthew Wilcox 	xa_mark_t mark = 0;
83358d6ea30SMatthew Wilcox 
83458d6ea30SMatthew Wilcox 	for (;;) {
83558d6ea30SMatthew Wilcox 		xas_clear_mark(xas, mark);
83658d6ea30SMatthew Wilcox 		if (mark == XA_MARK_MAX)
83758d6ea30SMatthew Wilcox 			break;
83858d6ea30SMatthew Wilcox 		mark_inc(mark);
83958d6ea30SMatthew Wilcox 	}
84058d6ea30SMatthew Wilcox }
84158d6ea30SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_init_marks);
84258d6ea30SMatthew Wilcox 
84358d6ea30SMatthew Wilcox /**
844b803b428SMatthew Wilcox  * xas_pause() - Pause a walk to drop a lock.
845b803b428SMatthew Wilcox  * @xas: XArray operation state.
846b803b428SMatthew Wilcox  *
847b803b428SMatthew Wilcox  * Some users need to pause a walk and drop the lock they're holding in
848b803b428SMatthew Wilcox  * order to yield to a higher priority thread or carry out an operation
849b803b428SMatthew Wilcox  * on an entry.  Those users should call this function before they drop
850b803b428SMatthew Wilcox  * the lock.  It resets the @xas to be suitable for the next iteration
851b803b428SMatthew Wilcox  * of the loop after the user has reacquired the lock.  If most entries
852b803b428SMatthew Wilcox  * found during a walk require you to call xas_pause(), the xa_for_each()
853b803b428SMatthew Wilcox  * iterator may be more appropriate.
854b803b428SMatthew Wilcox  *
855b803b428SMatthew Wilcox  * Note that xas_pause() only works for forward iteration.  If a user needs
856b803b428SMatthew Wilcox  * to pause a reverse iteration, we will need a xas_pause_rev().
857b803b428SMatthew Wilcox  */
858b803b428SMatthew Wilcox void xas_pause(struct xa_state *xas)
859b803b428SMatthew Wilcox {
860b803b428SMatthew Wilcox 	struct xa_node *node = xas->xa_node;
861b803b428SMatthew Wilcox 
862b803b428SMatthew Wilcox 	if (xas_invalid(xas))
863b803b428SMatthew Wilcox 		return;
864b803b428SMatthew Wilcox 
865b803b428SMatthew Wilcox 	if (node) {
866b803b428SMatthew Wilcox 		unsigned int offset = xas->xa_offset;
867b803b428SMatthew Wilcox 		while (++offset < XA_CHUNK_SIZE) {
868b803b428SMatthew Wilcox 			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
869b803b428SMatthew Wilcox 				break;
870b803b428SMatthew Wilcox 		}
871b803b428SMatthew Wilcox 		xas->xa_index += (offset - xas->xa_offset) << node->shift;
872b803b428SMatthew Wilcox 	} else {
873b803b428SMatthew Wilcox 		xas->xa_index++;
874b803b428SMatthew Wilcox 	}
875b803b428SMatthew Wilcox 	xas->xa_node = XAS_RESTART;
876b803b428SMatthew Wilcox }
877b803b428SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_pause);
878b803b428SMatthew Wilcox 
87964d3e9a9SMatthew Wilcox /*
88064d3e9a9SMatthew Wilcox  * __xas_prev() - Find the previous entry in the XArray.
88164d3e9a9SMatthew Wilcox  * @xas: XArray operation state.
88264d3e9a9SMatthew Wilcox  *
88364d3e9a9SMatthew Wilcox  * Helper function for xas_prev() which handles all the complex cases
88464d3e9a9SMatthew Wilcox  * out of line.
88564d3e9a9SMatthew Wilcox  */
88664d3e9a9SMatthew Wilcox void *__xas_prev(struct xa_state *xas)
88764d3e9a9SMatthew Wilcox {
88864d3e9a9SMatthew Wilcox 	void *entry;
88964d3e9a9SMatthew Wilcox 
89064d3e9a9SMatthew Wilcox 	if (!xas_frozen(xas->xa_node))
89164d3e9a9SMatthew Wilcox 		xas->xa_index--;
89264d3e9a9SMatthew Wilcox 	if (xas_not_node(xas->xa_node))
89364d3e9a9SMatthew Wilcox 		return xas_load(xas);
89464d3e9a9SMatthew Wilcox 
89564d3e9a9SMatthew Wilcox 	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
89664d3e9a9SMatthew Wilcox 		xas->xa_offset--;
89764d3e9a9SMatthew Wilcox 
89864d3e9a9SMatthew Wilcox 	while (xas->xa_offset == 255) {
89964d3e9a9SMatthew Wilcox 		xas->xa_offset = xas->xa_node->offset - 1;
90064d3e9a9SMatthew Wilcox 		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
90164d3e9a9SMatthew Wilcox 		if (!xas->xa_node)
90264d3e9a9SMatthew Wilcox 			return set_bounds(xas);
90364d3e9a9SMatthew Wilcox 	}
90464d3e9a9SMatthew Wilcox 
90564d3e9a9SMatthew Wilcox 	for (;;) {
90664d3e9a9SMatthew Wilcox 		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
90764d3e9a9SMatthew Wilcox 		if (!xa_is_node(entry))
90864d3e9a9SMatthew Wilcox 			return entry;
90964d3e9a9SMatthew Wilcox 
91064d3e9a9SMatthew Wilcox 		xas->xa_node = xa_to_node(entry);
91164d3e9a9SMatthew Wilcox 		xas_set_offset(xas);
91264d3e9a9SMatthew Wilcox 	}
91364d3e9a9SMatthew Wilcox }
91464d3e9a9SMatthew Wilcox EXPORT_SYMBOL_GPL(__xas_prev);
91564d3e9a9SMatthew Wilcox 
91664d3e9a9SMatthew Wilcox /*
91764d3e9a9SMatthew Wilcox  * __xas_next() - Find the next entry in the XArray.
91864d3e9a9SMatthew Wilcox  * @xas: XArray operation state.
91964d3e9a9SMatthew Wilcox  *
92064d3e9a9SMatthew Wilcox  * Helper function for xas_next() which handles all the complex cases
92164d3e9a9SMatthew Wilcox  * out of line.
92264d3e9a9SMatthew Wilcox  */
92364d3e9a9SMatthew Wilcox void *__xas_next(struct xa_state *xas)
92464d3e9a9SMatthew Wilcox {
92564d3e9a9SMatthew Wilcox 	void *entry;
92664d3e9a9SMatthew Wilcox 
92764d3e9a9SMatthew Wilcox 	if (!xas_frozen(xas->xa_node))
92864d3e9a9SMatthew Wilcox 		xas->xa_index++;
92964d3e9a9SMatthew Wilcox 	if (xas_not_node(xas->xa_node))
93064d3e9a9SMatthew Wilcox 		return xas_load(xas);
93164d3e9a9SMatthew Wilcox 
93264d3e9a9SMatthew Wilcox 	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
93364d3e9a9SMatthew Wilcox 		xas->xa_offset++;
93464d3e9a9SMatthew Wilcox 
93564d3e9a9SMatthew Wilcox 	while (xas->xa_offset == XA_CHUNK_SIZE) {
93664d3e9a9SMatthew Wilcox 		xas->xa_offset = xas->xa_node->offset + 1;
93764d3e9a9SMatthew Wilcox 		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
93864d3e9a9SMatthew Wilcox 		if (!xas->xa_node)
93964d3e9a9SMatthew Wilcox 			return set_bounds(xas);
94064d3e9a9SMatthew Wilcox 	}
94164d3e9a9SMatthew Wilcox 
94264d3e9a9SMatthew Wilcox 	for (;;) {
94364d3e9a9SMatthew Wilcox 		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
94464d3e9a9SMatthew Wilcox 		if (!xa_is_node(entry))
94564d3e9a9SMatthew Wilcox 			return entry;
94664d3e9a9SMatthew Wilcox 
94764d3e9a9SMatthew Wilcox 		xas->xa_node = xa_to_node(entry);
94864d3e9a9SMatthew Wilcox 		xas_set_offset(xas);
94964d3e9a9SMatthew Wilcox 	}
95064d3e9a9SMatthew Wilcox }
95164d3e9a9SMatthew Wilcox EXPORT_SYMBOL_GPL(__xas_next);
95264d3e9a9SMatthew Wilcox 
953b803b428SMatthew Wilcox /**
954b803b428SMatthew Wilcox  * xas_find() - Find the next present entry in the XArray.
955b803b428SMatthew Wilcox  * @xas: XArray operation state.
956b803b428SMatthew Wilcox  * @max: Highest index to return.
957b803b428SMatthew Wilcox  *
958b803b428SMatthew Wilcox  * If the @xas has not yet been walked to an entry, return the entry
959b803b428SMatthew Wilcox  * which has an index >= xas.xa_index.  If it has been walked, the entry
960b803b428SMatthew Wilcox  * currently being pointed at has been processed, and so we move to the
961b803b428SMatthew Wilcox  * next entry.
962b803b428SMatthew Wilcox  *
963b803b428SMatthew Wilcox  * If no entry is found and the array is smaller than @max, the iterator
964b803b428SMatthew Wilcox  * is set to the smallest index not yet in the array.  This allows @xas
965b803b428SMatthew Wilcox  * to be immediately passed to xas_store().
966b803b428SMatthew Wilcox  *
967b803b428SMatthew Wilcox  * Return: The entry, if found, otherwise %NULL.
968b803b428SMatthew Wilcox  */
969b803b428SMatthew Wilcox void *xas_find(struct xa_state *xas, unsigned long max)
970b803b428SMatthew Wilcox {
971b803b428SMatthew Wilcox 	void *entry;
972b803b428SMatthew Wilcox 
973b803b428SMatthew Wilcox 	if (xas_error(xas))
974b803b428SMatthew Wilcox 		return NULL;
975b803b428SMatthew Wilcox 
976b803b428SMatthew Wilcox 	if (!xas->xa_node) {
977b803b428SMatthew Wilcox 		xas->xa_index = 1;
978b803b428SMatthew Wilcox 		return set_bounds(xas);
979b803b428SMatthew Wilcox 	} else if (xas_top(xas->xa_node)) {
980b803b428SMatthew Wilcox 		entry = xas_load(xas);
981b803b428SMatthew Wilcox 		if (entry || xas_not_node(xas->xa_node))
982b803b428SMatthew Wilcox 			return entry;
983b803b428SMatthew Wilcox 	} else if (!xas->xa_node->shift &&
984b803b428SMatthew Wilcox 		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
985b803b428SMatthew Wilcox 		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
986b803b428SMatthew Wilcox 	}
987b803b428SMatthew Wilcox 
988b803b428SMatthew Wilcox 	xas_advance(xas);
989b803b428SMatthew Wilcox 
990b803b428SMatthew Wilcox 	while (xas->xa_node && (xas->xa_index <= max)) {
991b803b428SMatthew Wilcox 		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
992b803b428SMatthew Wilcox 			xas->xa_offset = xas->xa_node->offset + 1;
993b803b428SMatthew Wilcox 			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
994b803b428SMatthew Wilcox 			continue;
995b803b428SMatthew Wilcox 		}
996b803b428SMatthew Wilcox 
997b803b428SMatthew Wilcox 		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
998b803b428SMatthew Wilcox 		if (xa_is_node(entry)) {
999b803b428SMatthew Wilcox 			xas->xa_node = xa_to_node(entry);
1000b803b428SMatthew Wilcox 			xas->xa_offset = 0;
1001b803b428SMatthew Wilcox 			continue;
1002b803b428SMatthew Wilcox 		}
1003b803b428SMatthew Wilcox 		if (entry && !xa_is_sibling(entry))
1004b803b428SMatthew Wilcox 			return entry;
1005b803b428SMatthew Wilcox 
1006b803b428SMatthew Wilcox 		xas_advance(xas);
1007b803b428SMatthew Wilcox 	}
1008b803b428SMatthew Wilcox 
1009b803b428SMatthew Wilcox 	if (!xas->xa_node)
1010b803b428SMatthew Wilcox 		xas->xa_node = XAS_BOUNDS;
1011b803b428SMatthew Wilcox 	return NULL;
1012b803b428SMatthew Wilcox }
1013b803b428SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_find);
1014b803b428SMatthew Wilcox 
1015b803b428SMatthew Wilcox /**
1016b803b428SMatthew Wilcox  * xas_find_marked() - Find the next marked entry in the XArray.
1017b803b428SMatthew Wilcox  * @xas: XArray operation state.
1018b803b428SMatthew Wilcox  * @max: Highest index to return.
1019b803b428SMatthew Wilcox  * @mark: Mark number to search for.
1020b803b428SMatthew Wilcox  *
1021b803b428SMatthew Wilcox  * If the @xas has not yet been walked to an entry, return the marked entry
1022b803b428SMatthew Wilcox  * which has an index >= xas.xa_index.  If it has been walked, the entry
1023b803b428SMatthew Wilcox  * currently being pointed at has been processed, and so we return the
1024b803b428SMatthew Wilcox  * first marked entry with an index > xas.xa_index.
1025b803b428SMatthew Wilcox  *
1026b803b428SMatthew Wilcox  * If no marked entry is found and the array is smaller than @max, @xas is
1027b803b428SMatthew Wilcox  * set to the bounds state and xas->xa_index is set to the smallest index
1028b803b428SMatthew Wilcox  * not yet in the array.  This allows @xas to be immediately passed to
1029b803b428SMatthew Wilcox  * xas_store().
1030b803b428SMatthew Wilcox  *
1031b803b428SMatthew Wilcox  * If no entry is found before @max is reached, @xas is set to the restart
1032b803b428SMatthew Wilcox  * state.
1033b803b428SMatthew Wilcox  *
1034b803b428SMatthew Wilcox  * Return: The entry, if found, otherwise %NULL.
1035b803b428SMatthew Wilcox  */
1036b803b428SMatthew Wilcox void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
1037b803b428SMatthew Wilcox {
1038b803b428SMatthew Wilcox 	bool advance = true;
1039b803b428SMatthew Wilcox 	unsigned int offset;
1040b803b428SMatthew Wilcox 	void *entry;
1041b803b428SMatthew Wilcox 
1042b803b428SMatthew Wilcox 	if (xas_error(xas))
1043b803b428SMatthew Wilcox 		return NULL;
1044b803b428SMatthew Wilcox 
1045b803b428SMatthew Wilcox 	if (!xas->xa_node) {
1046b803b428SMatthew Wilcox 		xas->xa_index = 1;
1047b803b428SMatthew Wilcox 		goto out;
1048b803b428SMatthew Wilcox 	} else if (xas_top(xas->xa_node)) {
1049b803b428SMatthew Wilcox 		advance = false;
1050b803b428SMatthew Wilcox 		entry = xa_head(xas->xa);
1051b803b428SMatthew Wilcox 		xas->xa_node = NULL;
1052b803b428SMatthew Wilcox 		if (xas->xa_index > max_index(entry))
1053b803b428SMatthew Wilcox 			goto bounds;
1054b803b428SMatthew Wilcox 		if (!xa_is_node(entry)) {
1055b803b428SMatthew Wilcox 			if (xa_marked(xas->xa, mark))
1056b803b428SMatthew Wilcox 				return entry;
1057b803b428SMatthew Wilcox 			xas->xa_index = 1;
1058b803b428SMatthew Wilcox 			goto out;
1059b803b428SMatthew Wilcox 		}
1060b803b428SMatthew Wilcox 		xas->xa_node = xa_to_node(entry);
1061b803b428SMatthew Wilcox 		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
1062b803b428SMatthew Wilcox 	}
1063b803b428SMatthew Wilcox 
1064b803b428SMatthew Wilcox 	while (xas->xa_index <= max) {
1065b803b428SMatthew Wilcox 		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1066b803b428SMatthew Wilcox 			xas->xa_offset = xas->xa_node->offset + 1;
1067b803b428SMatthew Wilcox 			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1068b803b428SMatthew Wilcox 			if (!xas->xa_node)
1069b803b428SMatthew Wilcox 				break;
1070b803b428SMatthew Wilcox 			advance = false;
1071b803b428SMatthew Wilcox 			continue;
1072b803b428SMatthew Wilcox 		}
1073b803b428SMatthew Wilcox 
1074b803b428SMatthew Wilcox 		if (!advance) {
1075b803b428SMatthew Wilcox 			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1076b803b428SMatthew Wilcox 			if (xa_is_sibling(entry)) {
1077b803b428SMatthew Wilcox 				xas->xa_offset = xa_to_sibling(entry);
1078b803b428SMatthew Wilcox 				xas_move_index(xas, xas->xa_offset);
1079b803b428SMatthew Wilcox 			}
1080b803b428SMatthew Wilcox 		}
1081b803b428SMatthew Wilcox 
1082b803b428SMatthew Wilcox 		offset = xas_find_chunk(xas, advance, mark);
1083b803b428SMatthew Wilcox 		if (offset > xas->xa_offset) {
1084b803b428SMatthew Wilcox 			advance = false;
1085b803b428SMatthew Wilcox 			xas_move_index(xas, offset);
1086b803b428SMatthew Wilcox 			/* Mind the wrap */
1087b803b428SMatthew Wilcox 			if ((xas->xa_index - 1) >= max)
1088b803b428SMatthew Wilcox 				goto max;
1089b803b428SMatthew Wilcox 			xas->xa_offset = offset;
1090b803b428SMatthew Wilcox 			if (offset == XA_CHUNK_SIZE)
1091b803b428SMatthew Wilcox 				continue;
1092b803b428SMatthew Wilcox 		}
1093b803b428SMatthew Wilcox 
1094b803b428SMatthew Wilcox 		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1095b803b428SMatthew Wilcox 		if (!xa_is_node(entry))
1096b803b428SMatthew Wilcox 			return entry;
1097b803b428SMatthew Wilcox 		xas->xa_node = xa_to_node(entry);
1098b803b428SMatthew Wilcox 		xas_set_offset(xas);
1099b803b428SMatthew Wilcox 	}
1100b803b428SMatthew Wilcox 
1101b803b428SMatthew Wilcox out:
1102b803b428SMatthew Wilcox 	if (!max)
1103b803b428SMatthew Wilcox 		goto max;
1104b803b428SMatthew Wilcox bounds:
1105b803b428SMatthew Wilcox 	xas->xa_node = XAS_BOUNDS;
1106b803b428SMatthew Wilcox 	return NULL;
1107b803b428SMatthew Wilcox max:
1108b803b428SMatthew Wilcox 	xas->xa_node = XAS_RESTART;
1109b803b428SMatthew Wilcox 	return NULL;
1110b803b428SMatthew Wilcox }
1111b803b428SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_find_marked);
1112b803b428SMatthew Wilcox 
1113b803b428SMatthew Wilcox /**
1114*4e99d4e9SMatthew Wilcox  * xas_find_conflict() - Find the next present entry in a range.
1115*4e99d4e9SMatthew Wilcox  * @xas: XArray operation state.
1116*4e99d4e9SMatthew Wilcox  *
1117*4e99d4e9SMatthew Wilcox  * The @xas describes both a range and a position within that range.
1118*4e99d4e9SMatthew Wilcox  *
1119*4e99d4e9SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held.
1120*4e99d4e9SMatthew Wilcox  * Return: The next entry in the range covered by @xas or %NULL.
1121*4e99d4e9SMatthew Wilcox  */
1122*4e99d4e9SMatthew Wilcox void *xas_find_conflict(struct xa_state *xas)
1123*4e99d4e9SMatthew Wilcox {
1124*4e99d4e9SMatthew Wilcox 	void *curr;
1125*4e99d4e9SMatthew Wilcox 
1126*4e99d4e9SMatthew Wilcox 	if (xas_error(xas))
1127*4e99d4e9SMatthew Wilcox 		return NULL;
1128*4e99d4e9SMatthew Wilcox 
1129*4e99d4e9SMatthew Wilcox 	if (!xas->xa_node)
1130*4e99d4e9SMatthew Wilcox 		return NULL;
1131*4e99d4e9SMatthew Wilcox 
1132*4e99d4e9SMatthew Wilcox 	if (xas_top(xas->xa_node)) {
1133*4e99d4e9SMatthew Wilcox 		curr = xas_start(xas);
1134*4e99d4e9SMatthew Wilcox 		if (!curr)
1135*4e99d4e9SMatthew Wilcox 			return NULL;
1136*4e99d4e9SMatthew Wilcox 		while (xa_is_node(curr)) {
1137*4e99d4e9SMatthew Wilcox 			struct xa_node *node = xa_to_node(curr);
1138*4e99d4e9SMatthew Wilcox 			curr = xas_descend(xas, node);
1139*4e99d4e9SMatthew Wilcox 		}
1140*4e99d4e9SMatthew Wilcox 		if (curr)
1141*4e99d4e9SMatthew Wilcox 			return curr;
1142*4e99d4e9SMatthew Wilcox 	}
1143*4e99d4e9SMatthew Wilcox 
1144*4e99d4e9SMatthew Wilcox 	if (xas->xa_node->shift > xas->xa_shift)
1145*4e99d4e9SMatthew Wilcox 		return NULL;
1146*4e99d4e9SMatthew Wilcox 
1147*4e99d4e9SMatthew Wilcox 	for (;;) {
1148*4e99d4e9SMatthew Wilcox 		if (xas->xa_node->shift == xas->xa_shift) {
1149*4e99d4e9SMatthew Wilcox 			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
1150*4e99d4e9SMatthew Wilcox 				break;
1151*4e99d4e9SMatthew Wilcox 		} else if (xas->xa_offset == XA_CHUNK_MASK) {
1152*4e99d4e9SMatthew Wilcox 			xas->xa_offset = xas->xa_node->offset;
1153*4e99d4e9SMatthew Wilcox 			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
1154*4e99d4e9SMatthew Wilcox 			if (!xas->xa_node)
1155*4e99d4e9SMatthew Wilcox 				break;
1156*4e99d4e9SMatthew Wilcox 			continue;
1157*4e99d4e9SMatthew Wilcox 		}
1158*4e99d4e9SMatthew Wilcox 		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
1159*4e99d4e9SMatthew Wilcox 		if (xa_is_sibling(curr))
1160*4e99d4e9SMatthew Wilcox 			continue;
1161*4e99d4e9SMatthew Wilcox 		while (xa_is_node(curr)) {
1162*4e99d4e9SMatthew Wilcox 			xas->xa_node = xa_to_node(curr);
1163*4e99d4e9SMatthew Wilcox 			xas->xa_offset = 0;
1164*4e99d4e9SMatthew Wilcox 			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
1165*4e99d4e9SMatthew Wilcox 		}
1166*4e99d4e9SMatthew Wilcox 		if (curr)
1167*4e99d4e9SMatthew Wilcox 			return curr;
1168*4e99d4e9SMatthew Wilcox 	}
1169*4e99d4e9SMatthew Wilcox 	xas->xa_offset -= xas->xa_sibs;
1170*4e99d4e9SMatthew Wilcox 	return NULL;
1171*4e99d4e9SMatthew Wilcox }
1172*4e99d4e9SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_find_conflict);
1173*4e99d4e9SMatthew Wilcox 
1174*4e99d4e9SMatthew Wilcox /**
1175f8d5d0ccSMatthew Wilcox  * xa_init_flags() - Initialise an empty XArray with flags.
1176f8d5d0ccSMatthew Wilcox  * @xa: XArray.
1177f8d5d0ccSMatthew Wilcox  * @flags: XA_FLAG values.
1178f8d5d0ccSMatthew Wilcox  *
1179f8d5d0ccSMatthew Wilcox  * If you need to initialise an XArray with special flags (eg you need
1180f8d5d0ccSMatthew Wilcox  * to take the lock from interrupt context), use this function instead
1181f8d5d0ccSMatthew Wilcox  * of xa_init().
1182f8d5d0ccSMatthew Wilcox  *
1183f8d5d0ccSMatthew Wilcox  * Context: Any context.
1184f8d5d0ccSMatthew Wilcox  */
1185f8d5d0ccSMatthew Wilcox void xa_init_flags(struct xarray *xa, gfp_t flags)
1186f8d5d0ccSMatthew Wilcox {
118758d6ea30SMatthew Wilcox 	unsigned int lock_type;
118858d6ea30SMatthew Wilcox 	static struct lock_class_key xa_lock_irq;
118958d6ea30SMatthew Wilcox 	static struct lock_class_key xa_lock_bh;
119058d6ea30SMatthew Wilcox 
1191f8d5d0ccSMatthew Wilcox 	spin_lock_init(&xa->xa_lock);
1192f8d5d0ccSMatthew Wilcox 	xa->xa_flags = flags;
1193f8d5d0ccSMatthew Wilcox 	xa->xa_head = NULL;
119458d6ea30SMatthew Wilcox 
119558d6ea30SMatthew Wilcox 	lock_type = xa_lock_type(xa);
119658d6ea30SMatthew Wilcox 	if (lock_type == XA_LOCK_IRQ)
119758d6ea30SMatthew Wilcox 		lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
119858d6ea30SMatthew Wilcox 	else if (lock_type == XA_LOCK_BH)
119958d6ea30SMatthew Wilcox 		lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
1200f8d5d0ccSMatthew Wilcox }
1201f8d5d0ccSMatthew Wilcox EXPORT_SYMBOL(xa_init_flags);
1202ad3d6c72SMatthew Wilcox 
1203ad3d6c72SMatthew Wilcox /**
1204ad3d6c72SMatthew Wilcox  * xa_load() - Load an entry from an XArray.
1205ad3d6c72SMatthew Wilcox  * @xa: XArray.
1206ad3d6c72SMatthew Wilcox  * @index: index into array.
1207ad3d6c72SMatthew Wilcox  *
1208ad3d6c72SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
1209ad3d6c72SMatthew Wilcox  * Return: The entry at @index in @xa.
1210ad3d6c72SMatthew Wilcox  */
1211ad3d6c72SMatthew Wilcox void *xa_load(struct xarray *xa, unsigned long index)
1212ad3d6c72SMatthew Wilcox {
1213ad3d6c72SMatthew Wilcox 	XA_STATE(xas, xa, index);
1214ad3d6c72SMatthew Wilcox 	void *entry;
1215ad3d6c72SMatthew Wilcox 
1216ad3d6c72SMatthew Wilcox 	rcu_read_lock();
1217ad3d6c72SMatthew Wilcox 	do {
1218ad3d6c72SMatthew Wilcox 		entry = xas_load(&xas);
1219ad3d6c72SMatthew Wilcox 	} while (xas_retry(&xas, entry));
1220ad3d6c72SMatthew Wilcox 	rcu_read_unlock();
1221ad3d6c72SMatthew Wilcox 
1222ad3d6c72SMatthew Wilcox 	return entry;
1223ad3d6c72SMatthew Wilcox }
1224ad3d6c72SMatthew Wilcox EXPORT_SYMBOL(xa_load);
1225ad3d6c72SMatthew Wilcox 
122658d6ea30SMatthew Wilcox static void *xas_result(struct xa_state *xas, void *curr)
122758d6ea30SMatthew Wilcox {
122858d6ea30SMatthew Wilcox 	XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
122958d6ea30SMatthew Wilcox 	if (xas_error(xas))
123058d6ea30SMatthew Wilcox 		curr = xas->xa_node;
123158d6ea30SMatthew Wilcox 	return curr;
123258d6ea30SMatthew Wilcox }
123358d6ea30SMatthew Wilcox 
123458d6ea30SMatthew Wilcox /**
123558d6ea30SMatthew Wilcox  * __xa_erase() - Erase this entry from the XArray while locked.
123658d6ea30SMatthew Wilcox  * @xa: XArray.
123758d6ea30SMatthew Wilcox  * @index: Index into array.
123858d6ea30SMatthew Wilcox  *
123958d6ea30SMatthew Wilcox  * If the entry at this index is a multi-index entry then all indices will
124058d6ea30SMatthew Wilcox  * be erased, and the entry will no longer be a multi-index entry.
124158d6ea30SMatthew Wilcox  * This function expects the xa_lock to be held on entry.
124258d6ea30SMatthew Wilcox  *
124358d6ea30SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.  May
124458d6ea30SMatthew Wilcox  * release and reacquire xa_lock if @gfp flags permit.
124558d6ea30SMatthew Wilcox  * Return: The old entry at this index.
124658d6ea30SMatthew Wilcox  */
124758d6ea30SMatthew Wilcox void *__xa_erase(struct xarray *xa, unsigned long index)
124858d6ea30SMatthew Wilcox {
124958d6ea30SMatthew Wilcox 	XA_STATE(xas, xa, index);
125058d6ea30SMatthew Wilcox 	return xas_result(&xas, xas_store(&xas, NULL));
125158d6ea30SMatthew Wilcox }
125258d6ea30SMatthew Wilcox EXPORT_SYMBOL_GPL(__xa_erase);
125358d6ea30SMatthew Wilcox 
125458d6ea30SMatthew Wilcox /**
125558d6ea30SMatthew Wilcox  * xa_store() - Store this entry in the XArray.
125658d6ea30SMatthew Wilcox  * @xa: XArray.
125758d6ea30SMatthew Wilcox  * @index: Index into array.
125858d6ea30SMatthew Wilcox  * @entry: New entry.
125958d6ea30SMatthew Wilcox  * @gfp: Memory allocation flags.
126058d6ea30SMatthew Wilcox  *
126158d6ea30SMatthew Wilcox  * After this function returns, loads from this index will return @entry.
126258d6ea30SMatthew Wilcox  * Storing into an existing multislot entry updates the entry of every index.
126358d6ea30SMatthew Wilcox  * The marks associated with @index are unaffected unless @entry is %NULL.
126458d6ea30SMatthew Wilcox  *
126558d6ea30SMatthew Wilcox  * Context: Process context.  Takes and releases the xa_lock.  May sleep
126658d6ea30SMatthew Wilcox  * if the @gfp flags permit.
126758d6ea30SMatthew Wilcox  * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
126858d6ea30SMatthew Wilcox  * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
126958d6ea30SMatthew Wilcox  * failed.
127058d6ea30SMatthew Wilcox  */
127158d6ea30SMatthew Wilcox void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
127258d6ea30SMatthew Wilcox {
127358d6ea30SMatthew Wilcox 	XA_STATE(xas, xa, index);
127458d6ea30SMatthew Wilcox 	void *curr;
127558d6ea30SMatthew Wilcox 
127658d6ea30SMatthew Wilcox 	if (WARN_ON_ONCE(xa_is_internal(entry)))
127758d6ea30SMatthew Wilcox 		return XA_ERROR(-EINVAL);
127858d6ea30SMatthew Wilcox 
127958d6ea30SMatthew Wilcox 	do {
128058d6ea30SMatthew Wilcox 		xas_lock(&xas);
128158d6ea30SMatthew Wilcox 		curr = xas_store(&xas, entry);
128258d6ea30SMatthew Wilcox 		xas_unlock(&xas);
128358d6ea30SMatthew Wilcox 	} while (xas_nomem(&xas, gfp));
128458d6ea30SMatthew Wilcox 
128558d6ea30SMatthew Wilcox 	return xas_result(&xas, curr);
128658d6ea30SMatthew Wilcox }
128758d6ea30SMatthew Wilcox EXPORT_SYMBOL(xa_store);
128858d6ea30SMatthew Wilcox 
128958d6ea30SMatthew Wilcox /**
129058d6ea30SMatthew Wilcox  * __xa_store() - Store this entry in the XArray.
129158d6ea30SMatthew Wilcox  * @xa: XArray.
129258d6ea30SMatthew Wilcox  * @index: Index into array.
129358d6ea30SMatthew Wilcox  * @entry: New entry.
129458d6ea30SMatthew Wilcox  * @gfp: Memory allocation flags.
129558d6ea30SMatthew Wilcox  *
129658d6ea30SMatthew Wilcox  * You must already be holding the xa_lock when calling this function.
129758d6ea30SMatthew Wilcox  * It will drop the lock if needed to allocate memory, and then reacquire
129858d6ea30SMatthew Wilcox  * it afterwards.
129958d6ea30SMatthew Wilcox  *
130058d6ea30SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.  May
130158d6ea30SMatthew Wilcox  * release and reacquire xa_lock if @gfp flags permit.
130258d6ea30SMatthew Wilcox  * Return: The old entry at this index or xa_err() if an error happened.
130358d6ea30SMatthew Wilcox  */
130458d6ea30SMatthew Wilcox void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
130558d6ea30SMatthew Wilcox {
130658d6ea30SMatthew Wilcox 	XA_STATE(xas, xa, index);
130758d6ea30SMatthew Wilcox 	void *curr;
130858d6ea30SMatthew Wilcox 
130958d6ea30SMatthew Wilcox 	if (WARN_ON_ONCE(xa_is_internal(entry)))
131058d6ea30SMatthew Wilcox 		return XA_ERROR(-EINVAL);
131158d6ea30SMatthew Wilcox 
131258d6ea30SMatthew Wilcox 	do {
131358d6ea30SMatthew Wilcox 		curr = xas_store(&xas, entry);
131458d6ea30SMatthew Wilcox 	} while (__xas_nomem(&xas, gfp));
131558d6ea30SMatthew Wilcox 
131658d6ea30SMatthew Wilcox 	return xas_result(&xas, curr);
131758d6ea30SMatthew Wilcox }
131858d6ea30SMatthew Wilcox EXPORT_SYMBOL(__xa_store);
131958d6ea30SMatthew Wilcox 
13209b89a035SMatthew Wilcox /**
132141aec91fSMatthew Wilcox  * xa_cmpxchg() - Conditionally replace an entry in the XArray.
132241aec91fSMatthew Wilcox  * @xa: XArray.
132341aec91fSMatthew Wilcox  * @index: Index into array.
132441aec91fSMatthew Wilcox  * @old: Old value to test against.
132541aec91fSMatthew Wilcox  * @entry: New value to place in array.
132641aec91fSMatthew Wilcox  * @gfp: Memory allocation flags.
132741aec91fSMatthew Wilcox  *
132841aec91fSMatthew Wilcox  * If the entry at @index is the same as @old, replace it with @entry.
132941aec91fSMatthew Wilcox  * If the return value is equal to @old, then the exchange was successful.
133041aec91fSMatthew Wilcox  *
133141aec91fSMatthew Wilcox  * Context: Process context.  Takes and releases the xa_lock.  May sleep
133241aec91fSMatthew Wilcox  * if the @gfp flags permit.
133341aec91fSMatthew Wilcox  * Return: The old value at this index or xa_err() if an error happened.
133441aec91fSMatthew Wilcox  */
133541aec91fSMatthew Wilcox void *xa_cmpxchg(struct xarray *xa, unsigned long index,
133641aec91fSMatthew Wilcox 			void *old, void *entry, gfp_t gfp)
133741aec91fSMatthew Wilcox {
133841aec91fSMatthew Wilcox 	XA_STATE(xas, xa, index);
133941aec91fSMatthew Wilcox 	void *curr;
134041aec91fSMatthew Wilcox 
134141aec91fSMatthew Wilcox 	if (WARN_ON_ONCE(xa_is_internal(entry)))
134241aec91fSMatthew Wilcox 		return XA_ERROR(-EINVAL);
134341aec91fSMatthew Wilcox 
134441aec91fSMatthew Wilcox 	do {
134541aec91fSMatthew Wilcox 		xas_lock(&xas);
134641aec91fSMatthew Wilcox 		curr = xas_load(&xas);
134741aec91fSMatthew Wilcox 		if (curr == old)
134841aec91fSMatthew Wilcox 			xas_store(&xas, entry);
134941aec91fSMatthew Wilcox 		xas_unlock(&xas);
135041aec91fSMatthew Wilcox 	} while (xas_nomem(&xas, gfp));
135141aec91fSMatthew Wilcox 
135241aec91fSMatthew Wilcox 	return xas_result(&xas, curr);
135341aec91fSMatthew Wilcox }
135441aec91fSMatthew Wilcox EXPORT_SYMBOL(xa_cmpxchg);
135541aec91fSMatthew Wilcox 
135641aec91fSMatthew Wilcox /**
135741aec91fSMatthew Wilcox  * __xa_cmpxchg() - Store this entry in the XArray.
135841aec91fSMatthew Wilcox  * @xa: XArray.
135941aec91fSMatthew Wilcox  * @index: Index into array.
136041aec91fSMatthew Wilcox  * @old: Old value to test against.
136141aec91fSMatthew Wilcox  * @entry: New entry.
136241aec91fSMatthew Wilcox  * @gfp: Memory allocation flags.
136341aec91fSMatthew Wilcox  *
136441aec91fSMatthew Wilcox  * You must already be holding the xa_lock when calling this function.
136541aec91fSMatthew Wilcox  * It will drop the lock if needed to allocate memory, and then reacquire
136641aec91fSMatthew Wilcox  * it afterwards.
136741aec91fSMatthew Wilcox  *
136841aec91fSMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.  May
136941aec91fSMatthew Wilcox  * release and reacquire xa_lock if @gfp flags permit.
137041aec91fSMatthew Wilcox  * Return: The old entry at this index or xa_err() if an error happened.
137141aec91fSMatthew Wilcox  */
137241aec91fSMatthew Wilcox void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
137341aec91fSMatthew Wilcox 			void *old, void *entry, gfp_t gfp)
137441aec91fSMatthew Wilcox {
137541aec91fSMatthew Wilcox 	XA_STATE(xas, xa, index);
137641aec91fSMatthew Wilcox 	void *curr;
137741aec91fSMatthew Wilcox 
137841aec91fSMatthew Wilcox 	if (WARN_ON_ONCE(xa_is_internal(entry)))
137941aec91fSMatthew Wilcox 		return XA_ERROR(-EINVAL);
138041aec91fSMatthew Wilcox 
138141aec91fSMatthew Wilcox 	do {
138241aec91fSMatthew Wilcox 		curr = xas_load(&xas);
138341aec91fSMatthew Wilcox 		if (curr == old)
138441aec91fSMatthew Wilcox 			xas_store(&xas, entry);
138541aec91fSMatthew Wilcox 	} while (__xas_nomem(&xas, gfp));
138641aec91fSMatthew Wilcox 
138741aec91fSMatthew Wilcox 	return xas_result(&xas, curr);
138841aec91fSMatthew Wilcox }
138941aec91fSMatthew Wilcox EXPORT_SYMBOL(__xa_cmpxchg);
139041aec91fSMatthew Wilcox 
139141aec91fSMatthew Wilcox /**
13929b89a035SMatthew Wilcox  * __xa_set_mark() - Set this mark on this entry while locked.
13939b89a035SMatthew Wilcox  * @xa: XArray.
13949b89a035SMatthew Wilcox  * @index: Index of entry.
13959b89a035SMatthew Wilcox  * @mark: Mark number.
13969b89a035SMatthew Wilcox  *
13979b89a035SMatthew Wilcox  * Attempting to set a mark on a NULL entry does not succeed.
13989b89a035SMatthew Wilcox  *
13999b89a035SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.
14009b89a035SMatthew Wilcox  */
14019b89a035SMatthew Wilcox void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
14029b89a035SMatthew Wilcox {
14039b89a035SMatthew Wilcox 	XA_STATE(xas, xa, index);
14049b89a035SMatthew Wilcox 	void *entry = xas_load(&xas);
14059b89a035SMatthew Wilcox 
14069b89a035SMatthew Wilcox 	if (entry)
14079b89a035SMatthew Wilcox 		xas_set_mark(&xas, mark);
14089b89a035SMatthew Wilcox }
14099b89a035SMatthew Wilcox EXPORT_SYMBOL_GPL(__xa_set_mark);
14109b89a035SMatthew Wilcox 
14119b89a035SMatthew Wilcox /**
14129b89a035SMatthew Wilcox  * __xa_clear_mark() - Clear this mark on this entry while locked.
14139b89a035SMatthew Wilcox  * @xa: XArray.
14149b89a035SMatthew Wilcox  * @index: Index of entry.
14159b89a035SMatthew Wilcox  * @mark: Mark number.
14169b89a035SMatthew Wilcox  *
14179b89a035SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.
14189b89a035SMatthew Wilcox  */
14199b89a035SMatthew Wilcox void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
14209b89a035SMatthew Wilcox {
14219b89a035SMatthew Wilcox 	XA_STATE(xas, xa, index);
14229b89a035SMatthew Wilcox 	void *entry = xas_load(&xas);
14239b89a035SMatthew Wilcox 
14249b89a035SMatthew Wilcox 	if (entry)
14259b89a035SMatthew Wilcox 		xas_clear_mark(&xas, mark);
14269b89a035SMatthew Wilcox }
14279b89a035SMatthew Wilcox EXPORT_SYMBOL_GPL(__xa_clear_mark);
14289b89a035SMatthew Wilcox 
14299b89a035SMatthew Wilcox /**
14309b89a035SMatthew Wilcox  * xa_get_mark() - Inquire whether this mark is set on this entry.
14319b89a035SMatthew Wilcox  * @xa: XArray.
14329b89a035SMatthew Wilcox  * @index: Index of entry.
14339b89a035SMatthew Wilcox  * @mark: Mark number.
14349b89a035SMatthew Wilcox  *
14359b89a035SMatthew Wilcox  * This function uses the RCU read lock, so the result may be out of date
14369b89a035SMatthew Wilcox  * by the time it returns.  If you need the result to be stable, use a lock.
14379b89a035SMatthew Wilcox  *
14389b89a035SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
14399b89a035SMatthew Wilcox  * Return: True if the entry at @index has this mark set, false if it doesn't.
14409b89a035SMatthew Wilcox  */
14419b89a035SMatthew Wilcox bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
14429b89a035SMatthew Wilcox {
14439b89a035SMatthew Wilcox 	XA_STATE(xas, xa, index);
14449b89a035SMatthew Wilcox 	void *entry;
14459b89a035SMatthew Wilcox 
14469b89a035SMatthew Wilcox 	rcu_read_lock();
14479b89a035SMatthew Wilcox 	entry = xas_start(&xas);
14489b89a035SMatthew Wilcox 	while (xas_get_mark(&xas, mark)) {
14499b89a035SMatthew Wilcox 		if (!xa_is_node(entry))
14509b89a035SMatthew Wilcox 			goto found;
14519b89a035SMatthew Wilcox 		entry = xas_descend(&xas, xa_to_node(entry));
14529b89a035SMatthew Wilcox 	}
14539b89a035SMatthew Wilcox 	rcu_read_unlock();
14549b89a035SMatthew Wilcox 	return false;
14559b89a035SMatthew Wilcox  found:
14569b89a035SMatthew Wilcox 	rcu_read_unlock();
14579b89a035SMatthew Wilcox 	return true;
14589b89a035SMatthew Wilcox }
14599b89a035SMatthew Wilcox EXPORT_SYMBOL(xa_get_mark);
14609b89a035SMatthew Wilcox 
14619b89a035SMatthew Wilcox /**
14629b89a035SMatthew Wilcox  * xa_set_mark() - Set this mark on this entry.
14639b89a035SMatthew Wilcox  * @xa: XArray.
14649b89a035SMatthew Wilcox  * @index: Index of entry.
14659b89a035SMatthew Wilcox  * @mark: Mark number.
14669b89a035SMatthew Wilcox  *
14679b89a035SMatthew Wilcox  * Attempting to set a mark on a NULL entry does not succeed.
14689b89a035SMatthew Wilcox  *
14699b89a035SMatthew Wilcox  * Context: Process context.  Takes and releases the xa_lock.
14709b89a035SMatthew Wilcox  */
14719b89a035SMatthew Wilcox void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
14729b89a035SMatthew Wilcox {
14739b89a035SMatthew Wilcox 	xa_lock(xa);
14749b89a035SMatthew Wilcox 	__xa_set_mark(xa, index, mark);
14759b89a035SMatthew Wilcox 	xa_unlock(xa);
14769b89a035SMatthew Wilcox }
14779b89a035SMatthew Wilcox EXPORT_SYMBOL(xa_set_mark);
14789b89a035SMatthew Wilcox 
14799b89a035SMatthew Wilcox /**
14809b89a035SMatthew Wilcox  * xa_clear_mark() - Clear this mark on this entry.
14819b89a035SMatthew Wilcox  * @xa: XArray.
14829b89a035SMatthew Wilcox  * @index: Index of entry.
14839b89a035SMatthew Wilcox  * @mark: Mark number.
14849b89a035SMatthew Wilcox  *
14859b89a035SMatthew Wilcox  * Clearing a mark always succeeds.
14869b89a035SMatthew Wilcox  *
14879b89a035SMatthew Wilcox  * Context: Process context.  Takes and releases the xa_lock.
14889b89a035SMatthew Wilcox  */
14899b89a035SMatthew Wilcox void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
14909b89a035SMatthew Wilcox {
14919b89a035SMatthew Wilcox 	xa_lock(xa);
14929b89a035SMatthew Wilcox 	__xa_clear_mark(xa, index, mark);
14939b89a035SMatthew Wilcox 	xa_unlock(xa);
14949b89a035SMatthew Wilcox }
14959b89a035SMatthew Wilcox EXPORT_SYMBOL(xa_clear_mark);
14969b89a035SMatthew Wilcox 
1497b803b428SMatthew Wilcox /**
1498b803b428SMatthew Wilcox  * xa_find() - Search the XArray for an entry.
1499b803b428SMatthew Wilcox  * @xa: XArray.
1500b803b428SMatthew Wilcox  * @indexp: Pointer to an index.
1501b803b428SMatthew Wilcox  * @max: Maximum index to search to.
1502b803b428SMatthew Wilcox  * @filter: Selection criterion.
1503b803b428SMatthew Wilcox  *
1504b803b428SMatthew Wilcox  * Finds the entry in @xa which matches the @filter, and has the lowest
1505b803b428SMatthew Wilcox  * index that is at least @indexp and no more than @max.
1506b803b428SMatthew Wilcox  * If an entry is found, @indexp is updated to be the index of the entry.
1507b803b428SMatthew Wilcox  * This function is protected by the RCU read lock, so it may not find
1508b803b428SMatthew Wilcox  * entries which are being simultaneously added.  It will not return an
1509b803b428SMatthew Wilcox  * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
1510b803b428SMatthew Wilcox  *
1511b803b428SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
1512b803b428SMatthew Wilcox  * Return: The entry, if found, otherwise %NULL.
1513b803b428SMatthew Wilcox  */
1514b803b428SMatthew Wilcox void *xa_find(struct xarray *xa, unsigned long *indexp,
1515b803b428SMatthew Wilcox 			unsigned long max, xa_mark_t filter)
1516b803b428SMatthew Wilcox {
1517b803b428SMatthew Wilcox 	XA_STATE(xas, xa, *indexp);
1518b803b428SMatthew Wilcox 	void *entry;
1519b803b428SMatthew Wilcox 
1520b803b428SMatthew Wilcox 	rcu_read_lock();
1521b803b428SMatthew Wilcox 	do {
1522b803b428SMatthew Wilcox 		if ((__force unsigned int)filter < XA_MAX_MARKS)
1523b803b428SMatthew Wilcox 			entry = xas_find_marked(&xas, max, filter);
1524b803b428SMatthew Wilcox 		else
1525b803b428SMatthew Wilcox 			entry = xas_find(&xas, max);
1526b803b428SMatthew Wilcox 	} while (xas_retry(&xas, entry));
1527b803b428SMatthew Wilcox 	rcu_read_unlock();
1528b803b428SMatthew Wilcox 
1529b803b428SMatthew Wilcox 	if (entry)
1530b803b428SMatthew Wilcox 		*indexp = xas.xa_index;
1531b803b428SMatthew Wilcox 	return entry;
1532b803b428SMatthew Wilcox }
1533b803b428SMatthew Wilcox EXPORT_SYMBOL(xa_find);
1534b803b428SMatthew Wilcox 
1535b803b428SMatthew Wilcox /**
1536b803b428SMatthew Wilcox  * xa_find_after() - Search the XArray for a present entry.
1537b803b428SMatthew Wilcox  * @xa: XArray.
1538b803b428SMatthew Wilcox  * @indexp: Pointer to an index.
1539b803b428SMatthew Wilcox  * @max: Maximum index to search to.
1540b803b428SMatthew Wilcox  * @filter: Selection criterion.
1541b803b428SMatthew Wilcox  *
1542b803b428SMatthew Wilcox  * Finds the entry in @xa which matches the @filter and has the lowest
1543b803b428SMatthew Wilcox  * index that is above @indexp and no more than @max.
1544b803b428SMatthew Wilcox  * If an entry is found, @indexp is updated to be the index of the entry.
1545b803b428SMatthew Wilcox  * This function is protected by the RCU read lock, so it may miss entries
1546b803b428SMatthew Wilcox  * which are being simultaneously added.  It will not return an
1547b803b428SMatthew Wilcox  * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
1548b803b428SMatthew Wilcox  *
1549b803b428SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
1550b803b428SMatthew Wilcox  * Return: The pointer, if found, otherwise %NULL.
1551b803b428SMatthew Wilcox  */
1552b803b428SMatthew Wilcox void *xa_find_after(struct xarray *xa, unsigned long *indexp,
1553b803b428SMatthew Wilcox 			unsigned long max, xa_mark_t filter)
1554b803b428SMatthew Wilcox {
1555b803b428SMatthew Wilcox 	XA_STATE(xas, xa, *indexp + 1);
1556b803b428SMatthew Wilcox 	void *entry;
1557b803b428SMatthew Wilcox 
1558b803b428SMatthew Wilcox 	rcu_read_lock();
1559b803b428SMatthew Wilcox 	for (;;) {
1560b803b428SMatthew Wilcox 		if ((__force unsigned int)filter < XA_MAX_MARKS)
1561b803b428SMatthew Wilcox 			entry = xas_find_marked(&xas, max, filter);
1562b803b428SMatthew Wilcox 		else
1563b803b428SMatthew Wilcox 			entry = xas_find(&xas, max);
1564b803b428SMatthew Wilcox 		if (xas.xa_shift) {
1565b803b428SMatthew Wilcox 			if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
1566b803b428SMatthew Wilcox 				continue;
1567b803b428SMatthew Wilcox 		} else {
1568b803b428SMatthew Wilcox 			if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
1569b803b428SMatthew Wilcox 				continue;
1570b803b428SMatthew Wilcox 		}
1571b803b428SMatthew Wilcox 		if (!xas_retry(&xas, entry))
1572b803b428SMatthew Wilcox 			break;
1573b803b428SMatthew Wilcox 	}
1574b803b428SMatthew Wilcox 	rcu_read_unlock();
1575b803b428SMatthew Wilcox 
1576b803b428SMatthew Wilcox 	if (entry)
1577b803b428SMatthew Wilcox 		*indexp = xas.xa_index;
1578b803b428SMatthew Wilcox 	return entry;
1579b803b428SMatthew Wilcox }
1580b803b428SMatthew Wilcox EXPORT_SYMBOL(xa_find_after);
1581b803b428SMatthew Wilcox 
158280a0a1a9SMatthew Wilcox static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
158380a0a1a9SMatthew Wilcox 			unsigned long max, unsigned int n)
158480a0a1a9SMatthew Wilcox {
158580a0a1a9SMatthew Wilcox 	void *entry;
158680a0a1a9SMatthew Wilcox 	unsigned int i = 0;
158780a0a1a9SMatthew Wilcox 
158880a0a1a9SMatthew Wilcox 	rcu_read_lock();
158980a0a1a9SMatthew Wilcox 	xas_for_each(xas, entry, max) {
159080a0a1a9SMatthew Wilcox 		if (xas_retry(xas, entry))
159180a0a1a9SMatthew Wilcox 			continue;
159280a0a1a9SMatthew Wilcox 		dst[i++] = entry;
159380a0a1a9SMatthew Wilcox 		if (i == n)
159480a0a1a9SMatthew Wilcox 			break;
159580a0a1a9SMatthew Wilcox 	}
159680a0a1a9SMatthew Wilcox 	rcu_read_unlock();
159780a0a1a9SMatthew Wilcox 
159880a0a1a9SMatthew Wilcox 	return i;
159980a0a1a9SMatthew Wilcox }
160080a0a1a9SMatthew Wilcox 
160180a0a1a9SMatthew Wilcox static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
160280a0a1a9SMatthew Wilcox 			unsigned long max, unsigned int n, xa_mark_t mark)
160380a0a1a9SMatthew Wilcox {
160480a0a1a9SMatthew Wilcox 	void *entry;
160580a0a1a9SMatthew Wilcox 	unsigned int i = 0;
160680a0a1a9SMatthew Wilcox 
160780a0a1a9SMatthew Wilcox 	rcu_read_lock();
160880a0a1a9SMatthew Wilcox 	xas_for_each_marked(xas, entry, max, mark) {
160980a0a1a9SMatthew Wilcox 		if (xas_retry(xas, entry))
161080a0a1a9SMatthew Wilcox 			continue;
161180a0a1a9SMatthew Wilcox 		dst[i++] = entry;
161280a0a1a9SMatthew Wilcox 		if (i == n)
161380a0a1a9SMatthew Wilcox 			break;
161480a0a1a9SMatthew Wilcox 	}
161580a0a1a9SMatthew Wilcox 	rcu_read_unlock();
161680a0a1a9SMatthew Wilcox 
161780a0a1a9SMatthew Wilcox 	return i;
161880a0a1a9SMatthew Wilcox }
161980a0a1a9SMatthew Wilcox 
162080a0a1a9SMatthew Wilcox /**
162180a0a1a9SMatthew Wilcox  * xa_extract() - Copy selected entries from the XArray into a normal array.
162280a0a1a9SMatthew Wilcox  * @xa: The source XArray to copy from.
162380a0a1a9SMatthew Wilcox  * @dst: The buffer to copy entries into.
162480a0a1a9SMatthew Wilcox  * @start: The first index in the XArray eligible to be selected.
162580a0a1a9SMatthew Wilcox  * @max: The last index in the XArray eligible to be selected.
162680a0a1a9SMatthew Wilcox  * @n: The maximum number of entries to copy.
162780a0a1a9SMatthew Wilcox  * @filter: Selection criterion.
162880a0a1a9SMatthew Wilcox  *
162980a0a1a9SMatthew Wilcox  * Copies up to @n entries that match @filter from the XArray.  The
163080a0a1a9SMatthew Wilcox  * copied entries will have indices between @start and @max, inclusive.
163180a0a1a9SMatthew Wilcox  *
163280a0a1a9SMatthew Wilcox  * The @filter may be an XArray mark value, in which case entries which are
163380a0a1a9SMatthew Wilcox  * marked with that mark will be copied.  It may also be %XA_PRESENT, in
163480a0a1a9SMatthew Wilcox  * which case all entries which are not NULL will be copied.
163580a0a1a9SMatthew Wilcox  *
163680a0a1a9SMatthew Wilcox  * The entries returned may not represent a snapshot of the XArray at a
163780a0a1a9SMatthew Wilcox  * moment in time.  For example, if another thread stores to index 5, then
163880a0a1a9SMatthew Wilcox  * index 10, calling xa_extract() may return the old contents of index 5
163980a0a1a9SMatthew Wilcox  * and the new contents of index 10.  Indices not modified while this
164080a0a1a9SMatthew Wilcox  * function is running will not be skipped.
164180a0a1a9SMatthew Wilcox  *
164280a0a1a9SMatthew Wilcox  * If you need stronger guarantees, holding the xa_lock across calls to this
164380a0a1a9SMatthew Wilcox  * function will prevent concurrent modification.
164480a0a1a9SMatthew Wilcox  *
164580a0a1a9SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
164680a0a1a9SMatthew Wilcox  * Return: The number of entries copied.
164780a0a1a9SMatthew Wilcox  */
164880a0a1a9SMatthew Wilcox unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
164980a0a1a9SMatthew Wilcox 			unsigned long max, unsigned int n, xa_mark_t filter)
165080a0a1a9SMatthew Wilcox {
165180a0a1a9SMatthew Wilcox 	XA_STATE(xas, xa, start);
165280a0a1a9SMatthew Wilcox 
165380a0a1a9SMatthew Wilcox 	if (!n)
165480a0a1a9SMatthew Wilcox 		return 0;
165580a0a1a9SMatthew Wilcox 
165680a0a1a9SMatthew Wilcox 	if ((__force unsigned int)filter < XA_MAX_MARKS)
165780a0a1a9SMatthew Wilcox 		return xas_extract_marked(&xas, dst, max, n, filter);
165880a0a1a9SMatthew Wilcox 	return xas_extract_present(&xas, dst, max, n);
165980a0a1a9SMatthew Wilcox }
166080a0a1a9SMatthew Wilcox EXPORT_SYMBOL(xa_extract);
166180a0a1a9SMatthew Wilcox 
1662687149fcSMatthew Wilcox /**
1663687149fcSMatthew Wilcox  * xa_destroy() - Free all internal data structures.
1664687149fcSMatthew Wilcox  * @xa: XArray.
1665687149fcSMatthew Wilcox  *
1666687149fcSMatthew Wilcox  * After calling this function, the XArray is empty and has freed all memory
1667687149fcSMatthew Wilcox  * allocated for its internal data structures.  You are responsible for
1668687149fcSMatthew Wilcox  * freeing the objects referenced by the XArray.
1669687149fcSMatthew Wilcox  *
1670687149fcSMatthew Wilcox  * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
1671687149fcSMatthew Wilcox  */
1672687149fcSMatthew Wilcox void xa_destroy(struct xarray *xa)
1673687149fcSMatthew Wilcox {
1674687149fcSMatthew Wilcox 	XA_STATE(xas, xa, 0);
1675687149fcSMatthew Wilcox 	unsigned long flags;
1676687149fcSMatthew Wilcox 	void *entry;
1677687149fcSMatthew Wilcox 
1678687149fcSMatthew Wilcox 	xas.xa_node = NULL;
1679687149fcSMatthew Wilcox 	xas_lock_irqsave(&xas, flags);
1680687149fcSMatthew Wilcox 	entry = xa_head_locked(xa);
1681687149fcSMatthew Wilcox 	RCU_INIT_POINTER(xa->xa_head, NULL);
1682687149fcSMatthew Wilcox 	xas_init_marks(&xas);
1683687149fcSMatthew Wilcox 	/* lockdep checks we're still holding the lock in xas_free_nodes() */
1684687149fcSMatthew Wilcox 	if (xa_is_node(entry))
1685687149fcSMatthew Wilcox 		xas_free_nodes(&xas, xa_to_node(entry));
1686687149fcSMatthew Wilcox 	xas_unlock_irqrestore(&xas, flags);
1687687149fcSMatthew Wilcox }
1688687149fcSMatthew Wilcox EXPORT_SYMBOL(xa_destroy);
1689687149fcSMatthew Wilcox 
1690ad3d6c72SMatthew Wilcox #ifdef XA_DEBUG
1691ad3d6c72SMatthew Wilcox void xa_dump_node(const struct xa_node *node)
1692ad3d6c72SMatthew Wilcox {
1693ad3d6c72SMatthew Wilcox 	unsigned i, j;
1694ad3d6c72SMatthew Wilcox 
1695ad3d6c72SMatthew Wilcox 	if (!node)
1696ad3d6c72SMatthew Wilcox 		return;
1697ad3d6c72SMatthew Wilcox 	if ((unsigned long)node & 3) {
1698ad3d6c72SMatthew Wilcox 		pr_cont("node %px\n", node);
1699ad3d6c72SMatthew Wilcox 		return;
1700ad3d6c72SMatthew Wilcox 	}
1701ad3d6c72SMatthew Wilcox 
1702ad3d6c72SMatthew Wilcox 	pr_cont("node %px %s %d parent %px shift %d count %d values %d "
1703ad3d6c72SMatthew Wilcox 		"array %px list %px %px marks",
1704ad3d6c72SMatthew Wilcox 		node, node->parent ? "offset" : "max", node->offset,
1705ad3d6c72SMatthew Wilcox 		node->parent, node->shift, node->count, node->nr_values,
1706ad3d6c72SMatthew Wilcox 		node->array, node->private_list.prev, node->private_list.next);
1707ad3d6c72SMatthew Wilcox 	for (i = 0; i < XA_MAX_MARKS; i++)
1708ad3d6c72SMatthew Wilcox 		for (j = 0; j < XA_MARK_LONGS; j++)
1709ad3d6c72SMatthew Wilcox 			pr_cont(" %lx", node->marks[i][j]);
1710ad3d6c72SMatthew Wilcox 	pr_cont("\n");
1711ad3d6c72SMatthew Wilcox }
1712ad3d6c72SMatthew Wilcox 
1713ad3d6c72SMatthew Wilcox void xa_dump_index(unsigned long index, unsigned int shift)
1714ad3d6c72SMatthew Wilcox {
1715ad3d6c72SMatthew Wilcox 	if (!shift)
1716ad3d6c72SMatthew Wilcox 		pr_info("%lu: ", index);
1717ad3d6c72SMatthew Wilcox 	else if (shift >= BITS_PER_LONG)
1718ad3d6c72SMatthew Wilcox 		pr_info("0-%lu: ", ~0UL);
1719ad3d6c72SMatthew Wilcox 	else
1720ad3d6c72SMatthew Wilcox 		pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
1721ad3d6c72SMatthew Wilcox }
1722ad3d6c72SMatthew Wilcox 
1723ad3d6c72SMatthew Wilcox void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
1724ad3d6c72SMatthew Wilcox {
1725ad3d6c72SMatthew Wilcox 	if (!entry)
1726ad3d6c72SMatthew Wilcox 		return;
1727ad3d6c72SMatthew Wilcox 
1728ad3d6c72SMatthew Wilcox 	xa_dump_index(index, shift);
1729ad3d6c72SMatthew Wilcox 
1730ad3d6c72SMatthew Wilcox 	if (xa_is_node(entry)) {
1731ad3d6c72SMatthew Wilcox 		if (shift == 0) {
1732ad3d6c72SMatthew Wilcox 			pr_cont("%px\n", entry);
1733ad3d6c72SMatthew Wilcox 		} else {
1734ad3d6c72SMatthew Wilcox 			unsigned long i;
1735ad3d6c72SMatthew Wilcox 			struct xa_node *node = xa_to_node(entry);
1736ad3d6c72SMatthew Wilcox 			xa_dump_node(node);
1737ad3d6c72SMatthew Wilcox 			for (i = 0; i < XA_CHUNK_SIZE; i++)
1738ad3d6c72SMatthew Wilcox 				xa_dump_entry(node->slots[i],
1739ad3d6c72SMatthew Wilcox 				      index + (i << node->shift), node->shift);
1740ad3d6c72SMatthew Wilcox 		}
1741ad3d6c72SMatthew Wilcox 	} else if (xa_is_value(entry))
1742ad3d6c72SMatthew Wilcox 		pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
1743ad3d6c72SMatthew Wilcox 						xa_to_value(entry), entry);
1744ad3d6c72SMatthew Wilcox 	else if (!xa_is_internal(entry))
1745ad3d6c72SMatthew Wilcox 		pr_cont("%px\n", entry);
1746ad3d6c72SMatthew Wilcox 	else if (xa_is_retry(entry))
1747ad3d6c72SMatthew Wilcox 		pr_cont("retry (%ld)\n", xa_to_internal(entry));
1748ad3d6c72SMatthew Wilcox 	else if (xa_is_sibling(entry))
1749ad3d6c72SMatthew Wilcox 		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
1750ad3d6c72SMatthew Wilcox 	else
1751ad3d6c72SMatthew Wilcox 		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
1752ad3d6c72SMatthew Wilcox }
1753ad3d6c72SMatthew Wilcox 
1754ad3d6c72SMatthew Wilcox void xa_dump(const struct xarray *xa)
1755ad3d6c72SMatthew Wilcox {
1756ad3d6c72SMatthew Wilcox 	void *entry = xa->xa_head;
1757ad3d6c72SMatthew Wilcox 	unsigned int shift = 0;
1758ad3d6c72SMatthew Wilcox 
1759ad3d6c72SMatthew Wilcox 	pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
17609b89a035SMatthew Wilcox 			xa->xa_flags, xa_marked(xa, XA_MARK_0),
17619b89a035SMatthew Wilcox 			xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
1762ad3d6c72SMatthew Wilcox 	if (xa_is_node(entry))
1763ad3d6c72SMatthew Wilcox 		shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
1764ad3d6c72SMatthew Wilcox 	xa_dump_entry(entry, 0, shift);
1765ad3d6c72SMatthew Wilcox }
1766ad3d6c72SMatthew Wilcox #endif
1767