xref: /linux-6.15/lib/xarray.c (revision 200a89c1)
1f8d5d0ccSMatthew Wilcox // SPDX-License-Identifier: GPL-2.0+
2f8d5d0ccSMatthew Wilcox /*
3f8d5d0ccSMatthew Wilcox  * XArray implementation
4c44aa5e8SMatthew Wilcox (Oracle)  * Copyright (c) 2017-2018 Microsoft Corporation
5c44aa5e8SMatthew Wilcox (Oracle)  * Copyright (c) 2018-2020 Oracle
6f8d5d0ccSMatthew Wilcox  * Author: Matthew Wilcox <[email protected]>
7f8d5d0ccSMatthew Wilcox  */
8f8d5d0ccSMatthew Wilcox 
99b89a035SMatthew Wilcox #include <linux/bitmap.h>
10f8d5d0ccSMatthew Wilcox #include <linux/export.h>
1158d6ea30SMatthew Wilcox #include <linux/list.h>
1258d6ea30SMatthew Wilcox #include <linux/slab.h>
13f8d5d0ccSMatthew Wilcox #include <linux/xarray.h>
14f8d5d0ccSMatthew Wilcox 
15bde1597dSArnd Bergmann #include "radix-tree.h"
16bde1597dSArnd Bergmann 
17f8d5d0ccSMatthew Wilcox /*
18f8d5d0ccSMatthew Wilcox  * Coding conventions in this file:
19f8d5d0ccSMatthew Wilcox  *
20f8d5d0ccSMatthew Wilcox  * @xa is used to refer to the entire xarray.
21f8d5d0ccSMatthew Wilcox  * @xas is the 'xarray operation state'.  It may be either a pointer to
22f8d5d0ccSMatthew Wilcox  * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
23f8d5d0ccSMatthew Wilcox  * ambiguity.
24f8d5d0ccSMatthew Wilcox  * @index is the index of the entry being operated on
25f8d5d0ccSMatthew Wilcox  * @mark is an xa_mark_t; a small number indicating one of the mark bits.
26f8d5d0ccSMatthew Wilcox  * @node refers to an xa_node; usually the primary one being operated on by
27f8d5d0ccSMatthew Wilcox  * this function.
28f8d5d0ccSMatthew Wilcox  * @offset is the index into the slots array inside an xa_node.
29f8d5d0ccSMatthew Wilcox  * @parent refers to the @xa_node closer to the head than @node.
30f8d5d0ccSMatthew Wilcox  * @entry refers to something stored in a slot in the xarray
31f8d5d0ccSMatthew Wilcox  */
32f8d5d0ccSMatthew Wilcox 
xa_lock_type(const struct xarray * xa)3358d6ea30SMatthew Wilcox static inline unsigned int xa_lock_type(const struct xarray *xa)
3458d6ea30SMatthew Wilcox {
3558d6ea30SMatthew Wilcox 	return (__force unsigned int)xa->xa_flags & 3;
3658d6ea30SMatthew Wilcox }
3758d6ea30SMatthew Wilcox 
xas_lock_type(struct xa_state * xas,unsigned int lock_type)3858d6ea30SMatthew Wilcox static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
3958d6ea30SMatthew Wilcox {
4058d6ea30SMatthew Wilcox 	if (lock_type == XA_LOCK_IRQ)
4158d6ea30SMatthew Wilcox 		xas_lock_irq(xas);
4258d6ea30SMatthew Wilcox 	else if (lock_type == XA_LOCK_BH)
4358d6ea30SMatthew Wilcox 		xas_lock_bh(xas);
4458d6ea30SMatthew Wilcox 	else
4558d6ea30SMatthew Wilcox 		xas_lock(xas);
4658d6ea30SMatthew Wilcox }
4758d6ea30SMatthew Wilcox 
xas_unlock_type(struct xa_state * xas,unsigned int lock_type)4858d6ea30SMatthew Wilcox static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
4958d6ea30SMatthew Wilcox {
5058d6ea30SMatthew Wilcox 	if (lock_type == XA_LOCK_IRQ)
5158d6ea30SMatthew Wilcox 		xas_unlock_irq(xas);
5258d6ea30SMatthew Wilcox 	else if (lock_type == XA_LOCK_BH)
5358d6ea30SMatthew Wilcox 		xas_unlock_bh(xas);
5458d6ea30SMatthew Wilcox 	else
5558d6ea30SMatthew Wilcox 		xas_unlock(xas);
5658d6ea30SMatthew Wilcox }
5758d6ea30SMatthew Wilcox 
xa_track_free(const struct xarray * xa)58371c752dSMatthew Wilcox static inline bool xa_track_free(const struct xarray *xa)
59371c752dSMatthew Wilcox {
60371c752dSMatthew Wilcox 	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
61371c752dSMatthew Wilcox }
62371c752dSMatthew Wilcox 
xa_zero_busy(const struct xarray * xa)633ccaf57aSMatthew Wilcox static inline bool xa_zero_busy(const struct xarray *xa)
643ccaf57aSMatthew Wilcox {
653ccaf57aSMatthew Wilcox 	return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
663ccaf57aSMatthew Wilcox }
673ccaf57aSMatthew Wilcox 
/* Record in the array-wide flags that @mark is in use. */
static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	/* Skip the write entirely when the flag is already set. */
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		return;
	xa->xa_flags |= XA_FLAGS_MARK(mark);
}
739b89a035SMatthew Wilcox 
/* Clear @mark from the array-wide flags. */
static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	/* Skip the write entirely when the flag is already clear. */
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		return;
	xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}
799b89a035SMatthew Wilcox 
/* Per-slot mark bitmap of @node for the given @mark. */
static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	unsigned int idx = (__force unsigned)mark;

	return node->marks[idx];
}
849b89a035SMatthew Wilcox 
node_get_mark(struct xa_node * node,unsigned int offset,xa_mark_t mark)859b89a035SMatthew Wilcox static inline bool node_get_mark(struct xa_node *node,
869b89a035SMatthew Wilcox 		unsigned int offset, xa_mark_t mark)
879b89a035SMatthew Wilcox {
889b89a035SMatthew Wilcox 	return test_bit(offset, node_marks(node, mark));
899b89a035SMatthew Wilcox }
909b89a035SMatthew Wilcox 
919b89a035SMatthew Wilcox /* returns true if the bit was set */
node_set_mark(struct xa_node * node,unsigned int offset,xa_mark_t mark)929b89a035SMatthew Wilcox static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
939b89a035SMatthew Wilcox 				xa_mark_t mark)
949b89a035SMatthew Wilcox {
959b89a035SMatthew Wilcox 	return __test_and_set_bit(offset, node_marks(node, mark));
969b89a035SMatthew Wilcox }
979b89a035SMatthew Wilcox 
989b89a035SMatthew Wilcox /* returns true if the bit was set */
node_clear_mark(struct xa_node * node,unsigned int offset,xa_mark_t mark)999b89a035SMatthew Wilcox static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
1009b89a035SMatthew Wilcox 				xa_mark_t mark)
1019b89a035SMatthew Wilcox {
1029b89a035SMatthew Wilcox 	return __test_and_clear_bit(offset, node_marks(node, mark));
1039b89a035SMatthew Wilcox }
1049b89a035SMatthew Wilcox 
node_any_mark(struct xa_node * node,xa_mark_t mark)1059b89a035SMatthew Wilcox static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
1069b89a035SMatthew Wilcox {
1079b89a035SMatthew Wilcox 	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
1089b89a035SMatthew Wilcox }
1099b89a035SMatthew Wilcox 
/* Set @mark on every slot of @node. */
static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
	unsigned long *bits = node_marks(node, mark);

	bitmap_fill(bits, XA_CHUNK_SIZE);
}
114371c752dSMatthew Wilcox 
/*
 * Step an xa_mark_t to the next mark value.  xa_mark_t is a strong typedef,
 * so the round-trip through unsigned (with __force casts) is required.
 */
#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)
11858d6ea30SMatthew Wilcox 
/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set.  Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	xa_mark_t mark = 0;
	/* One past the last sibling slot covered by this multi-index entry. */
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	for (;;) {
		unsigned long *marks = node_marks(xas->xa_node, mark);

		/* Only rewrite the bitmap if some sibling actually has the mark. */
		if (find_next_bit(marks, limit, xas->xa_offset + 1) != limit) {
			__set_bit(xas->xa_offset, marks);
			bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
		}
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}
14358d6ea30SMatthew Wilcox 
144ad3d6c72SMatthew Wilcox /* extracts the offset within this node from the index */
get_offset(unsigned long index,struct xa_node * node)145ad3d6c72SMatthew Wilcox static unsigned int get_offset(unsigned long index, struct xa_node *node)
146ad3d6c72SMatthew Wilcox {
147ad3d6c72SMatthew Wilcox 	return (index >> node->shift) & XA_CHUNK_MASK;
148ad3d6c72SMatthew Wilcox }
149ad3d6c72SMatthew Wilcox 
xas_set_offset(struct xa_state * xas)150b803b428SMatthew Wilcox static void xas_set_offset(struct xa_state *xas)
151b803b428SMatthew Wilcox {
152b803b428SMatthew Wilcox 	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
153b803b428SMatthew Wilcox }
154b803b428SMatthew Wilcox 
/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	/*
	 * ~ binds tighter than <<, so this clears all index bits at and below
	 * this node's level, then substitutes @offset at this level.
	 */
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}
162ad3d6c72SMatthew Wilcox 
xas_next_offset(struct xa_state * xas)16325a8de7fSMatthew Wilcox (Oracle) static void xas_next_offset(struct xa_state *xas)
164b803b428SMatthew Wilcox {
165b803b428SMatthew Wilcox 	xas->xa_offset++;
166b803b428SMatthew Wilcox 	xas_move_index(xas, xas->xa_offset);
167b803b428SMatthew Wilcox }
168b803b428SMatthew Wilcox 
set_bounds(struct xa_state * xas)169ad3d6c72SMatthew Wilcox static void *set_bounds(struct xa_state *xas)
170ad3d6c72SMatthew Wilcox {
171ad3d6c72SMatthew Wilcox 	xas->xa_node = XAS_BOUNDS;
172ad3d6c72SMatthew Wilcox 	return NULL;
173ad3d6c72SMatthew Wilcox }
174ad3d6c72SMatthew Wilcox 
175ad3d6c72SMatthew Wilcox /*
176ad3d6c72SMatthew Wilcox  * Starts a walk.  If the @xas is already valid, we assume that it's on
177ad3d6c72SMatthew Wilcox  * the right path and just return where we've got to.  If we're in an
178ad3d6c72SMatthew Wilcox  * error state, return NULL.  If the index is outside the current scope
179ad3d6c72SMatthew Wilcox  * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
180ad3d6c72SMatthew Wilcox  * set @xas->xa_node to NULL and return the current head of the array.
181ad3d6c72SMatthew Wilcox  */
xas_start(struct xa_state * xas)182ad3d6c72SMatthew Wilcox static void *xas_start(struct xa_state *xas)
183ad3d6c72SMatthew Wilcox {
184ad3d6c72SMatthew Wilcox 	void *entry;
185ad3d6c72SMatthew Wilcox 
186ad3d6c72SMatthew Wilcox 	if (xas_valid(xas))
187ad3d6c72SMatthew Wilcox 		return xas_reload(xas);
188ad3d6c72SMatthew Wilcox 	if (xas_error(xas))
189ad3d6c72SMatthew Wilcox 		return NULL;
190ad3d6c72SMatthew Wilcox 
191ad3d6c72SMatthew Wilcox 	entry = xa_head(xas->xa);
192ad3d6c72SMatthew Wilcox 	if (!xa_is_node(entry)) {
193ad3d6c72SMatthew Wilcox 		if (xas->xa_index)
194ad3d6c72SMatthew Wilcox 			return set_bounds(xas);
195ad3d6c72SMatthew Wilcox 	} else {
196ad3d6c72SMatthew Wilcox 		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
197ad3d6c72SMatthew Wilcox 			return set_bounds(xas);
198ad3d6c72SMatthew Wilcox 	}
199ad3d6c72SMatthew Wilcox 
200ad3d6c72SMatthew Wilcox 	xas->xa_node = NULL;
201ad3d6c72SMatthew Wilcox 	return entry;
202ad3d6c72SMatthew Wilcox }
203ad3d6c72SMatthew Wilcox 
/* Step one level down from @node towards xa_index, resolving siblings. */
static __always_inline void *xas_descend(struct xa_state *xas,
					struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	/* Sibling slots redirect to the canonical slot of a multi-index entry. */
	while (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
		/*
		 * NOTE(review): a node seen through a sibling slot of an
		 * interior node appears to indicate a racing modification;
		 * substitute a retry entry so the walker restarts.
		 */
		if (node->shift && xa_is_node(entry))
			entry = XA_RETRY_ENTRY;
	}

	xas->xa_offset = offset;
	return entry;
}
221ad3d6c72SMatthew Wilcox 
222ad3d6c72SMatthew Wilcox /**
223ad3d6c72SMatthew Wilcox  * xas_load() - Load an entry from the XArray (advanced).
224ad3d6c72SMatthew Wilcox  * @xas: XArray operation state.
225ad3d6c72SMatthew Wilcox  *
226ad3d6c72SMatthew Wilcox  * Usually walks the @xas to the appropriate state to load the entry
227ad3d6c72SMatthew Wilcox  * stored at xa_index.  However, it will do nothing and return %NULL if
228ad3d6c72SMatthew Wilcox  * @xas is in an error state.  xas_load() will never expand the tree.
229ad3d6c72SMatthew Wilcox  *
230ad3d6c72SMatthew Wilcox  * If the xa_state is set up to operate on a multi-index entry, xas_load()
231ad3d6c72SMatthew Wilcox  * may return %NULL or an internal entry, even if there are entries
232ad3d6c72SMatthew Wilcox  * present within the range specified by @xas.
233ad3d6c72SMatthew Wilcox  *
234ad3d6c72SMatthew Wilcox  * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
235ad3d6c72SMatthew Wilcox  * Return: Usually an entry in the XArray, but see description for exceptions.
236ad3d6c72SMatthew Wilcox  */
xas_load(struct xa_state * xas)237ad3d6c72SMatthew Wilcox void *xas_load(struct xa_state *xas)
238ad3d6c72SMatthew Wilcox {
239ad3d6c72SMatthew Wilcox 	void *entry = xas_start(xas);
240ad3d6c72SMatthew Wilcox 
241ad3d6c72SMatthew Wilcox 	while (xa_is_node(entry)) {
242ad3d6c72SMatthew Wilcox 		struct xa_node *node = xa_to_node(entry);
243ad3d6c72SMatthew Wilcox 
244ad3d6c72SMatthew Wilcox 		if (xas->xa_shift > node->shift)
245ad3d6c72SMatthew Wilcox 			break;
246ad3d6c72SMatthew Wilcox 		entry = xas_descend(xas, node);
24776b4e529SMatthew Wilcox 		if (node->shift == 0)
24876b4e529SMatthew Wilcox 			break;
249ad3d6c72SMatthew Wilcox 	}
250ad3d6c72SMatthew Wilcox 	return entry;
251ad3d6c72SMatthew Wilcox }
252ad3d6c72SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_load);
253ad3d6c72SMatthew Wilcox 
25458d6ea30SMatthew Wilcox #define XA_RCU_FREE	((struct xarray *)1)
25558d6ea30SMatthew Wilcox 
xa_node_free(struct xa_node * node)25658d6ea30SMatthew Wilcox static void xa_node_free(struct xa_node *node)
25758d6ea30SMatthew Wilcox {
25858d6ea30SMatthew Wilcox 	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
25958d6ea30SMatthew Wilcox 	node->array = XA_RCU_FREE;
26058d6ea30SMatthew Wilcox 	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
26158d6ea30SMatthew Wilcox }
26258d6ea30SMatthew Wilcox 
26358d6ea30SMatthew Wilcox /*
26458d6ea30SMatthew Wilcox  * xas_destroy() - Free any resources allocated during the XArray operation.
26558d6ea30SMatthew Wilcox  * @xas: XArray operation state.
26658d6ea30SMatthew Wilcox  *
26769a37a8bSMatthew Wilcox (Oracle)  * Most users will not need to call this function; it is called for you
26869a37a8bSMatthew Wilcox (Oracle)  * by xas_nomem().
26958d6ea30SMatthew Wilcox  */
xas_destroy(struct xa_state * xas)27069a37a8bSMatthew Wilcox (Oracle) void xas_destroy(struct xa_state *xas)
27158d6ea30SMatthew Wilcox {
2728fc75643SMatthew Wilcox (Oracle) 	struct xa_node *next, *node = xas->xa_alloc;
27358d6ea30SMatthew Wilcox 
2748fc75643SMatthew Wilcox (Oracle) 	while (node) {
27558d6ea30SMatthew Wilcox 		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
2768fc75643SMatthew Wilcox (Oracle) 		next = rcu_dereference_raw(node->parent);
2778fc75643SMatthew Wilcox (Oracle) 		radix_tree_node_rcu_free(&node->rcu_head);
2788fc75643SMatthew Wilcox (Oracle) 		xas->xa_alloc = node = next;
2798fc75643SMatthew Wilcox (Oracle) 	}
28058d6ea30SMatthew Wilcox }
2813fec86f8SZi Yan EXPORT_SYMBOL_GPL(xas_destroy);
28258d6ea30SMatthew Wilcox 
/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue.  The caller
 * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc().  More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	/* Only an ENOMEM error means memory was actually needed. */
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
	if (!xas->xa_alloc)
		return false;
	/* ->parent chains preallocated nodes; single node, so terminate it. */
	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	/* Reset the state so the caller's retry starts the walk afresh. */
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
31858d6ea30SMatthew Wilcox 
/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	/* Only an ENOMEM error means memory was actually needed. */
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	/* Only drop the lock when the allocation is allowed to sleep. */
	if (gfpflags_allow_blocking(gfp)) {
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	/* ->parent chains preallocated nodes; single node, so terminate it. */
	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}
35358d6ea30SMatthew Wilcox 
xas_update(struct xa_state * xas,struct xa_node * node)35458d6ea30SMatthew Wilcox static void xas_update(struct xa_state *xas, struct xa_node *node)
35558d6ea30SMatthew Wilcox {
35658d6ea30SMatthew Wilcox 	if (xas->xa_update)
35758d6ea30SMatthew Wilcox 		xas->xa_update(node);
35858d6ea30SMatthew Wilcox 	else
35958d6ea30SMatthew Wilcox 		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
36058d6ea30SMatthew Wilcox }
36158d6ea30SMatthew Wilcox 
/*
 * Get a node for use at @shift: either the one preallocated in the
 * xa_state, or a fresh GFP_NOWAIT allocation.  If xas->xa_node is set,
 * the new node is linked under it at xas->xa_offset.
 */
static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		/* Consume the node stashed by xas_nomem(). */
		xas->xa_alloc = NULL;
	} else {
		gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;

		if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
			gfp |= __GFP_ACCOUNT;

		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
		if (!node) {
			/* Caller is expected to retry via xas_nomem(). */
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		/* Account the new child in the parent before handing it out. */
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}
40158d6ea30SMatthew Wilcox 
#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
	unsigned long nr_slots = xas->xa_sibs + 1UL;

	return nr_slots << xas->xa_shift;
}
#endif
4090e9446c3SMatthew Wilcox 
41058d6ea30SMatthew Wilcox /*
41158d6ea30SMatthew Wilcox  * Use this to calculate the maximum index that will need to be created
41258d6ea30SMatthew Wilcox  * in order to add the entry described by @xas.  Because we cannot store a
4138fc75643SMatthew Wilcox (Oracle)  * multi-index entry at index 0, the calculation is a little more complex
41458d6ea30SMatthew Wilcox  * than you might expect.
41558d6ea30SMatthew Wilcox  */
xas_max(struct xa_state * xas)41658d6ea30SMatthew Wilcox static unsigned long xas_max(struct xa_state *xas)
41758d6ea30SMatthew Wilcox {
41858d6ea30SMatthew Wilcox 	unsigned long max = xas->xa_index;
41958d6ea30SMatthew Wilcox 
42058d6ea30SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI
42158d6ea30SMatthew Wilcox 	if (xas->xa_shift || xas->xa_sibs) {
4220e9446c3SMatthew Wilcox 		unsigned long mask = xas_size(xas) - 1;
42358d6ea30SMatthew Wilcox 		max |= mask;
42458d6ea30SMatthew Wilcox 		if (mask == max)
42558d6ea30SMatthew Wilcox 			max++;
42658d6ea30SMatthew Wilcox 	}
42758d6ea30SMatthew Wilcox #endif
42858d6ea30SMatthew Wilcox 
42958d6ea30SMatthew Wilcox 	return max;
43058d6ea30SMatthew Wilcox }
43158d6ea30SMatthew Wilcox 
43258d6ea30SMatthew Wilcox /* The maximum index that can be contained in the array without expanding it */
max_index(void * entry)43358d6ea30SMatthew Wilcox static unsigned long max_index(void *entry)
43458d6ea30SMatthew Wilcox {
43558d6ea30SMatthew Wilcox 	if (!xa_is_node(entry))
43658d6ea30SMatthew Wilcox 		return 0;
43758d6ea30SMatthew Wilcox 	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
43858d6ea30SMatthew Wilcox }
43958d6ea30SMatthew Wilcox 
xa_zero_to_null(void * entry)44074e2712bSTamir Duberstein static inline void *xa_zero_to_null(void *entry)
44174e2712bSTamir Duberstein {
44274e2712bSTamir Duberstein 	return xa_is_zero(entry) ? NULL : entry;
44374e2712bSTamir Duberstein }
44474e2712bSTamir Duberstein 
/*
 * Reduce the height of the tree: while the top node has a single entry
 * in slot 0, hoist that entry into the head pointer and free the node.
 */
static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		/* Shrinking is only possible with exactly one occupied slot. */
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		/* A non-node entry above the bottom level cannot be hoisted. */
		if (!xa_is_node(entry) && node->shift)
			break;
		if (xa_zero_busy(xa))
			entry = xa_zero_to_null(entry);
		xas->xa_node = XAS_BOUNDS;

		/* Publish the hoisted entry before tearing down the node. */
		RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		/* Leave a retry marker so concurrent RCU walkers restart. */
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		/* The hoisted child is the new top; detach and try again. */
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}
48158d6ea30SMatthew Wilcox 
/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if xa->node has
 * a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		/* Stop at the first node that still holds entries. */
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			/* Deleted the root: the array is now empty. */
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		/* Detach from the parent, then check whether it emptied too. */
		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	/* If we stopped at the root, it may now be shrinkable. */
	if (!node->parent)
		xas_shrink(xas);
}
52158d6ea30SMatthew Wilcox 
/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		/* Depth-first: descend into interior children before freeing. */
		if (node->shift && xa_is_node(entry)) {
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		/* Node fully processed: free it and pop back to the parent. */
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			/* Resume in the parent just past this child's slot. */
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}
56258d6ea30SMatthew Wilcox 
/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 *
 * Return: the shift one level above the current head (so 0 means the
 * entry may be stored directly in the head slot), or -ENOMEM if a node
 * allocation failed.
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		/* Empty array; index 0 needs no nodes at all. */
		if (max == 0)
			return 0;
		/* Report how deep a tree the caller must build. */
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		/* The old head becomes slot 0 of the new topmost node. */
		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				/*
				 * Every slot above the old head is free; if
				 * the array had no free entries before, the
				 * old head (slot 0) must not be marked free.
				 */
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}
63358d6ea30SMatthew Wilcox 
63458d6ea30SMatthew Wilcox /*
63558d6ea30SMatthew Wilcox  * xas_create() - Create a slot to store an entry in.
63658d6ea30SMatthew Wilcox  * @xas: XArray operation state.
63776b4e529SMatthew Wilcox  * @allow_root: %true if we can store the entry in the root directly
63858d6ea30SMatthew Wilcox  *
63958d6ea30SMatthew Wilcox  * Most users will not need to call this function directly, as it is called
64058d6ea30SMatthew Wilcox  * by xas_store().  It is useful for doing conditional store operations
64158d6ea30SMatthew Wilcox  * (see the xa_cmpxchg() implementation for an example).
64258d6ea30SMatthew Wilcox  *
64358d6ea30SMatthew Wilcox  * Return: If the slot already existed, returns the contents of this slot.
644804dfaf0SMatthew Wilcox  * If the slot was newly created, returns %NULL.  If it failed to create the
645804dfaf0SMatthew Wilcox  * slot, returns %NULL and indicates the error in @xas.
64658d6ea30SMatthew Wilcox  */
static void *xas_create(struct xa_state *xas, bool allow_root)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		/* Starting from the head: grow the tree if necessary. */
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		/*
		 * An empty array with ZERO_BUSY semantics keeps a notional
		 * zero entry at index 0 — expand around it.
		 */
		if (!entry && xa_zero_busy(xa))
			entry = XA_ZERO_ENTRY;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		/* Force creation of a node if the root may not hold this. */
		if (!shift && !allow_root)
			shift = XA_CHUNK_SHIFT;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		/* Resume the descent from a previously-walked node. */
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	/* Descend, allocating interior nodes, until we reach @order. */
	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			/* A brand-new node consists entirely of free slots. */
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			/* Hit a multi-index entry covering this index. */
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}
70258d6ea30SMatthew Wilcox 
7032264f513SMatthew Wilcox /**
7042264f513SMatthew Wilcox  * xas_create_range() - Ensure that stores to this range will succeed
7052264f513SMatthew Wilcox  * @xas: XArray operation state.
7062264f513SMatthew Wilcox  *
7072264f513SMatthew Wilcox  * Creates all of the slots in the range covered by @xas.  Sets @xas to
7082264f513SMatthew Wilcox  * create single-index entries and positions it at the beginning of the
7092264f513SMatthew Wilcox  * range.  This is for the benefit of users which have not yet been
7102264f513SMatthew Wilcox  * converted to use multi-index entries.
7112264f513SMatthew Wilcox  */
void xas_create_range(struct xa_state *xas)
{
	/* Save the caller's state so it can be restored afterwards. */
	unsigned long index = xas->xa_index;
	unsigned char shift = xas->xa_shift;
	unsigned char sibs = xas->xa_sibs;

	/* Start at the last index covered by the range; walk backwards. */
	xas->xa_index |= ((sibs + 1UL) << shift) - 1;
	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
		xas->xa_offset |= sibs;
	/* Create single-index entries rather than one multi-index entry. */
	xas->xa_shift = 0;
	xas->xa_sibs = 0;

	for (;;) {
		xas_create(xas, true);
		if (xas_error(xas))
			goto restore;
		/* Done once we are back inside the first chunk of the range. */
		if (xas->xa_index <= (index | XA_CHUNK_MASK))
			goto success;
		xas->xa_index -= XA_CHUNK_SIZE;

		for (;;) {
			struct xa_node *node = xas->xa_node;
			if (node->shift >= shift)
				break;
			/* Climb until we find the next chunk to create. */
			xas->xa_node = xa_parent_locked(xas->xa, node);
			xas->xa_offset = node->offset - 1;
			if (node->offset != 0)
				break;
		}
	}

restore:
	/* Put the operation state back as the caller set it; error kept. */
	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
	xas->xa_index = index;
	return;
success:
	/* Reposition at the beginning of the range for the caller. */
	xas->xa_index = index;
	if (xas->xa_node)
		xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);
7542264f513SMatthew Wilcox 
update_node(struct xa_state * xas,struct xa_node * node,int count,int values)75558d6ea30SMatthew Wilcox static void update_node(struct xa_state *xas, struct xa_node *node,
75658d6ea30SMatthew Wilcox 		int count, int values)
75758d6ea30SMatthew Wilcox {
75858d6ea30SMatthew Wilcox 	if (!node || (!count && !values))
75958d6ea30SMatthew Wilcox 		return;
76058d6ea30SMatthew Wilcox 
76158d6ea30SMatthew Wilcox 	node->count += count;
76258d6ea30SMatthew Wilcox 	node->nr_values += values;
76358d6ea30SMatthew Wilcox 	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
76458d6ea30SMatthew Wilcox 	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
76558d6ea30SMatthew Wilcox 	xas_update(xas, node);
76658d6ea30SMatthew Wilcox 	if (count < 0)
76758d6ea30SMatthew Wilcox 		xas_delete_node(xas);
76858d6ea30SMatthew Wilcox }
76958d6ea30SMatthew Wilcox 
77058d6ea30SMatthew Wilcox /**
77158d6ea30SMatthew Wilcox  * xas_store() - Store this entry in the XArray.
77258d6ea30SMatthew Wilcox  * @xas: XArray operation state.
77358d6ea30SMatthew Wilcox  * @entry: New entry.
77458d6ea30SMatthew Wilcox  *
77558d6ea30SMatthew Wilcox  * If @xas is operating on a multi-index entry, the entry returned by this
77658d6ea30SMatthew Wilcox  * function is essentially meaningless (it may be an internal entry or it
77758d6ea30SMatthew Wilcox  * may be %NULL, even if there are non-NULL entries at some of the indices
77858d6ea30SMatthew Wilcox  * covered by the range).  This is not a problem for any current users,
77958d6ea30SMatthew Wilcox  * and can be changed if needed.
78058d6ea30SMatthew Wilcox  *
78158d6ea30SMatthew Wilcox  * Return: The old entry at this index.
78258d6ea30SMatthew Wilcox  */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;
	int values = 0;
	void *first, *next;
	bool value = xa_is_value(entry);

	if (entry) {
		/* Node and zero entries may not live in the root slot. */
		bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
		first = xas_create(xas, allow_root);
	} else {
		/* Storing NULL never needs to allocate; just walk. */
		first = xas_load(xas);
	}

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	/* Storing the same entry again is a no-op unless we have siblings. */
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	/* @max is the last slot of this node covered by the store. */
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early.  rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next) && (!node || node->shift))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		/* Accumulate the change in present entries / value entries. */
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			/* Later slots in the range point back as siblings. */
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			/* NULL stores stop once past the range's siblings. */
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);
85658d6ea30SMatthew Wilcox 
857f8d5d0ccSMatthew Wilcox /**
8589b89a035SMatthew Wilcox  * xas_get_mark() - Returns the state of this mark.
8599b89a035SMatthew Wilcox  * @xas: XArray operation state.
8609b89a035SMatthew Wilcox  * @mark: Mark number.
8619b89a035SMatthew Wilcox  *
8629b89a035SMatthew Wilcox  * Return: true if the mark is set, false if the mark is clear or @xas
8639b89a035SMatthew Wilcox  * is in an error state.
8649b89a035SMatthew Wilcox  */
xas_get_mark(const struct xa_state * xas,xa_mark_t mark)8659b89a035SMatthew Wilcox bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
8669b89a035SMatthew Wilcox {
8679b89a035SMatthew Wilcox 	if (xas_invalid(xas))
8689b89a035SMatthew Wilcox 		return false;
8699b89a035SMatthew Wilcox 	if (!xas->xa_node)
8709b89a035SMatthew Wilcox 		return xa_marked(xas->xa, mark);
8719b89a035SMatthew Wilcox 	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
8729b89a035SMatthew Wilcox }
8739b89a035SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_get_mark);
8749b89a035SMatthew Wilcox 
8759b89a035SMatthew Wilcox /**
8769b89a035SMatthew Wilcox  * xas_set_mark() - Sets the mark on this entry and its parents.
8779b89a035SMatthew Wilcox  * @xas: XArray operation state.
8789b89a035SMatthew Wilcox  * @mark: Mark number.
8799b89a035SMatthew Wilcox  *
8809b89a035SMatthew Wilcox  * Sets the specified mark on this entry, and walks up the tree setting it
8819b89a035SMatthew Wilcox  * on all the ancestor entries.  Does nothing if @xas has not been walked to
8829b89a035SMatthew Wilcox  * an entry, or is in an error state.
8839b89a035SMatthew Wilcox  */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	/*
	 * Walk towards the root; once an ancestor already has the mark
	 * set, every node above it does too, so we can stop.
	 */
	for (node = xas->xa_node; node;
	     node = xa_parent_locked(xas->xa, node)) {
		if (node_set_mark(node, offset, mark))
			return;
		offset = node->offset;
	}

	if (!xa_marked(xas->xa, mark))
		xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);
9039b89a035SMatthew Wilcox 
9049b89a035SMatthew Wilcox /**
9059b89a035SMatthew Wilcox  * xas_clear_mark() - Clears the mark on this entry and its parents.
9069b89a035SMatthew Wilcox  * @xas: XArray operation state.
9079b89a035SMatthew Wilcox  * @mark: Mark number.
9089b89a035SMatthew Wilcox  *
9099b89a035SMatthew Wilcox  * Clears the specified mark on this entry, and walks back to the head
9109b89a035SMatthew Wilcox  * attempting to clear it on all the ancestor entries.  Does nothing if
9119b89a035SMatthew Wilcox  * @xas has not been walked to an entry, or is in an error state.
9129b89a035SMatthew Wilcox  */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	for (node = xas->xa_node; node;
	     node = xa_parent_locked(xas->xa, node)) {
		/* Stop if the mark was already clear here ... */
		if (!node_clear_mark(node, offset, mark))
			return;
		/* ... or if a sibling slot still carries it. */
		if (node_any_mark(node, mark))
			return;
		offset = node->offset;
	}

	/* Reached the top with no marked entries left in the array. */
	if (xa_marked(xas->xa, mark))
		xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);
9359b89a035SMatthew Wilcox 
9369b89a035SMatthew Wilcox /**
93758d6ea30SMatthew Wilcox  * xas_init_marks() - Initialise all marks for the entry
93858d6ea30SMatthew Wilcox  * @xas: Array operations state.
93958d6ea30SMatthew Wilcox  *
94058d6ea30SMatthew Wilcox  * Initialise all marks for the entry specified by @xas.  If we're tracking
94158d6ea30SMatthew Wilcox  * free entries with a mark, we need to set it on all entries.  All other
94258d6ea30SMatthew Wilcox  * marks are cleared.
94358d6ea30SMatthew Wilcox  *
94458d6ea30SMatthew Wilcox  * This implementation is not as efficient as it could be; we may walk
94558d6ea30SMatthew Wilcox  * up the tree multiple times.
94658d6ea30SMatthew Wilcox  */
xas_init_marks(const struct xa_state * xas)94758d6ea30SMatthew Wilcox void xas_init_marks(const struct xa_state *xas)
94858d6ea30SMatthew Wilcox {
94958d6ea30SMatthew Wilcox 	xa_mark_t mark = 0;
95058d6ea30SMatthew Wilcox 
95158d6ea30SMatthew Wilcox 	for (;;) {
952371c752dSMatthew Wilcox 		if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
953371c752dSMatthew Wilcox 			xas_set_mark(xas, mark);
954371c752dSMatthew Wilcox 		else
95558d6ea30SMatthew Wilcox 			xas_clear_mark(xas, mark);
95658d6ea30SMatthew Wilcox 		if (mark == XA_MARK_MAX)
95758d6ea30SMatthew Wilcox 			break;
95858d6ea30SMatthew Wilcox 		mark_inc(mark);
95958d6ea30SMatthew Wilcox 	}
96058d6ea30SMatthew Wilcox }
96158d6ea30SMatthew Wilcox EXPORT_SYMBOL_GPL(xas_init_marks);
96258d6ea30SMatthew Wilcox 
9638fc75643SMatthew Wilcox (Oracle) #ifdef CONFIG_XARRAY_MULTI
node_get_marks(struct xa_node * node,unsigned int offset)9648fc75643SMatthew Wilcox (Oracle) static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
9658fc75643SMatthew Wilcox (Oracle) {
9668fc75643SMatthew Wilcox (Oracle) 	unsigned int marks = 0;
9678fc75643SMatthew Wilcox (Oracle) 	xa_mark_t mark = XA_MARK_0;
9688fc75643SMatthew Wilcox (Oracle) 
9698fc75643SMatthew Wilcox (Oracle) 	for (;;) {
9708fc75643SMatthew Wilcox (Oracle) 		if (node_get_mark(node, offset, mark))
9718fc75643SMatthew Wilcox (Oracle) 			marks |= 1 << (__force unsigned int)mark;
9728fc75643SMatthew Wilcox (Oracle) 		if (mark == XA_MARK_MAX)
9738fc75643SMatthew Wilcox (Oracle) 			break;
9748fc75643SMatthew Wilcox (Oracle) 		mark_inc(mark);
9758fc75643SMatthew Wilcox (Oracle) 	}
9768fc75643SMatthew Wilcox (Oracle) 
9778fc75643SMatthew Wilcox (Oracle) 	return marks;
9788fc75643SMatthew Wilcox (Oracle) }
9798fc75643SMatthew Wilcox (Oracle) 
/* Set @mark on every canonical slot of @node, spaced @sibs+1 apart. */
static inline void node_mark_slots(struct xa_node *node, unsigned int sibs,
		xa_mark_t mark)
{
	unsigned int step = sibs + 1;
	int slot;

	/* With no siblings, every slot is canonical. */
	if (!sibs) {
		node_mark_all(node, mark);
		return;
	}

	for (slot = 0; slot < XA_CHUNK_SIZE; slot += step)
		node_set_mark(node, slot, mark);
}
9922a0774c2SMatthew Wilcox (Oracle) 
node_set_marks(struct xa_node * node,unsigned int offset,struct xa_node * child,unsigned int sibs,unsigned int marks)9938fc75643SMatthew Wilcox (Oracle) static void node_set_marks(struct xa_node *node, unsigned int offset,
9942a0774c2SMatthew Wilcox (Oracle) 			struct xa_node *child, unsigned int sibs,
9952a0774c2SMatthew Wilcox (Oracle) 			unsigned int marks)
9968fc75643SMatthew Wilcox (Oracle) {
9978fc75643SMatthew Wilcox (Oracle) 	xa_mark_t mark = XA_MARK_0;
9988fc75643SMatthew Wilcox (Oracle) 
9998fc75643SMatthew Wilcox (Oracle) 	for (;;) {
10008fc75643SMatthew Wilcox (Oracle) 		if (marks & (1 << (__force unsigned int)mark)) {
10018fc75643SMatthew Wilcox (Oracle) 			node_set_mark(node, offset, mark);
10028fc75643SMatthew Wilcox (Oracle) 			if (child)
10032a0774c2SMatthew Wilcox (Oracle) 				node_mark_slots(child, sibs, mark);
10048fc75643SMatthew Wilcox (Oracle) 		}
10058fc75643SMatthew Wilcox (Oracle) 		if (mark == XA_MARK_MAX)
10068fc75643SMatthew Wilcox (Oracle) 			break;
10078fc75643SMatthew Wilcox (Oracle) 		mark_inc(mark);
10088fc75643SMatthew Wilcox (Oracle) 	}
10098fc75643SMatthew Wilcox (Oracle) }
10108fc75643SMatthew Wilcox (Oracle) 
__xas_init_node_for_split(struct xa_state * xas,struct xa_node * node,void * entry)10113fec86f8SZi Yan static void __xas_init_node_for_split(struct xa_state *xas,
10123fec86f8SZi Yan 		struct xa_node *node, void *entry)
10133fec86f8SZi Yan {
10143fec86f8SZi Yan 	unsigned int i;
10153fec86f8SZi Yan 	void *sibling = NULL;
10163fec86f8SZi Yan 	unsigned int mask = xas->xa_sibs;
10173fec86f8SZi Yan 
10183fec86f8SZi Yan 	if (!node)
10193fec86f8SZi Yan 		return;
10203fec86f8SZi Yan 	node->array = xas->xa;
10213fec86f8SZi Yan 	for (i = 0; i < XA_CHUNK_SIZE; i++) {
10223fec86f8SZi Yan 		if ((i & mask) == 0) {
10233fec86f8SZi Yan 			RCU_INIT_POINTER(node->slots[i], entry);
10243fec86f8SZi Yan 			sibling = xa_mk_sibling(i);
10253fec86f8SZi Yan 		} else {
10263fec86f8SZi Yan 			RCU_INIT_POINTER(node->slots[i], sibling);
10273fec86f8SZi Yan 		}
10283fec86f8SZi Yan 	}
10293fec86f8SZi Yan }
10303fec86f8SZi Yan 
10318fc75643SMatthew Wilcox (Oracle) /**
10328fc75643SMatthew Wilcox (Oracle)  * xas_split_alloc() - Allocate memory for splitting an entry.
10338fc75643SMatthew Wilcox (Oracle)  * @xas: XArray operation state.
10348fc75643SMatthew Wilcox (Oracle)  * @entry: New entry which will be stored in the array.
103512efebabSMatthew Wilcox (Oracle)  * @order: Current entry order.
10368fc75643SMatthew Wilcox (Oracle)  * @gfp: Memory allocation flags.
10378fc75643SMatthew Wilcox (Oracle)  *
10388fc75643SMatthew Wilcox (Oracle)  * This function should be called before calling xas_split().
10398fc75643SMatthew Wilcox (Oracle)  * If necessary, it will allocate new nodes (and fill them with @entry)
10408fc75643SMatthew Wilcox (Oracle)  * to prepare for the upcoming split of an entry of @order size into
10418fc75643SMatthew Wilcox (Oracle)  * entries of the order stored in the @xas.
10428fc75643SMatthew Wilcox (Oracle)  *
10438fc75643SMatthew Wilcox (Oracle)  * Context: May sleep if @gfp flags permit.
10448fc75643SMatthew Wilcox (Oracle)  */
void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
		gfp_t gfp)
{
	/* Number of extra slots the old entry occupies at its level. */
	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;

	/* XXX: no support for splitting really large entries yet */
	if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order))
		goto nomem;
	/* Split fits within one node's slots; nothing to preallocate. */
	if (xas->xa_shift + XA_CHUNK_SHIFT > order)
		return;

	/* Preallocate one child node per slot the old entry occupied. */
	do {
		struct xa_node *node;

		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
		if (!node)
			goto nomem;

		__xas_init_node_for_split(xas, node, entry);
		/* Chain preallocated nodes through their parent pointers. */
		RCU_INIT_POINTER(node->parent, xas->xa_alloc);
		xas->xa_alloc = node;
	} while (sibs-- > 0);

	return;
nomem:
	/* Free anything already chained and report the failure in @xas. */
	xas_destroy(xas);
	xas_set_err(xas, -ENOMEM);
}
EXPORT_SYMBOL_GPL(xas_split_alloc);
10748fc75643SMatthew Wilcox (Oracle) 
10758fc75643SMatthew Wilcox (Oracle) /**
10768fc75643SMatthew Wilcox (Oracle)  * xas_split() - Split a multi-index entry into smaller entries.
10778fc75643SMatthew Wilcox (Oracle)  * @xas: XArray operation state.
10788fc75643SMatthew Wilcox (Oracle)  * @entry: New entry to store in the array.
107912efebabSMatthew Wilcox (Oracle)  * @order: Current entry order.
10808fc75643SMatthew Wilcox (Oracle)  *
108112efebabSMatthew Wilcox (Oracle)  * The size of the new entries is set in @xas.  The value in @entry is
108212efebabSMatthew Wilcox (Oracle)  * copied to all the replacement entries.
10838fc75643SMatthew Wilcox (Oracle)  *
10848fc75643SMatthew Wilcox (Oracle)  * Context: Any context.  The caller should hold the xa_lock.
10858fc75643SMatthew Wilcox (Oracle)  */
void xas_split(struct xa_state *xas, void *entry, unsigned int order)
{
	/* Number of extra slots the old entry occupies in this node. */
	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	unsigned int offset, marks;
	struct xa_node *node;
	void *curr = xas_load(xas);
	int values = 0;

	node = xas->xa_node;
	if (xas_top(node))
		return;

	/* Remember which marks were set so the split preserves them. */
	marks = node_get_marks(node, xas->xa_offset);

	/* Work backwards from the last slot the old entry occupied. */
	offset = xas->xa_offset + sibs;
	do {
		if (xas->xa_shift < node->shift) {
			/* Replace this slot with a preallocated child node. */
			struct xa_node *child = xas->xa_alloc;

			xas->xa_alloc = rcu_dereference_raw(child->parent);
			child->shift = node->shift - XA_CHUNK_SHIFT;
			child->offset = offset;
			child->count = XA_CHUNK_SIZE;
			child->nr_values = xa_is_value(entry) ?
					XA_CHUNK_SIZE : 0;
			RCU_INIT_POINTER(child->parent, node);
			node_set_marks(node, offset, child, xas->xa_sibs,
					marks);
			rcu_assign_pointer(node->slots[offset],
					xa_mk_node(child));
			if (xa_is_value(curr))
				values--;
			xas_update(xas, child);
		} else {
			/* Split within this node: canonical slot + siblings. */
			unsigned int canon = offset - xas->xa_sibs;

			node_set_marks(node, canon, NULL, 0, marks);
			rcu_assign_pointer(node->slots[canon], entry);
			while (offset > canon)
				rcu_assign_pointer(node->slots[offset--],
						xa_mk_sibling(canon));
			values += (xa_is_value(entry) - xa_is_value(curr)) *
					(xas->xa_sibs + 1);
		}
	} while (offset-- > xas->xa_offset);

	node->nr_values += values;
	xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_split);
11363fec86f8SZi Yan 
11373fec86f8SZi Yan /**
1138*200a89c1SZi Yan  * xas_try_split_min_order() - Minimal split order xas_try_split() can accept
1139*200a89c1SZi Yan  * @order: Current entry order.
1140*200a89c1SZi Yan  *
1141*200a89c1SZi Yan  * xas_try_split() can split a multi-index entry to smaller than @order - 1 if
1142*200a89c1SZi Yan  * no new xa_node is needed. This function provides the minimal order
1143*200a89c1SZi Yan  * xas_try_split() supports.
1144*200a89c1SZi Yan  *
1145*200a89c1SZi Yan  * Return: the minimal order xas_try_split() supports
1146*200a89c1SZi Yan  *
1147*200a89c1SZi Yan  * Context: Any context.
1148*200a89c1SZi Yan  *
1149*200a89c1SZi Yan  */
xas_try_split_min_order(unsigned int order)1150*200a89c1SZi Yan unsigned int xas_try_split_min_order(unsigned int order)
1151*200a89c1SZi Yan {
1152*200a89c1SZi Yan 	if (order % XA_CHUNK_SHIFT == 0)
1153*200a89c1SZi Yan 		return order == 0 ? 0 : order - 1;
1154*200a89c1SZi Yan 
1155*200a89c1SZi Yan 	return order - (order % XA_CHUNK_SHIFT);
1156*200a89c1SZi Yan }
1157*200a89c1SZi Yan EXPORT_SYMBOL_GPL(xas_try_split_min_order);
1158*200a89c1SZi Yan 
/**
 * xas_try_split() - Try to split a multi-index entry.
 * @xas: XArray operation state.
 * @entry: New entry to store in the array.
 * @order: Current entry order.
 *
 * The size of the new entries is set in @xas.  The value in @entry is
 * copied to all the replacement entries. If and only if one new xa_node is
 * needed, the function will use GFP_NOWAIT to get one if xas->xa_alloc is
 * NULL. If more new xa_node are needed, the function gives EINVAL error.
 *
 * NOTE: use xas_try_split_min_order() to get next split order instead of
 * @order - 1 if you want to minimize xas_try_split() calls.
 *
 * Context: Any context.  The caller should hold the xa_lock.
 */
void xas_try_split(struct xa_state *xas, void *entry, unsigned int order)
{
	/* Number of extra (sibling) slots the entry occupies at this level. */
	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	unsigned int offset, marks;
	struct xa_node *node;
	void *curr = xas_load(xas);
	int values = 0;
	gfp_t gfp = GFP_NOWAIT;

	node = xas->xa_node;
	if (xas_top(node))
		return;

	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;

	marks = node_get_marks(node, xas->xa_offset);

	/* Last slot (highest offset) covered by the entry in this node. */
	offset = xas->xa_offset + sibs;

	if (xas->xa_shift < node->shift) {
		/* Vertical split: a new child node is required. */
		struct xa_node *child = xas->xa_alloc;
		unsigned int expected_sibs =
			(1 << ((order - 1) % XA_CHUNK_SHIFT)) - 1;

		/*
		 * No support for splitting sibling entries
		 * (horizontally) or cascade split (vertically), which
		 * requires two or more new xa_nodes.
		 * Since if one xa_node allocation fails,
		 * it is hard to free the prior allocations.
		 */
		if (sibs || xas->xa_sibs != expected_sibs) {
			xas_destroy(xas);
			xas_set_err(xas, -EINVAL);
			return;
		}

		if (!child) {
			/* No preallocated node; try an opportunistic alloc. */
			child = kmem_cache_alloc_lru(radix_tree_node_cachep,
						     xas->xa_lru, gfp);
			if (!child) {
				xas_destroy(xas);
				xas_set_err(xas, -ENOMEM);
				return;
			}
			RCU_INIT_POINTER(child->parent, xas->xa_alloc);
		}
		__xas_init_node_for_split(xas, child, entry);

		/* Pop the node off the xa_alloc chain and wire it in. */
		xas->xa_alloc = rcu_dereference_raw(child->parent);
		child->shift = node->shift - XA_CHUNK_SHIFT;
		child->offset = offset;
		child->count = XA_CHUNK_SIZE;
		child->nr_values = xa_is_value(entry) ?
				XA_CHUNK_SIZE : 0;
		RCU_INIT_POINTER(child->parent, node);
		node_set_marks(node, offset, child, xas->xa_sibs,
				marks);
		rcu_assign_pointer(node->slots[offset],
				xa_mk_node(child));
		if (xa_is_value(curr))
			values--;
		xas_update(xas, child);

	} else {
		/*
		 * Horizontal split within this node: rewrite the canonical
		 * slot and point the trailing slots at it as siblings.
		 */
		do {
			unsigned int canon = offset - xas->xa_sibs;

			node_set_marks(node, canon, NULL, 0, marks);
			rcu_assign_pointer(node->slots[canon], entry);
			while (offset > canon)
				rcu_assign_pointer(node->slots[offset--],
						xa_mk_sibling(canon));
			values += (xa_is_value(entry) - xa_is_value(curr)) *
					(xas->xa_sibs + 1);
		} while (offset-- > xas->xa_offset);
	}

	node->nr_values += values;
	xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_try_split);
12588fc75643SMatthew Wilcox (Oracle) #endif
12598fc75643SMatthew Wilcox (Oracle) 
/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration.  If a user needs
 * to pause a reverse iteration, we will need a xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (xas_invalid(xas))
		return;

	xas->xa_node = XAS_RESTART;
	if (node) {
		unsigned long offset = xas->xa_offset;
		/* Skip past any sibling slots of the current entry. */
		while (++offset < XA_CHUNK_SIZE) {
			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
				break;
		}
		/* Advance xa_index to the first index of the next slot. */
		xas->xa_index &= ~0UL << node->shift;
		xas->xa_index += (offset - xas->xa_offset) << node->shift;
		/* Index wrapped to 0: we walked off the end of the array. */
		if (xas->xa_index == 0)
			xas->xa_node = XAS_BOUNDS;
	} else {
		xas->xa_index++;
	}
}
EXPORT_SYMBOL_GPL(xas_pause);
1298b803b428SMatthew Wilcox 
/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index--;
	if (!xas->xa_node)
		return set_bounds(xas);
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset--;

	/*
	 * 255 means the offset wrapped below slot 0; climb towards the
	 * root until we find a node we haven't run off the front of.
	 */
	while (xas->xa_offset == 255) {
		xas->xa_offset = xas->xa_node->offset - 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	/* Descend through internal nodes to the entry at this index. */
	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_prev);
133764d3e9a9SMatthew Wilcox 
/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index++;
	if (!xas->xa_node)
		return set_bounds(xas);
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset++;

	/*
	 * Ran off the end of this node's slots; climb towards the root
	 * until we find a node with slots remaining.
	 */
	while (xas->xa_offset == XA_CHUNK_SIZE) {
		xas->xa_offset = xas->xa_node->offset + 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	/* Descend through internal nodes to the entry at this index. */
	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_next);
137664d3e9a9SMatthew Wilcox 
/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array.  This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
	void *entry;

	if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
		return NULL;
	if (xas->xa_index > max)
		return set_bounds(xas);

	if (!xas->xa_node) {
		/* Empty array: index 1 is the smallest index not present. */
		xas->xa_index = 1;
		return set_bounds(xas);
	} else if (xas->xa_node == XAS_RESTART) {
		/* Not walked yet (or paused); (re)load from xa_index. */
		entry = xas_load(xas);
		if (entry || xas_not_node(xas->xa_node))
			return entry;
	} else if (!xas->xa_node->shift &&
		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
		/* Resynchronise the offset with xa_index in a leaf node. */
		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
	}

	xas_next_offset(xas);

	while (xas->xa_node && (xas->xa_index <= max)) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			/* End of this node; continue in the parent. */
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (xa_is_node(entry)) {
			/* Descend into an internal node. */
			xas->xa_node = xa_to_node(entry);
			xas->xa_offset = 0;
			continue;
		}
		if (entry && !xa_is_sibling(entry))
			return entry;

		xas_next_offset(xas);
	}

	if (!xas->xa_node)
		xas->xa_node = XAS_BOUNDS;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
1440b803b428SMatthew Wilcox 
/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array.  This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
	/* @advance: the slot we are on has been processed; move past it. */
	bool advance = true;
	unsigned int offset;
	void *entry;

	if (xas_error(xas))
		return NULL;
	if (xas->xa_index > max)
		goto max;

	if (!xas->xa_node) {
		/* Empty array: index 1 is the smallest index not present. */
		xas->xa_index = 1;
		goto out;
	} else if (xas_top(xas->xa_node)) {
		/* First walk: start from the head of the array. */
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
		if (xas->xa_index > max_index(entry))
			goto out;
		if (!xa_is_node(entry)) {
			/* Single-entry array: marks live on the xarray. */
			if (xa_marked(xas->xa, mark))
				return entry;
			xas->xa_index = 1;
			goto out;
		}
		xas->xa_node = xa_to_node(entry);
		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
	}

	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			/* End of this node; continue in the parent. */
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			advance = false;
			continue;
		}

		if (!advance) {
			/* Canonicalise a sibling slot before checking marks. */
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
				xas->xa_offset = xa_to_sibling(entry);
				xas_move_index(xas, xas->xa_offset);
			}
		}

		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
			advance = false;
			xas_move_index(xas, offset);
			/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
			xas->xa_offset = offset;
			if (offset == XA_CHUNK_SIZE)
				continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
			continue;
		if (xa_is_sibling(entry))
			continue;
		if (!xa_is_node(entry))
			return entry;
		/* Marked slot holds an internal node; descend into it. */
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}

out:
	if (xas->xa_index > max)
		goto max;
	return set_bounds(xas);
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);
1542b803b428SMatthew Wilcox 
/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context.  Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
	void *curr;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node)
		return NULL;

	if (xas_top(xas->xa_node)) {
		/* First call: descend from the head to the starting entry. */
		curr = xas_start(xas);
		if (!curr)
			return NULL;
		while (xa_is_node(curr)) {
			struct xa_node *node = xa_to_node(curr);
			curr = xas_descend(xas, node);
		}
		if (curr)
			return curr;
	}

	/* The range lies entirely below this node; nothing left to scan. */
	if (xas->xa_node->shift > xas->xa_shift)
		return NULL;

	for (;;) {
		if (xas->xa_node->shift == xas->xa_shift) {
			/* Scanned all slots the range covers at this level. */
			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
				break;
		} else if (xas->xa_offset == XA_CHUNK_MASK) {
			/* End of a lower-level node; pop up to the parent. */
			xas->xa_offset = xas->xa_node->offset;
			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			continue;
		}
		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
		if (xa_is_sibling(curr))
			continue;
		/* Descend through internal nodes to the leftmost entry. */
		while (xa_is_node(curr)) {
			xas->xa_node = xa_to_node(curr);
			xas->xa_offset = 0;
			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
		}
		if (curr)
			return curr;
	}
	/* Rewind to the canonical offset for the range. */
	xas->xa_offset -= xas->xa_sibs;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);
16034e99d4e9SMatthew Wilcox 
16044e99d4e9SMatthew Wilcox /**
1605ad3d6c72SMatthew Wilcox  * xa_load() - Load an entry from an XArray.
1606ad3d6c72SMatthew Wilcox  * @xa: XArray.
1607ad3d6c72SMatthew Wilcox  * @index: index into array.
1608ad3d6c72SMatthew Wilcox  *
1609ad3d6c72SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
1610ad3d6c72SMatthew Wilcox  * Return: The entry at @index in @xa.
1611ad3d6c72SMatthew Wilcox  */
xa_load(struct xarray * xa,unsigned long index)1612ad3d6c72SMatthew Wilcox void *xa_load(struct xarray *xa, unsigned long index)
1613ad3d6c72SMatthew Wilcox {
1614ad3d6c72SMatthew Wilcox 	XA_STATE(xas, xa, index);
1615ad3d6c72SMatthew Wilcox 	void *entry;
1616ad3d6c72SMatthew Wilcox 
1617ad3d6c72SMatthew Wilcox 	rcu_read_lock();
1618ad3d6c72SMatthew Wilcox 	do {
161974e2712bSTamir Duberstein 		entry = xa_zero_to_null(xas_load(&xas));
1620ad3d6c72SMatthew Wilcox 	} while (xas_retry(&xas, entry));
1621ad3d6c72SMatthew Wilcox 	rcu_read_unlock();
1622ad3d6c72SMatthew Wilcox 
1623ad3d6c72SMatthew Wilcox 	return entry;
1624ad3d6c72SMatthew Wilcox }
1625ad3d6c72SMatthew Wilcox EXPORT_SYMBOL(xa_load);
1626ad3d6c72SMatthew Wilcox 
xas_result(struct xa_state * xas,void * curr)162758d6ea30SMatthew Wilcox static void *xas_result(struct xa_state *xas, void *curr)
162858d6ea30SMatthew Wilcox {
162958d6ea30SMatthew Wilcox 	if (xas_error(xas))
163058d6ea30SMatthew Wilcox 		curr = xas->xa_node;
163179ada2aeSTamir Duberstein 	return curr;
163258d6ea30SMatthew Wilcox }
163358d6ea30SMatthew Wilcox 
163458d6ea30SMatthew Wilcox /**
163558d6ea30SMatthew Wilcox  * __xa_erase() - Erase this entry from the XArray while locked.
163658d6ea30SMatthew Wilcox  * @xa: XArray.
163758d6ea30SMatthew Wilcox  * @index: Index into array.
163858d6ea30SMatthew Wilcox  *
1639809ab937SMatthew Wilcox  * After this function returns, loading from @index will return %NULL.
1640809ab937SMatthew Wilcox  * If the index is part of a multi-index entry, all indices will be erased
1641809ab937SMatthew Wilcox  * and none of the entries will be part of a multi-index entry.
164258d6ea30SMatthew Wilcox  *
1643809ab937SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.
1644809ab937SMatthew Wilcox  * Return: The entry which used to be at this index.
164558d6ea30SMatthew Wilcox  */
__xa_erase(struct xarray * xa,unsigned long index)164658d6ea30SMatthew Wilcox void *__xa_erase(struct xarray *xa, unsigned long index)
164758d6ea30SMatthew Wilcox {
164858d6ea30SMatthew Wilcox 	XA_STATE(xas, xa, index);
164979ada2aeSTamir Duberstein 	return xas_result(&xas, xa_zero_to_null(xas_store(&xas, NULL)));
165058d6ea30SMatthew Wilcox }
16519ee5a3b7SMatthew Wilcox EXPORT_SYMBOL(__xa_erase);
165258d6ea30SMatthew Wilcox 
/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
void *xa_erase(struct xarray *xa, unsigned long index)
{
	void *old;

	/* Take the lock ourselves and delegate to the locked variant. */
	xa_lock(xa);
	old = __xa_erase(xa, index);
	xa_unlock(xa);

	return old;
}
EXPORT_SYMBOL(xa_erase);
16769c16bb88SMatthew Wilcox 
/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	/* Advanced-API entries (internal values) must not be stored here. */
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);
	/*
	 * In allocating arrays, a NULL store must keep the index allocated,
	 * so substitute the reserved zero entry.
	 */
	if (xa_track_free(xa) && !entry)
		entry = XA_ZERO_ENTRY;

	/* Retry after dropping the lock to allocate, if @gfp permits. */
	do {
		curr = xas_store(&xas, entry);
		if (xa_track_free(xa))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, xa_zero_to_null(curr));
}
EXPORT_SYMBOL(__xa_store);
171158d6ea30SMatthew Wilcox 
17129b89a035SMatthew Wilcox /**
1713611f3186SMatthew Wilcox  * xa_store() - Store this entry in the XArray.
1714611f3186SMatthew Wilcox  * @xa: XArray.
1715611f3186SMatthew Wilcox  * @index: Index into array.
1716611f3186SMatthew Wilcox  * @entry: New entry.
1717611f3186SMatthew Wilcox  * @gfp: Memory allocation flags.
1718611f3186SMatthew Wilcox  *
1719611f3186SMatthew Wilcox  * After this function returns, loads from this index will return @entry.
17208fc75643SMatthew Wilcox (Oracle)  * Storing into an existing multi-index entry updates the entry of every index.
1721611f3186SMatthew Wilcox  * The marks associated with @index are unaffected unless @entry is %NULL.
1722611f3186SMatthew Wilcox  *
1723611f3186SMatthew Wilcox  * Context: Any context.  Takes and releases the xa_lock.
1724611f3186SMatthew Wilcox  * May sleep if the @gfp flags permit.
1725611f3186SMatthew Wilcox  * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
1726611f3186SMatthew Wilcox  * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
1727611f3186SMatthew Wilcox  * failed.
1728611f3186SMatthew Wilcox  */
xa_store(struct xarray * xa,unsigned long index,void * entry,gfp_t gfp)1729611f3186SMatthew Wilcox void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1730611f3186SMatthew Wilcox {
1731611f3186SMatthew Wilcox 	void *curr;
1732611f3186SMatthew Wilcox 
1733611f3186SMatthew Wilcox 	xa_lock(xa);
1734611f3186SMatthew Wilcox 	curr = __xa_store(xa, index, entry, gfp);
1735611f3186SMatthew Wilcox 	xa_unlock(xa);
1736611f3186SMatthew Wilcox 
1737611f3186SMatthew Wilcox 	return curr;
1738611f3186SMatthew Wilcox }
1739611f3186SMatthew Wilcox EXPORT_SYMBOL(xa_store);
1740611f3186SMatthew Wilcox 
174179ada2aeSTamir Duberstein static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
174279ada2aeSTamir Duberstein 			void *old, void *entry, gfp_t gfp);
174379ada2aeSTamir Duberstein 
1744611f3186SMatthew Wilcox /**
174541aec91fSMatthew Wilcox  * __xa_cmpxchg() - Store this entry in the XArray.
174641aec91fSMatthew Wilcox  * @xa: XArray.
174741aec91fSMatthew Wilcox  * @index: Index into array.
174841aec91fSMatthew Wilcox  * @old: Old value to test against.
174941aec91fSMatthew Wilcox  * @entry: New entry.
175041aec91fSMatthew Wilcox  * @gfp: Memory allocation flags.
175141aec91fSMatthew Wilcox  *
175241aec91fSMatthew Wilcox  * You must already be holding the xa_lock when calling this function.
175341aec91fSMatthew Wilcox  * It will drop the lock if needed to allocate memory, and then reacquire
175441aec91fSMatthew Wilcox  * it afterwards.
175541aec91fSMatthew Wilcox  *
175641aec91fSMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.  May
175741aec91fSMatthew Wilcox  * release and reacquire xa_lock if @gfp flags permit.
175841aec91fSMatthew Wilcox  * Return: The old entry at this index or xa_err() if an error happened.
175941aec91fSMatthew Wilcox  */
__xa_cmpxchg(struct xarray * xa,unsigned long index,void * old,void * entry,gfp_t gfp)176041aec91fSMatthew Wilcox void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
176141aec91fSMatthew Wilcox 			void *old, void *entry, gfp_t gfp)
176241aec91fSMatthew Wilcox {
176379ada2aeSTamir Duberstein 	return xa_zero_to_null(__xa_cmpxchg_raw(xa, index, old, entry, gfp));
176479ada2aeSTamir Duberstein }
176579ada2aeSTamir Duberstein EXPORT_SYMBOL(__xa_cmpxchg);
176679ada2aeSTamir Duberstein 
/*
 * Like __xa_cmpxchg(), but returns the raw entry, leaving any
 * XA_ZERO_ENTRY visible to the caller — __xa_insert() needs to
 * distinguish a reserved slot from a genuinely empty one.
 */
static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	/* Internal entries of the advanced API may not be stored here. */
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_load(&xas);
		if (curr == old) {
			xas_store(&xas, entry);
			/*
			 * Storing into a previously empty slot of an
			 * allocating array consumes that free index.
			 */
			if (xa_track_free(xa) && entry && !curr)
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
178741aec91fSMatthew Wilcox 
178841aec91fSMatthew Wilcox /**
1789b0606fedSMatthew Wilcox  * __xa_insert() - Store this entry in the XArray if no entry is present.
1790b0606fedSMatthew Wilcox  * @xa: XArray.
1791b0606fedSMatthew Wilcox  * @index: Index into array.
1792b0606fedSMatthew Wilcox  * @entry: New entry.
1793b0606fedSMatthew Wilcox  * @gfp: Memory allocation flags.
1794b0606fedSMatthew Wilcox  *
1795b0606fedSMatthew Wilcox  * Inserting a NULL entry will store a reserved entry (like xa_reserve())
1796b0606fedSMatthew Wilcox  * if no entry is present.  Inserting will fail if a reserved entry is
1797b0606fedSMatthew Wilcox  * present, even though loading from this index will return NULL.
1798b0606fedSMatthew Wilcox  *
1799b0606fedSMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.  May
1800b0606fedSMatthew Wilcox  * release and reacquire xa_lock if @gfp flags permit.
1801fd9dc93eSMatthew Wilcox  * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
1802b0606fedSMatthew Wilcox  * -ENOMEM if memory could not be allocated.
1803b0606fedSMatthew Wilcox  */
__xa_insert(struct xarray * xa,unsigned long index,void * entry,gfp_t gfp)1804b0606fedSMatthew Wilcox int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1805b0606fedSMatthew Wilcox {
1806b0606fedSMatthew Wilcox 	void *curr;
180779ada2aeSTamir Duberstein 	int errno;
1808b0606fedSMatthew Wilcox 
1809b0606fedSMatthew Wilcox 	if (!entry)
1810b0606fedSMatthew Wilcox 		entry = XA_ZERO_ENTRY;
181179ada2aeSTamir Duberstein 	curr = __xa_cmpxchg_raw(xa, index, NULL, entry, gfp);
181279ada2aeSTamir Duberstein 	errno = xa_err(curr);
181379ada2aeSTamir Duberstein 	if (errno)
181479ada2aeSTamir Duberstein 		return errno;
181579ada2aeSTamir Duberstein 	return (curr != NULL) ? -EBUSY : 0;
1816b0606fedSMatthew Wilcox }
1817b0606fedSMatthew Wilcox EXPORT_SYMBOL(__xa_insert);
1818b0606fedSMatthew Wilcox 
18190e9446c3SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI
/*
 * Set up @xas to store over the range @first..@last using the largest
 * multi-order entry that the alignment of @first permits.  On return,
 * xa_shift/xa_sibs describe the sub-range beginning at @first that a
 * single xas_store() will cover.
 */
static void xas_set_range(struct xa_state *xas, unsigned long first,
		unsigned long last)
{
	unsigned int shift = 0;
	unsigned long sibs = last - first;
	unsigned int offset = XA_CHUNK_MASK;

	xas_set(xas, first);

	/*
	 * While @first is aligned to a whole chunk at this level and the
	 * remaining range still spans at least a full chunk, move up one
	 * level of the tree.
	 */
	while ((first & XA_CHUNK_MASK) == 0) {
		if (sibs < XA_CHUNK_MASK)
			break;
		if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
			break;
		shift += XA_CHUNK_SHIFT;
		if (offset == XA_CHUNK_MASK)
			offset = sibs & XA_CHUNK_MASK;
		sibs >>= XA_CHUNK_SHIFT;
		first >>= XA_CHUNK_SHIFT;
	}

	/* Clamp the sibling count to what fits within this node ... */
	offset = first & XA_CHUNK_MASK;
	if (offset + sibs > XA_CHUNK_MASK)
		sibs = XA_CHUNK_MASK - offset;
	/* ... and shrink by one if the entry would extend past @last. */
	if ((((first + sibs + 1) << shift) - 1) > last)
		sibs -= 1;

	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
}
18500e9446c3SMatthew Wilcox 
/**
 * xa_store_range() - Store this entry at a range of indices in the XArray.
 * @xa: XArray.
 * @first: First index to affect.
 * @last: Last index to affect.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from any index between @first and @last,
 * inclusive will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Process context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
 * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
 */
void *xa_store_range(struct xarray *xa, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);
	if (last < first)
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		if (entry) {
			/*
			 * Pre-create the node which will hold @last.  The
			 * order is the power-of-two alignment of last + 1
			 * (trailing zero count), or BITS_PER_LONG when
			 * @last is ~0UL and last + 1 wraps to zero.
			 */
			unsigned int order = BITS_PER_LONG;
			if (last + 1)
				order = __ffs(last + 1);
			xas_set_order(&xas, last, order);
			xas_create(&xas, true);
			if (xas_error(&xas))
				goto unlock;
		}
		do {
			/* Store the range in chunks as large as the
			 * alignment of @first allows. */
			xas_set_range(&xas, first, last);
			xas_store(&xas, entry);
			if (xas_error(&xas))
				goto unlock;
			first += xas_size(&xas);
		} while (first <= last);
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);
190457417cebSMatthew Wilcox (Oracle) 
/**
 * xas_get_order() - Get the order of an entry.
 * @xas: XArray operation state.
 *
 * Called after xas_load, the xas should not be in an error state.
 *
 * Return: A number between 0 and 63 indicating the order of the entry.
 */
int xas_get_order(struct xa_state *xas)
{
	int order = 0;

	/* A load which terminated at the head has no node: order 0. */
	if (!xas->xa_node)
		return 0;

	/*
	 * Probe slots at power-of-two distances after this entry; every
	 * doubling that still lands on a sibling slot adds one to the
	 * order within this node.
	 */
	for (;;) {
		unsigned int slot = xas->xa_offset + (1 << order);

		if (slot >= XA_CHUNK_SIZE)
			break;
		if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot)))
			break;
		order++;
	}

	/* Each slot in a node at this shift spans 2^shift indices. */
	order += xas->xa_node->shift;
	return order;
}
EXPORT_SYMBOL_GPL(xas_get_order);
1934a4864671SKairui Song 
1935a4864671SKairui Song /**
193657417cebSMatthew Wilcox (Oracle)  * xa_get_order() - Get the order of an entry.
193757417cebSMatthew Wilcox (Oracle)  * @xa: XArray.
193857417cebSMatthew Wilcox (Oracle)  * @index: Index of the entry.
193957417cebSMatthew Wilcox (Oracle)  *
194057417cebSMatthew Wilcox (Oracle)  * Return: A number between 0 and 63 indicating the order of the entry.
194157417cebSMatthew Wilcox (Oracle)  */
xa_get_order(struct xarray * xa,unsigned long index)194257417cebSMatthew Wilcox (Oracle) int xa_get_order(struct xarray *xa, unsigned long index)
194357417cebSMatthew Wilcox (Oracle) {
194457417cebSMatthew Wilcox (Oracle) 	XA_STATE(xas, xa, index);
194557417cebSMatthew Wilcox (Oracle) 	int order = 0;
1946a4864671SKairui Song 	void *entry;
194757417cebSMatthew Wilcox (Oracle) 
194857417cebSMatthew Wilcox (Oracle) 	rcu_read_lock();
194957417cebSMatthew Wilcox (Oracle) 	entry = xas_load(&xas);
1950a4864671SKairui Song 	if (entry)
1951a4864671SKairui Song 		order = xas_get_order(&xas);
195257417cebSMatthew Wilcox (Oracle) 	rcu_read_unlock();
195357417cebSMatthew Wilcox (Oracle) 
195457417cebSMatthew Wilcox (Oracle) 	return order;
195557417cebSMatthew Wilcox (Oracle) }
195657417cebSMatthew Wilcox (Oracle) EXPORT_SYMBOL(xa_get_order);
19570e9446c3SMatthew Wilcox #endif /* CONFIG_XARRAY_MULTI */
19580e9446c3SMatthew Wilcox 
/**
 * __xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @limit: Range for allocated ID.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
 * in xa_init_flags().
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);

	/* Internal entries of the advanced API cannot be allocated ... */
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;
	/* ... and allocation requires free-entry tracking to be enabled. */
	if (WARN_ON_ONCE(!xa_track_free(xa)))
		return -EINVAL;

	/* Allocating NULL reserves the index with a zero entry. */
	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		xas.xa_index = limit.min;
		xas_find_marked(&xas, limit.max, XA_FREE_MARK);
		/* No slot with the free mark in range means the array
		 * (or at least this limit) is full. */
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);
		else
			*id = xas.xa_index;
		/* *id is assigned before the store, so a concurrent
		 * lookup cannot see the entry with a stale *id. */
		xas_store(&xas, entry);
		xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_alloc);
2006371c752dSMatthew Wilcox 
/**
 * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
 * in xa_init_flags().
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping.  1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	u32 min = limit.min;
	int ret;

	/* First try from the cyclic cursor up to limit.max. */
	limit.min = max(min, *next);
	ret = __xa_alloc(xa, id, entry, limit, gfp);
	if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
		/* The cursor wrapped on an earlier call; report 1 once. */
		xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
		ret = 1;
	}

	/* Nothing free above the cursor: wrap and retry from the bottom. */
	if (ret < 0 && limit.min > min) {
		limit.min = min;
		ret = __xa_alloc(xa, id, entry, limit, gfp);
		if (ret == 0)
			ret = 1;
	}

	if (ret >= 0) {
		*next = *id + 1;
		/* Cursor overflowed to 0: remember the wrap for next time. */
		if (*next == 0)
			xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
	}
	return ret;
}
EXPORT_SYMBOL(__xa_alloc_cyclic);
20592fa044e5SMatthew Wilcox 
20602fa044e5SMatthew Wilcox /**
20619b89a035SMatthew Wilcox  * __xa_set_mark() - Set this mark on this entry while locked.
20629b89a035SMatthew Wilcox  * @xa: XArray.
20639b89a035SMatthew Wilcox  * @index: Index of entry.
20649b89a035SMatthew Wilcox  * @mark: Mark number.
20659b89a035SMatthew Wilcox  *
2066804dfaf0SMatthew Wilcox  * Attempting to set a mark on a %NULL entry does not succeed.
20679b89a035SMatthew Wilcox  *
20689b89a035SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.
20699b89a035SMatthew Wilcox  */
__xa_set_mark(struct xarray * xa,unsigned long index,xa_mark_t mark)20709b89a035SMatthew Wilcox void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
20719b89a035SMatthew Wilcox {
20729b89a035SMatthew Wilcox 	XA_STATE(xas, xa, index);
20739b89a035SMatthew Wilcox 	void *entry = xas_load(&xas);
20749b89a035SMatthew Wilcox 
20759b89a035SMatthew Wilcox 	if (entry)
20769b89a035SMatthew Wilcox 		xas_set_mark(&xas, mark);
20779b89a035SMatthew Wilcox }
20789ee5a3b7SMatthew Wilcox EXPORT_SYMBOL(__xa_set_mark);
20799b89a035SMatthew Wilcox 
20809b89a035SMatthew Wilcox /**
20819b89a035SMatthew Wilcox  * __xa_clear_mark() - Clear this mark on this entry while locked.
20829b89a035SMatthew Wilcox  * @xa: XArray.
20839b89a035SMatthew Wilcox  * @index: Index of entry.
20849b89a035SMatthew Wilcox  * @mark: Mark number.
20859b89a035SMatthew Wilcox  *
20869b89a035SMatthew Wilcox  * Context: Any context.  Expects xa_lock to be held on entry.
20879b89a035SMatthew Wilcox  */
__xa_clear_mark(struct xarray * xa,unsigned long index,xa_mark_t mark)20889b89a035SMatthew Wilcox void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
20899b89a035SMatthew Wilcox {
20909b89a035SMatthew Wilcox 	XA_STATE(xas, xa, index);
20919b89a035SMatthew Wilcox 	void *entry = xas_load(&xas);
20929b89a035SMatthew Wilcox 
20939b89a035SMatthew Wilcox 	if (entry)
20949b89a035SMatthew Wilcox 		xas_clear_mark(&xas, mark);
20959b89a035SMatthew Wilcox }
20969ee5a3b7SMatthew Wilcox EXPORT_SYMBOL(__xa_clear_mark);
20979b89a035SMatthew Wilcox 
20989b89a035SMatthew Wilcox /**
20999b89a035SMatthew Wilcox  * xa_get_mark() - Inquire whether this mark is set on this entry.
21009b89a035SMatthew Wilcox  * @xa: XArray.
21019b89a035SMatthew Wilcox  * @index: Index of entry.
21029b89a035SMatthew Wilcox  * @mark: Mark number.
21039b89a035SMatthew Wilcox  *
21049b89a035SMatthew Wilcox  * This function uses the RCU read lock, so the result may be out of date
21059b89a035SMatthew Wilcox  * by the time it returns.  If you need the result to be stable, use a lock.
21069b89a035SMatthew Wilcox  *
21079b89a035SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
21089b89a035SMatthew Wilcox  * Return: True if the entry at @index has this mark set, false if it doesn't.
21099b89a035SMatthew Wilcox  */
xa_get_mark(struct xarray * xa,unsigned long index,xa_mark_t mark)21109b89a035SMatthew Wilcox bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
21119b89a035SMatthew Wilcox {
21129b89a035SMatthew Wilcox 	XA_STATE(xas, xa, index);
21139b89a035SMatthew Wilcox 	void *entry;
21149b89a035SMatthew Wilcox 
21159b89a035SMatthew Wilcox 	rcu_read_lock();
21169b89a035SMatthew Wilcox 	entry = xas_start(&xas);
21179b89a035SMatthew Wilcox 	while (xas_get_mark(&xas, mark)) {
21189b89a035SMatthew Wilcox 		if (!xa_is_node(entry))
21199b89a035SMatthew Wilcox 			goto found;
21209b89a035SMatthew Wilcox 		entry = xas_descend(&xas, xa_to_node(entry));
21219b89a035SMatthew Wilcox 	}
21229b89a035SMatthew Wilcox 	rcu_read_unlock();
21239b89a035SMatthew Wilcox 	return false;
21249b89a035SMatthew Wilcox  found:
21259b89a035SMatthew Wilcox 	rcu_read_unlock();
21269b89a035SMatthew Wilcox 	return true;
21279b89a035SMatthew Wilcox }
21289b89a035SMatthew Wilcox EXPORT_SYMBOL(xa_get_mark);
21299b89a035SMatthew Wilcox 
/**
 * xa_set_mark() - Set this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	/* The locked variant does the work; this wrapper just takes the lock. */
	xa_lock(xa);
	__xa_set_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);
21479b89a035SMatthew Wilcox 
/**
 * xa_clear_mark() - Clear this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Clearing a mark always succeeds.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	/* The locked variant does the work; this wrapper just takes the lock. */
	xa_lock(xa);
	__xa_clear_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);
21659b89a035SMatthew Wilcox 
/**
 * xa_find() - Search the XArray for an entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter, and has the lowest
 * index that is at least @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may not find
 * entries which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		/* A small filter value is a mark number; anything larger
		 * (e.g. XA_PRESENT) selects any non-NULL entry. */
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
	} while (xas_retry(&xas, entry));	/* restart on retry entries */
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find);
2203b803b428SMatthew Wilcox 
/*
 * Return true if the current position lies within a multi-index entry
 * at other than its first index — i.e. the low bits of xa_index point
 * past the slot the walk actually landed on.  Only possible when
 * CONFIG_XARRAY_MULTI is enabled.
 */
static bool xas_sibling(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;
	unsigned long mask;

	if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
		return false;
	/* mask covers the index bits spanned by one node at this level */
	mask = (XA_CHUNK_SIZE << node->shift) - 1;
	return (xas->xa_index & mask) >
		((unsigned long)xas->xa_offset << node->shift);
}
221519c30f4dSMatthew Wilcox (Oracle) 
/**
 * xa_find_after() - Search the XArray for a present entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter and has the lowest
 * index that is above @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may miss entries
 * which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The pointer, if found, otherwise %NULL.
 */
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp + 1);
	void *entry;

	/* *indexp was ULONG_MAX: the +1 wrapped; nothing lies after it. */
	if (xas.xa_index == 0)
		return NULL;

	rcu_read_lock();
	for (;;) {
		/* Small filter values are mark numbers; others mean
		 * "any present entry". */
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);

		if (xas_invalid(&xas))
			break;
		/*
		 * A multi-index entry reached through a sibling slot
		 * begins at a lower index than where we started looking;
		 * skip it and keep searching.
		 */
		if (xas_sibling(&xas))
			continue;
		if (!xas_retry(&xas, entry))
			break;
	}
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find_after);
2263b803b428SMatthew Wilcox 
xas_extract_present(struct xa_state * xas,void ** dst,unsigned long max,unsigned int n)226480a0a1a9SMatthew Wilcox static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
226580a0a1a9SMatthew Wilcox 			unsigned long max, unsigned int n)
226680a0a1a9SMatthew Wilcox {
226780a0a1a9SMatthew Wilcox 	void *entry;
226880a0a1a9SMatthew Wilcox 	unsigned int i = 0;
226980a0a1a9SMatthew Wilcox 
227080a0a1a9SMatthew Wilcox 	rcu_read_lock();
227180a0a1a9SMatthew Wilcox 	xas_for_each(xas, entry, max) {
227280a0a1a9SMatthew Wilcox 		if (xas_retry(xas, entry))
227380a0a1a9SMatthew Wilcox 			continue;
227480a0a1a9SMatthew Wilcox 		dst[i++] = entry;
227580a0a1a9SMatthew Wilcox 		if (i == n)
227680a0a1a9SMatthew Wilcox 			break;
227780a0a1a9SMatthew Wilcox 	}
227880a0a1a9SMatthew Wilcox 	rcu_read_unlock();
227980a0a1a9SMatthew Wilcox 
228080a0a1a9SMatthew Wilcox 	return i;
228180a0a1a9SMatthew Wilcox }
228280a0a1a9SMatthew Wilcox 
/*
 * Copy up to @n entries bearing @mark, no higher than @max, into @dst.
 * Returns the number of entries copied.
 */
static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
			unsigned long max, unsigned int n, xa_mark_t mark)
{
	unsigned int count = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each_marked(xas, entry, max, mark) {
		if (xas_retry(xas, entry))
			continue;
		dst[count] = entry;
		if (++count == n)
			break;
	}
	rcu_read_unlock();

	return count;
}
230180a0a1a9SMatthew Wilcox 
230280a0a1a9SMatthew Wilcox /**
230380a0a1a9SMatthew Wilcox  * xa_extract() - Copy selected entries from the XArray into a normal array.
230480a0a1a9SMatthew Wilcox  * @xa: The source XArray to copy from.
230580a0a1a9SMatthew Wilcox  * @dst: The buffer to copy entries into.
230680a0a1a9SMatthew Wilcox  * @start: The first index in the XArray eligible to be selected.
230780a0a1a9SMatthew Wilcox  * @max: The last index in the XArray eligible to be selected.
230880a0a1a9SMatthew Wilcox  * @n: The maximum number of entries to copy.
230980a0a1a9SMatthew Wilcox  * @filter: Selection criterion.
231080a0a1a9SMatthew Wilcox  *
231180a0a1a9SMatthew Wilcox  * Copies up to @n entries that match @filter from the XArray.  The
231280a0a1a9SMatthew Wilcox  * copied entries will have indices between @start and @max, inclusive.
231380a0a1a9SMatthew Wilcox  *
231480a0a1a9SMatthew Wilcox  * The @filter may be an XArray mark value, in which case entries which are
231580a0a1a9SMatthew Wilcox  * marked with that mark will be copied.  It may also be %XA_PRESENT, in
2316804dfaf0SMatthew Wilcox  * which case all entries which are not %NULL will be copied.
231780a0a1a9SMatthew Wilcox  *
231880a0a1a9SMatthew Wilcox  * The entries returned may not represent a snapshot of the XArray at a
231980a0a1a9SMatthew Wilcox  * moment in time.  For example, if another thread stores to index 5, then
232080a0a1a9SMatthew Wilcox  * index 10, calling xa_extract() may return the old contents of index 5
232180a0a1a9SMatthew Wilcox  * and the new contents of index 10.  Indices not modified while this
232280a0a1a9SMatthew Wilcox  * function is running will not be skipped.
232380a0a1a9SMatthew Wilcox  *
232480a0a1a9SMatthew Wilcox  * If you need stronger guarantees, holding the xa_lock across calls to this
232580a0a1a9SMatthew Wilcox  * function will prevent concurrent modification.
232680a0a1a9SMatthew Wilcox  *
232780a0a1a9SMatthew Wilcox  * Context: Any context.  Takes and releases the RCU lock.
232880a0a1a9SMatthew Wilcox  * Return: The number of entries copied.
232980a0a1a9SMatthew Wilcox  */
xa_extract(struct xarray * xa,void ** dst,unsigned long start,unsigned long max,unsigned int n,xa_mark_t filter)233080a0a1a9SMatthew Wilcox unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
233180a0a1a9SMatthew Wilcox 			unsigned long max, unsigned int n, xa_mark_t filter)
233280a0a1a9SMatthew Wilcox {
233380a0a1a9SMatthew Wilcox 	XA_STATE(xas, xa, start);
233480a0a1a9SMatthew Wilcox 
233580a0a1a9SMatthew Wilcox 	if (!n)
233680a0a1a9SMatthew Wilcox 		return 0;
233780a0a1a9SMatthew Wilcox 
233880a0a1a9SMatthew Wilcox 	if ((__force unsigned int)filter < XA_MAX_MARKS)
233980a0a1a9SMatthew Wilcox 		return xas_extract_marked(&xas, dst, max, n, filter);
234080a0a1a9SMatthew Wilcox 	return xas_extract_present(&xas, dst, max, n);
234180a0a1a9SMatthew Wilcox }
234280a0a1a9SMatthew Wilcox EXPORT_SYMBOL(xa_extract);
234380a0a1a9SMatthew Wilcox 
2344687149fcSMatthew Wilcox /**
2345f82cd2f0SMatthew Wilcox (Oracle)  * xa_delete_node() - Private interface for workingset code.
2346f82cd2f0SMatthew Wilcox (Oracle)  * @node: Node to be removed from the tree.
2347f82cd2f0SMatthew Wilcox (Oracle)  * @update: Function to call to update ancestor nodes.
2348f82cd2f0SMatthew Wilcox (Oracle)  *
2349f82cd2f0SMatthew Wilcox (Oracle)  * Context: xa_lock must be held on entry and will not be released.
2350f82cd2f0SMatthew Wilcox (Oracle)  */
xa_delete_node(struct xa_node * node,xa_update_node_t update)2351f82cd2f0SMatthew Wilcox (Oracle) void xa_delete_node(struct xa_node *node, xa_update_node_t update)
2352f82cd2f0SMatthew Wilcox (Oracle) {
2353f82cd2f0SMatthew Wilcox (Oracle) 	struct xa_state xas = {
2354f82cd2f0SMatthew Wilcox (Oracle) 		.xa = node->array,
2355f82cd2f0SMatthew Wilcox (Oracle) 		.xa_index = (unsigned long)node->offset <<
2356f82cd2f0SMatthew Wilcox (Oracle) 				(node->shift + XA_CHUNK_SHIFT),
2357f82cd2f0SMatthew Wilcox (Oracle) 		.xa_shift = node->shift + XA_CHUNK_SHIFT,
2358f82cd2f0SMatthew Wilcox (Oracle) 		.xa_offset = node->offset,
2359f82cd2f0SMatthew Wilcox (Oracle) 		.xa_node = xa_parent_locked(node->array, node),
2360f82cd2f0SMatthew Wilcox (Oracle) 		.xa_update = update,
2361f82cd2f0SMatthew Wilcox (Oracle) 	};
2362f82cd2f0SMatthew Wilcox (Oracle) 
2363f82cd2f0SMatthew Wilcox (Oracle) 	xas_store(&xas, NULL);
2364f82cd2f0SMatthew Wilcox (Oracle) }
2365f82cd2f0SMatthew Wilcox (Oracle) EXPORT_SYMBOL_GPL(xa_delete_node);	/* For the benefit of the test suite */
2366f82cd2f0SMatthew Wilcox (Oracle) 
/**
 * xa_destroy() - Free all internal data structures.
 * @xa: XArray.
 *
 * After calling this function, the XArray is empty and has freed all memory
 * allocated for its internal data structures.  You are responsible for
 * freeing the objects referenced by the XArray.
 *
 * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
 */
void xa_destroy(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long flags;
	void *entry;

	xas.xa_node = NULL;
	xas_lock_irqsave(&xas, flags);
	/* Snapshot the head, then detach the whole tree before freeing it. */
	entry = xa_head_locked(xa);
	RCU_INIT_POINTER(xa->xa_head, NULL);
	xas_init_marks(&xas);
	/* A zero-busy array keeps XA_FREE_MARK set; clear it on teardown. */
	if (xa_zero_busy(xa))
		xa_mark_clear(xa, XA_FREE_MARK);
	/* lockdep checks we're still holding the lock in xas_free_nodes() */
	if (xa_is_node(entry))
		xas_free_nodes(&xas, xa_to_node(entry));
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(xa_destroy);
2396687149fcSMatthew Wilcox 
2397ad3d6c72SMatthew Wilcox #ifdef XA_DEBUG
xa_dump_node(const struct xa_node * node)2398ad3d6c72SMatthew Wilcox void xa_dump_node(const struct xa_node *node)
2399ad3d6c72SMatthew Wilcox {
2400ad3d6c72SMatthew Wilcox 	unsigned i, j;
2401ad3d6c72SMatthew Wilcox 
2402ad3d6c72SMatthew Wilcox 	if (!node)
2403ad3d6c72SMatthew Wilcox 		return;
2404ad3d6c72SMatthew Wilcox 	if ((unsigned long)node & 3) {
2405ad3d6c72SMatthew Wilcox 		pr_cont("node %px\n", node);
2406ad3d6c72SMatthew Wilcox 		return;
2407ad3d6c72SMatthew Wilcox 	}
2408ad3d6c72SMatthew Wilcox 
2409ad3d6c72SMatthew Wilcox 	pr_cont("node %px %s %d parent %px shift %d count %d values %d "
2410ad3d6c72SMatthew Wilcox 		"array %px list %px %px marks",
2411ad3d6c72SMatthew Wilcox 		node, node->parent ? "offset" : "max", node->offset,
2412ad3d6c72SMatthew Wilcox 		node->parent, node->shift, node->count, node->nr_values,
2413ad3d6c72SMatthew Wilcox 		node->array, node->private_list.prev, node->private_list.next);
2414ad3d6c72SMatthew Wilcox 	for (i = 0; i < XA_MAX_MARKS; i++)
2415ad3d6c72SMatthew Wilcox 		for (j = 0; j < XA_MARK_LONGS; j++)
2416ad3d6c72SMatthew Wilcox 			pr_cont(" %lx", node->marks[i][j]);
2417ad3d6c72SMatthew Wilcox 	pr_cont("\n");
2418ad3d6c72SMatthew Wilcox }
2419ad3d6c72SMatthew Wilcox 
xa_dump_index(unsigned long index,unsigned int shift)2420ad3d6c72SMatthew Wilcox void xa_dump_index(unsigned long index, unsigned int shift)
2421ad3d6c72SMatthew Wilcox {
2422ad3d6c72SMatthew Wilcox 	if (!shift)
2423ad3d6c72SMatthew Wilcox 		pr_info("%lu: ", index);
2424ad3d6c72SMatthew Wilcox 	else if (shift >= BITS_PER_LONG)
2425ad3d6c72SMatthew Wilcox 		pr_info("0-%lu: ", ~0UL);
2426ad3d6c72SMatthew Wilcox 	else
2427ad3d6c72SMatthew Wilcox 		pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
2428ad3d6c72SMatthew Wilcox }
2429ad3d6c72SMatthew Wilcox 
xa_dump_entry(const void * entry,unsigned long index,unsigned long shift)2430ad3d6c72SMatthew Wilcox void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
2431ad3d6c72SMatthew Wilcox {
2432ad3d6c72SMatthew Wilcox 	if (!entry)
2433ad3d6c72SMatthew Wilcox 		return;
2434ad3d6c72SMatthew Wilcox 
2435ad3d6c72SMatthew Wilcox 	xa_dump_index(index, shift);
2436ad3d6c72SMatthew Wilcox 
2437ad3d6c72SMatthew Wilcox 	if (xa_is_node(entry)) {
2438ad3d6c72SMatthew Wilcox 		if (shift == 0) {
2439ad3d6c72SMatthew Wilcox 			pr_cont("%px\n", entry);
2440ad3d6c72SMatthew Wilcox 		} else {
2441ad3d6c72SMatthew Wilcox 			unsigned long i;
2442ad3d6c72SMatthew Wilcox 			struct xa_node *node = xa_to_node(entry);
2443ad3d6c72SMatthew Wilcox 			xa_dump_node(node);
2444ad3d6c72SMatthew Wilcox 			for (i = 0; i < XA_CHUNK_SIZE; i++)
2445ad3d6c72SMatthew Wilcox 				xa_dump_entry(node->slots[i],
2446ad3d6c72SMatthew Wilcox 				      index + (i << node->shift), node->shift);
2447ad3d6c72SMatthew Wilcox 		}
2448ad3d6c72SMatthew Wilcox 	} else if (xa_is_value(entry))
2449ad3d6c72SMatthew Wilcox 		pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
2450ad3d6c72SMatthew Wilcox 						xa_to_value(entry), entry);
2451ad3d6c72SMatthew Wilcox 	else if (!xa_is_internal(entry))
2452ad3d6c72SMatthew Wilcox 		pr_cont("%px\n", entry);
2453ad3d6c72SMatthew Wilcox 	else if (xa_is_retry(entry))
2454ad3d6c72SMatthew Wilcox 		pr_cont("retry (%ld)\n", xa_to_internal(entry));
2455ad3d6c72SMatthew Wilcox 	else if (xa_is_sibling(entry))
2456ad3d6c72SMatthew Wilcox 		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
24579f14d4f1SMatthew Wilcox 	else if (xa_is_zero(entry))
24589f14d4f1SMatthew Wilcox 		pr_cont("zero (%ld)\n", xa_to_internal(entry));
2459ad3d6c72SMatthew Wilcox 	else
2460ad3d6c72SMatthew Wilcox 		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
2461ad3d6c72SMatthew Wilcox }
2462ad3d6c72SMatthew Wilcox 
xa_dump(const struct xarray * xa)2463ad3d6c72SMatthew Wilcox void xa_dump(const struct xarray *xa)
2464ad3d6c72SMatthew Wilcox {
2465ad3d6c72SMatthew Wilcox 	void *entry = xa->xa_head;
2466ad3d6c72SMatthew Wilcox 	unsigned int shift = 0;
2467ad3d6c72SMatthew Wilcox 
2468ad3d6c72SMatthew Wilcox 	pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
24699b89a035SMatthew Wilcox 			xa->xa_flags, xa_marked(xa, XA_MARK_0),
24709b89a035SMatthew Wilcox 			xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
2471ad3d6c72SMatthew Wilcox 	if (xa_is_node(entry))
2472ad3d6c72SMatthew Wilcox 		shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
2473ad3d6c72SMatthew Wilcox 	xa_dump_entry(entry, 0, shift);
2474ad3d6c72SMatthew Wilcox }
2475ad3d6c72SMatthew Wilcox #endif
2476