xref: /linux-6.15/lib/test_xarray.c (revision 4e99d4e9)
1ad3d6c72SMatthew Wilcox // SPDX-License-Identifier: GPL-2.0+
2ad3d6c72SMatthew Wilcox /*
3ad3d6c72SMatthew Wilcox  * test_xarray.c: Test the XArray API
4ad3d6c72SMatthew Wilcox  * Copyright (c) 2017-2018 Microsoft Corporation
5ad3d6c72SMatthew Wilcox  * Author: Matthew Wilcox <[email protected]>
6ad3d6c72SMatthew Wilcox  */
7ad3d6c72SMatthew Wilcox 
8ad3d6c72SMatthew Wilcox #include <linux/xarray.h>
9ad3d6c72SMatthew Wilcox #include <linux/module.h>
10ad3d6c72SMatthew Wilcox 
/* Aggregate pass/fail counters; every XA_BUG_ON() bumps tests_run. */
static unsigned int tests_run;
static unsigned int tests_passed;
13ad3d6c72SMatthew Wilcox 
#ifndef XA_DEBUG
# ifdef __KERNEL__
/* Stub: the real dump routine is only built when XA_DEBUG is enabled. */
void xa_dump(const struct xarray *xa) { }
# endif
#undef XA_BUG_ON
/*
 * Test-suite assertion: counts every check in tests_run.  On failure it
 * prints the location, dumps the array and the stack but keeps running,
 * so the remaining tests still execute; on success it counts a pass.
 */
#define XA_BUG_ON(xa, x) do {					\
	tests_run++;						\
	if (x) {						\
		printk("BUG at %s:%d\n", __func__, __LINE__);	\
		xa_dump(xa);					\
		dump_stack();					\
	} else {						\
		tests_passed++;					\
	}							\
} while (0)
#endif
30ad3d6c72SMatthew Wilcox 
31ad3d6c72SMatthew Wilcox static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
32ad3d6c72SMatthew Wilcox {
3358d6ea30SMatthew Wilcox 	return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
34ad3d6c72SMatthew Wilcox }
35ad3d6c72SMatthew Wilcox 
36ad3d6c72SMatthew Wilcox static void xa_erase_index(struct xarray *xa, unsigned long index)
37ad3d6c72SMatthew Wilcox {
3858d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
3958d6ea30SMatthew Wilcox 	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
4058d6ea30SMatthew Wilcox }
4158d6ea30SMatthew Wilcox 
/*
 * If anyone needs this, please move it to xarray.c.  We have no current
 * users outside the test suite because all current multislot users want
 * to use the advanced API.
 */
/*
 * Store @entry over the 1 << @order slots covering @index, repeating the
 * store after xas_nomem() allocates memory, as the advanced API requires.
 * Returns the entry previously at @index.
 */
static void *xa_store_order(struct xarray *xa, unsigned long index,
		unsigned order, void *entry, gfp_t gfp)
{
	XA_STATE_ORDER(xas, xa, index, order);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}
6158d6ea30SMatthew Wilcox 
/* Check that xa_err() extracts error codes from store/erase return values. */
static noinline void check_xa_err(struct xarray *xa)
{
	/* Successful store and erase report no error */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
#ifndef __KERNEL__
	/* The kernel does not fail GFP_NOWAIT allocations */
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
#endif
	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
// kills the test-suite :-(
//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
}
77ad3d6c72SMatthew Wilcox 
/* Check handling of retry entries, both observed under RCU and stored directly. */
static noinline void check_xas_retry(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, 1, GFP_KERNEL);

	rcu_read_lock();
	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
	/* After this erase, reloading through the cached node sees a retry entry */
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
	/* xas_retry() must only restart the walk for genuine retry entries */
	XA_BUG_ON(xa, xas_retry(&xas, NULL));
	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
	xas_reset(&xas);
	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_node != NULL);

	/* Re-adding index 1 grows the tree; reload now sees an internal entry */
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
	xas.xa_node = XAS_RESTART;
	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
	rcu_read_unlock();

	/* Make sure we can iterate through retry entries */
	xas_lock(&xas);
	xas_set(&xas, 0);
	xas_store(&xas, XA_RETRY_ENTRY);
	xas_set(&xas, 1);
	xas_store(&xas, XA_RETRY_ENTRY);

	xas_set(&xas, 0);
	xas_for_each(&xas, entry, ULONG_MAX) {
		xas_store(&xas, xa_mk_value(xas.xa_index));
	}
	xas_unlock(&xas);

	xa_erase_index(xa, 0);
	xa_erase_index(xa, 1);
}
119b803b428SMatthew Wilcox 
120ad3d6c72SMatthew Wilcox static noinline void check_xa_load(struct xarray *xa)
121ad3d6c72SMatthew Wilcox {
122ad3d6c72SMatthew Wilcox 	unsigned long i, j;
123ad3d6c72SMatthew Wilcox 
124ad3d6c72SMatthew Wilcox 	for (i = 0; i < 1024; i++) {
125ad3d6c72SMatthew Wilcox 		for (j = 0; j < 1024; j++) {
126ad3d6c72SMatthew Wilcox 			void *entry = xa_load(xa, j);
127ad3d6c72SMatthew Wilcox 			if (j < i)
128ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, xa_to_value(entry) != j);
129ad3d6c72SMatthew Wilcox 			else
130ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, entry);
131ad3d6c72SMatthew Wilcox 		}
132ad3d6c72SMatthew Wilcox 		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
133ad3d6c72SMatthew Wilcox 	}
134ad3d6c72SMatthew Wilcox 
135ad3d6c72SMatthew Wilcox 	for (i = 0; i < 1024; i++) {
136ad3d6c72SMatthew Wilcox 		for (j = 0; j < 1024; j++) {
137ad3d6c72SMatthew Wilcox 			void *entry = xa_load(xa, j);
138ad3d6c72SMatthew Wilcox 			if (j >= i)
139ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, xa_to_value(entry) != j);
140ad3d6c72SMatthew Wilcox 			else
141ad3d6c72SMatthew Wilcox 				XA_BUG_ON(xa, entry);
142ad3d6c72SMatthew Wilcox 		}
143ad3d6c72SMatthew Wilcox 		xa_erase_index(xa, i);
144ad3d6c72SMatthew Wilcox 	}
145ad3d6c72SMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
146ad3d6c72SMatthew Wilcox }
147ad3d6c72SMatthew Wilcox 
/* Check mark get/set/clear behaviour around the entry at @index (index % 4 == 0). */
static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;

	/* NULL elements have no marks set */
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/* Storing a pointer will not make a mark appear */
	XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

	/* Setting one mark will not set another mark */
	XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));

	/* Storing NULL clears marks, and they can't be set again */
	xa_erase_index(xa, index);
	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
	xa_set_mark(xa, index, XA_MARK_0);
	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));

	/*
	 * Storing a multi-index entry over entries with marks gives the
	 * entire entry the union of the marks
	 */
	BUG_ON((index % 4) != 0);
	for (order = 2; order < max_order; order++) {
		unsigned long base = round_down(index, 1UL << order);
		unsigned long next = base + (1UL << order);
		unsigned long i;

		/* Two differently-marked entries inside the range, one just past it */
		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
		xa_set_mark(xa, index + 1, XA_MARK_0);
		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
		xa_set_mark(xa, index + 2, XA_MARK_1);
		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
		xa_store_order(xa, index, order, xa_mk_value(index),
				GFP_KERNEL);
		/* Every index in the range now carries both marks, never XA_MARK_2 */
		for (i = base; i < next; i++) {
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
		}
		/* The entry just past the range picked up no marks */
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
		xa_erase_index(xa, index);
		xa_erase_index(xa, next);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}
2069b89a035SMatthew Wilcox 
2079b89a035SMatthew Wilcox static noinline void check_xa_mark(struct xarray *xa)
2089b89a035SMatthew Wilcox {
2099b89a035SMatthew Wilcox 	unsigned long index;
2109b89a035SMatthew Wilcox 
2119b89a035SMatthew Wilcox 	for (index = 0; index < 16384; index += 4)
2129b89a035SMatthew Wilcox 		check_xa_mark_1(xa, index);
2139b89a035SMatthew Wilcox }
2149b89a035SMatthew Wilcox 
/* Check that erasing the last sibling of an entry collapses the tree. */
static noinline void check_xa_shrink(struct xarray *xa)
{
	XA_STATE(xas, xa, 1);
	struct xa_node *node;

	XA_BUG_ON(xa, !xa_empty(xa));
	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);

	/*
	 * Check that erasing the entry at 1 shrinks the tree and properly
	 * marks the node as being deleted.
	 */
	xas_lock(&xas);
	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
	node = xas.xa_node;
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
	/* The xa_state is now out of bounds; the old node's slot 0 holds a retry entry */
	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
	XA_BUG_ON(xa, xas_load(&xas) != NULL);
	xas_unlock(&xas);
	/* The entry at 0 survived the shrink */
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	xa_erase_index(xa, 0);
	XA_BUG_ON(xa, !xa_empty(xa));
}
24258d6ea30SMatthew Wilcox 
24341aec91fSMatthew Wilcox static noinline void check_cmpxchg(struct xarray *xa)
24441aec91fSMatthew Wilcox {
24541aec91fSMatthew Wilcox 	void *FIVE = xa_mk_value(5);
24641aec91fSMatthew Wilcox 	void *SIX = xa_mk_value(6);
24741aec91fSMatthew Wilcox 	void *LOTS = xa_mk_value(12345678);
24841aec91fSMatthew Wilcox 
24941aec91fSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
25041aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
25141aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST);
25241aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
25341aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
25441aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
25541aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
25641aec91fSMatthew Wilcox 	XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
25741aec91fSMatthew Wilcox 	xa_erase_index(xa, 12345678);
25841aec91fSMatthew Wilcox 	xa_erase_index(xa, 5);
25941aec91fSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
26041aec91fSMatthew Wilcox }
26141aec91fSMatthew Wilcox 
/*
 * Check that erasing entries with xas_store(&xas, NULL) while iterating
 * visits every entry exactly once and leaves the array empty.
 */
static noinline void check_xas_erase(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long i, j;

	for (i = 0; i < 200; i++) {
		/* Populate indices i .. 2*i+16 */
		for (j = i; j < 2 * i + 17; j++) {
			xas_set(&xas, j);
			do {
				xas_lock(&xas);
				xas_store(&xas, xa_mk_value(j));
				xas_unlock(&xas);
			} while (xas_nomem(&xas, GFP_KERNEL));
		}

		/* Store an entry at ULONG_MAX ... */
		xas_set(&xas, ULONG_MAX);
		do {
			xas_lock(&xas);
			xas_store(&xas, xa_mk_value(0));
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));

		/* ... then remove it again before iterating */
		xas_lock(&xas);
		xas_store(&xas, NULL);

		/* Erase each remaining entry as the iteration reaches it */
		xas_set(&xas, 0);
		j = i;
		xas_for_each(&xas, entry, ULONG_MAX) {
			XA_BUG_ON(xa, entry != xa_mk_value(j));
			xas_store(&xas, NULL);
			j++;
		}
		xas_unlock(&xas);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
}
299b803b428SMatthew Wilcox 
/* Check storing, overwriting and erasing multi-index (order > 0) entries. */
static noinline void check_multi_store(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long i, j, k;
	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;

	/* Loading from any position returns the same value */
	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	/* The order-1 entry occupies two slots but counts as two values */
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Storing adjacent to the value does not alter the value */
	xa_store(xa, 3, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
	rcu_read_lock();
	/* The pointer stored at 3 raises count but not nr_values */
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
	rcu_read_unlock();

	/* Overwriting multiple indexes works */
	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
	rcu_read_lock();
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
	rcu_read_unlock();

	/* We can erase multiple values with a single store */
	xa_store_order(xa, 0, 63, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Even when the first slot is empty but the others aren't */
	xa_store_index(xa, 1, GFP_KERNEL);
	xa_store_index(xa, 2, GFP_KERNEL);
	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Overlapping stores of every pair of orders: the second store wins */
	for (i = 0; i < max_order; i++) {
		for (j = 0; j < max_order; j++) {
			xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL);
			xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL);

			for (k = 0; k < max_order; k++) {
				void *entry = xa_load(xa, (1UL << k) - 1);
				if ((i < k) && (j < k))
					XA_BUG_ON(xa, entry != NULL);
				else
					XA_BUG_ON(xa, entry != xa_mk_value(j));
			}

			xa_erase(xa, 0);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
#endif
}
36758d6ea30SMatthew Wilcox 
/*
 * Store a multi-index entry of size 1 << @order at @start using
 * xas_for_each_conflict(), checking that exactly @present conflicting
 * value entries are visited, that each lies inside the range being
 * stored, and that the store then covers the whole range.
 */
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_value(start));
		XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_value(start));
	xas_unlock(&xas);
	if (xas_nomem(&xas, GFP_KERNEL)) {
		/* The whole iteration reruns after allocation, so recount */
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_value(start));
	xa_erase_index(xa, start);
}
396*4e99d4e9SMatthew Wilcox 
/* Exercise __check_store_iter() over a range of orders and occupancies. */
static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		/* Empty array: no conflicts to visit */
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		/* One entry at either end of the range: exactly one conflict */
		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		/* A fully-populated range: 'min' conflicts */
		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	/* The entry at 63 is outside the order-2 range at 64 and survives */
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}
434*4e99d4e9SMatthew Wilcox 
/* Check xa_find()/xa_find_after() against a multi-index entry. */
static noinline void check_multi_find(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long index;

	/* An order-2 entry covering 12-15, plus a single entry at 16 */
	xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	XA_BUG_ON(xa, index != 12);
	/* Searching from inside the multi-index entry still finds it */
	index = 13;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	XA_BUG_ON(xa, (index < 12) || (index >= 16));
	/* find_after skips the rest of the entry and lands on 16 */
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(16));
	XA_BUG_ON(xa, index != 16);

	xa_erase_index(xa, 12);
	xa_erase_index(xa, 16);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
460b803b428SMatthew Wilcox 
/*
 * Check that erasing a multi-index entry from inside an iteration over it
 * (under RCU) leaves the array consistent, for every order and offset.
 */
static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		for (j = 0; j < index; j++) {
			/* Start the walk at every offset within the entry */
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_value(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}
484b803b428SMatthew Wilcox 
/* Check xa_find() by presence and by mark over all small index pairs. */
static noinline void check_find(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
								XA_PRESENT);
				/* Only j and i are present (j < i) */
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				/* Searching by mark must agree, as both are marked */
				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
								XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			/* Erasing j clears its mark but leaves i's intact */
			xa_erase_index(xa, j);
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
	check_multi_find(xa);
	check_multi_find_2(xa);
}
534b803b428SMatthew Wilcox 
/*
 * Check xas_next()/xas_prev() over an array holding entries only at
 * index 0 and @idx, including wrap-around behaviour at ULONG_MAX.
 */
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	/* Walk forward well past @idx: only 0 and @idx yield entries */
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_value(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	/* And walk all the way back again */
	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_value(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	/* Stepping next from ULONG_MAX wraps to index 0; prev wraps back */
	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}
58264d3e9a9SMatthew Wilcox 
58364d3e9a9SMatthew Wilcox static noinline void check_move(struct xarray *xa)
58464d3e9a9SMatthew Wilcox {
58564d3e9a9SMatthew Wilcox 	XA_STATE(xas, xa, (1 << 16) - 1);
58664d3e9a9SMatthew Wilcox 	unsigned long i;
58764d3e9a9SMatthew Wilcox 
58864d3e9a9SMatthew Wilcox 	for (i = 0; i < (1 << 16); i++)
58964d3e9a9SMatthew Wilcox 		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
59064d3e9a9SMatthew Wilcox 
59164d3e9a9SMatthew Wilcox 	rcu_read_lock();
59264d3e9a9SMatthew Wilcox 	do {
59364d3e9a9SMatthew Wilcox 		void *entry = xas_prev(&xas);
59464d3e9a9SMatthew Wilcox 		i--;
59564d3e9a9SMatthew Wilcox 		XA_BUG_ON(xa, entry != xa_mk_value(i));
59664d3e9a9SMatthew Wilcox 		XA_BUG_ON(xa, i != xas.xa_index);
59764d3e9a9SMatthew Wilcox 	} while (i != 0);
59864d3e9a9SMatthew Wilcox 
59964d3e9a9SMatthew Wilcox 	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
60064d3e9a9SMatthew Wilcox 	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
60164d3e9a9SMatthew Wilcox 
60264d3e9a9SMatthew Wilcox 	do {
60364d3e9a9SMatthew Wilcox 		void *entry = xas_next(&xas);
60464d3e9a9SMatthew Wilcox 		XA_BUG_ON(xa, entry != xa_mk_value(i));
60564d3e9a9SMatthew Wilcox 		XA_BUG_ON(xa, i != xas.xa_index);
60664d3e9a9SMatthew Wilcox 		i++;
60764d3e9a9SMatthew Wilcox 	} while (i < (1 << 16));
60864d3e9a9SMatthew Wilcox 	rcu_read_unlock();
60964d3e9a9SMatthew Wilcox 
61064d3e9a9SMatthew Wilcox 	for (i = (1 << 8); i < (1 << 15); i++)
61164d3e9a9SMatthew Wilcox 		xa_erase_index(xa, i);
61264d3e9a9SMatthew Wilcox 
61364d3e9a9SMatthew Wilcox 	i = xas.xa_index;
61464d3e9a9SMatthew Wilcox 
61564d3e9a9SMatthew Wilcox 	rcu_read_lock();
61664d3e9a9SMatthew Wilcox 	do {
61764d3e9a9SMatthew Wilcox 		void *entry = xas_prev(&xas);
61864d3e9a9SMatthew Wilcox 		i--;
61964d3e9a9SMatthew Wilcox 		if ((i < (1 << 8)) || (i >= (1 << 15)))
62064d3e9a9SMatthew Wilcox 			XA_BUG_ON(xa, entry != xa_mk_value(i));
62164d3e9a9SMatthew Wilcox 		else
62264d3e9a9SMatthew Wilcox 			XA_BUG_ON(xa, entry != NULL);
62364d3e9a9SMatthew Wilcox 		XA_BUG_ON(xa, i != xas.xa_index);
62464d3e9a9SMatthew Wilcox 	} while (i != 0);
62564d3e9a9SMatthew Wilcox 
62664d3e9a9SMatthew Wilcox 	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
62764d3e9a9SMatthew Wilcox 	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
62864d3e9a9SMatthew Wilcox 
62964d3e9a9SMatthew Wilcox 	do {
63064d3e9a9SMatthew Wilcox 		void *entry = xas_next(&xas);
63164d3e9a9SMatthew Wilcox 		if ((i < (1 << 8)) || (i >= (1 << 15)))
63264d3e9a9SMatthew Wilcox 			XA_BUG_ON(xa, entry != xa_mk_value(i));
63364d3e9a9SMatthew Wilcox 		else
63464d3e9a9SMatthew Wilcox 			XA_BUG_ON(xa, entry != NULL);
63564d3e9a9SMatthew Wilcox 		XA_BUG_ON(xa, i != xas.xa_index);
63664d3e9a9SMatthew Wilcox 		i++;
63764d3e9a9SMatthew Wilcox 	} while (i < (1 << 16));
63864d3e9a9SMatthew Wilcox 	rcu_read_unlock();
63964d3e9a9SMatthew Wilcox 
64064d3e9a9SMatthew Wilcox 	xa_destroy(xa);
64164d3e9a9SMatthew Wilcox 
64264d3e9a9SMatthew Wilcox 	for (i = 0; i < 16; i++)
64364d3e9a9SMatthew Wilcox 		check_move_small(xa, 1UL << i);
64464d3e9a9SMatthew Wilcox 
64564d3e9a9SMatthew Wilcox 	for (i = 2; i < 16; i++)
64664d3e9a9SMatthew Wilcox 		check_move_small(xa, (1UL << i) - 1);
64764d3e9a9SMatthew Wilcox }
64864d3e9a9SMatthew Wilcox 
649687149fcSMatthew Wilcox static noinline void check_destroy(struct xarray *xa)
650687149fcSMatthew Wilcox {
651687149fcSMatthew Wilcox 	unsigned long index;
652687149fcSMatthew Wilcox 
653687149fcSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
654687149fcSMatthew Wilcox 
655687149fcSMatthew Wilcox 	/* Destroying an empty array is a no-op */
656687149fcSMatthew Wilcox 	xa_destroy(xa);
657687149fcSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
658687149fcSMatthew Wilcox 
659687149fcSMatthew Wilcox 	/* Destroying an array with a single entry */
660687149fcSMatthew Wilcox 	for (index = 0; index < 1000; index++) {
661687149fcSMatthew Wilcox 		xa_store_index(xa, index, GFP_KERNEL);
662687149fcSMatthew Wilcox 		XA_BUG_ON(xa, xa_empty(xa));
663687149fcSMatthew Wilcox 		xa_destroy(xa);
664687149fcSMatthew Wilcox 		XA_BUG_ON(xa, !xa_empty(xa));
665687149fcSMatthew Wilcox 	}
666687149fcSMatthew Wilcox 
667687149fcSMatthew Wilcox 	/* Destroying an array with a single entry at ULONG_MAX */
668687149fcSMatthew Wilcox 	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
669687149fcSMatthew Wilcox 	XA_BUG_ON(xa, xa_empty(xa));
670687149fcSMatthew Wilcox 	xa_destroy(xa);
671687149fcSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
672687149fcSMatthew Wilcox 
673687149fcSMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI
674687149fcSMatthew Wilcox 	/* Destroying an array with a multi-index entry */
675687149fcSMatthew Wilcox 	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
676687149fcSMatthew Wilcox 	XA_BUG_ON(xa, xa_empty(xa));
677687149fcSMatthew Wilcox 	xa_destroy(xa);
678687149fcSMatthew Wilcox 	XA_BUG_ON(xa, !xa_empty(xa));
679687149fcSMatthew Wilcox #endif
680687149fcSMatthew Wilcox }
681687149fcSMatthew Wilcox 
68258d6ea30SMatthew Wilcox static DEFINE_XARRAY(array);
683ad3d6c72SMatthew Wilcox 
684ad3d6c72SMatthew Wilcox static int xarray_checks(void)
685ad3d6c72SMatthew Wilcox {
68658d6ea30SMatthew Wilcox 	check_xa_err(&array);
687b803b428SMatthew Wilcox 	check_xas_retry(&array);
688ad3d6c72SMatthew Wilcox 	check_xa_load(&array);
6899b89a035SMatthew Wilcox 	check_xa_mark(&array);
69058d6ea30SMatthew Wilcox 	check_xa_shrink(&array);
691b803b428SMatthew Wilcox 	check_xas_erase(&array);
69241aec91fSMatthew Wilcox 	check_cmpxchg(&array);
69358d6ea30SMatthew Wilcox 	check_multi_store(&array);
694b803b428SMatthew Wilcox 	check_find(&array);
695687149fcSMatthew Wilcox 	check_destroy(&array);
69664d3e9a9SMatthew Wilcox 	check_move(&array);
697*4e99d4e9SMatthew Wilcox 	check_store_iter(&array);
698ad3d6c72SMatthew Wilcox 
699ad3d6c72SMatthew Wilcox 	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
700ad3d6c72SMatthew Wilcox 	return (tests_run == tests_passed) ? 0 : -EINVAL;
701ad3d6c72SMatthew Wilcox }
702ad3d6c72SMatthew Wilcox 
703ad3d6c72SMatthew Wilcox static void xarray_exit(void)
704ad3d6c72SMatthew Wilcox {
705ad3d6c72SMatthew Wilcox }
706ad3d6c72SMatthew Wilcox 
707ad3d6c72SMatthew Wilcox module_init(xarray_checks);
708ad3d6c72SMatthew Wilcox module_exit(xarray_exit);
709ad3d6c72SMatthew Wilcox MODULE_AUTHOR("Matthew Wilcox <[email protected]>");
710ad3d6c72SMatthew Wilcox MODULE_LICENSE("GPL");
711