1ad3d6c72SMatthew Wilcox // SPDX-License-Identifier: GPL-2.0+ 2ad3d6c72SMatthew Wilcox /* 3ad3d6c72SMatthew Wilcox * test_xarray.c: Test the XArray API 4ad3d6c72SMatthew Wilcox * Copyright (c) 2017-2018 Microsoft Corporation 5ad3d6c72SMatthew Wilcox * Author: Matthew Wilcox <[email protected]> 6ad3d6c72SMatthew Wilcox */ 7ad3d6c72SMatthew Wilcox 8ad3d6c72SMatthew Wilcox #include <linux/xarray.h> 9ad3d6c72SMatthew Wilcox #include <linux/module.h> 10ad3d6c72SMatthew Wilcox 11ad3d6c72SMatthew Wilcox static unsigned int tests_run; 12ad3d6c72SMatthew Wilcox static unsigned int tests_passed; 13ad3d6c72SMatthew Wilcox 14ad3d6c72SMatthew Wilcox #ifndef XA_DEBUG 15ad3d6c72SMatthew Wilcox # ifdef __KERNEL__ 16ad3d6c72SMatthew Wilcox void xa_dump(const struct xarray *xa) { } 17ad3d6c72SMatthew Wilcox # endif 18ad3d6c72SMatthew Wilcox #undef XA_BUG_ON 19ad3d6c72SMatthew Wilcox #define XA_BUG_ON(xa, x) do { \ 20ad3d6c72SMatthew Wilcox tests_run++; \ 21ad3d6c72SMatthew Wilcox if (x) { \ 22ad3d6c72SMatthew Wilcox printk("BUG at %s:%d\n", __func__, __LINE__); \ 23ad3d6c72SMatthew Wilcox xa_dump(xa); \ 24ad3d6c72SMatthew Wilcox dump_stack(); \ 25ad3d6c72SMatthew Wilcox } else { \ 26ad3d6c72SMatthew Wilcox tests_passed++; \ 27ad3d6c72SMatthew Wilcox } \ 28ad3d6c72SMatthew Wilcox } while (0) 29ad3d6c72SMatthew Wilcox #endif 30ad3d6c72SMatthew Wilcox 31b7677a13SMatthew Wilcox static void *xa_mk_index(unsigned long index) 32b7677a13SMatthew Wilcox { 33b7677a13SMatthew Wilcox return xa_mk_value(index & LONG_MAX); 34b7677a13SMatthew Wilcox } 35b7677a13SMatthew Wilcox 36ad3d6c72SMatthew Wilcox static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) 37ad3d6c72SMatthew Wilcox { 38b7677a13SMatthew Wilcox return xa_store(xa, index, xa_mk_index(index), gfp); 39ad3d6c72SMatthew Wilcox } 40ad3d6c72SMatthew Wilcox 41371c752dSMatthew Wilcox static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) 42371c752dSMatthew Wilcox { 43a3e4d3f9SMatthew Wilcox u32 id; 44371c752dSMatthew Wilcox 45a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b, 46371c752dSMatthew Wilcox gfp) != 0); 47371c752dSMatthew Wilcox XA_BUG_ON(xa, id != index); 48371c752dSMatthew Wilcox } 49371c752dSMatthew Wilcox 50ad3d6c72SMatthew Wilcox static void xa_erase_index(struct xarray *xa, unsigned long index) 51ad3d6c72SMatthew Wilcox { 52b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index)); 5358d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, index) != NULL); 5458d6ea30SMatthew Wilcox } 5558d6ea30SMatthew Wilcox 5658d6ea30SMatthew Wilcox /* 5758d6ea30SMatthew Wilcox * If anyone needs this, please move it to xarray.c. We have no current 5858d6ea30SMatthew Wilcox * users outside the test suite because all current multislot users want 5958d6ea30SMatthew Wilcox * to use the advanced API. 6058d6ea30SMatthew Wilcox */ 6158d6ea30SMatthew Wilcox static void *xa_store_order(struct xarray *xa, unsigned long index, 6258d6ea30SMatthew Wilcox unsigned order, void *entry, gfp_t gfp) 6358d6ea30SMatthew Wilcox { 6458d6ea30SMatthew Wilcox XA_STATE_ORDER(xas, xa, index, order); 6558d6ea30SMatthew Wilcox void *curr; 6658d6ea30SMatthew Wilcox 6758d6ea30SMatthew Wilcox do { 6858d6ea30SMatthew Wilcox xas_lock(&xas); 6958d6ea30SMatthew Wilcox curr = xas_store(&xas, entry); 7058d6ea30SMatthew Wilcox xas_unlock(&xas); 7158d6ea30SMatthew Wilcox } while (xas_nomem(&xas, gfp)); 7258d6ea30SMatthew Wilcox 7358d6ea30SMatthew Wilcox return curr; 7458d6ea30SMatthew Wilcox } 7558d6ea30SMatthew Wilcox 7658d6ea30SMatthew Wilcox static noinline void check_xa_err(struct xarray *xa) 7758d6ea30SMatthew Wilcox { 7858d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0); 7958d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0); 8058d6ea30SMatthew Wilcox #ifndef __KERNEL__ 8158d6ea30SMatthew Wilcox /* The kernel does not fail GFP_NOWAIT allocations */ 8258d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM); 8358d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM); 8458d6ea30SMatthew Wilcox #endif 8558d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0); 8658d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0); 8758d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0); 8858d6ea30SMatthew Wilcox // kills the test-suite :-( 8958d6ea30SMatthew Wilcox // XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL); 90ad3d6c72SMatthew Wilcox } 91ad3d6c72SMatthew Wilcox 92b803b428SMatthew Wilcox static noinline void check_xas_retry(struct xarray *xa) 93b803b428SMatthew Wilcox { 94b803b428SMatthew Wilcox XA_STATE(xas, xa, 0); 95b803b428SMatthew Wilcox void *entry; 96b803b428SMatthew Wilcox 97b803b428SMatthew Wilcox xa_store_index(xa, 0, GFP_KERNEL); 98b803b428SMatthew Wilcox xa_store_index(xa, 1, GFP_KERNEL); 99b803b428SMatthew Wilcox 100b803b428SMatthew Wilcox rcu_read_lock(); 101b803b428SMatthew Wilcox XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0)); 102b803b428SMatthew Wilcox xa_erase_index(xa, 1); 103b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas))); 104b803b428SMatthew Wilcox XA_BUG_ON(xa, xas_retry(&xas, NULL)); 105b803b428SMatthew Wilcox XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0))); 106b803b428SMatthew Wilcox xas_reset(&xas); 107b803b428SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node != XAS_RESTART); 108b803b428SMatthew Wilcox XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0)); 109b803b428SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node != NULL); 110bd54211bSMatthew Wilcox rcu_read_unlock(); 111b803b428SMatthew Wilcox 112b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL); 113bd54211bSMatthew Wilcox 114bd54211bSMatthew Wilcox rcu_read_lock(); 115b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas))); 116b803b428SMatthew Wilcox xas.xa_node = XAS_RESTART; 117b803b428SMatthew Wilcox XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0)); 118b803b428SMatthew Wilcox rcu_read_unlock(); 119b803b428SMatthew Wilcox 120b803b428SMatthew Wilcox /* Make sure we can iterate through retry entries */ 121b803b428SMatthew Wilcox xas_lock(&xas); 122b803b428SMatthew Wilcox xas_set(&xas, 0); 123b803b428SMatthew Wilcox xas_store(&xas, XA_RETRY_ENTRY); 124b803b428SMatthew Wilcox xas_set(&xas, 1); 125b803b428SMatthew Wilcox xas_store(&xas, XA_RETRY_ENTRY); 126b803b428SMatthew Wilcox 127b803b428SMatthew Wilcox xas_set(&xas, 0); 128b803b428SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 129b7677a13SMatthew Wilcox xas_store(&xas, xa_mk_index(xas.xa_index)); 130b803b428SMatthew Wilcox } 131b803b428SMatthew Wilcox xas_unlock(&xas); 132b803b428SMatthew Wilcox 133b803b428SMatthew Wilcox xa_erase_index(xa, 0); 134b803b428SMatthew Wilcox xa_erase_index(xa, 1); 135b803b428SMatthew Wilcox } 136b803b428SMatthew Wilcox 137ad3d6c72SMatthew Wilcox static noinline void check_xa_load(struct xarray *xa) 138ad3d6c72SMatthew Wilcox { 139ad3d6c72SMatthew Wilcox unsigned long i, j; 140ad3d6c72SMatthew Wilcox 141ad3d6c72SMatthew Wilcox for (i = 0; i < 1024; i++) { 142ad3d6c72SMatthew Wilcox for (j = 0; j < 1024; j++) { 143ad3d6c72SMatthew Wilcox void *entry = xa_load(xa, j); 144ad3d6c72SMatthew Wilcox if (j < i) 145ad3d6c72SMatthew Wilcox XA_BUG_ON(xa, xa_to_value(entry) != j); 146ad3d6c72SMatthew Wilcox else 147ad3d6c72SMatthew Wilcox XA_BUG_ON(xa, entry); 148ad3d6c72SMatthew Wilcox } 149ad3d6c72SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); 150ad3d6c72SMatthew Wilcox } 151ad3d6c72SMatthew Wilcox 152ad3d6c72SMatthew Wilcox for (i = 0; i < 1024; i++) { 153ad3d6c72SMatthew Wilcox for (j = 0; j < 1024; j++) { 154ad3d6c72SMatthew Wilcox void *entry = xa_load(xa, j); 155ad3d6c72SMatthew Wilcox if (j >= i) 156ad3d6c72SMatthew Wilcox XA_BUG_ON(xa, xa_to_value(entry) != j); 157ad3d6c72SMatthew Wilcox else 158ad3d6c72SMatthew Wilcox XA_BUG_ON(xa, entry); 159ad3d6c72SMatthew Wilcox } 160ad3d6c72SMatthew Wilcox xa_erase_index(xa, i); 161ad3d6c72SMatthew Wilcox } 162ad3d6c72SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 163ad3d6c72SMatthew Wilcox } 164ad3d6c72SMatthew Wilcox 1659b89a035SMatthew Wilcox static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) 1669b89a035SMatthew Wilcox { 16758d6ea30SMatthew Wilcox unsigned int order; 16858d6ea30SMatthew Wilcox unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1; 16958d6ea30SMatthew Wilcox 1709b89a035SMatthew Wilcox /* NULL elements have no marks set */ 1719b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 1729b89a035SMatthew Wilcox xa_set_mark(xa, index, XA_MARK_0); 1739b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 1749b89a035SMatthew Wilcox 1759b89a035SMatthew Wilcox /* Storing a pointer will not make a mark appear */ 1769b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL); 1779b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 1789b89a035SMatthew Wilcox xa_set_mark(xa, index, XA_MARK_0); 1799b89a035SMatthew Wilcox XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0)); 1809b89a035SMatthew Wilcox 1819b89a035SMatthew Wilcox /* Setting one mark will not set another mark */ 1829b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0)); 1839b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1)); 1849b89a035SMatthew Wilcox 1859b89a035SMatthew Wilcox /* Storing NULL clears marks, and they can't be set again */ 1869b89a035SMatthew Wilcox xa_erase_index(xa, index); 1879b89a035SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 1889b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 1899b89a035SMatthew Wilcox xa_set_mark(xa, index, XA_MARK_0); 1909b89a035SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); 19158d6ea30SMatthew Wilcox 19258d6ea30SMatthew Wilcox /* 19358d6ea30SMatthew Wilcox * Storing a multi-index entry over entries with marks gives the 19458d6ea30SMatthew Wilcox * entire entry the union of the marks 19558d6ea30SMatthew Wilcox */ 19658d6ea30SMatthew Wilcox BUG_ON((index % 4) != 0); 19758d6ea30SMatthew Wilcox for (order = 2; order < max_order; order++) { 19858d6ea30SMatthew Wilcox unsigned long base = round_down(index, 1UL << order); 19958d6ea30SMatthew Wilcox unsigned long next = base + (1UL << order); 20058d6ea30SMatthew Wilcox unsigned long i; 20158d6ea30SMatthew Wilcox 20258d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL)); 20358d6ea30SMatthew Wilcox xa_set_mark(xa, index + 1, XA_MARK_0); 20458d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); 205d69d287aSMatthew Wilcox xa_set_mark(xa, index + 2, XA_MARK_2); 20658d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); 207b7677a13SMatthew Wilcox xa_store_order(xa, index, order, xa_mk_index(index), 20858d6ea30SMatthew Wilcox GFP_KERNEL); 20958d6ea30SMatthew Wilcox for (i = base; i < next; i++) { 21093eb07f7SMatthew Wilcox XA_STATE(xas, xa, i); 21193eb07f7SMatthew Wilcox unsigned int seen = 0; 21293eb07f7SMatthew Wilcox void *entry; 21393eb07f7SMatthew Wilcox 21458d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); 215d69d287aSMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_1)); 216d69d287aSMatthew Wilcox XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_2)); 21793eb07f7SMatthew Wilcox 21893eb07f7SMatthew Wilcox /* We should see two elements in the array */ 219fffc9a26SMatthew Wilcox rcu_read_lock(); 22093eb07f7SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) 22193eb07f7SMatthew Wilcox seen++; 222fffc9a26SMatthew Wilcox rcu_read_unlock(); 22393eb07f7SMatthew Wilcox XA_BUG_ON(xa, seen != 2); 22493eb07f7SMatthew Wilcox 22593eb07f7SMatthew Wilcox /* One of which is marked */ 22693eb07f7SMatthew Wilcox xas_set(&xas, 0); 22793eb07f7SMatthew Wilcox seen = 0; 228fffc9a26SMatthew Wilcox rcu_read_lock(); 22993eb07f7SMatthew Wilcox xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) 23093eb07f7SMatthew Wilcox seen++; 231fffc9a26SMatthew Wilcox rcu_read_unlock(); 23293eb07f7SMatthew Wilcox XA_BUG_ON(xa, seen != 1); 23358d6ea30SMatthew Wilcox } 23458d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); 23558d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1)); 23658d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2)); 23758d6ea30SMatthew Wilcox xa_erase_index(xa, index); 23858d6ea30SMatthew Wilcox xa_erase_index(xa, next); 23958d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 24058d6ea30SMatthew Wilcox } 24158d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 2429b89a035SMatthew Wilcox } 2439b89a035SMatthew Wilcox 244adb9d9c4SMatthew Wilcox static noinline void check_xa_mark_2(struct xarray *xa) 245adb9d9c4SMatthew Wilcox { 246adb9d9c4SMatthew Wilcox XA_STATE(xas, xa, 0); 247adb9d9c4SMatthew Wilcox unsigned long index; 248adb9d9c4SMatthew Wilcox unsigned int count = 0; 249adb9d9c4SMatthew Wilcox void *entry; 250adb9d9c4SMatthew Wilcox 251adb9d9c4SMatthew Wilcox xa_store_index(xa, 0, GFP_KERNEL); 252adb9d9c4SMatthew Wilcox xa_set_mark(xa, 0, XA_MARK_0); 253adb9d9c4SMatthew Wilcox xas_lock(&xas); 254adb9d9c4SMatthew Wilcox xas_load(&xas); 255adb9d9c4SMatthew Wilcox xas_init_marks(&xas); 256adb9d9c4SMatthew Wilcox xas_unlock(&xas); 257adb9d9c4SMatthew Wilcox XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0); 258adb9d9c4SMatthew Wilcox 259adb9d9c4SMatthew Wilcox for (index = 3500; index < 4500; index++) { 260adb9d9c4SMatthew Wilcox xa_store_index(xa, index, GFP_KERNEL); 261adb9d9c4SMatthew Wilcox xa_set_mark(xa, index, XA_MARK_0); 262adb9d9c4SMatthew Wilcox } 263adb9d9c4SMatthew Wilcox 264adb9d9c4SMatthew Wilcox xas_reset(&xas); 265adb9d9c4SMatthew Wilcox rcu_read_lock(); 266adb9d9c4SMatthew Wilcox xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) 267adb9d9c4SMatthew Wilcox count++; 268adb9d9c4SMatthew Wilcox rcu_read_unlock(); 269adb9d9c4SMatthew Wilcox XA_BUG_ON(xa, count != 1000); 270adb9d9c4SMatthew Wilcox 271adb9d9c4SMatthew Wilcox xas_lock(&xas); 272adb9d9c4SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 273adb9d9c4SMatthew Wilcox xas_init_marks(&xas); 274adb9d9c4SMatthew Wilcox XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0)); 275adb9d9c4SMatthew Wilcox XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0)); 276adb9d9c4SMatthew Wilcox } 277adb9d9c4SMatthew Wilcox xas_unlock(&xas); 278adb9d9c4SMatthew Wilcox 279adb9d9c4SMatthew Wilcox xa_destroy(xa); 280adb9d9c4SMatthew Wilcox } 281adb9d9c4SMatthew Wilcox 2829b89a035SMatthew Wilcox static noinline void check_xa_mark(struct xarray *xa) 2839b89a035SMatthew Wilcox { 2849b89a035SMatthew Wilcox unsigned long index; 2859b89a035SMatthew Wilcox 2869b89a035SMatthew Wilcox for (index = 0; index < 16384; index += 4) 2879b89a035SMatthew Wilcox check_xa_mark_1(xa, index); 288adb9d9c4SMatthew Wilcox 289adb9d9c4SMatthew Wilcox check_xa_mark_2(xa); 2909b89a035SMatthew Wilcox } 2919b89a035SMatthew Wilcox 29258d6ea30SMatthew Wilcox static noinline void check_xa_shrink(struct xarray *xa) 29358d6ea30SMatthew Wilcox { 29458d6ea30SMatthew Wilcox XA_STATE(xas, xa, 1); 29558d6ea30SMatthew Wilcox struct xa_node *node; 29693eb07f7SMatthew Wilcox unsigned int order; 29793eb07f7SMatthew Wilcox unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1; 29858d6ea30SMatthew Wilcox 29958d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 30058d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL); 30158d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL); 30258d6ea30SMatthew Wilcox 30358d6ea30SMatthew Wilcox /* 30458d6ea30SMatthew Wilcox * Check that erasing the entry at 1 shrinks the tree and properly 30558d6ea30SMatthew Wilcox * marks the node as being deleted. 30658d6ea30SMatthew Wilcox */ 30758d6ea30SMatthew Wilcox xas_lock(&xas); 30858d6ea30SMatthew Wilcox XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1)); 30958d6ea30SMatthew Wilcox node = xas.xa_node; 31058d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0)); 31158d6ea30SMatthew Wilcox XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); 31258d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 1) != NULL); 31358d6ea30SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS); 31458d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY); 31558d6ea30SMatthew Wilcox XA_BUG_ON(xa, xas_load(&xas) != NULL); 31658d6ea30SMatthew Wilcox xas_unlock(&xas); 31758d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); 31858d6ea30SMatthew Wilcox xa_erase_index(xa, 0); 31958d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 32093eb07f7SMatthew Wilcox 32193eb07f7SMatthew Wilcox for (order = 0; order < max_order; order++) { 32293eb07f7SMatthew Wilcox unsigned long max = (1UL << order) - 1; 32393eb07f7SMatthew Wilcox xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL); 32493eb07f7SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0)); 32593eb07f7SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); 32693eb07f7SMatthew Wilcox rcu_read_lock(); 32793eb07f7SMatthew Wilcox node = xa_head(xa); 32893eb07f7SMatthew Wilcox rcu_read_unlock(); 32993eb07f7SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) != 33093eb07f7SMatthew Wilcox NULL); 33193eb07f7SMatthew Wilcox rcu_read_lock(); 33293eb07f7SMatthew Wilcox XA_BUG_ON(xa, xa_head(xa) == node); 33393eb07f7SMatthew Wilcox rcu_read_unlock(); 33493eb07f7SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); 33593eb07f7SMatthew Wilcox xa_erase_index(xa, ULONG_MAX); 33693eb07f7SMatthew Wilcox XA_BUG_ON(xa, xa->xa_head != node); 33793eb07f7SMatthew Wilcox xa_erase_index(xa, 0); 33893eb07f7SMatthew Wilcox } 33958d6ea30SMatthew Wilcox } 34058d6ea30SMatthew Wilcox 34141aec91fSMatthew Wilcox static noinline void check_cmpxchg(struct xarray *xa) 34241aec91fSMatthew Wilcox { 34341aec91fSMatthew Wilcox void *FIVE = xa_mk_value(5); 34441aec91fSMatthew Wilcox void *SIX = xa_mk_value(6); 34541aec91fSMatthew Wilcox void *LOTS = xa_mk_value(12345678); 34641aec91fSMatthew Wilcox 34741aec91fSMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 34841aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL); 349fd9dc93eSMatthew Wilcox XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EBUSY); 35041aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS); 35141aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS); 35241aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE); 35341aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL); 35441aec91fSMatthew Wilcox XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL); 35541aec91fSMatthew Wilcox xa_erase_index(xa, 12345678); 35641aec91fSMatthew Wilcox xa_erase_index(xa, 5); 35741aec91fSMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 35841aec91fSMatthew Wilcox } 35941aec91fSMatthew Wilcox 3609f14d4f1SMatthew Wilcox static noinline void check_reserve(struct xarray *xa) 3619f14d4f1SMatthew Wilcox { 3629f14d4f1SMatthew Wilcox void *entry; 3634a31896cSMatthew Wilcox unsigned long index; 3649f14d4f1SMatthew Wilcox 3659f14d4f1SMatthew Wilcox /* An array with a reserved entry is not empty */ 3669f14d4f1SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 3679f14d4f1SMatthew Wilcox xa_reserve(xa, 12345678, GFP_KERNEL); 3689f14d4f1SMatthew Wilcox XA_BUG_ON(xa, xa_empty(xa)); 3699f14d4f1SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 12345678)); 3709f14d4f1SMatthew Wilcox xa_release(xa, 12345678); 3719f14d4f1SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 3729f14d4f1SMatthew Wilcox 3739f14d4f1SMatthew Wilcox /* Releasing a used entry does nothing */ 3749f14d4f1SMatthew Wilcox xa_reserve(xa, 12345678, GFP_KERNEL); 3759f14d4f1SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL); 3769f14d4f1SMatthew Wilcox xa_release(xa, 12345678); 3779f14d4f1SMatthew Wilcox xa_erase_index(xa, 12345678); 3789f14d4f1SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 3799f14d4f1SMatthew Wilcox 3809f14d4f1SMatthew Wilcox /* cmpxchg sees a reserved entry as NULL */ 3819f14d4f1SMatthew Wilcox xa_reserve(xa, 12345678, GFP_KERNEL); 3829f14d4f1SMatthew Wilcox XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678), 3839f14d4f1SMatthew Wilcox GFP_NOWAIT) != NULL); 3849f14d4f1SMatthew Wilcox xa_release(xa, 12345678); 3859f14d4f1SMatthew Wilcox xa_erase_index(xa, 12345678); 3869f14d4f1SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 3879f14d4f1SMatthew Wilcox 388b0606fedSMatthew Wilcox /* But xa_insert does not */ 3894c0608f4SMatthew Wilcox xa_reserve(xa, 12345678, GFP_KERNEL); 390b0606fedSMatthew Wilcox XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 391fd9dc93eSMatthew Wilcox -EBUSY); 392b0606fedSMatthew Wilcox XA_BUG_ON(xa, xa_empty(xa)); 393b0606fedSMatthew Wilcox XA_BUG_ON(xa, xa_erase(xa, 12345678) != NULL); 3944c0608f4SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 3954c0608f4SMatthew Wilcox 3969f14d4f1SMatthew Wilcox /* Can iterate through a reserved entry */ 3979f14d4f1SMatthew Wilcox xa_store_index(xa, 5, GFP_KERNEL); 3989f14d4f1SMatthew Wilcox xa_reserve(xa, 6, GFP_KERNEL); 3999f14d4f1SMatthew Wilcox xa_store_index(xa, 7, GFP_KERNEL); 4009f14d4f1SMatthew Wilcox 4014a31896cSMatthew Wilcox xa_for_each(xa, index, entry) { 4029f14d4f1SMatthew Wilcox XA_BUG_ON(xa, index != 5 && index != 7); 4039f14d4f1SMatthew Wilcox } 4049f14d4f1SMatthew Wilcox xa_destroy(xa); 4059f14d4f1SMatthew Wilcox } 4069f14d4f1SMatthew Wilcox 407b803b428SMatthew Wilcox static noinline void check_xas_erase(struct xarray *xa) 408b803b428SMatthew Wilcox { 409b803b428SMatthew Wilcox XA_STATE(xas, xa, 0); 410b803b428SMatthew Wilcox void *entry; 411b803b428SMatthew Wilcox unsigned long i, j; 412b803b428SMatthew Wilcox 413b803b428SMatthew Wilcox for (i = 0; i < 200; i++) { 414b803b428SMatthew Wilcox for (j = i; j < 2 * i + 17; j++) { 415b803b428SMatthew Wilcox xas_set(&xas, j); 416b803b428SMatthew Wilcox do { 417b803b428SMatthew Wilcox xas_lock(&xas); 418b7677a13SMatthew Wilcox xas_store(&xas, xa_mk_index(j)); 419b803b428SMatthew Wilcox xas_unlock(&xas); 420b803b428SMatthew Wilcox } while (xas_nomem(&xas, GFP_KERNEL)); 421b803b428SMatthew Wilcox } 422b803b428SMatthew Wilcox 423b803b428SMatthew Wilcox xas_set(&xas, ULONG_MAX); 424b803b428SMatthew Wilcox do { 425b803b428SMatthew Wilcox xas_lock(&xas); 426b803b428SMatthew Wilcox xas_store(&xas, xa_mk_value(0)); 427b803b428SMatthew Wilcox xas_unlock(&xas); 428b803b428SMatthew Wilcox } while (xas_nomem(&xas, GFP_KERNEL)); 429b803b428SMatthew Wilcox 430b803b428SMatthew Wilcox xas_lock(&xas); 431b803b428SMatthew Wilcox xas_store(&xas, NULL); 432b803b428SMatthew Wilcox 433b803b428SMatthew Wilcox xas_set(&xas, 0); 434b803b428SMatthew Wilcox j = i; 435b803b428SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 436b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(j)); 437b803b428SMatthew Wilcox xas_store(&xas, NULL); 438b803b428SMatthew Wilcox j++; 439b803b428SMatthew Wilcox } 440b803b428SMatthew Wilcox xas_unlock(&xas); 441b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 442b803b428SMatthew Wilcox } 443b803b428SMatthew Wilcox } 444b803b428SMatthew Wilcox 4454f06d630SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 4464f06d630SMatthew Wilcox static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, 4474f06d630SMatthew Wilcox unsigned int order) 4484f06d630SMatthew Wilcox { 4494f06d630SMatthew Wilcox XA_STATE(xas, xa, index); 4504f06d630SMatthew Wilcox unsigned long min = index & ~((1UL << order) - 1); 4514f06d630SMatthew Wilcox unsigned long max = min + (1UL << order); 4524f06d630SMatthew Wilcox 453b7677a13SMatthew Wilcox xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); 454b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index)); 455b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index)); 4564f06d630SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max) != NULL); 4574f06d630SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 4584f06d630SMatthew Wilcox 459fffc9a26SMatthew Wilcox xas_lock(&xas); 460b7677a13SMatthew Wilcox XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index)); 461fffc9a26SMatthew Wilcox xas_unlock(&xas); 462b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min)); 463b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min)); 4644f06d630SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, max) != NULL); 4654f06d630SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 4664f06d630SMatthew Wilcox 4674f06d630SMatthew Wilcox xa_erase_index(xa, min); 4684f06d630SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 4694f06d630SMatthew Wilcox } 4704f06d630SMatthew Wilcox 4714f06d630SMatthew Wilcox static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, 4724f06d630SMatthew Wilcox unsigned int order) 4734f06d630SMatthew Wilcox { 4744f06d630SMatthew Wilcox XA_STATE(xas, xa, index); 4754f06d630SMatthew Wilcox xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL); 4764f06d630SMatthew Wilcox 477fffc9a26SMatthew Wilcox xas_lock(&xas); 4784f06d630SMatthew Wilcox XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0)); 4794f06d630SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != index); 4804f06d630SMatthew Wilcox XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1)); 481fffc9a26SMatthew Wilcox xas_unlock(&xas); 4824f06d630SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 4834f06d630SMatthew Wilcox } 4844f145cd6SMatthew Wilcox 4854f145cd6SMatthew Wilcox static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, 4864f145cd6SMatthew Wilcox unsigned int order) 4874f145cd6SMatthew Wilcox { 4884f145cd6SMatthew Wilcox XA_STATE(xas, xa, 0); 4894f145cd6SMatthew Wilcox void *entry; 4904f145cd6SMatthew Wilcox int n = 0; 4914f145cd6SMatthew Wilcox 4924f145cd6SMatthew Wilcox xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); 4934f145cd6SMatthew Wilcox 4944f145cd6SMatthew Wilcox xas_lock(&xas); 4954f145cd6SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 4964f145cd6SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(index)); 4974f145cd6SMatthew Wilcox n++; 4984f145cd6SMatthew Wilcox } 4994f145cd6SMatthew Wilcox XA_BUG_ON(xa, n != 1); 5004f145cd6SMatthew Wilcox xas_set(&xas, index + 1); 5014f145cd6SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 5024f145cd6SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(index)); 5034f145cd6SMatthew Wilcox n++; 5044f145cd6SMatthew Wilcox } 5054f145cd6SMatthew Wilcox XA_BUG_ON(xa, n != 2); 5064f145cd6SMatthew Wilcox xas_unlock(&xas); 5074f145cd6SMatthew Wilcox 5084f145cd6SMatthew Wilcox xa_destroy(xa); 5094f145cd6SMatthew Wilcox } 5104f06d630SMatthew Wilcox #endif 5114f06d630SMatthew Wilcox 51258d6ea30SMatthew Wilcox static noinline void check_multi_store(struct xarray *xa) 51358d6ea30SMatthew Wilcox { 51458d6ea30SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 51558d6ea30SMatthew Wilcox unsigned long i, j, k; 51658d6ea30SMatthew Wilcox unsigned int max_order = (sizeof(long) == 4) ? 30 : 60; 51758d6ea30SMatthew Wilcox 51858d6ea30SMatthew Wilcox /* Loading from any position returns the same value */ 51958d6ea30SMatthew Wilcox xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL); 52058d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); 52158d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0)); 52258d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 2) != NULL); 52358d6ea30SMatthew Wilcox rcu_read_lock(); 52458d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2); 52558d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2); 52658d6ea30SMatthew Wilcox rcu_read_unlock(); 52758d6ea30SMatthew Wilcox 52858d6ea30SMatthew Wilcox /* Storing adjacent to the value does not alter the value */ 52958d6ea30SMatthew Wilcox xa_store(xa, 3, xa, GFP_KERNEL); 53058d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); 53158d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0)); 53258d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 2) != NULL); 53358d6ea30SMatthew Wilcox rcu_read_lock(); 53458d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3); 53558d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2); 53658d6ea30SMatthew Wilcox rcu_read_unlock(); 53758d6ea30SMatthew Wilcox 53858d6ea30SMatthew Wilcox /* Overwriting multiple indexes works */ 53958d6ea30SMatthew Wilcox xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL); 54058d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1)); 54158d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1)); 54258d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1)); 54358d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1)); 54458d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, 4) != NULL); 54558d6ea30SMatthew Wilcox rcu_read_lock(); 54658d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4); 54758d6ea30SMatthew Wilcox XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4); 54858d6ea30SMatthew Wilcox rcu_read_unlock(); 54958d6ea30SMatthew Wilcox 55058d6ea30SMatthew Wilcox /* We can erase multiple values with a single store */ 5515404a7f1SMatthew Wilcox xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL); 55258d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 55358d6ea30SMatthew Wilcox 55458d6ea30SMatthew Wilcox /* Even when the first slot is empty but the others aren't */ 55558d6ea30SMatthew Wilcox xa_store_index(xa, 1, GFP_KERNEL); 55658d6ea30SMatthew Wilcox xa_store_index(xa, 2, GFP_KERNEL); 55758d6ea30SMatthew Wilcox xa_store_order(xa, 0, 2, NULL, GFP_KERNEL); 55858d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 55958d6ea30SMatthew Wilcox 56058d6ea30SMatthew Wilcox for (i = 0; i < max_order; i++) { 56158d6ea30SMatthew Wilcox for (j = 0; j < max_order; j++) { 562b7677a13SMatthew Wilcox xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL); 563b7677a13SMatthew Wilcox xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL); 56458d6ea30SMatthew Wilcox 56558d6ea30SMatthew Wilcox for (k = 0; k < max_order; k++) { 56658d6ea30SMatthew Wilcox void *entry = xa_load(xa, (1UL << k) - 1); 56758d6ea30SMatthew Wilcox if ((i < k) && (j < k)) 56858d6ea30SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 56958d6ea30SMatthew Wilcox else 570b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(j)); 57158d6ea30SMatthew Wilcox } 57258d6ea30SMatthew Wilcox 57358d6ea30SMatthew Wilcox xa_erase(xa, 0); 57458d6ea30SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 57558d6ea30SMatthew Wilcox } 57658d6ea30SMatthew Wilcox } 5774f06d630SMatthew Wilcox 5784f06d630SMatthew Wilcox for (i = 0; i < 20; i++) { 5794f06d630SMatthew Wilcox check_multi_store_1(xa, 200, i); 5804f06d630SMatthew Wilcox check_multi_store_1(xa, 0, i); 5814f06d630SMatthew Wilcox check_multi_store_1(xa, (1UL << i) + 1, i); 5824f06d630SMatthew Wilcox } 5834f06d630SMatthew Wilcox check_multi_store_2(xa, 4095, 9); 5844f145cd6SMatthew Wilcox 5854f145cd6SMatthew Wilcox for (i = 1; i < 20; i++) { 5864f145cd6SMatthew Wilcox check_multi_store_3(xa, 0, i); 5874f145cd6SMatthew Wilcox check_multi_store_3(xa, 1UL << i, i); 5884f145cd6SMatthew Wilcox } 58958d6ea30SMatthew Wilcox #endif 59058d6ea30SMatthew Wilcox } 59158d6ea30SMatthew Wilcox 5923ccaf57aSMatthew Wilcox static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) 593371c752dSMatthew Wilcox { 594371c752dSMatthew Wilcox int i; 595371c752dSMatthew Wilcox u32 id; 596371c752dSMatthew Wilcox 5973ccaf57aSMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 5983ccaf57aSMatthew Wilcox /* An empty array should assign %base to the first alloc */ 5993ccaf57aSMatthew Wilcox xa_alloc_index(xa, base, GFP_KERNEL); 600371c752dSMatthew Wilcox 601371c752dSMatthew Wilcox /* Erasing it should make the array empty again */ 6023ccaf57aSMatthew Wilcox xa_erase_index(xa, base); 6033ccaf57aSMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 604371c752dSMatthew Wilcox 6053ccaf57aSMatthew Wilcox /* And it should assign %base again */ 6063ccaf57aSMatthew Wilcox xa_alloc_index(xa, base, GFP_KERNEL); 607371c752dSMatthew Wilcox 6083ccaf57aSMatthew Wilcox /* Allocating and then erasing a lot should not lose base */ 6093ccaf57aSMatthew Wilcox for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++) 6103ccaf57aSMatthew Wilcox xa_alloc_index(xa, i, GFP_KERNEL); 6113ccaf57aSMatthew Wilcox for (i = base; i < 2 * XA_CHUNK_SIZE; i++) 6123ccaf57aSMatthew Wilcox xa_erase_index(xa, i); 6133ccaf57aSMatthew Wilcox xa_alloc_index(xa, base, GFP_KERNEL); 6143ccaf57aSMatthew Wilcox 6153ccaf57aSMatthew Wilcox /* Destroying the array should do the same as erasing */ 6163ccaf57aSMatthew Wilcox xa_destroy(xa); 6173ccaf57aSMatthew Wilcox 6183ccaf57aSMatthew Wilcox /* And it should assign %base again */ 6193ccaf57aSMatthew Wilcox xa_alloc_index(xa, base, GFP_KERNEL); 6203ccaf57aSMatthew Wilcox 6213ccaf57aSMatthew Wilcox /* The next assigned ID should be base+1 */ 6223ccaf57aSMatthew Wilcox xa_alloc_index(xa, base + 1, GFP_KERNEL); 6233ccaf57aSMatthew Wilcox xa_erase_index(xa, base + 1); 624371c752dSMatthew Wilcox 625371c752dSMatthew Wilcox /* Storing a value should mark it used */ 6263ccaf57aSMatthew Wilcox xa_store_index(xa, base + 1, GFP_KERNEL); 6273ccaf57aSMatthew Wilcox xa_alloc_index(xa, base + 2, GFP_KERNEL); 628371c752dSMatthew Wilcox 6293ccaf57aSMatthew Wilcox /* If we then erase base, it should be free */ 6303ccaf57aSMatthew Wilcox xa_erase_index(xa, base); 6313ccaf57aSMatthew Wilcox xa_alloc_index(xa, base, GFP_KERNEL); 632371c752dSMatthew Wilcox 6333ccaf57aSMatthew Wilcox xa_erase_index(xa, base + 1); 6343ccaf57aSMatthew Wilcox xa_erase_index(xa, base + 2); 635371c752dSMatthew Wilcox 636371c752dSMatthew Wilcox for (i = 1; i < 5000; i++) { 6373ccaf57aSMatthew Wilcox xa_alloc_index(xa, base + i, GFP_KERNEL); 638371c752dSMatthew Wilcox } 639371c752dSMatthew Wilcox 6403ccaf57aSMatthew Wilcox xa_destroy(xa); 641371c752dSMatthew Wilcox 6423ccaf57aSMatthew Wilcox /* Check that we fail properly at the limit of allocation */ 643a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1), 644a3e4d3f9SMatthew Wilcox XA_LIMIT(UINT_MAX - 1, UINT_MAX), 645371c752dSMatthew Wilcox GFP_KERNEL) != 0); 6463ccaf57aSMatthew Wilcox XA_BUG_ON(xa, id != 0xfffffffeU); 647a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX), 648a3e4d3f9SMatthew Wilcox XA_LIMIT(UINT_MAX - 1, UINT_MAX), 649371c752dSMatthew Wilcox GFP_KERNEL) != 0); 6503ccaf57aSMatthew Wilcox XA_BUG_ON(xa, id != 0xffffffffU); 651a3e4d3f9SMatthew Wilcox id = 3; 652a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0), 653a3e4d3f9SMatthew Wilcox XA_LIMIT(UINT_MAX - 1, UINT_MAX), 654a3e4d3f9SMatthew Wilcox GFP_KERNEL) != -EBUSY); 655a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, id != 3); 6563ccaf57aSMatthew Wilcox xa_destroy(xa); 65748483614SMatthew Wilcox 658a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), 659a3e4d3f9SMatthew Wilcox GFP_KERNEL) != -EBUSY); 6603ccaf57aSMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0); 661a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), 662a3e4d3f9SMatthew Wilcox GFP_KERNEL) != -EBUSY); 6633ccaf57aSMatthew Wilcox xa_erase_index(xa, 3); 6643ccaf57aSMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 6653ccaf57aSMatthew Wilcox } 6663ccaf57aSMatthew Wilcox 667a3e4d3f9SMatthew Wilcox static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) 668a3e4d3f9SMatthew Wilcox { 669a3e4d3f9SMatthew Wilcox unsigned int i, id; 670a3e4d3f9SMatthew Wilcox unsigned long index; 671a3e4d3f9SMatthew Wilcox void *entry; 672a3e4d3f9SMatthew Wilcox 673a3e4d3f9SMatthew Wilcox /* Allocate and free a NULL and check xa_empty() behaves */ 674a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 675a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); 676a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, id != base); 677a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_empty(xa)); 678a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_erase(xa, id) != NULL); 679a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 680a3e4d3f9SMatthew Wilcox 681a3e4d3f9SMatthew Wilcox /* Ditto, but check destroy instead of erase */ 682a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 683a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); 684a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, id != base); 685a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_empty(xa)); 686a3e4d3f9SMatthew Wilcox xa_destroy(xa); 687a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 688a3e4d3f9SMatthew Wilcox 689a3e4d3f9SMatthew Wilcox for (i = base; i < base + 10; i++) { 690a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, 691a3e4d3f9SMatthew Wilcox GFP_KERNEL) != 0); 692a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, id != i); 693a3e4d3f9SMatthew Wilcox } 694a3e4d3f9SMatthew Wilcox 695a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL); 696a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL); 697a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4)); 698a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_erase(xa, 5) != NULL); 699a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0); 700a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, id != 5); 701a3e4d3f9SMatthew Wilcox 702a3e4d3f9SMatthew Wilcox xa_for_each(xa, index, entry) { 703a3e4d3f9SMatthew Wilcox xa_erase_index(xa, index); 704a3e4d3f9SMatthew Wilcox } 705a3e4d3f9SMatthew Wilcox 706a3e4d3f9SMatthew Wilcox for (i = base; i < base + 9; i++) { 707a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_erase(xa, i) != NULL); 708a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_empty(xa)); 709a3e4d3f9SMatthew Wilcox } 710a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_erase(xa, 8) != NULL); 711a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_empty(xa)); 712a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL); 713a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 714a3e4d3f9SMatthew Wilcox 715a3e4d3f9SMatthew Wilcox xa_destroy(xa); 716a3e4d3f9SMatthew Wilcox } 717a3e4d3f9SMatthew Wilcox 718*2fa044e5SMatthew Wilcox static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) 719*2fa044e5SMatthew Wilcox { 720*2fa044e5SMatthew Wilcox struct xa_limit limit = XA_LIMIT(1, 0x3fff); 721*2fa044e5SMatthew Wilcox u32 next = 0; 722*2fa044e5SMatthew Wilcox unsigned int i, id; 723*2fa044e5SMatthew Wilcox unsigned long index; 724*2fa044e5SMatthew Wilcox void *entry; 725*2fa044e5SMatthew Wilcox 726*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit, 727*2fa044e5SMatthew Wilcox &next, GFP_KERNEL) != 0); 728*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, id != 1); 729*2fa044e5SMatthew Wilcox 730*2fa044e5SMatthew Wilcox next = 0x3ffd; 731*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit, 732*2fa044e5SMatthew Wilcox &next, GFP_KERNEL) != 0); 733*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, id != 0x3ffd); 734*2fa044e5SMatthew Wilcox xa_erase_index(xa, 0x3ffd); 735*2fa044e5SMatthew Wilcox xa_erase_index(xa, 1); 736*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 737*2fa044e5SMatthew Wilcox 738*2fa044e5SMatthew Wilcox for (i = 0x3ffe; i < 0x4003; i++) { 739*2fa044e5SMatthew Wilcox if (i < 0x4000) 740*2fa044e5SMatthew Wilcox entry = xa_mk_index(i); 741*2fa044e5SMatthew Wilcox else 742*2fa044e5SMatthew Wilcox entry = xa_mk_index(i - 0x3fff); 743*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit, 744*2fa044e5SMatthew Wilcox &next, GFP_KERNEL) != (id == 1)); 745*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, xa_mk_index(id) != entry); 746*2fa044e5SMatthew Wilcox } 747*2fa044e5SMatthew Wilcox 748*2fa044e5SMatthew Wilcox /* Check wrap-around is handled correctly */ 749*2fa044e5SMatthew Wilcox if (base != 0) 750*2fa044e5SMatthew Wilcox xa_erase_index(xa, base); 751*2fa044e5SMatthew Wilcox xa_erase_index(xa, base + 1); 752*2fa044e5SMatthew Wilcox next = UINT_MAX; 753*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX), 754*2fa044e5SMatthew Wilcox xa_limit_32b, &next, GFP_KERNEL) != 0); 755*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, id != UINT_MAX); 756*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base), 757*2fa044e5SMatthew Wilcox xa_limit_32b, &next, GFP_KERNEL) != 1); 758*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, id != base); 759*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1), 760*2fa044e5SMatthew Wilcox xa_limit_32b, &next, GFP_KERNEL) != 0); 761*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, id != base + 1); 762*2fa044e5SMatthew Wilcox 763*2fa044e5SMatthew Wilcox xa_for_each(xa, index, entry) 764*2fa044e5SMatthew Wilcox xa_erase_index(xa, index); 765*2fa044e5SMatthew Wilcox 766*2fa044e5SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 767*2fa044e5SMatthew Wilcox } 768*2fa044e5SMatthew Wilcox 7693ccaf57aSMatthew Wilcox static DEFINE_XARRAY_ALLOC(xa0); 7703ccaf57aSMatthew Wilcox static DEFINE_XARRAY_ALLOC1(xa1); 7713ccaf57aSMatthew Wilcox 7723ccaf57aSMatthew Wilcox static noinline void check_xa_alloc(void) 7733ccaf57aSMatthew Wilcox { 7743ccaf57aSMatthew Wilcox check_xa_alloc_1(&xa0, 0); 7753ccaf57aSMatthew Wilcox check_xa_alloc_1(&xa1, 1); 776a3e4d3f9SMatthew Wilcox check_xa_alloc_2(&xa0, 0); 777a3e4d3f9SMatthew Wilcox check_xa_alloc_2(&xa1, 1); 778*2fa044e5SMatthew Wilcox check_xa_alloc_3(&xa0, 0); 779*2fa044e5SMatthew Wilcox check_xa_alloc_3(&xa1, 1); 780371c752dSMatthew Wilcox } 781371c752dSMatthew Wilcox 7824e99d4e9SMatthew Wilcox static noinline void __check_store_iter(struct xarray *xa, unsigned long start, 7834e99d4e9SMatthew Wilcox unsigned int order, unsigned int present) 7844e99d4e9SMatthew Wilcox { 7854e99d4e9SMatthew Wilcox XA_STATE_ORDER(xas, xa, start, order); 7864e99d4e9SMatthew Wilcox void *entry; 7874e99d4e9SMatthew Wilcox unsigned int count = 0; 7884e99d4e9SMatthew Wilcox 7894e99d4e9SMatthew Wilcox retry: 7904e99d4e9SMatthew Wilcox xas_lock(&xas); 7914e99d4e9SMatthew Wilcox xas_for_each_conflict(&xas, entry) { 7924e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_is_value(entry)); 793b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry < xa_mk_index(start)); 794b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1)); 7954e99d4e9SMatthew Wilcox count++; 7964e99d4e9SMatthew Wilcox } 797b7677a13SMatthew Wilcox xas_store(&xas, xa_mk_index(start)); 7984e99d4e9SMatthew Wilcox xas_unlock(&xas); 7994e99d4e9SMatthew Wilcox if (xas_nomem(&xas, GFP_KERNEL)) { 8004e99d4e9SMatthew Wilcox count = 0; 8014e99d4e9SMatthew Wilcox goto retry; 8024e99d4e9SMatthew Wilcox } 8034e99d4e9SMatthew Wilcox XA_BUG_ON(xa, xas_error(&xas)); 8044e99d4e9SMatthew Wilcox XA_BUG_ON(xa, count != present); 805b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); 8064e99d4e9SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != 807b7677a13SMatthew Wilcox xa_mk_index(start)); 8084e99d4e9SMatthew Wilcox xa_erase_index(xa, start); 8094e99d4e9SMatthew Wilcox } 8104e99d4e9SMatthew Wilcox 8114e99d4e9SMatthew Wilcox static noinline void check_store_iter(struct xarray *xa) 8124e99d4e9SMatthew Wilcox { 8134e99d4e9SMatthew Wilcox unsigned int i, j; 8144e99d4e9SMatthew Wilcox unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; 8154e99d4e9SMatthew Wilcox 8164e99d4e9SMatthew Wilcox for (i = 0; i < max_order; i++) { 8174e99d4e9SMatthew Wilcox unsigned int min = 1 << i; 8184e99d4e9SMatthew Wilcox unsigned int max = (2 << i) - 1; 8194e99d4e9SMatthew Wilcox __check_store_iter(xa, 0, i, 0); 8204e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 8214e99d4e9SMatthew Wilcox __check_store_iter(xa, min, i, 0); 8224e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 8234e99d4e9SMatthew Wilcox 8244e99d4e9SMatthew Wilcox xa_store_index(xa, min, GFP_KERNEL); 8254e99d4e9SMatthew Wilcox __check_store_iter(xa, min, i, 1); 8264e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 8274e99d4e9SMatthew Wilcox xa_store_index(xa, max, GFP_KERNEL); 8284e99d4e9SMatthew Wilcox __check_store_iter(xa, min, i, 1); 8294e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 8304e99d4e9SMatthew Wilcox 8314e99d4e9SMatthew Wilcox for (j = 0; j < min; j++) 8324e99d4e9SMatthew Wilcox xa_store_index(xa, j, GFP_KERNEL); 8334e99d4e9SMatthew Wilcox __check_store_iter(xa, 0, i, min); 8344e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 8354e99d4e9SMatthew Wilcox for (j = 0; j < min; j++) 8364e99d4e9SMatthew Wilcox xa_store_index(xa, min + j, GFP_KERNEL); 8374e99d4e9SMatthew Wilcox __check_store_iter(xa, min, i, min); 8384e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 8394e99d4e9SMatthew Wilcox } 8404e99d4e9SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 8414e99d4e9SMatthew Wilcox xa_store_index(xa, 63, GFP_KERNEL); 8424e99d4e9SMatthew Wilcox xa_store_index(xa, 65, GFP_KERNEL); 8434e99d4e9SMatthew Wilcox __check_store_iter(xa, 64, 2, 1); 8444e99d4e9SMatthew Wilcox xa_erase_index(xa, 63); 8454e99d4e9SMatthew Wilcox #endif 8464e99d4e9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 8474e99d4e9SMatthew Wilcox } 8484e99d4e9SMatthew Wilcox 849b803b428SMatthew Wilcox static noinline void check_multi_find(struct xarray *xa) 850b803b428SMatthew Wilcox { 851b803b428SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 852b803b428SMatthew Wilcox unsigned long index; 853b803b428SMatthew Wilcox 854b803b428SMatthew Wilcox xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL); 855b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL); 856b803b428SMatthew Wilcox 857b803b428SMatthew Wilcox index = 0; 858b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != 859b803b428SMatthew Wilcox xa_mk_value(12)); 860b803b428SMatthew Wilcox XA_BUG_ON(xa, index != 12); 861b803b428SMatthew Wilcox index = 13; 862b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) != 863b803b428SMatthew Wilcox xa_mk_value(12)); 864b803b428SMatthew Wilcox XA_BUG_ON(xa, (index < 12) || (index >= 16)); 865b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) != 866b803b428SMatthew Wilcox xa_mk_value(16)); 867b803b428SMatthew Wilcox XA_BUG_ON(xa, index != 16); 868b803b428SMatthew Wilcox 869b803b428SMatthew Wilcox xa_erase_index(xa, 12); 870b803b428SMatthew Wilcox xa_erase_index(xa, 16); 871b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 872b803b428SMatthew Wilcox #endif 873b803b428SMatthew Wilcox } 874b803b428SMatthew Wilcox 875b803b428SMatthew Wilcox static noinline void check_multi_find_2(struct xarray *xa) 876b803b428SMatthew Wilcox { 877b803b428SMatthew Wilcox unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1; 878b803b428SMatthew Wilcox unsigned int i, j; 879b803b428SMatthew Wilcox void *entry; 880b803b428SMatthew Wilcox 881b803b428SMatthew Wilcox for (i = 0; i < max_order; i++) { 882b803b428SMatthew Wilcox unsigned long index = 1UL << i; 883b803b428SMatthew Wilcox for (j = 0; j < index; j++) { 884b803b428SMatthew Wilcox XA_STATE(xas, xa, j + index); 885b803b428SMatthew Wilcox xa_store_index(xa, index - 1, GFP_KERNEL); 886b7677a13SMatthew Wilcox xa_store_order(xa, index, i, xa_mk_index(index), 887b803b428SMatthew Wilcox GFP_KERNEL); 888b803b428SMatthew Wilcox rcu_read_lock(); 889b803b428SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 890b803b428SMatthew Wilcox xa_erase_index(xa, index); 891b803b428SMatthew Wilcox } 892b803b428SMatthew Wilcox rcu_read_unlock(); 893b803b428SMatthew Wilcox xa_erase_index(xa, index - 1); 894b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 895b803b428SMatthew Wilcox } 896b803b428SMatthew Wilcox } 897b803b428SMatthew Wilcox } 898b803b428SMatthew Wilcox 8998229706eSMatthew Wilcox static noinline void check_find_1(struct xarray *xa) 900b803b428SMatthew Wilcox { 901b803b428SMatthew Wilcox unsigned long i, j, k; 902b803b428SMatthew Wilcox 903b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 904b803b428SMatthew Wilcox 905b803b428SMatthew Wilcox /* 906b803b428SMatthew Wilcox * Check xa_find with all pairs between 0 and 99 inclusive, 907b803b428SMatthew Wilcox * starting at every index between 0 and 99 908b803b428SMatthew Wilcox */ 909b803b428SMatthew Wilcox for (i = 0; i < 100; i++) { 910b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); 911b803b428SMatthew Wilcox xa_set_mark(xa, i, XA_MARK_0); 912b803b428SMatthew Wilcox for (j = 0; j < i; j++) { 913b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) != 914b803b428SMatthew Wilcox NULL); 915b803b428SMatthew Wilcox xa_set_mark(xa, j, XA_MARK_0); 916b803b428SMatthew Wilcox for (k = 0; k < 100; k++) { 917b803b428SMatthew Wilcox unsigned long index = k; 918b803b428SMatthew Wilcox void *entry = xa_find(xa, &index, ULONG_MAX, 919b803b428SMatthew Wilcox XA_PRESENT); 920b803b428SMatthew Wilcox if (k <= j) 921b803b428SMatthew Wilcox XA_BUG_ON(xa, index != j); 922b803b428SMatthew Wilcox else if (k <= i) 923b803b428SMatthew Wilcox XA_BUG_ON(xa, index != i); 924b803b428SMatthew Wilcox else 925b803b428SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 926b803b428SMatthew Wilcox 927b803b428SMatthew Wilcox index = k; 928b803b428SMatthew Wilcox entry = xa_find(xa, &index, ULONG_MAX, 929b803b428SMatthew Wilcox XA_MARK_0); 930b803b428SMatthew Wilcox if (k <= j) 931b803b428SMatthew Wilcox XA_BUG_ON(xa, index != j); 932b803b428SMatthew Wilcox else if (k <= i) 933b803b428SMatthew Wilcox XA_BUG_ON(xa, index != i); 934b803b428SMatthew Wilcox else 935b803b428SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 936b803b428SMatthew Wilcox } 937b803b428SMatthew Wilcox xa_erase_index(xa, j); 938b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0)); 939b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); 940b803b428SMatthew Wilcox } 941b803b428SMatthew Wilcox xa_erase_index(xa, i); 942b803b428SMatthew Wilcox XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0)); 943b803b428SMatthew Wilcox } 944b803b428SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 9458229706eSMatthew Wilcox } 9468229706eSMatthew Wilcox 9478229706eSMatthew Wilcox static noinline void check_find_2(struct xarray *xa) 9488229706eSMatthew Wilcox { 9498229706eSMatthew Wilcox void *entry; 9504a31896cSMatthew Wilcox unsigned long i, j, index; 9518229706eSMatthew Wilcox 9524a31896cSMatthew Wilcox xa_for_each(xa, index, entry) { 9538229706eSMatthew Wilcox XA_BUG_ON(xa, true); 9548229706eSMatthew Wilcox } 9558229706eSMatthew Wilcox 9568229706eSMatthew Wilcox for (i = 0; i < 1024; i++) { 9578229706eSMatthew Wilcox xa_store_index(xa, index, GFP_KERNEL); 9588229706eSMatthew Wilcox j = 0; 9594a31896cSMatthew Wilcox xa_for_each(xa, index, entry) { 960b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_mk_index(index) != entry); 9618229706eSMatthew Wilcox XA_BUG_ON(xa, index != j++); 9628229706eSMatthew Wilcox } 9638229706eSMatthew Wilcox } 9648229706eSMatthew Wilcox 9658229706eSMatthew Wilcox xa_destroy(xa); 9668229706eSMatthew Wilcox } 9678229706eSMatthew Wilcox 96848483614SMatthew Wilcox static noinline void check_find_3(struct xarray *xa) 96948483614SMatthew Wilcox { 97048483614SMatthew Wilcox XA_STATE(xas, xa, 0); 97148483614SMatthew Wilcox unsigned long i, j, k; 97248483614SMatthew Wilcox void *entry; 97348483614SMatthew Wilcox 97448483614SMatthew Wilcox for (i = 0; i < 100; i++) { 97548483614SMatthew Wilcox for (j = 0; j < 100; j++) { 976490fd30fSMatthew Wilcox rcu_read_lock(); 97748483614SMatthew Wilcox for (k = 0; k < 100; k++) { 97848483614SMatthew Wilcox xas_set(&xas, j); 97948483614SMatthew Wilcox xas_for_each_marked(&xas, entry, k, XA_MARK_0) 98048483614SMatthew Wilcox ; 98148483614SMatthew Wilcox if (j > k) 98248483614SMatthew Wilcox XA_BUG_ON(xa, 98348483614SMatthew Wilcox xas.xa_node != XAS_RESTART); 98448483614SMatthew Wilcox } 985490fd30fSMatthew Wilcox rcu_read_unlock(); 98648483614SMatthew Wilcox } 98748483614SMatthew Wilcox xa_store_index(xa, i, GFP_KERNEL); 98848483614SMatthew Wilcox xa_set_mark(xa, i, XA_MARK_0); 98948483614SMatthew Wilcox } 99048483614SMatthew Wilcox xa_destroy(xa); 99148483614SMatthew Wilcox } 99248483614SMatthew Wilcox 9938229706eSMatthew Wilcox static noinline void check_find(struct xarray *xa) 9948229706eSMatthew Wilcox { 9958229706eSMatthew Wilcox check_find_1(xa); 9968229706eSMatthew Wilcox check_find_2(xa); 99748483614SMatthew Wilcox check_find_3(xa); 998b803b428SMatthew Wilcox check_multi_find(xa); 999b803b428SMatthew Wilcox check_multi_find_2(xa); 1000b803b428SMatthew Wilcox } 1001b803b428SMatthew Wilcox 1002e21a2955SMatthew Wilcox /* See find_swap_entry() in mm/shmem.c */ 1003e21a2955SMatthew Wilcox static noinline unsigned long xa_find_entry(struct xarray *xa, void *item) 1004e21a2955SMatthew Wilcox { 1005e21a2955SMatthew Wilcox XA_STATE(xas, xa, 0); 1006e21a2955SMatthew Wilcox unsigned int checked = 0; 1007e21a2955SMatthew Wilcox void *entry; 1008e21a2955SMatthew Wilcox 1009e21a2955SMatthew Wilcox rcu_read_lock(); 1010e21a2955SMatthew Wilcox xas_for_each(&xas, entry, ULONG_MAX) { 1011e21a2955SMatthew Wilcox if (xas_retry(&xas, entry)) 1012e21a2955SMatthew Wilcox continue; 1013e21a2955SMatthew Wilcox if (entry == item) 1014e21a2955SMatthew Wilcox break; 1015e21a2955SMatthew Wilcox checked++; 1016e21a2955SMatthew Wilcox if ((checked % 4) != 0) 1017e21a2955SMatthew Wilcox continue; 1018e21a2955SMatthew Wilcox xas_pause(&xas); 1019e21a2955SMatthew Wilcox } 1020e21a2955SMatthew Wilcox rcu_read_unlock(); 1021e21a2955SMatthew Wilcox 1022e21a2955SMatthew Wilcox return entry ? xas.xa_index : -1; 1023e21a2955SMatthew Wilcox } 1024e21a2955SMatthew Wilcox 1025e21a2955SMatthew Wilcox static noinline void check_find_entry(struct xarray *xa) 1026e21a2955SMatthew Wilcox { 1027e21a2955SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 1028e21a2955SMatthew Wilcox unsigned int order; 1029e21a2955SMatthew Wilcox unsigned long offset, index; 1030e21a2955SMatthew Wilcox 1031e21a2955SMatthew Wilcox for (order = 0; order < 20; order++) { 1032e21a2955SMatthew Wilcox for (offset = 0; offset < (1UL << (order + 3)); 1033e21a2955SMatthew Wilcox offset += (1UL << order)) { 1034e21a2955SMatthew Wilcox for (index = 0; index < (1UL << (order + 5)); 1035e21a2955SMatthew Wilcox index += (1UL << order)) { 1036e21a2955SMatthew Wilcox xa_store_order(xa, index, order, 1037b7677a13SMatthew Wilcox xa_mk_index(index), GFP_KERNEL); 1038e21a2955SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, index) != 1039b7677a13SMatthew Wilcox xa_mk_index(index)); 1040e21a2955SMatthew Wilcox XA_BUG_ON(xa, xa_find_entry(xa, 1041b7677a13SMatthew Wilcox xa_mk_index(index)) != index); 1042e21a2955SMatthew Wilcox } 1043e21a2955SMatthew Wilcox XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 1044e21a2955SMatthew Wilcox xa_destroy(xa); 1045e21a2955SMatthew Wilcox } 1046e21a2955SMatthew Wilcox } 1047e21a2955SMatthew Wilcox #endif 1048e21a2955SMatthew Wilcox 1049e21a2955SMatthew Wilcox XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 1050e21a2955SMatthew Wilcox xa_store_index(xa, ULONG_MAX, GFP_KERNEL); 1051e21a2955SMatthew Wilcox XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 1052b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1); 1053e21a2955SMatthew Wilcox xa_erase_index(xa, ULONG_MAX); 1054e21a2955SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 1055e21a2955SMatthew Wilcox } 1056e21a2955SMatthew Wilcox 105764d3e9a9SMatthew Wilcox static noinline void check_move_small(struct xarray *xa, unsigned long idx) 105864d3e9a9SMatthew Wilcox { 105964d3e9a9SMatthew Wilcox XA_STATE(xas, xa, 0); 106064d3e9a9SMatthew Wilcox unsigned long i; 106164d3e9a9SMatthew Wilcox 106264d3e9a9SMatthew Wilcox xa_store_index(xa, 0, GFP_KERNEL); 106364d3e9a9SMatthew Wilcox xa_store_index(xa, idx, GFP_KERNEL); 106464d3e9a9SMatthew Wilcox 106564d3e9a9SMatthew Wilcox rcu_read_lock(); 106664d3e9a9SMatthew Wilcox for (i = 0; i < idx * 4; i++) { 106764d3e9a9SMatthew Wilcox void *entry = xas_next(&xas); 106864d3e9a9SMatthew Wilcox if (i <= idx) 106964d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); 107064d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != i); 107164d3e9a9SMatthew Wilcox if (i == 0 || i == idx) 1072b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(i)); 107364d3e9a9SMatthew Wilcox else 107464d3e9a9SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 107564d3e9a9SMatthew Wilcox } 107664d3e9a9SMatthew Wilcox xas_next(&xas); 107764d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != i); 107864d3e9a9SMatthew Wilcox 107964d3e9a9SMatthew Wilcox do { 108064d3e9a9SMatthew Wilcox void *entry = xas_prev(&xas); 108164d3e9a9SMatthew Wilcox i--; 108264d3e9a9SMatthew Wilcox if (i <= idx) 108364d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); 108464d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != i); 108564d3e9a9SMatthew Wilcox if (i == 0 || i == idx) 1086b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(i)); 108764d3e9a9SMatthew Wilcox else 108864d3e9a9SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 108964d3e9a9SMatthew Wilcox } while (i > 0); 109064d3e9a9SMatthew Wilcox 109164d3e9a9SMatthew Wilcox xas_set(&xas, ULONG_MAX); 109264d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas_next(&xas) != NULL); 109364d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 109464d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0)); 109564d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != 0); 109664d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas_prev(&xas) != NULL); 109764d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 109864d3e9a9SMatthew Wilcox rcu_read_unlock(); 109964d3e9a9SMatthew Wilcox 110064d3e9a9SMatthew Wilcox xa_erase_index(xa, 0); 110164d3e9a9SMatthew Wilcox xa_erase_index(xa, idx); 110264d3e9a9SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 110364d3e9a9SMatthew Wilcox } 110464d3e9a9SMatthew Wilcox 110564d3e9a9SMatthew Wilcox static noinline void check_move(struct xarray *xa) 110664d3e9a9SMatthew Wilcox { 110764d3e9a9SMatthew Wilcox XA_STATE(xas, xa, (1 << 16) - 1); 110864d3e9a9SMatthew Wilcox unsigned long i; 110964d3e9a9SMatthew Wilcox 111064d3e9a9SMatthew Wilcox for (i = 0; i < (1 << 16); i++) 111164d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL); 111264d3e9a9SMatthew Wilcox 111364d3e9a9SMatthew Wilcox rcu_read_lock(); 111464d3e9a9SMatthew Wilcox do { 111564d3e9a9SMatthew Wilcox void *entry = xas_prev(&xas); 111664d3e9a9SMatthew Wilcox i--; 1117b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(i)); 111864d3e9a9SMatthew Wilcox XA_BUG_ON(xa, i != xas.xa_index); 111964d3e9a9SMatthew Wilcox } while (i != 0); 112064d3e9a9SMatthew Wilcox 112164d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas_prev(&xas) != NULL); 112264d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 112364d3e9a9SMatthew Wilcox 112464d3e9a9SMatthew Wilcox do { 112564d3e9a9SMatthew Wilcox void *entry = xas_next(&xas); 1126b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(i)); 112764d3e9a9SMatthew Wilcox XA_BUG_ON(xa, i != xas.xa_index); 112864d3e9a9SMatthew Wilcox i++; 112964d3e9a9SMatthew Wilcox } while (i < (1 << 16)); 113064d3e9a9SMatthew Wilcox rcu_read_unlock(); 113164d3e9a9SMatthew Wilcox 113264d3e9a9SMatthew Wilcox for (i = (1 << 8); i < (1 << 15); i++) 113364d3e9a9SMatthew Wilcox xa_erase_index(xa, i); 113464d3e9a9SMatthew Wilcox 113564d3e9a9SMatthew Wilcox i = xas.xa_index; 113664d3e9a9SMatthew Wilcox 113764d3e9a9SMatthew Wilcox rcu_read_lock(); 113864d3e9a9SMatthew Wilcox do { 113964d3e9a9SMatthew Wilcox void *entry = xas_prev(&xas); 114064d3e9a9SMatthew Wilcox i--; 114164d3e9a9SMatthew Wilcox if ((i < (1 << 8)) || (i >= (1 << 15))) 1142b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(i)); 114364d3e9a9SMatthew Wilcox else 114464d3e9a9SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 114564d3e9a9SMatthew Wilcox XA_BUG_ON(xa, i != xas.xa_index); 114664d3e9a9SMatthew Wilcox } while (i != 0); 114764d3e9a9SMatthew Wilcox 114864d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas_prev(&xas) != NULL); 114964d3e9a9SMatthew Wilcox XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); 115064d3e9a9SMatthew Wilcox 115164d3e9a9SMatthew Wilcox do { 115264d3e9a9SMatthew Wilcox void *entry = xas_next(&xas); 115364d3e9a9SMatthew Wilcox if ((i < (1 << 8)) || (i >= (1 << 15))) 1154b7677a13SMatthew Wilcox XA_BUG_ON(xa, entry != xa_mk_index(i)); 115564d3e9a9SMatthew Wilcox else 115664d3e9a9SMatthew Wilcox XA_BUG_ON(xa, entry != NULL); 115764d3e9a9SMatthew Wilcox XA_BUG_ON(xa, i != xas.xa_index); 115864d3e9a9SMatthew Wilcox i++; 115964d3e9a9SMatthew Wilcox } while (i < (1 << 16)); 116064d3e9a9SMatthew Wilcox rcu_read_unlock(); 116164d3e9a9SMatthew Wilcox 116264d3e9a9SMatthew Wilcox xa_destroy(xa); 116364d3e9a9SMatthew Wilcox 116464d3e9a9SMatthew Wilcox for (i = 0; i < 16; i++) 116564d3e9a9SMatthew Wilcox check_move_small(xa, 1UL << i); 116664d3e9a9SMatthew Wilcox 116764d3e9a9SMatthew Wilcox for (i = 2; i < 16; i++) 116864d3e9a9SMatthew Wilcox check_move_small(xa, (1UL << i) - 1); 116964d3e9a9SMatthew Wilcox } 117064d3e9a9SMatthew Wilcox 11712264f513SMatthew Wilcox static noinline void xa_store_many_order(struct xarray *xa, 11722264f513SMatthew Wilcox unsigned long index, unsigned order) 11732264f513SMatthew Wilcox { 11742264f513SMatthew Wilcox XA_STATE_ORDER(xas, xa, index, order); 11752264f513SMatthew Wilcox unsigned int i = 0; 11762264f513SMatthew Wilcox 11772264f513SMatthew Wilcox do { 11782264f513SMatthew Wilcox xas_lock(&xas); 11792264f513SMatthew Wilcox XA_BUG_ON(xa, xas_find_conflict(&xas)); 11802264f513SMatthew Wilcox xas_create_range(&xas); 11812264f513SMatthew Wilcox if (xas_error(&xas)) 11822264f513SMatthew Wilcox goto unlock; 11832264f513SMatthew Wilcox for (i = 0; i < (1U << order); i++) { 1184b7677a13SMatthew Wilcox XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i))); 11852264f513SMatthew Wilcox xas_next(&xas); 11862264f513SMatthew Wilcox } 11872264f513SMatthew Wilcox unlock: 11882264f513SMatthew Wilcox xas_unlock(&xas); 11892264f513SMatthew Wilcox } while (xas_nomem(&xas, GFP_KERNEL)); 11902264f513SMatthew Wilcox 11912264f513SMatthew Wilcox XA_BUG_ON(xa, xas_error(&xas)); 11922264f513SMatthew Wilcox } 11932264f513SMatthew Wilcox 11942264f513SMatthew Wilcox static noinline void check_create_range_1(struct xarray *xa, 11952264f513SMatthew Wilcox unsigned long index, unsigned order) 11962264f513SMatthew Wilcox { 11972264f513SMatthew Wilcox unsigned long i; 11982264f513SMatthew Wilcox 11992264f513SMatthew Wilcox xa_store_many_order(xa, index, order); 12002264f513SMatthew Wilcox for (i = index; i < index + (1UL << order); i++) 12012264f513SMatthew Wilcox xa_erase_index(xa, i); 12022264f513SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 12032264f513SMatthew Wilcox } 12042264f513SMatthew Wilcox 12052264f513SMatthew Wilcox static noinline void check_create_range_2(struct xarray *xa, unsigned order) 12062264f513SMatthew Wilcox { 12072264f513SMatthew Wilcox unsigned long i; 12082264f513SMatthew Wilcox unsigned long nr = 1UL << order; 12092264f513SMatthew Wilcox 12102264f513SMatthew Wilcox for (i = 0; i < nr * nr; i += nr) 12112264f513SMatthew Wilcox xa_store_many_order(xa, i, order); 12122264f513SMatthew Wilcox for (i = 0; i < nr * nr; i++) 12132264f513SMatthew Wilcox xa_erase_index(xa, i); 12142264f513SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 12152264f513SMatthew Wilcox } 12162264f513SMatthew Wilcox 12172264f513SMatthew Wilcox static noinline void check_create_range_3(void) 12182264f513SMatthew Wilcox { 12192264f513SMatthew Wilcox XA_STATE(xas, NULL, 0); 12202264f513SMatthew Wilcox xas_set_err(&xas, -EEXIST); 12212264f513SMatthew Wilcox xas_create_range(&xas); 12222264f513SMatthew Wilcox XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST); 12232264f513SMatthew Wilcox } 12242264f513SMatthew Wilcox 12252264f513SMatthew Wilcox static noinline void check_create_range_4(struct xarray *xa, 12262264f513SMatthew Wilcox unsigned long index, unsigned order) 12272264f513SMatthew Wilcox { 12282264f513SMatthew Wilcox XA_STATE_ORDER(xas, xa, index, order); 12292264f513SMatthew Wilcox unsigned long base = xas.xa_index; 12302264f513SMatthew Wilcox unsigned long i = 0; 12312264f513SMatthew Wilcox 12322264f513SMatthew Wilcox xa_store_index(xa, index, GFP_KERNEL); 12332264f513SMatthew Wilcox do { 12342264f513SMatthew Wilcox xas_lock(&xas); 12352264f513SMatthew Wilcox xas_create_range(&xas); 12362264f513SMatthew Wilcox if (xas_error(&xas)) 12372264f513SMatthew Wilcox goto unlock; 12382264f513SMatthew Wilcox for (i = 0; i < (1UL << order); i++) { 1239b7677a13SMatthew Wilcox void *old = xas_store(&xas, xa_mk_index(base + i)); 12402264f513SMatthew Wilcox if (xas.xa_index == index) 1241b7677a13SMatthew Wilcox XA_BUG_ON(xa, old != xa_mk_index(base + i)); 12422264f513SMatthew Wilcox else 12432264f513SMatthew Wilcox XA_BUG_ON(xa, old != NULL); 12442264f513SMatthew Wilcox xas_next(&xas); 12452264f513SMatthew Wilcox } 12462264f513SMatthew Wilcox unlock: 12472264f513SMatthew Wilcox xas_unlock(&xas); 12482264f513SMatthew Wilcox } while (xas_nomem(&xas, GFP_KERNEL)); 12492264f513SMatthew Wilcox 12502264f513SMatthew Wilcox XA_BUG_ON(xa, xas_error(&xas)); 12512264f513SMatthew Wilcox 12522264f513SMatthew Wilcox for (i = base; i < base + (1UL << order); i++) 12532264f513SMatthew Wilcox xa_erase_index(xa, i); 12542264f513SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 12552264f513SMatthew Wilcox } 12562264f513SMatthew Wilcox 12572264f513SMatthew Wilcox static noinline void check_create_range(struct xarray *xa) 12582264f513SMatthew Wilcox { 12592264f513SMatthew Wilcox unsigned int order; 12602264f513SMatthew Wilcox unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1; 12612264f513SMatthew Wilcox 12622264f513SMatthew Wilcox for (order = 0; order < max_order; order++) { 12632264f513SMatthew Wilcox check_create_range_1(xa, 0, order); 12642264f513SMatthew Wilcox check_create_range_1(xa, 1U << order, order); 12652264f513SMatthew Wilcox check_create_range_1(xa, 2U << order, order); 12662264f513SMatthew Wilcox check_create_range_1(xa, 3U << order, order); 12672264f513SMatthew Wilcox check_create_range_1(xa, 1U << 24, order); 12682264f513SMatthew Wilcox if (order < 10) 12692264f513SMatthew Wilcox check_create_range_2(xa, order); 12702264f513SMatthew Wilcox 12712264f513SMatthew Wilcox check_create_range_4(xa, 0, order); 12722264f513SMatthew Wilcox check_create_range_4(xa, 1U << order, order); 12732264f513SMatthew Wilcox check_create_range_4(xa, 2U << order, order); 12742264f513SMatthew Wilcox check_create_range_4(xa, 3U << order, order); 12752264f513SMatthew Wilcox check_create_range_4(xa, 1U << 24, order); 12762264f513SMatthew Wilcox 12772264f513SMatthew Wilcox check_create_range_4(xa, 1, order); 12782264f513SMatthew Wilcox check_create_range_4(xa, (1U << order) + 1, order); 12792264f513SMatthew Wilcox check_create_range_4(xa, (2U << order) + 1, order); 12802264f513SMatthew Wilcox check_create_range_4(xa, (2U << order) - 1, order); 12812264f513SMatthew Wilcox check_create_range_4(xa, (3U << order) + 1, order); 12822264f513SMatthew Wilcox check_create_range_4(xa, (3U << order) - 1, order); 12832264f513SMatthew Wilcox check_create_range_4(xa, (1U << 24) + 1, order); 12842264f513SMatthew Wilcox } 12852264f513SMatthew Wilcox 12862264f513SMatthew Wilcox check_create_range_3(); 12872264f513SMatthew Wilcox } 12882264f513SMatthew Wilcox 12890e9446c3SMatthew Wilcox static noinline void __check_store_range(struct xarray *xa, unsigned long first, 12900e9446c3SMatthew Wilcox unsigned long last) 12910e9446c3SMatthew Wilcox { 12920e9446c3SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 1293b7677a13SMatthew Wilcox xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL); 12940e9446c3SMatthew Wilcox 1295b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first)); 1296b7677a13SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first)); 12970e9446c3SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); 12980e9446c3SMatthew Wilcox XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL); 12990e9446c3SMatthew Wilcox 13000e9446c3SMatthew Wilcox xa_store_range(xa, first, last, NULL, GFP_KERNEL); 13010e9446c3SMatthew Wilcox #endif 13020e9446c3SMatthew Wilcox 13030e9446c3SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 13040e9446c3SMatthew Wilcox } 13050e9446c3SMatthew Wilcox 13060e9446c3SMatthew Wilcox static noinline void check_store_range(struct xarray *xa) 13070e9446c3SMatthew Wilcox { 13080e9446c3SMatthew Wilcox unsigned long i, j; 13090e9446c3SMatthew Wilcox 13100e9446c3SMatthew Wilcox for (i = 0; i < 128; i++) { 13110e9446c3SMatthew Wilcox for (j = i; j < 128; j++) { 13120e9446c3SMatthew Wilcox __check_store_range(xa, i, j); 13130e9446c3SMatthew Wilcox __check_store_range(xa, 128 + i, 128 + j); 13140e9446c3SMatthew Wilcox __check_store_range(xa, 4095 + i, 4095 + j); 13150e9446c3SMatthew Wilcox __check_store_range(xa, 4096 + i, 4096 + j); 13160e9446c3SMatthew Wilcox __check_store_range(xa, 123456 + i, 123456 + j); 13175404a7f1SMatthew Wilcox __check_store_range(xa, (1 << 24) + i, (1 << 24) + j); 13180e9446c3SMatthew Wilcox } 13190e9446c3SMatthew Wilcox } 13200e9446c3SMatthew Wilcox } 13210e9446c3SMatthew Wilcox 132276b4e529SMatthew Wilcox static void check_align_1(struct xarray *xa, char *name) 132376b4e529SMatthew Wilcox { 132476b4e529SMatthew Wilcox int i; 132576b4e529SMatthew Wilcox unsigned int id; 132676b4e529SMatthew Wilcox unsigned long index; 132776b4e529SMatthew Wilcox void *entry; 132876b4e529SMatthew Wilcox 132976b4e529SMatthew Wilcox for (i = 0; i < 8; i++) { 1330a3e4d3f9SMatthew Wilcox XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b, 1331a3e4d3f9SMatthew Wilcox GFP_KERNEL) != 0); 133276b4e529SMatthew Wilcox XA_BUG_ON(xa, id != i); 133376b4e529SMatthew Wilcox } 133476b4e529SMatthew Wilcox xa_for_each(xa, index, entry) 133576b4e529SMatthew Wilcox XA_BUG_ON(xa, xa_is_err(entry)); 133676b4e529SMatthew Wilcox xa_destroy(xa); 133776b4e529SMatthew Wilcox } 133876b4e529SMatthew Wilcox 133976b4e529SMatthew Wilcox static noinline void check_align(struct xarray *xa) 134076b4e529SMatthew Wilcox { 134176b4e529SMatthew Wilcox char name[] = "Motorola 68000"; 134276b4e529SMatthew Wilcox 134376b4e529SMatthew Wilcox check_align_1(xa, name); 134476b4e529SMatthew Wilcox check_align_1(xa, name + 1); 134576b4e529SMatthew Wilcox check_align_1(xa, name + 2); 134676b4e529SMatthew Wilcox check_align_1(xa, name + 3); 134776b4e529SMatthew Wilcox // check_align_2(xa, name); 134876b4e529SMatthew Wilcox } 134976b4e529SMatthew Wilcox 1350a97e7904SMatthew Wilcox static LIST_HEAD(shadow_nodes); 1351a97e7904SMatthew Wilcox 1352a97e7904SMatthew Wilcox static void test_update_node(struct xa_node *node) 1353a97e7904SMatthew Wilcox { 1354a97e7904SMatthew Wilcox if (node->count && node->count == node->nr_values) { 1355a97e7904SMatthew Wilcox if (list_empty(&node->private_list)) 1356a97e7904SMatthew Wilcox list_add(&shadow_nodes, &node->private_list); 1357a97e7904SMatthew Wilcox } else { 1358a97e7904SMatthew Wilcox if (!list_empty(&node->private_list)) 1359a97e7904SMatthew Wilcox list_del_init(&node->private_list); 1360a97e7904SMatthew Wilcox } 1361a97e7904SMatthew Wilcox } 1362a97e7904SMatthew Wilcox 1363a97e7904SMatthew Wilcox static noinline void shadow_remove(struct xarray *xa) 1364a97e7904SMatthew Wilcox { 1365a97e7904SMatthew Wilcox struct xa_node *node; 1366a97e7904SMatthew Wilcox 1367a97e7904SMatthew Wilcox xa_lock(xa); 1368a97e7904SMatthew Wilcox while ((node = list_first_entry_or_null(&shadow_nodes, 1369a97e7904SMatthew Wilcox struct xa_node, private_list))) { 1370a97e7904SMatthew Wilcox XA_STATE(xas, node->array, 0); 1371a97e7904SMatthew Wilcox XA_BUG_ON(xa, node->array != xa); 1372a97e7904SMatthew Wilcox list_del_init(&node->private_list); 1373a97e7904SMatthew Wilcox xas.xa_node = xa_parent_locked(node->array, node); 1374a97e7904SMatthew Wilcox xas.xa_offset = node->offset; 1375a97e7904SMatthew Wilcox xas.xa_shift = node->shift + XA_CHUNK_SHIFT; 1376a97e7904SMatthew Wilcox xas_set_update(&xas, test_update_node); 1377a97e7904SMatthew Wilcox xas_store(&xas, NULL); 1378a97e7904SMatthew Wilcox } 1379a97e7904SMatthew Wilcox xa_unlock(xa); 1380a97e7904SMatthew Wilcox } 1381a97e7904SMatthew Wilcox 1382a97e7904SMatthew Wilcox static noinline void check_workingset(struct xarray *xa, unsigned long index) 1383a97e7904SMatthew Wilcox { 1384a97e7904SMatthew Wilcox XA_STATE(xas, xa, index); 1385a97e7904SMatthew Wilcox xas_set_update(&xas, test_update_node); 1386a97e7904SMatthew Wilcox 1387a97e7904SMatthew Wilcox do { 1388a97e7904SMatthew Wilcox xas_lock(&xas); 1389a97e7904SMatthew Wilcox xas_store(&xas, xa_mk_value(0)); 1390a97e7904SMatthew Wilcox xas_next(&xas); 1391a97e7904SMatthew Wilcox xas_store(&xas, xa_mk_value(1)); 1392a97e7904SMatthew Wilcox xas_unlock(&xas); 1393a97e7904SMatthew Wilcox } while (xas_nomem(&xas, GFP_KERNEL)); 1394a97e7904SMatthew Wilcox 1395a97e7904SMatthew Wilcox XA_BUG_ON(xa, list_empty(&shadow_nodes)); 1396a97e7904SMatthew Wilcox 1397a97e7904SMatthew Wilcox xas_lock(&xas); 1398a97e7904SMatthew Wilcox xas_next(&xas); 1399a97e7904SMatthew Wilcox xas_store(&xas, &xas); 1400a97e7904SMatthew Wilcox XA_BUG_ON(xa, !list_empty(&shadow_nodes)); 1401a97e7904SMatthew Wilcox 1402a97e7904SMatthew Wilcox xas_store(&xas, xa_mk_value(2)); 1403a97e7904SMatthew Wilcox xas_unlock(&xas); 1404a97e7904SMatthew Wilcox XA_BUG_ON(xa, list_empty(&shadow_nodes)); 1405a97e7904SMatthew Wilcox 1406a97e7904SMatthew Wilcox shadow_remove(xa); 1407a97e7904SMatthew Wilcox XA_BUG_ON(xa, !list_empty(&shadow_nodes)); 1408a97e7904SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 1409a97e7904SMatthew Wilcox } 1410a97e7904SMatthew Wilcox 1411d6427f81SMatthew Wilcox /* 1412d6427f81SMatthew Wilcox * Check that the pointer / value / sibling entries are accounted the 1413d6427f81SMatthew Wilcox * way we expect them to be. 1414d6427f81SMatthew Wilcox */ 1415d6427f81SMatthew Wilcox static noinline void check_account(struct xarray *xa) 1416d6427f81SMatthew Wilcox { 1417d6427f81SMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 1418d6427f81SMatthew Wilcox unsigned int order; 1419d6427f81SMatthew Wilcox 1420d6427f81SMatthew Wilcox for (order = 1; order < 12; order++) { 1421d6427f81SMatthew Wilcox XA_STATE(xas, xa, 1 << order); 1422d6427f81SMatthew Wilcox 1423d6427f81SMatthew Wilcox xa_store_order(xa, 0, order, xa, GFP_KERNEL); 1424fffc9a26SMatthew Wilcox rcu_read_lock(); 1425d6427f81SMatthew Wilcox xas_load(&xas); 1426d6427f81SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node->count == 0); 1427d6427f81SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node->count > (1 << order)); 1428d6427f81SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node->nr_values != 0); 1429fffc9a26SMatthew Wilcox rcu_read_unlock(); 1430d6427f81SMatthew Wilcox 1431b7677a13SMatthew Wilcox xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order), 1432d6427f81SMatthew Wilcox GFP_KERNEL); 1433d6427f81SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); 1434d6427f81SMatthew Wilcox 1435d6427f81SMatthew Wilcox xa_erase(xa, 1 << order); 1436d6427f81SMatthew Wilcox XA_BUG_ON(xa, xas.xa_node->nr_values != 0); 1437d6427f81SMatthew Wilcox 1438d6427f81SMatthew Wilcox xa_erase(xa, 0); 1439d6427f81SMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 1440d6427f81SMatthew Wilcox } 1441d6427f81SMatthew Wilcox #endif 1442d6427f81SMatthew Wilcox } 1443d6427f81SMatthew Wilcox 1444687149fcSMatthew Wilcox static noinline void check_destroy(struct xarray *xa) 1445687149fcSMatthew Wilcox { 1446687149fcSMatthew Wilcox unsigned long index; 1447687149fcSMatthew Wilcox 1448687149fcSMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 1449687149fcSMatthew Wilcox 1450687149fcSMatthew Wilcox /* Destroying an empty array is a no-op */ 1451687149fcSMatthew Wilcox xa_destroy(xa); 1452687149fcSMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 1453687149fcSMatthew Wilcox 1454687149fcSMatthew Wilcox /* Destroying an array with a single entry */ 1455687149fcSMatthew Wilcox for (index = 0; index < 1000; index++) { 1456687149fcSMatthew Wilcox xa_store_index(xa, index, GFP_KERNEL); 1457687149fcSMatthew Wilcox XA_BUG_ON(xa, xa_empty(xa)); 1458687149fcSMatthew Wilcox xa_destroy(xa); 1459687149fcSMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 1460687149fcSMatthew Wilcox } 1461687149fcSMatthew Wilcox 1462687149fcSMatthew Wilcox /* Destroying an array with a single entry at ULONG_MAX */ 1463687149fcSMatthew Wilcox xa_store(xa, ULONG_MAX, xa, GFP_KERNEL); 1464687149fcSMatthew Wilcox XA_BUG_ON(xa, xa_empty(xa)); 1465687149fcSMatthew Wilcox xa_destroy(xa); 1466687149fcSMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 1467687149fcSMatthew Wilcox 1468687149fcSMatthew Wilcox #ifdef CONFIG_XARRAY_MULTI 1469687149fcSMatthew Wilcox /* Destroying an array with a multi-index entry */ 1470687149fcSMatthew Wilcox xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL); 1471687149fcSMatthew Wilcox XA_BUG_ON(xa, xa_empty(xa)); 1472687149fcSMatthew Wilcox xa_destroy(xa); 1473687149fcSMatthew Wilcox XA_BUG_ON(xa, !xa_empty(xa)); 1474687149fcSMatthew Wilcox #endif 1475687149fcSMatthew Wilcox } 1476687149fcSMatthew Wilcox 147758d6ea30SMatthew Wilcox static DEFINE_XARRAY(array); 1478ad3d6c72SMatthew Wilcox 1479ad3d6c72SMatthew Wilcox static int xarray_checks(void) 1480ad3d6c72SMatthew Wilcox { 148158d6ea30SMatthew Wilcox check_xa_err(&array); 1482b803b428SMatthew Wilcox check_xas_retry(&array); 1483ad3d6c72SMatthew Wilcox check_xa_load(&array); 14849b89a035SMatthew Wilcox check_xa_mark(&array); 148558d6ea30SMatthew Wilcox check_xa_shrink(&array); 1486b803b428SMatthew Wilcox check_xas_erase(&array); 148741aec91fSMatthew Wilcox check_cmpxchg(&array); 14889f14d4f1SMatthew Wilcox check_reserve(&array); 148958d6ea30SMatthew Wilcox check_multi_store(&array); 1490371c752dSMatthew Wilcox check_xa_alloc(); 1491b803b428SMatthew Wilcox check_find(&array); 1492e21a2955SMatthew Wilcox check_find_entry(&array); 1493d6427f81SMatthew Wilcox check_account(&array); 1494687149fcSMatthew Wilcox check_destroy(&array); 149564d3e9a9SMatthew Wilcox check_move(&array); 14962264f513SMatthew Wilcox check_create_range(&array); 14970e9446c3SMatthew Wilcox check_store_range(&array); 14984e99d4e9SMatthew Wilcox check_store_iter(&array); 149976b4e529SMatthew Wilcox check_align(&xa0); 1500ad3d6c72SMatthew Wilcox 1501a97e7904SMatthew Wilcox check_workingset(&array, 0); 1502a97e7904SMatthew Wilcox check_workingset(&array, 64); 1503a97e7904SMatthew Wilcox check_workingset(&array, 4096); 1504a97e7904SMatthew Wilcox 1505ad3d6c72SMatthew Wilcox printk("XArray: %u of %u tests passed\n", tests_passed, tests_run); 1506ad3d6c72SMatthew Wilcox return (tests_run == tests_passed) ? 0 : -EINVAL; 1507ad3d6c72SMatthew Wilcox } 1508ad3d6c72SMatthew Wilcox 1509ad3d6c72SMatthew Wilcox static void xarray_exit(void) 1510ad3d6c72SMatthew Wilcox { 1511ad3d6c72SMatthew Wilcox } 1512ad3d6c72SMatthew Wilcox 1513ad3d6c72SMatthew Wilcox module_init(xarray_checks); 1514ad3d6c72SMatthew Wilcox module_exit(xarray_exit); 1515ad3d6c72SMatthew Wilcox MODULE_AUTHOR("Matthew Wilcox <[email protected]>"); 1516ad3d6c72SMatthew Wilcox MODULE_LICENSE("GPL"); 1517